author    Matias Elo <matias.elo@nokia.com>  2021-12-17 14:00:35 +0200
committer GitHub <noreply@github.com>  2021-12-17 14:00:35 +0200
commit    c3789c8e6ec34faee0b59080c1ab187cff1dc356 (patch)
tree      b19561c2fdf7e26201237004b0344d21b0038ef2
parent    2ed1489ccc354300037bad2e718fe63cd6fdd7ba (diff)
parent    a4f277eb71029ecf61c6b4dda7894d5e949c4852 (diff)
Merge ODP v1.33.0.0 (tag: v1.33.0.0_DPDK_19.11)
Merge ODP linux-generic v1.33.0.0 into linux-dpdk
-rw-r--r--  .checkpatch.conf | 6
-rw-r--r--  CHANGELOG | 54
-rw-r--r--  config/odp-linux-generic.conf | 10
-rw-r--r--  configure.ac | 25
-rw-r--r--  doc/users-guide/users-guide.adoc | 9
-rw-r--r--  example/debug/odp_debug.c | 2
-rw-r--r--  example/timer/odp_timer_accuracy.c | 2
-rw-r--r--  example/timer/odp_timer_simple.c | 3
-rw-r--r--  example/timer/odp_timer_test.c | 2
-rw-r--r--  helper/cli.c | 2
-rw-r--r--  helper/cuckootable.c | 5
-rw-r--r--  helper/hashtable.c | 2
-rw-r--r--  helper/iplookuptable.c | 4
-rw-r--r--  helper/lineartable.c | 2
-rw-r--r--  include/Makefile.am | 30
-rw-r--r--  include/odp/api/abi-default/dma_types.h | 48
-rw-r--r--  include/odp/api/abi-default/event.h | 1
-rw-r--r--  include/odp/api/abi-default/timer_types.h (renamed from include/odp/api/abi-default/timer.h) | 4
-rw-r--r--  include/odp/api/dma.h | 26
-rw-r--r--  include/odp/api/dma_types.h | 28
-rw-r--r--  include/odp/api/spec/classification.h | 10
-rw-r--r--  include/odp/api/spec/crypto.h | 3
-rw-r--r--  include/odp/api/spec/dma.h | 354
-rw-r--r--  include/odp/api/spec/dma_types.h | 547
-rw-r--r--  include/odp/api/spec/event.h | 2
-rw-r--r--  include/odp/api/spec/ipsec.h | 16
-rw-r--r--  include/odp/api/spec/packet_io.h | 7
-rw-r--r--  include/odp/api/spec/pool_types.h | 12
-rw-r--r--  include/odp/api/spec/random.h | 2
-rw-r--r--  include/odp/api/spec/shared_memory.h | 24
-rw-r--r--  include/odp/api/spec/std_types.h | 3
-rw-r--r--  include/odp/api/spec/timer.h | 337
-rw-r--r--  include/odp/api/spec/timer_types.h | 359
-rw-r--r--  include/odp/api/timer.h | 6
-rw-r--r--  include/odp/api/timer_types.h | 28
-rw-r--r--  include/odp/arch/arm32-linux/odp/api/abi/dma_types.h | 7
-rw-r--r--  include/odp/arch/arm32-linux/odp/api/abi/timer_types.h (renamed from include/odp/arch/mips64-linux/odp/api/abi/timer.h) | 2
-rw-r--r--  include/odp/arch/arm64-linux/odp/api/abi/dma_types.h | 7
-rw-r--r--  include/odp/arch/arm64-linux/odp/api/abi/timer_types.h (renamed from include/odp/arch/arm32-linux/odp/api/abi/timer.h) | 2
-rw-r--r--  include/odp/arch/default-linux/odp/api/abi/dma_types.h | 7
-rw-r--r--  include/odp/arch/default-linux/odp/api/abi/timer_types.h (renamed from include/odp/arch/default-linux/odp/api/abi/timer.h) | 2
-rw-r--r--  include/odp/arch/mips64-linux/odp/api/abi/dma_types.h | 7
-rw-r--r--  include/odp/arch/mips64-linux/odp/api/abi/timer_types.h (renamed from include/odp/arch/arm64-linux/odp/api/abi/timer.h) | 2
-rw-r--r--  include/odp/arch/power64-linux/odp/api/abi/dma_types.h | 7
-rw-r--r--  include/odp/arch/power64-linux/odp/api/abi/timer_types.h (renamed from include/odp/arch/power64-linux/odp/api/abi/timer.h) | 2
-rw-r--r--  include/odp/arch/x86_32-linux/odp/api/abi/dma_types.h | 7
-rw-r--r--  include/odp/arch/x86_32-linux/odp/api/abi/timer.h | 7
-rw-r--r--  include/odp/arch/x86_32-linux/odp/api/abi/timer_types.h | 7
-rw-r--r--  include/odp/arch/x86_64-linux/odp/api/abi/dma_types.h | 7
-rw-r--r--  include/odp/arch/x86_64-linux/odp/api/abi/timer.h | 7
-rw-r--r--  include/odp/arch/x86_64-linux/odp/api/abi/timer_types.h | 7
-rw-r--r--  include/odp/autoheader_internal.h.in | 3
-rw-r--r--  include/odp_api.h | 1
-rw-r--r--  platform/linux-dpdk/Makefile.am | 30
l---------  platform/linux-dpdk/arch/default/odp_random.c | 1
l---------  platform/linux-dpdk/arch/default/odp_random.h | 1
l---------  platform/linux-dpdk/arch/x86/odp_random.h | 1
l---------  platform/linux-dpdk/include-abi/odp/api/abi/dma_types.h | 1
l---------  platform/linux-dpdk/include-abi/odp/api/abi/timer.h | 1
l---------  platform/linux-dpdk/include-abi/odp/api/abi/timer_types.h | 1
-rw-r--r--  platform/linux-dpdk/include/odp_buffer_internal.h | 79
-rw-r--r--  platform/linux-dpdk/include/odp_config_internal.h | 10
-rw-r--r--  platform/linux-dpdk/include/odp_event_internal.h | 102
-rw-r--r--  platform/linux-dpdk/include/odp_event_vector_internal.h | 60
-rw-r--r--  platform/linux-dpdk/include/odp_packet_internal.h | 34
-rw-r--r--  platform/linux-dpdk/include/odp_packet_io_internal.h | 8
-rw-r--r--  platform/linux-dpdk/include/odp_pool_internal.h | 36
-rw-r--r--  platform/linux-dpdk/m4/configure.m4 | 3
l---------  platform/linux-dpdk/m4/odp_pthread.m4 | 1
l---------  platform/linux-dpdk/m4/odp_timer.m4 | 1
-rw-r--r--  platform/linux-dpdk/odp_buffer.c | 23
-rw-r--r--  platform/linux-dpdk/odp_crypto.c | 6
-rw-r--r--  platform/linux-dpdk/odp_init.c | 40
-rw-r--r--  platform/linux-dpdk/odp_packet.c | 126
-rw-r--r--  platform/linux-dpdk/odp_pool.c | 71
-rw-r--r--  platform/linux-dpdk/odp_queue_basic.c | 72
-rw-r--r--  platform/linux-dpdk/odp_queue_eventdev.c | 68
-rw-r--r--  platform/linux-dpdk/odp_queue_spsc.c | 32
-rw-r--r--  platform/linux-dpdk/odp_shared_memory.c | 21
-rw-r--r--  platform/linux-dpdk/odp_timer.c | 20
-rw-r--r--  platform/linux-generic/Makefile.am | 27
-rw-r--r--  platform/linux-generic/README | 18
-rw-r--r--  platform/linux-generic/arch/aarch64/odp_sysinfo_parse.c | 16
-rw-r--r--  platform/linux-generic/arch/default/odp_random.c | 33
-rw-r--r--  platform/linux-generic/arch/default/odp_random.h | 41
-rw-r--r--  platform/linux-generic/arch/x86/odp_random.h | 160
-rw-r--r--  platform/linux-generic/include-abi/odp/api/abi/dma_types.h | 42
-rw-r--r--  platform/linux-generic/include-abi/odp/api/abi/event.h | 3
-rw-r--r--  platform/linux-generic/include-abi/odp/api/abi/timer_types.h (renamed from platform/linux-generic/include-abi/odp/api/abi/timer.h) | 4
-rw-r--r--  platform/linux-generic/include/odp_buffer_internal.h | 72
-rw-r--r--  platform/linux-generic/include/odp_config_internal.h | 10
-rw-r--r--  platform/linux-generic/include/odp_event_internal.h | 103
-rw-r--r--  platform/linux-generic/include/odp_event_vector_internal.h | 24
-rw-r--r--  platform/linux-generic/include/odp_forward_typedefs_internal.h | 3
-rw-r--r--  platform/linux-generic/include/odp_global_data.h | 2
-rw-r--r--  platform/linux-generic/include/odp_init_internal.h | 3
-rw-r--r--  platform/linux-generic/include/odp_ipsec_internal.h | 24
-rw-r--r--  platform/linux-generic/include/odp_packet_internal.h | 34
-rw-r--r--  platform/linux-generic/include/odp_pool_internal.h | 46
-rw-r--r--  platform/linux-generic/include/odp_queue_if.h | 11
-rw-r--r--  platform/linux-generic/include/odp_queue_scalable_internal.h | 6
-rw-r--r--  platform/linux-generic/include/odp_random_openssl_internal.h | 5
-rw-r--r--  platform/linux-generic/include/odp_random_std_internal.h | 5
-rw-r--r--  platform/linux-generic/include/odp_schedule_if.h | 11
-rw-r--r--  platform/linux-generic/include/odp_schedule_scalable.h | 5
-rw-r--r--  platform/linux-generic/include/odp_schedule_scalable_ordered.h | 5
-rw-r--r--  platform/linux-generic/include/odp_sysinfo_internal.h | 1
-rw-r--r--  platform/linux-generic/include/odp_timer_internal.h | 15
-rw-r--r--  platform/linux-generic/m4/configure.m4 | 3
-rw-r--r--  platform/linux-generic/m4/odp_libconfig.m4 | 2
-rw-r--r--  platform/linux-generic/m4/odp_openssl.m4 | 36
-rw-r--r--  platform/linux-generic/odp_buffer.c | 17
-rw-r--r--  platform/linux-generic/odp_cpumask_task.c | 20
-rw-r--r--  platform/linux-generic/odp_dma.c | 926
-rw-r--r--  platform/linux-generic/odp_event.c | 19
-rw-r--r--  platform/linux-generic/odp_init.c | 41
-rw-r--r--  platform/linux-generic/odp_ipsec.c | 32
-rw-r--r--  platform/linux-generic/odp_ipsec_events.c | 4
-rw-r--r--  platform/linux-generic/odp_ipsec_sad.c | 123
-rw-r--r--  platform/linux-generic/odp_ishm.c | 3
-rw-r--r--  platform/linux-generic/odp_packet.c | 188
-rw-r--r--  platform/linux-generic/odp_packet_io.c | 64
-rw-r--r--  platform/linux-generic/odp_packet_vector.c | 30
-rw-r--r--  platform/linux-generic/odp_pool.c | 221
-rw-r--r--  platform/linux-generic/odp_queue_basic.c | 105
-rw-r--r--  platform/linux-generic/odp_queue_lf.c | 29
-rw-r--r--  platform/linux-generic/odp_queue_scalable.c | 91
-rw-r--r--  platform/linux-generic/odp_queue_spsc.c | 52
-rw-r--r--  platform/linux-generic/odp_random.c | 35
-rw-r--r--  platform/linux-generic/odp_random_openssl.c | 33
-rw-r--r--  platform/linux-generic/odp_random_std.c | 11
-rw-r--r--  platform/linux-generic/odp_schedule_basic.c | 30
-rw-r--r--  platform/linux-generic/odp_schedule_scalable.c | 12
-rw-r--r--  platform/linux-generic/odp_schedule_scalable_ordered.c | 10
-rw-r--r--  platform/linux-generic/odp_schedule_sp.c | 6
-rw-r--r--  platform/linux-generic/odp_shared_memory.c | 16
-rw-r--r--  platform/linux-generic/odp_stash.c | 6
-rw-r--r--  platform/linux-generic/odp_system_info.c | 50
-rw-r--r--  platform/linux-generic/odp_timer.c | 217
-rw-r--r--  platform/linux-generic/odp_traffic_mngr.c | 9
-rw-r--r--  platform/linux-generic/pktio/dpdk.c | 2
-rw-r--r--  platform/linux-generic/pktio/ipc.c | 2
-rw-r--r--  platform/linux-generic/pktio/loop.c | 9
-rw-r--r--  platform/linux-generic/test/inline-timer.conf | 2
-rw-r--r--  platform/linux-generic/test/packet_align.conf | 2
-rw-r--r--  platform/linux-generic/test/process-mode.conf | 2
-rw-r--r--  platform/linux-generic/test/sched-basic.conf | 2
-rw-r--r--  test/m4/configure.m4 | 1
-rw-r--r--  test/performance/.gitignore | 2
-rw-r--r--  test/performance/Makefile.am | 7
-rw-r--r--  test/performance/odp_l2fwd.c | 317
-rw-r--r--  test/performance/odp_lock_perf.c | 663
-rw-r--r--  test/performance/odp_random.c | 377
-rw-r--r--  test/performance/odp_sched_perf.c | 96
-rwxr-xr-x  test/performance/odp_sched_perf_run.sh | 33
-rw-r--r--  test/performance/odp_sched_pktio.c | 3
-rw-r--r--  test/performance/odp_timer_perf.c | 3
-rw-r--r--  test/validation/api/Makefile.am | 2
-rw-r--r--  test/validation/api/atomic/atomic.c | 3
-rw-r--r--  test/validation/api/barrier/barrier.c | 3
-rw-r--r--  test/validation/api/classification/odp_classification_basic.c | 18
-rw-r--r--  test/validation/api/classification/odp_classification_common.c | 2
-rw-r--r--  test/validation/api/cpumask/cpumask.c | 29
-rw-r--r--  test/validation/api/crypto/odp_crypto_test_inp.c | 29
-rw-r--r--  test/validation/api/dma/.gitignore | 1
-rw-r--r--  test/validation/api/dma/Makefile.am | 4
-rw-r--r--  test/validation/api/dma/dma.c | 1202
-rw-r--r--  test/validation/api/ipsec/ipsec_test_out.c | 1
-rw-r--r--  test/validation/api/lock/lock.c | 3
-rw-r--r--  test/validation/api/pktio/pktio.c | 54
-rw-r--r--  test/validation/api/shmem/shmem.c | 144
-rw-r--r--  test/validation/api/thread/thread.c | 2
-rw-r--r--  test/validation/api/timer/timer.c | 33
173 files changed, 7323 insertions(+), 1815 deletions(-)
diff --git a/.checkpatch.conf b/.checkpatch.conf
index b276680bf..3a0190deb 100644
--- a/.checkpatch.conf
+++ b/.checkpatch.conf
@@ -1,17 +1,15 @@
--no-tree
--strict
--ignore=SPLIT_STRING
---ignore SSCANF_TO_KSTRTO
+--ignore=SSCANF_TO_KSTRTO
--ignore=NEW_TYPEDEFS
--ignore=DEPRECATED_VARIABLE
--ignore=COMPARISON_TO_NULL
--ignore=BIT_MACRO
---ignore=PREFER_PRINTF
---ignore=PREFER_SCANF
--ignore=VOLATILE
--ignore=AVOID_EXTERNS
--ignore=CONST_STRUCT
---ignore=PREFER_ARRAY_SIZE
+--ignore=ARRAY_SIZE
--ignore=PREFER_KERNEL_TYPES
--ignore=CONSTANT_COMPARISON
--ignore=BLOCK_COMMENT_STYLE
diff --git a/CHANGELOG b/CHANGELOG
index ce92f58dd..3a2f8a049 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,3 +1,57 @@
+== OpenDataPlane (1.33.0.0)
+
+=== Backward incompatible API changes
+==== Shared Memory
+* Added a bit mask capability `odp_shm_capability_t.flags` for ODP_SHM_* flags
+
+==== Timer
+* Added initialization function `odp_timer_pool_param_init()` for timer pool
+parameters. Application must use it to initialize parameters to their
+default values.
+
+=== Backward compatible API changes
+==== Classifier
+* Added missing default values for `odp_cls_cos_param_t.num_queue`,
+`odp_cls_cos_param_t.red.enable`, `odp_cls_cos_param_t.bp.enable`, and
+`odp_pmr_param_t.range_term`.
+
+==== Crypto
+* Clarified that `odp_crypto_session_create()` parameters, including the key and
+IV data, can be freed after session creation.
+
+==== DMA
+* Added new DMA module which enables applications to offload memory transfers
+(copies) to DMA hardware. See `include/odp/api/spec/dma.h` for more information.
+
+==== IPsec
+* Added possibility to request completed packets as packet vector events
+instead of packet events. Packet vector capabilities are provided by
+`odp_ipsec_capability_t.vector` and the configuration is done using
+`odp_ipsec_config_t.vector`.
+* Clarified that `odp_ipsec_sa_create()` parameters, including the various
+memory buffers pointed to by the parameters, can be freed after SA creation.
+
+==== Packet IO
+* Clarified `odp_pktin_vector_config_t.enable` documentation to state that when
+packet vectors are enabled, packets may be delivered both as packet vector
+events and packet events. Packet vectors are disabled by default.
+* Clarified that the type of input queues (scheduled versus plain) is deduced
+from the pktio input mode in `odp_pktin_queue_config()`, and that the default
+queue type value and the queue type value passed in
+`odp_pktin_queue_param_t.queue_param` are ignored.
+
+==== Pool
+* Added new pool type for DMA completion event pools. These pools are created
+with `odp_dma_pool_create()` and pool capability is included in
+`odp_dma_capability_t`. Otherwise, pool APIs are used normally for these pools.
+
+==== Shared Memory
+* Added `ODP_SHM_NO_HP` flag which can be used to prevent the implementation
+from allocating the shared memory block from huge pages
+
+==== Std
+* Added DMA flag to `odp_feature_t`
+
== OpenDataPlane (1.32.1.0)
=== Backward compatible API changes
==== Init
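The odp_timer_pool_param_init() entry above is the release's backward incompatible change: timer pool parameters must now be initialized with the new call instead of memset(). A minimal sketch of the required pattern, mirroring the example updates later in this merge (resolution, timeout and name values are illustrative):

    odp_timer_pool_param_t tparam;
    odp_timer_pool_t tp;

    odp_timer_pool_param_init(&tparam);             /* mandatory as of v1.33.0.0 */
    tparam.res_ns     = 10 * ODP_TIME_MSEC_IN_NS;   /* timeout resolution */
    tparam.min_tmo    = 10 * ODP_TIME_MSEC_IN_NS;   /* minimum timeout */
    tparam.max_tmo    = 1000 * ODP_TIME_MSEC_IN_NS; /* maximum timeout */
    tparam.num_timers = 32;                         /* illustrative pool size */
    tparam.clk_src    = ODP_CLOCK_DEFAULT;

    tp = odp_timer_pool_create("timer_pool", &tparam);
    if (tp == ODP_TIMER_POOL_INVALID)
        ; /* handle error */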
diff --git a/config/odp-linux-generic.conf b/config/odp-linux-generic.conf
index 01c622fe8..ce2412a07 100644
--- a/config/odp-linux-generic.conf
+++ b/config/odp-linux-generic.conf
@@ -16,7 +16,7 @@
# Mandatory fields
odp_implementation = "linux-generic"
-config_file_version = "0.1.18"
+config_file_version = "0.1.19"
# System options
system: {
@@ -30,6 +30,14 @@ system: {
# available using standard Linux methods.
cpu_mhz_max = 1400
+ # When enabled (1), the implementation reads the CPU frequency values
+ # from the OS only once during ODP initialization. Enabling this option removes
+ # system calls from odp_cpu_hz() and odp_cpu_hz_id() implementations.
+ #
+ # NOTE: This option should only be used on systems where CPU frequency
+ # scaling is disabled.
+ cpu_hz_static = 0
+
# Maximum number of ODP threads that can be created.
# odp_thread_count_max() returns this value or the build time
# maximum ODP_THREAD_COUNT_MAX, whichever is lower. This setting
diff --git a/configure.ac b/configure.ac
index 127e7c591..d02c6624a 100644
--- a/configure.ac
+++ b/configure.ac
@@ -3,8 +3,8 @@ AC_PREREQ([2.5])
# ODP API version
##########################################################################
m4_define([odpapi_generation_version], [1])
-m4_define([odpapi_major_version], [32])
-m4_define([odpapi_minor_version], [1])
+m4_define([odpapi_major_version], [33])
+m4_define([odpapi_minor_version], [0])
m4_define([odpapi_point_version], [0])
m4_define([odpapi_version],
[odpapi_generation_version.odpapi_major_version.odpapi_minor_version.odpapi_point_version])
@@ -324,22 +324,6 @@ then
fi
##########################################################################
-# Configure/disable usage of OpenSSL library
-##########################################################################
-AC_ARG_WITH([openssl],
- [AS_HELP_STRING([--without-openssl],
- [compile without OpenSSL (may result in disabled crypto and random support)]
- [[default=with]])],
- [],
- [with_openssl=yes])
-AS_IF([test "$with_openssl" != "no"],
- [ODP_OPENSSL
- have_openssl=1], [have_openssl=0])
-AM_CONDITIONAL([WITH_OPENSSL], [test x$with_openssl != xno])
-AC_DEFINE_UNQUOTED([_ODP_OPENSSL], [$have_openssl],
- [Define to 1 to enable OpenSSL support])
-
-##########################################################################
# Include m4 files
##########################################################################
m4_include([./doc/m4/configure.m4])
@@ -524,7 +508,6 @@ AC_MSG_RESULT([
includedir: ${includedir}
testdir: ${testdir}
WITH_ARCH: ${WITH_ARCH}
- with_openssl: ${with_openssl}
cc: ${CC}
cc version: ${CC_VERSION}
@@ -555,7 +538,3 @@ AC_MSG_RESULT([
user_guides: ${user_guides}
${PLAT_CFG_TEXT}
])
-
-AS_IF([test "${with_openssl}" = "no"],
- [AC_MSG_WARN([Strong cryptography is not available without OpenSSL])]
- )
diff --git a/doc/users-guide/users-guide.adoc b/doc/users-guide/users-guide.adoc
index ecaf70022..eaff6867e 100644
--- a/doc/users-guide/users-guide.adoc
+++ b/doc/users-guide/users-guide.adoc
@@ -940,15 +940,6 @@ to other ODP instances running on the same OS.
Other ODP instances willing to see this exported memory should use the
`odp_shm_import()` ODP function.
-==== ODP_SHM_SW_ONLY
-This flag tells ODP that the shared memory will be used by the ODP application
-software only: no HW (such as DMA, or other accelerator) will ever
-try to access the memory. No other ODP call will be involved on this memory
-(as ODP calls could implicitly involve HW, depending on the ODP
-implementation), except for `odp_shm_lookup()` and `odp_shm_free()`.
-ODP implementations may use this flag as a hint for performance optimization,
-or may as well ignore this flag.
-
==== ODP_SHM_SINGLE_VA
This flag is used to guarantee the uniqueness of the address at which
the shared memory is mapped: without this flag, a given memory block may be
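With ODP_SHM_SW_ONLY removed above, the helpers below now pass plain 0 as the flags argument. For the new v1.33.0.0 additions, a hedged sketch of a capability-driven reserve using the odp_shm_capability_t.flags mask and the ODP_SHM_NO_HP flag from this release (name and size are illustrative):

    odp_shm_capability_t capa;
    uint32_t flags = 0;

    if (odp_shm_capability(&capa) == 0 && (capa.flags & ODP_SHM_NO_HP))
        flags |= ODP_SHM_NO_HP; /* keep this small block off huge pages */

    odp_shm_t shm = odp_shm_reserve("small_block", 4096, ODP_CACHE_LINE_SIZE, flags);
    if (shm == ODP_SHM_INVALID)
        ; /* handle error */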
diff --git a/example/debug/odp_debug.c b/example/debug/odp_debug.c
index 5c0beef69..88f5d3cdb 100644
--- a/example/debug/odp_debug.c
+++ b/example/debug/odp_debug.c
@@ -409,7 +409,7 @@ static int timer_debug(void)
if (timer_res_capa.res_ns > res)
res = timer_res_capa.res_ns;
- memset(&timer_param, 0, sizeof(timer_param));
+ odp_timer_pool_param_init(&timer_param);
timer_param.res_ns = res;
timer_param.min_tmo = max_tmo / 10;
timer_param.max_tmo = max_tmo;
diff --git a/example/timer/odp_timer_accuracy.c b/example/timer/odp_timer_accuracy.c
index df1fd5919..9318e1090 100644
--- a/example/timer/odp_timer_accuracy.c
+++ b/example/timer/odp_timer_accuracy.c
@@ -331,7 +331,7 @@ static int start_timers(test_global_t *test_global)
return -1;
}
- memset(&timer_param, 0, sizeof(odp_timer_pool_param_t));
+ odp_timer_pool_param_init(&timer_param);
if (res_ns)
timer_param.res_ns = res_ns;
diff --git a/example/timer/odp_timer_simple.c b/example/timer/odp_timer_simple.c
index 4e28807c6..fcda64ea4 100644
--- a/example/timer/odp_timer_simple.c
+++ b/example/timer/odp_timer_simple.c
@@ -69,7 +69,8 @@ int main(int argc ODP_UNUSED, char *argv[] ODP_UNUSED)
ret += 1;
goto err_tp;
}
- memset(&tparams, 0, sizeof(tparams));
+
+ odp_timer_pool_param_init(&tparams);
tparams.res_ns = MAX(10 * ODP_TIME_MSEC_IN_NS,
timer_capa.highest_res_ns);
tparams.min_tmo = 10 * ODP_TIME_MSEC_IN_NS;
diff --git a/example/timer/odp_timer_test.c b/example/timer/odp_timer_test.c
index 2efe55e5a..124d6368e 100644
--- a/example/timer/odp_timer_test.c
+++ b/example/timer/odp_timer_test.c
@@ -440,12 +440,14 @@ int main(int argc, char *argv[])
goto err;
}
+ odp_timer_pool_param_init(&tparams);
tparams.res_ns = gbls->args.resolution_us * ODP_TIME_USEC_IN_NS;
tparams.min_tmo = gbls->args.min_us * ODP_TIME_USEC_IN_NS;
tparams.max_tmo = gbls->args.max_us * ODP_TIME_USEC_IN_NS;
tparams.num_timers = num_workers; /* One timer per worker */
tparams.priv = 0; /* Shared */
tparams.clk_src = ODP_CLOCK_DEFAULT;
+
gbls->tp = odp_timer_pool_create("timer_pool", &tparams);
if (gbls->tp == ODP_TIMER_POOL_INVALID) {
err = 1;
diff --git a/helper/cli.c b/helper/cli.c
index 97fa232a3..0503da230 100644
--- a/helper/cli.c
+++ b/helper/cli.c
@@ -85,7 +85,7 @@ int odph_cli_init(const odph_cli_param_t *param)
int shm_size = sizeof(cli_shm_t) +
param->max_user_commands * sizeof(user_cmd_t);
odp_shm_t shm_hdl =
- odp_shm_reserve(shm_name, shm_size, 64, ODP_SHM_SW_ONLY);
+ odp_shm_reserve(shm_name, shm_size, 64, 0);
if (shm_hdl != ODP_SHM_INVALID)
shm = (cli_shm_t *)odp_shm_addr(shm_hdl);
diff --git a/helper/cuckootable.c b/helper/cuckootable.c
index 49b8a5c86..ecf95a83a 100644
--- a/helper/cuckootable.c
+++ b/helper/cuckootable.c
@@ -236,9 +236,8 @@ odph_cuckoo_table_create(
bucket_num = align32pow2(capacity) / HASH_BUCKET_ENTRIES;
bucket_size = bucket_num * sizeof(struct cuckoo_table_bucket);
- shm_tbl = odp_shm_reserve(
- name, impl_size + bucket_size,
- ODP_CACHE_LINE_SIZE, ODP_SHM_SW_ONLY);
+ shm_tbl = odp_shm_reserve(name, impl_size + bucket_size,
+ ODP_CACHE_LINE_SIZE, 0);
if (shm_tbl == ODP_SHM_INVALID) {
ODPH_DBG(
diff --git a/helper/hashtable.c b/helper/hashtable.c
index 39da586a8..0c571db91 100644
--- a/helper/hashtable.c
+++ b/helper/hashtable.c
@@ -81,7 +81,7 @@ odph_table_t odph_hash_table_create(const char *name, uint32_t capacity,
ODPH_DBG("name already exist\n");
return NULL;
}
- shmem = odp_shm_reserve(name, capacity << 20, 64, ODP_SHM_SW_ONLY);
+ shmem = odp_shm_reserve(name, capacity << 20, 64, 0);
if (shmem == ODP_SHM_INVALID) {
ODPH_DBG("shm reserve fail\n");
return NULL;
diff --git a/helper/iplookuptable.c b/helper/iplookuptable.c
index c514ee362..f8f8e9cc5 100644
--- a/helper/iplookuptable.c
+++ b/helper/iplookuptable.c
@@ -513,9 +513,7 @@ odph_table_t odph_iplookup_table_create(const char *name,
impl_size = sizeof(odph_iplookup_table_impl);
l1_size = ENTRY_SIZE * ENTRY_NUM_L1;
- shm_tbl = odp_shm_reserve(
- name, impl_size + l1_size,
- ODP_CACHE_LINE_SIZE, ODP_SHM_SW_ONLY);
+ shm_tbl = odp_shm_reserve(name, impl_size + l1_size, ODP_CACHE_LINE_SIZE, 0);
if (shm_tbl == ODP_SHM_INVALID) {
ODPH_DBG(
diff --git a/helper/lineartable.c b/helper/lineartable.c
index 0999569cc..5362d80f5 100644
--- a/helper/lineartable.c
+++ b/helper/lineartable.c
@@ -61,7 +61,7 @@ odph_table_t odph_linear_table_create(const char *name, uint32_t capacity,
}
/* alloc memory from shm */
- shmem = odp_shm_reserve(name, capacity << 20, 64, ODP_SHM_SW_ONLY);
+ shmem = odp_shm_reserve(name, capacity << 20, 64, 0);
if (shmem == ODP_SHM_INVALID) {
ODPH_DBG("shm reserve fail\n");
return NULL;
diff --git a/include/Makefile.am b/include/Makefile.am
index de9d898b6..6865b08aa 100644
--- a/include/Makefile.am
+++ b/include/Makefile.am
@@ -23,6 +23,8 @@ odpapiinclude_HEADERS = \
odp/api/crypto.h \
odp/api/debug.h \
odp/api/deprecated.h \
+ odp/api/dma.h \
+ odp/api/dma_types.h \
odp/api/errno.h \
odp/api/event.h \
odp/api/hash.h \
@@ -60,6 +62,7 @@ odpapiinclude_HEADERS = \
odp/api/ticketlock.h \
odp/api/time.h \
odp/api/timer.h \
+ odp/api/timer_types.h \
odp/api/traffic_mngr.h \
odp/api/version.h
@@ -77,6 +80,8 @@ odpapispecinclude_HEADERS = \
odp/api/spec/cpumask.h \
odp/api/spec/crypto.h \
odp/api/spec/debug.h \
+ odp/api/spec/dma.h \
+ odp/api/spec/dma_types.h \
odp/api/spec/errno.h \
odp/api/spec/event.h \
odp/api/spec/hash.h \
@@ -116,6 +121,7 @@ odpapispecinclude_HEADERS = \
odp/api/spec/ticketlock.h \
odp/api/spec/time.h \
odp/api/spec/timer.h \
+ odp/api/spec/timer_types.h \
odp/api/spec/traffic_mngr.h
nodist_odpapispecinclude_HEADERS = \
@@ -135,6 +141,7 @@ odpapiabidefaultinclude_HEADERS = \
odp/api/abi-default/cpumask.h \
odp/api/abi-default/crypto.h \
odp/api/abi-default/debug.h \
+ odp/api/abi-default/dma_types.h \
odp/api/abi-default/errno.h \
odp/api/abi-default/event.h \
odp/api/abi-default/hash.h \
@@ -164,7 +171,7 @@ odpapiabidefaultinclude_HEADERS = \
odp/api/abi-default/thrmask.h \
odp/api/abi-default/ticketlock.h \
odp/api/abi-default/time.h \
- odp/api/abi-default/timer.h \
+ odp/api/abi-default/timer_types.h \
odp/api/abi-default/traffic_mngr.h \
odp/api/abi-default/version.h
@@ -185,6 +192,7 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/arm32-linux/odp/api/abi/cpumask.h \
odp/arch/arm32-linux/odp/api/abi/crypto.h \
odp/arch/arm32-linux/odp/api/abi/debug.h \
+ odp/arch/arm32-linux/odp/api/abi/dma_types.h \
odp/arch/arm32-linux/odp/api/abi/errno.h \
odp/arch/arm32-linux/odp/api/abi/event.h \
odp/arch/arm32-linux/odp/api/abi/hash.h \
@@ -214,7 +222,7 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/arm32-linux/odp/api/abi/thrmask.h \
odp/arch/arm32-linux/odp/api/abi/ticketlock.h \
odp/arch/arm32-linux/odp/api/abi/time.h \
- odp/arch/arm32-linux/odp/api/abi/timer.h \
+ odp/arch/arm32-linux/odp/api/abi/timer_types.h \
odp/arch/arm32-linux/odp/api/abi/traffic_mngr.h \
odp/arch/arm32-linux/odp/api/abi/version.h
endif
@@ -231,6 +239,7 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/arm64-linux/odp/api/abi/cpumask.h \
odp/arch/arm64-linux/odp/api/abi/crypto.h \
odp/arch/arm64-linux/odp/api/abi/debug.h \
+ odp/arch/arm64-linux/odp/api/abi/dma_types.h \
odp/arch/arm64-linux/odp/api/abi/errno.h \
odp/arch/arm64-linux/odp/api/abi/event.h \
odp/arch/arm64-linux/odp/api/abi/hash.h \
@@ -260,7 +269,7 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/arm64-linux/odp/api/abi/thrmask.h \
odp/arch/arm64-linux/odp/api/abi/ticketlock.h \
odp/arch/arm64-linux/odp/api/abi/time.h \
- odp/arch/arm64-linux/odp/api/abi/timer.h \
+ odp/arch/arm64-linux/odp/api/abi/timer_types.h \
odp/arch/arm64-linux/odp/api/abi/traffic_mngr.h \
odp/arch/arm64-linux/odp/api/abi/version.h
endif
@@ -277,6 +286,7 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/default-linux/odp/api/abi/cpumask.h \
odp/arch/default-linux/odp/api/abi/crypto.h \
odp/arch/default-linux/odp/api/abi/debug.h \
+ odp/arch/default-linux/odp/api/abi/dma_types.h \
odp/arch/default-linux/odp/api/abi/errno.h \
odp/arch/default-linux/odp/api/abi/event.h \
odp/arch/default-linux/odp/api/abi/hash.h \
@@ -306,7 +316,7 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/default-linux/odp/api/abi/thrmask.h \
odp/arch/default-linux/odp/api/abi/ticketlock.h \
odp/arch/default-linux/odp/api/abi/time.h \
- odp/arch/default-linux/odp/api/abi/timer.h \
+ odp/arch/default-linux/odp/api/abi/timer_types.h \
odp/arch/default-linux/odp/api/abi/traffic_mngr.h \
odp/arch/default-linux/odp/api/abi/version.h
endif
@@ -323,6 +333,7 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/mips64-linux/odp/api/abi/cpumask.h \
odp/arch/mips64-linux/odp/api/abi/crypto.h \
odp/arch/mips64-linux/odp/api/abi/debug.h \
+ odp/arch/mips64-linux/odp/api/abi/dma_types.h \
odp/arch/mips64-linux/odp/api/abi/errno.h \
odp/arch/mips64-linux/odp/api/abi/event.h \
odp/arch/mips64-linux/odp/api/abi/hash.h \
@@ -352,7 +363,7 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/mips64-linux/odp/api/abi/thrmask.h \
odp/arch/mips64-linux/odp/api/abi/ticketlock.h \
odp/arch/mips64-linux/odp/api/abi/time.h \
- odp/arch/mips64-linux/odp/api/abi/timer.h \
+ odp/arch/mips64-linux/odp/api/abi/timer_types.h \
odp/arch/mips64-linux/odp/api/abi/traffic_mngr.h \
odp/arch/mips64-linux/odp/api/abi/version.h
endif
@@ -369,6 +380,7 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/power64-linux/odp/api/abi/cpumask.h \
odp/arch/power64-linux/odp/api/abi/crypto.h \
odp/arch/power64-linux/odp/api/abi/debug.h \
+ odp/arch/power64-linux/odp/api/abi/dma_types.h \
odp/arch/power64-linux/odp/api/abi/errno.h \
odp/arch/power64-linux/odp/api/abi/event.h \
odp/arch/power64-linux/odp/api/abi/hash.h \
@@ -398,7 +410,7 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/power64-linux/odp/api/abi/thrmask.h \
odp/arch/power64-linux/odp/api/abi/ticketlock.h \
odp/arch/power64-linux/odp/api/abi/time.h \
- odp/arch/power64-linux/odp/api/abi/timer.h \
+ odp/arch/power64-linux/odp/api/abi/timer_types.h \
odp/arch/power64-linux/odp/api/abi/traffic_mngr.h \
odp/arch/power64-linux/odp/api/abi/version.h
endif
@@ -415,6 +427,7 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/x86_32-linux/odp/api/abi/cpumask.h \
odp/arch/x86_32-linux/odp/api/abi/crypto.h \
odp/arch/x86_32-linux/odp/api/abi/debug.h \
+ odp/arch/x86_32-linux/odp/api/abi/dma_types.h \
odp/arch/x86_32-linux/odp/api/abi/errno.h \
odp/arch/x86_32-linux/odp/api/abi/event.h \
odp/arch/x86_32-linux/odp/api/abi/hash.h \
@@ -444,7 +457,7 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/x86_32-linux/odp/api/abi/thrmask.h \
odp/arch/x86_32-linux/odp/api/abi/ticketlock.h \
odp/arch/x86_32-linux/odp/api/abi/time.h \
- odp/arch/x86_32-linux/odp/api/abi/timer.h \
+ odp/arch/x86_32-linux/odp/api/abi/timer_types.h \
odp/arch/x86_32-linux/odp/api/abi/traffic_mngr.h \
odp/arch/x86_32-linux/odp/api/abi/version.h
endif
@@ -461,6 +474,7 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/x86_64-linux/odp/api/abi/cpumask.h \
odp/arch/x86_64-linux/odp/api/abi/crypto.h \
odp/arch/x86_64-linux/odp/api/abi/debug.h \
+ odp/arch/x86_64-linux/odp/api/abi/dma_types.h \
odp/arch/x86_64-linux/odp/api/abi/errno.h \
odp/arch/x86_64-linux/odp/api/abi/event.h \
odp/arch/x86_64-linux/odp/api/abi/hash.h \
@@ -490,7 +504,7 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/x86_64-linux/odp/api/abi/thrmask.h \
odp/arch/x86_64-linux/odp/api/abi/ticketlock.h \
odp/arch/x86_64-linux/odp/api/abi/time.h \
- odp/arch/x86_64-linux/odp/api/abi/timer.h \
+ odp/arch/x86_64-linux/odp/api/abi/timer_types.h \
odp/arch/x86_64-linux/odp/api/abi/traffic_mngr.h \
odp/arch/x86_64-linux/odp/api/abi/version.h
endif
diff --git a/include/odp/api/abi-default/dma_types.h b/include/odp/api/abi-default/dma_types.h
new file mode 100644
index 000000000..c2b0eca64
--- /dev/null
+++ b/include/odp/api/abi-default/dma_types.h
@@ -0,0 +1,48 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_ABI_DMA_TYPES_H_
+#define ODP_ABI_DMA_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+/** @internal Dummy type for strong typing */
+typedef struct { char dummy; /**< @internal Dummy */ } _odp_abi_dma_t;
+
+/** @internal Dummy type for strong typing */
+typedef struct { char dummy; /**< @internal Dummy */ } _odp_abi_dma_compl_t;
+
+/** @ingroup odp_dma
+ * @{
+ */
+
+typedef _odp_abi_dma_t *odp_dma_t;
+
+#define ODP_DMA_INVALID ((odp_dma_t)0)
+
+typedef _odp_abi_dma_compl_t *odp_dma_compl_t;
+
+#define ODP_DMA_COMPL_INVALID ((odp_dma_compl_t)0)
+
+typedef uint32_t odp_dma_transfer_id_t;
+
+#define ODP_DMA_TRANSFER_ID_INVALID ((odp_dma_transfer_id_t)0)
+
+#define ODP_DMA_NAME_LEN 32
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/include/odp/api/abi-default/event.h b/include/odp/api/abi-default/event.h
index 8976252f3..ecedda3bd 100644
--- a/include/odp/api/abi-default/event.h
+++ b/include/odp/api/abi-default/event.h
@@ -32,6 +32,7 @@ typedef enum {
ODP_EVENT_IPSEC_STATUS = 5,
ODP_EVENT_PACKET_VECTOR = 6,
ODP_EVENT_PACKET_TX_COMPL = 7,
+ ODP_EVENT_DMA_COMPL = 8,
} odp_event_type_t;
typedef enum {
diff --git a/include/odp/api/abi-default/timer.h b/include/odp/api/abi-default/timer_types.h
index 566d199e0..a653dcbaa 100644
--- a/include/odp/api/abi-default/timer.h
+++ b/include/odp/api/abi-default/timer_types.h
@@ -10,8 +10,8 @@
* ODP timer service
*/
-#ifndef ODP_ABI_TIMER_H_
-#define ODP_ABI_TIMER_H_
+#ifndef ODP_ABI_TIMER_TYPES_H_
+#define ODP_ABI_TIMER_TYPES_H_
#ifdef __cplusplus
extern "C" {
diff --git a/include/odp/api/dma.h b/include/odp/api/dma.h
new file mode 100644
index 000000000..4720f57c9
--- /dev/null
+++ b/include/odp/api/dma.h
@@ -0,0 +1,26 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP DMA
+ */
+
+#ifndef ODP_API_DMA_H_
+#define ODP_API_DMA_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/spec/dma.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/include/odp/api/dma_types.h b/include/odp/api/dma_types.h
new file mode 100644
index 000000000..4a51371b4
--- /dev/null
+++ b/include/odp/api/dma_types.h
@@ -0,0 +1,28 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP DMA
+ */
+
+#ifndef ODP_API_DMA_TYPES_H_
+#define ODP_API_DMA_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/abi/dma_types.h>
+
+#include <odp/api/spec/dma_types.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/include/odp/api/spec/classification.h b/include/odp/api/spec/classification.h
index ec15c8962..633963692 100644
--- a/include/odp/api/spec/classification.h
+++ b/include/odp/api/spec/classification.h
@@ -149,7 +149,8 @@ typedef union odp_cls_pmr_terms_t {
typedef struct odp_red_param_t {
/** A boolean to enable RED
* When true, RED is enabled and configured with RED parameters.
- * Otherwise, RED parameters are ignored. */
+ * Otherwise, RED parameters are ignored. Default value is false.
+ */
odp_bool_t enable;
/** Threshold parameters for RED
@@ -166,7 +167,8 @@ typedef struct odp_red_param_t {
typedef struct odp_bp_param_t {
/** A boolean to enable Back pressure
* When true, back pressure is enabled and configured with the BP
- * parameters. Otherwise BP parameters are ignored.
+ * parameters. Otherwise BP parameters are ignored. Default value
+ * is false.
*/
odp_bool_t enable;
@@ -329,6 +331,8 @@ typedef struct odp_cls_cos_param {
* the class of service.
* Depending on the implementation this number might be rounded-off to
* nearest supported value (e.g power of 2)
+ *
+ * Default value is 1.
*/
uint32_t num_queue;
@@ -695,7 +699,7 @@ typedef struct odp_pmr_param_t {
/** Packet Matching Rule term */
odp_cls_pmr_term_t term;
- /** True if the value is range and false if match */
+ /** True if the value is range and false if match. Default is false. */
odp_bool_t range_term;
/** Variant mappings for types of matches */
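The classifier hunks above only document existing default values. As a hedged sketch of relying on them (odp_cls_cos_param_init() and odp_cls_cos_create() are existing classification calls; the CoS name and pool handle are placeholders):

    odp_cls_cos_param_t cos_param;

    odp_cls_cos_param_init(&cos_param);
    /* After init: num_queue == 1, red.enable == false, bp.enable == false */
    cos_param.pool = pool;

    odp_cos_t cos = odp_cls_cos_create("default_cos", &cos_param);
    if (cos == ODP_COS_INVALID)
        ; /* handle error */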
diff --git a/include/odp/api/spec/crypto.h b/include/odp/api/spec/crypto.h
index bfda0bcee..d3e3c9a07 100644
--- a/include/odp/api/spec/crypto.h
+++ b/include/odp/api/spec/crypto.h
@@ -1053,6 +1053,9 @@ int odp_crypto_auth_capability(odp_auth_alg_t auth,
* default values. If call ends up with an error no new session will be
* created.
*
+ * The parameter structure as well as the key and IV data pointed to by it
+ * can be freed after the call.
+ *
* @param param Session parameters
* @param[out] session Created session else ODP_CRYPTO_SESSION_INVALID
* @param[out] status Failure code if unsuccessful
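A brief sketch of the clarified lifetime rule above, under the assumption of a heap-allocated key (algorithm, key length and error handling are illustrative):

    odp_crypto_session_param_t param;
    odp_crypto_session_t session;
    odp_crypto_ses_create_err_t status;
    uint8_t *key = malloc(16);

    odp_crypto_session_param_init(&param);
    param.cipher_alg        = ODP_CIPHER_ALG_AES_CBC;
    param.cipher_key.data   = key;
    param.cipher_key.length = 16;

    if (odp_crypto_session_create(&param, &session, &status) == 0)
        free(key); /* key data may be freed once the session exists */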
diff --git a/include/odp/api/spec/dma.h b/include/odp/api/spec/dma.h
new file mode 100644
index 000000000..8ca506291
--- /dev/null
+++ b/include/odp/api/spec/dma.h
@@ -0,0 +1,354 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP DMA
+ */
+
+#ifndef ODP_API_SPEC_DMA_H_
+#define ODP_API_SPEC_DMA_H_
+#include <odp/visibility_begin.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/dma_types.h>
+
+/** @addtogroup odp_dma
+ * @{
+ */
+
+/**
+ * Query DMA capabilities
+ *
+ * Outputs DMA capabilities on success.
+ *
+ * @param[out] capa Pointer to a capability structure for output
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
+ */
+int odp_dma_capability(odp_dma_capability_t *capa);
+
+/**
+ * Initialize DMA session parameters
+ *
+ * Initialize an odp_dma_param_t to its default values.
+ *
+ * @param[out] param Parameter structure to be initialized
+ */
+void odp_dma_param_init(odp_dma_param_t *param);
+
+/**
+ * Create DMA session
+ *
+ * @param name DMA session name or NULL. Maximum string length is ODP_DMA_NAME_LEN.
+ * @param param DMA session parameters
+ *
+ * @return DMA session handle on success
+ * @retval ODP_DMA_INVALID on failure
+ */
+odp_dma_t odp_dma_create(const char *name, const odp_dma_param_t *param);
+
+/**
+ * Destroy DMA session
+ *
+ * A DMA session may be destroyed only when there are no active transfers in the session (all
+ * previously started transfers have completed).
+ *
+ * @param dma DMA session to be destroyed
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
+ */
+int odp_dma_destroy(odp_dma_t dma);
+
+/**
+ * Find DMA session by name
+ *
+ * @param name DMA session name
+ *
+ * @return Handle of the first matching DMA session
+ * @retval ODP_DMA_INVALID DMA session could not be found
+ */
+odp_dma_t odp_dma_lookup(const char *name);
+
+/**
+ * Initialize DMA transfer parameters
+ *
+ * Initialize an odp_dma_transfer_param_t to its default values.
+ *
+ * @param[out] trs_param Parameter structure to be initialized
+ */
+void odp_dma_transfer_param_init(odp_dma_transfer_param_t *trs_param);
+
+/**
+ * Initialize DMA transfer completion parameters
+ *
+ * Initialize an odp_dma_compl_param_t to its default values.
+ *
+ * @param[out] compl_param Parameter structure to be initialized
+ */
+void odp_dma_compl_param_init(odp_dma_compl_param_t *compl_param);
+
+/**
+ * Perform DMA transfer
+ *
+ * Performs DMA transfer according to the session and transfer parameters. Returns 1 when
+ * the transfer was completed successfully. Returns 0 when the transfer was not performed
+ * due to resources being temporarily busy. In this case, the same transfer is likely to succeed
+ * after enough resources are available. Returns <0 on failure.
+ *
+ * The call optionally outputs transfer results on a non-zero return value. Use NULL as 'result'
+ * pointer if results are not required.
+ *
+ * @param dma DMA session
+ * @param trs_param Transfer parameters
+ * @param[out] result Pointer to transfer result structure for output, or NULL when not used
+ *
+ * @retval 1 when transfer completed successfully
+ * @retval 0 when resources are busy and transfer was not performed
+ * @retval <0 on failure
+ */
+int odp_dma_transfer(odp_dma_t dma, const odp_dma_transfer_param_t *trs_param,
+ odp_dma_result_t *result);
+
+/**
+ * Perform multiple DMA transfers
+ *
+ * Like odp_dma_transfer(), but performs 'num' transfers.
+ *
+ * @param dma DMA session
+ * @param trs_param Array of transfer parameter pointers
+ * @param[out] result Array of transfer result pointers for output, or NULL when not used
+ * @param num Number of transfers to perform. Both arrays have this many elements.
+ *
+ * @return Number of transfers completed successfully (1 ... num)
+ * @retval 0 when resources are busy and no transfers were performed
+ * @retval <0 on failure
+ */
+int odp_dma_transfer_multi(odp_dma_t dma, const odp_dma_transfer_param_t *trs_param[],
+ odp_dma_result_t *result[], int num);
+
+/**
+ * Start DMA transfer
+ *
+ * Starts asynchronous DMA transfer according to the session and transfer parameters.
+ * Completion parameters specify how transfer completion is reported. Returns 1 when the transfer
+ * was started successfully. Returns 0 when the transfer was not started due to resources being
+ * temporarily busy. In this case, the same transfer is likely to start successfully after enough
+ * resources are available. Returns <0 on failure.
+ *
+ * @param dma DMA session
+ * @param trs_param Transfer parameters
+ * @param compl_param Transfer completion parameters
+ *
+ * @retval 1 when transfer started successfully
+ * @retval 0 when resources are busy and transfer was not started
+ * @retval <0 on failure
+ *
+ * @see odp_dma_transfer_id_alloc(), odp_dma_transfer_done(), odp_dma_compl_result()
+ */
+int odp_dma_transfer_start(odp_dma_t dma, const odp_dma_transfer_param_t *trs_param,
+ const odp_dma_compl_param_t *compl_param);
+
+/**
+ * Start multiple DMA transfers
+ *
+ * Like odp_dma_transfer_start(), but starts 'num' transfers.
+ *
+ * @param dma DMA session
+ * @param trs_param Array of transfer parameter pointers
+ * @param compl_param Array of transfer completion parameter pointers
+ * @param num Number of transfers to start. Both parameter arrays have this many elements.
+ *
+ * @return Number of transfers started successfully (1 ... num)
+ * @retval 0 when resources are busy and no transfers were started
+ * @retval <0 on failure
+ */
+int odp_dma_transfer_start_multi(odp_dma_t dma, const odp_dma_transfer_param_t *trs_param[],
+ const odp_dma_compl_param_t *compl_param[], int num);
+
+/**
+ * Check if DMA transfer has completed
+ *
+ * Application must call this function for every transfer that was started in ODP_DMA_COMPL_POLL
+ * mode until a non-zero value is returned. The transfer identifier from completion parameters of
+ * the transfer start call is used. When a non-zero value is returned, the transfer is complete
+ * and the identifier may be freed or reused for another transfer.
+ *
+ * The call optionally outputs transfer results on a non-zero return value. Use NULL as 'result'
+ * pointer if results are not required.
+ *
+ * @param dma DMA session
+ * @param transfer_id Transfer identifier
+ * @param[out] result Pointer to transfer result structure for output, or NULL when not used.
+ *
+ * @retval 0 transfer has not finished
+ * @retval >0 transfer has finished successfully
+ * @retval <0 on failure
+ */
+int odp_dma_transfer_done(odp_dma_t dma, odp_dma_transfer_id_t transfer_id,
+ odp_dma_result_t *result);
+
+/**
+ * Allocate DMA transfer identifier
+ *
+ * Transfer identifiers are used in #ODP_DMA_COMPL_POLL mode. An identifier refers to a previously
+ * started transfer in an odp_dma_transfer_done() call. The maximum number of transfer identifiers is
+ * implementation specific, but there are at least odp_dma_capability_t::max_transfers identifiers
+ * per session.
+ *
+ * @param dma DMA session
+ *
+ * @return Transfer identifier
+ * @retval ODP_DMA_TRANSFER_ID_INVALID Transfer identifier could not be allocated
+ */
+odp_dma_transfer_id_t odp_dma_transfer_id_alloc(odp_dma_t dma);
+
+/**
+ * Free DMA transfer identifier
+ *
+ * @param dma DMA session
+ * @param transfer_id DMA transfer identifier to be freed
+ */
+void odp_dma_transfer_id_free(odp_dma_t dma, odp_dma_transfer_id_t transfer_id);
+
+/**
+ * Get printable value for DMA session handle
+ *
+ * @param dma Handle to be converted for debugging
+ *
+ * @return uint64_t value that can be used to print/display this handle
+ */
+uint64_t odp_dma_to_u64(odp_dma_t dma);
+
+/**
+ * Print debug info about DMA session
+ *
+ * Print implementation defined information about DMA session to the ODP log.
+ * The information is intended to be used for debugging.
+ *
+ * @param dma DMA session handle
+ */
+void odp_dma_print(odp_dma_t dma);
+
+/**
+ * Check DMA completion event
+ *
+ * Reads a DMA completion event (ODP_EVENT_DMA_COMPL), and returns whether the transfer succeeded
+ * or failed. The call optionally outputs transfer results. Use NULL as the 'result' pointer if
+ * results are not required.
+ *
+ * @param dma_compl DMA completion event
+ * @param[out] result Pointer to transfer result structure for output, or NULL when not used.
+ *
+ * @retval 0 Transfer was successful
+ * @retval <0 Transfer failed
+ */
+int odp_dma_compl_result(odp_dma_compl_t dma_compl, odp_dma_result_t *result);
+
+/**
+ * Convert event to DMA completion event
+ *
+ * Converts an ODP_EVENT_DMA_COMPL type event to a DMA completion event.
+ *
+ * @param ev Event handle
+ *
+ * @return DMA completion event handle
+ */
+odp_dma_compl_t odp_dma_compl_from_event(odp_event_t ev);
+
+/**
+ * Convert DMA completion event to event
+ *
+ * @param dma_compl DMA completion event handle
+ *
+ * @return Event handle
+ */
+odp_event_t odp_dma_compl_to_event(odp_dma_compl_t dma_compl);
+
+/**
+ * Get printable value for DMA completion event handle
+ *
+ * @param dma_compl Handle to be converted for debugging
+ *
+ * @return uint64_t value that can be used to print/display this handle
+ */
+uint64_t odp_dma_compl_to_u64(odp_dma_compl_t dma_compl);
+
+/**
+ * Allocate DMA completion event
+ *
+ * Allocates a DMA completion event from a pool. The pool must have been created with
+ * an odp_dma_pool_create() call. All completion event metadata are set to their default values.
+ *
+ * @param pool Pool handle
+ *
+ * @return DMA completion event handle
+ * @retval ODP_DMA_COMPL_INVALID Completion event could not be allocated
+ */
+odp_dma_compl_t odp_dma_compl_alloc(odp_pool_t pool);
+
+/**
+ * Free DMA completion event
+ *
+ * Frees a DMA completion event into the pool it was allocated from.
+ *
+ * @param dma_compl DMA completion event handle
+ */
+void odp_dma_compl_free(odp_dma_compl_t dma_compl);
+
+/**
+ * Print DMA completion event debug information
+ *
+ * Prints implementation specific debug information about
+ * the completion event to the ODP log.
+ *
+ * @param dma_compl DMA completion event handle
+ */
+void odp_dma_compl_print(odp_dma_compl_t dma_compl);
+
+/**
+ * Initialize DMA completion event pool parameters
+ *
+ * Initialize an odp_dma_pool_param_t to its default values.
+ *
+ * @param[out] pool_param Parameter structure to be initialized
+ */
+void odp_dma_pool_param_init(odp_dma_pool_param_t *pool_param);
+
+/**
+ * Create DMA completion event pool
+ *
+ * Creates a pool of DMA completion events (ODP_EVENT_DMA_COMPL). Pool type is ODP_POOL_DMA_COMPL.
+ * The use of pool name is optional. Unique names are not required. However, odp_pool_lookup()
+ * returns only a single matching pool. Use odp_dma_pool_param_init() to initialize pool parameters
+ * into their default values. Parameter values must not exceed pool capabilities
+ * (@see odp_dma_pool_capability_t).
+ *
+ * @param name Name of the pool or NULL. Maximum string length is ODP_POOL_NAME_LEN.
+ * @param pool_param Pool parameters
+ *
+ * @return Handle of the created pool
+ * @retval ODP_POOL_INVALID Pool could not be created
+ */
+odp_pool_t odp_dma_pool_create(const char *name, const odp_dma_pool_param_t *pool_param);
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#include <odp/visibility_end.h>
+#endif
+
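A hedged sketch of the synchronous completion mode defined above (buffer size and session name are illustrative; capability checks are abbreviated and the buffer length must not exceed the max_seg_len capability):

    odp_dma_param_t dma_param;
    odp_dma_transfer_param_t trs_param;
    odp_dma_seg_t src_seg, dst_seg;
    uint8_t src_buf[1024], dst_buf[1024]; /* illustrative size */
    int ret;

    odp_dma_param_init(&dma_param);
    dma_param.compl_mode_mask = ODP_DMA_COMPL_SYNC; /* always supported */

    odp_dma_t dma = odp_dma_create("sync_copy", &dma_param);
    if (dma == ODP_DMA_INVALID)
        ; /* handle error */

    memset(&src_seg, 0, sizeof(src_seg)); /* zero unused hint bits */
    memset(&dst_seg, 0, sizeof(dst_seg));
    src_seg.addr = src_buf; /* ODP_DMA_FORMAT_ADDR is the default format */
    src_seg.len  = sizeof(src_buf);
    dst_seg.addr = dst_buf;
    dst_seg.len  = sizeof(dst_buf);

    odp_dma_transfer_param_init(&trs_param);
    trs_param.src_seg = &src_seg; /* num_src and num_dst default to 1 */
    trs_param.dst_seg = &dst_seg;

    /* 1: completed, 0: resources busy (retry), <0: failure */
    do {
        ret = odp_dma_transfer(dma, &trs_param, NULL);
    } while (ret == 0);

    odp_dma_destroy(dma);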
diff --git a/include/odp/api/spec/dma_types.h b/include/odp/api/spec/dma_types.h
new file mode 100644
index 000000000..563955395
--- /dev/null
+++ b/include/odp/api/spec/dma_types.h
@@ -0,0 +1,547 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP DMA
+ */
+
+#ifndef ODP_API_SPEC_DMA_TYPES_H_
+#define ODP_API_SPEC_DMA_TYPES_H_
+#include <odp/visibility_begin.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/std_types.h>
+
+/** @defgroup odp_dma ODP DMA
+ * DMA offload
+ * @{
+ */
+
+/**
+ * @typedef odp_dma_t
+ * DMA session
+ */
+
+/**
+ * @typedef odp_dma_transfer_id_t
+ * DMA transfer identifier
+ */
+
+/**
+ * @typedef odp_dma_compl_t
+ * DMA completion event
+ */
+
+/**
+ * @def ODP_DMA_INVALID
+ * Invalid DMA session
+ */
+
+/**
+ * @def ODP_DMA_TRANSFER_ID_INVALID
+ * Invalid DMA transfer identifier
+ */
+
+/**
+ * @def ODP_DMA_COMPL_INVALID
+ * Invalid DMA completion event
+ */
+
+/**
+ * @def ODP_DMA_NAME_LEN
+ * Maximum DMA name length in chars including null char
+ */
+
+/**
+ * DMA completion event pool capabilities
+ *
+ * Pool statistics are not supported with DMA completion event pools.
+ */
+typedef struct odp_dma_pool_capability_t {
+ /** Maximum number of DMA completion event pools */
+ uint32_t max_pools;
+
+ /** Maximum number of DMA completion events in a pool */
+ uint32_t max_num;
+
+ /** Minimum size of thread local cache */
+ uint32_t min_cache_size;
+
+ /** Maximum size of thread local cache */
+ uint32_t max_cache_size;
+
+} odp_dma_pool_capability_t;
+
+/**
+ * DMA completion event pool parameters
+ */
+typedef struct odp_dma_pool_param_t {
+ /** Number of DMA completion events in the pool
+ *
+ * Maximum value is defined by 'max_num' pool capability */
+ uint32_t num;
+
+ /** Maximum number of events cached locally per thread
+ *
+ * See odp_pool_param_t::cache_size documentation for details. Valid values range from
+ * 'min_cache_size' to 'max_cache_size' capability. The default value is implementation
+ * specific and set by odp_dma_pool_param_init().
+ */
+ uint32_t cache_size;
+
+} odp_dma_pool_param_t;
+
+/* Includes pool_types.h, which depends on odp_dma_pool_param_t. */
+#include <odp/api/queue.h>
+
+/**
+ * DMA transfer direction
+ *
+ * Transfer direction defines the source and destination memory types of DMA transfers. The API
+ * specification defines only one option (#ODP_DMA_MAIN_TO_MAIN) for the transfer direction. It is
+ * used for transfers within the main memory. Some implementations may extend this enumeration with
+ * implementation specific directions and memory types (e.g. from main memory to a device, etc.).
+ */
+typedef uint32_t odp_dma_direction_t;
+
+/** DMA transfer within the main memory */
+#define ODP_DMA_MAIN_TO_MAIN 0x1u
+
+/**
+ * DMA transfer type
+ *
+ * Transfer type defines how DMA transfers operate on data. Currently, only one transfer type is
+ * defined (#ODP_DMA_TYPE_COPY).
+ *
+ */
+typedef uint32_t odp_dma_transfer_type_t;
+
+/** Copy data
+ *
+ * Copy all data from source segment(s) to destination segment(s). A transfer may have a different
+ * number of source and destination segments, but the total length of all source segments must be
+ * equal to the total length of all destination segments. Segments must not
+ * point to overlapping memory addresses. There are no alignment requirements for
+ * segment addresses or lengths. Data transfer from source to destination may happen
+ * in any segment and byte order.
+ */
+#define ODP_DMA_TYPE_COPY 0x1u
+
+/**
+ * DMA transfer completion mode
+ *
+ * Transfer completion mode defines how transfer completion is reported to the application.
+ * Completion modes are: #ODP_DMA_COMPL_NONE, #ODP_DMA_COMPL_SYNC, #ODP_DMA_COMPL_EVENT, and
+ * #ODP_DMA_COMPL_POLL
+ *
+ * If not otherwise specified, a DMA transfer is complete when memory reads and writes are complete
+ * for all its segments, and writes are visible to all memory observers (threads and
+ * HW accelerators).
+ */
+typedef uint32_t odp_dma_compl_mode_t;
+
+/** No completion indication
+ *
+ * Application uses odp_dma_transfer_start() call to start a DMA transfer, but does
+ * not request a completion notification for it. This can be useful for example when application
+ * starts a burst of transfers, but requests a completion event only on the last one
+ * (none on others).
+ */
+#define ODP_DMA_COMPL_NONE 0x1u
+
+/** Synchronous transfer
+ *
+ * Application uses odp_dma_transfer() call for DMA transfers. Each call performs
+ * the requested transfer and returns when the transfer is complete.
+ */
+#define ODP_DMA_COMPL_SYNC 0x2u
+
+/** Asynchronous transfer with completion event
+ *
+ * Application uses odp_dma_transfer_start() call to start a DMA transfer. The
+ * transfer is complete when application receives the completion event.
+ */
+#define ODP_DMA_COMPL_EVENT 0x4u
+
+/** Asynchronous transfer with completion polling
+ *
+ * Application uses odp_dma_transfer_start() call to start a DMA transfer and uses
+ * odp_dma_transfer_done() call to check if the transfer has completed.
+ */
+#define ODP_DMA_COMPL_POLL 0x8u
+
+/**
+ * DMA transfer data format
+ */
+typedef enum {
+ /** Data format is raw memory address */
+ ODP_DMA_FORMAT_ADDR = 0,
+
+ /** Data format is odp_packet_t */
+ ODP_DMA_FORMAT_PACKET
+
+} odp_dma_data_format_t;
+
+/**
+ * DMA transfer ordering
+ *
+ * These options specify ordering of consecutive DMA transfers within a session. Transfer order
+ * is defined by the order of consecutive transfer (start) calls and the order of transfers
+ * within each multi-transfer call. Note that the ordering option also matters when using the
+ * odp_dma_transfer_multi() call, as ODP_DMA_ORDER_NONE allows the implementation to perform
+ * transfers in parallel.
+ *
+ * These options do not apply to data (segment or byte) processing order within a transfer.
+ * If two transfers read/write overlapping memory areas, an appropriate transfer ordering option
+ * (e.g. ODP_DMA_ORDER_ALL) needs to be used for correct operation.
+ */
+typedef enum {
+ /** No specific ordering between transfers
+ *
+ * This may result in the best performance (maximum implementation parallelism) as
+ * transfers may start and complete in any order. */
+ ODP_DMA_ORDER_NONE = 0,
+
+ /** Report transfer completions in order
+ *
+ * Transfers may be performed in any order, but transfer completions must be reported
+ * in the same order they were started within a session. This allows the application to
+ * start multiple transfers and wait only for the completion of the last one. */
+ ODP_DMA_ORDER_COMPL,
+
+ /** Perform all transfers in order
+ *
+ * Perform transfers and report their completions in the same order they were started
+ * within a session. This enables, for example, a subsequent transfer to read data
+ * written by a previous transfer. */
+ ODP_DMA_ORDER_ALL
+
+} odp_dma_transfer_order_t;
+
+/**
+ * DMA transfer multi-thread safeness
+ */
+typedef enum {
+ /** Multi-thread safe operation
+ *
+ * Multiple threads may perform DMA transfers concurrently on the same DMA session.
+ */
+ ODP_DMA_MT_SAFE = 0,
+
+ /** Application serializes operations
+ *
+ * Multiple threads may perform DMA transfers on the same DMA session, but the application
+ * serializes all transfer related calls (odp_dma_transfer(), odp_dma_transfer_start(),
+ * _start_multi(), _done() and _result()). Threads do not call any of these operations
+ * concurrently.
+ */
+ ODP_DMA_MT_SERIAL
+
+} odp_dma_mt_mode_t;
+
+/**
+ * DMA capabilities
+ */
+typedef struct odp_dma_capability_t {
+ /** Maximum number of DMA sessions
+ *
+ * The value of zero means that DMA offload is not available.
+ */
+ uint32_t max_sessions;
+
+ /** Maximum number of transfers per DMA session
+ *
+ * Maximum number of transfers that can be in-flight (started but not yet completed)
+ * per session. When this limit is reached, new transfer requests may not be accepted
+ * until some previously started transfers are complete. */
+ uint32_t max_transfers;
+
+ /** Maximum number of source segments in a single transfer */
+ uint32_t max_src_segs;
+
+ /** Maximum number of destination segments in a single transfer */
+ uint32_t max_dst_segs;
+
+ /** Maximum number of destination and source segments combined in a single transfer */
+ uint32_t max_segs;
+
+ /** Maximum segment length in bytes
+ *
+ * This is the maximum length of any source or destination segment. */
+ uint32_t max_seg_len;
+
+ /** Supported completion modes
+ *
+ * Each supported completion mode has a corresponding flag set in the mask.
+ * Synchronous transfer (ODP_DMA_COMPL_SYNC) is always supported.
+ */
+ odp_dma_compl_mode_t compl_mode_mask;
+
+ /**
+ * Scheduled queue support
+ *
+ * 0: Scheduled queues are not supported as DMA completion queues
+ * 1: Scheduled queues are supported as DMA completion queues
+ */
+ odp_bool_t queue_type_sched;
+
+ /**
+ * Plain queue support
+ *
+ * 0: Plain queues are not supported as DMA completion queues
+ * 1: Plain queues are supported as DMA completion queues
+ */
+ odp_bool_t queue_type_plain;
+
+ /** DMA completion event pool capabilities */
+ odp_dma_pool_capability_t pool;
+
+} odp_dma_capability_t;
+
+/**
+ * DMA session parameters
+ */
+typedef struct odp_dma_param_t {
+ /** Transfer direction
+ *
+ * The default value is ODP_DMA_MAIN_TO_MAIN.
+ */
+ odp_dma_direction_t direction;
+
+ /** Transfer type
+ *
+ * The default value is ODP_DMA_TYPE_COPY.
+ */
+ odp_dma_transfer_type_t type;
+
+ /** Transfer completion modes
+ *
+ * Specify the completion modes application will use within the session.
+ *
+ * Multiple modes may be selected, but it is implementation specific which combinations
+ * are supported. If an unsupported combination is requested, odp_dma_create() returns
+ * a failure. See odp_dma_capability_t::compl_mode_mask for the supported modes.
+ */
+ odp_dma_compl_mode_t compl_mode_mask;
+
+ /** Transfer operation multi-thread safeness
+ *
+ * The default value is ODP_DMA_MT_SAFE.
+ */
+ odp_dma_mt_mode_t mt_mode;
+
+ /** Transfer ordering
+ *
+ * The default value is ODP_DMA_ORDER_NONE.
+ */
+ odp_dma_transfer_order_t order;
+
+} odp_dma_param_t;
+
+/**
+ * DMA segment
+ */
+typedef struct odp_dma_seg_t {
+ /** Segment start */
+ union {
+ /** Segment start address in memory
+ *
+ * Defines segment start when data format is ODP_DMA_FORMAT_ADDR. Ignored with
+ * other data formats.
+ */
+ void *addr;
+
+ /** Segment start as an offset into a packet */
+ struct {
+ /** Packet handle
+ *
+ * Defines the packet when data format is ODP_DMA_FORMAT_PACKET. Ignored
+ * with other data formats. */
+ odp_packet_t packet;
+
+ /** Segment start offset into the packet
+ *
+ * Defines segment start when data format is ODP_DMA_FORMAT_PACKET.
+ * The offset is calculated from odp_packet_data() position, and the value
+ * must not exceed odp_packet_len().
+ */
+ uint32_t offset;
+ };
+ };
+
+ /** Segment length in bytes
+ *
+ * Defines segment length with all data formats. The maximum value is defined by
+ * max_seg_len capability. When data format is ODP_DMA_FORMAT_PACKET, the value must not
+ * exceed odp_packet_len() - 'offset'.
+ */
+ uint32_t len;
+
+ /** Segment hints
+ *
+ * Depending on the implementation, setting these hints may improve performance.
+ * Initialize all unused bits to zero.
+ */
+ union {
+ /** Segment hints bit field */
+ struct {
+ /** Allow full cache line access
+ *
+ * When set to 1, data on the same cache line as the destination segment
+ * is allowed to be overwritten. This hint is ignored on source segments.
+ */
+ uint16_t full_lines : 1;
+ };
+
+ /** All bits of the bit field structure
+ *
+ * This can be used to set/clear all bits, or to perform bitwise operations
+ * on those.
+ */
+ uint16_t all_hints;
+ };
+
+} odp_dma_seg_t;
+
+/**
+ * DMA transfer parameters
+ *
+ * These parameters define data sources and destinations for a DMA transfer. Capabilities specify
+ * the maximum number of segments and the maximum segment length that are supported.
+ *
+ * The selected data format specifies how segment structure fields are used. When data format is
+ * ODP_DMA_FORMAT_ADDR, set segment start address (odp_dma_seg_t::addr) and
+ * length (odp_dma_seg_t::len). When data format is ODP_DMA_FORMAT_PACKET, set packet
+ * handle (odp_dma_seg_t::packet), segment start offset (odp_dma_seg_t::offset) and length.
+ * If a DMA segment spans multiple packet segments, it counts as that many DMA segments.
+ * Take packet segmentation into account to ensure that the maximum DMA segment count
+ * capabilities are not exceeded.
+ */
+typedef struct odp_dma_transfer_param_t {
+ /** Source data format
+ *
+ * The default value is ODP_DMA_FORMAT_ADDR.
+ */
+ odp_dma_data_format_t src_format;
+
+ /** Destination data format
+ *
+ * The default value is ODP_DMA_FORMAT_ADDR.
+ */
+ odp_dma_data_format_t dst_format;
+
+ /** Number of source segments
+ *
+ * The default value is 1.
+ */
+ uint32_t num_src;
+
+ /** Number of destination segments
+ *
+ * The default value is 1.
+ */
+ uint32_t num_dst;
+
+ /** Table of source segments
+ *
+ * The table has 'num_src' entries. Data format is defined by 'src_format'.
+ */
+ odp_dma_seg_t *src_seg;
+
+ /** Table of destination segments
+ *
+ * The table has 'num_dst' entries. Data format is defined by 'dst_format'.
+ */
+ odp_dma_seg_t *dst_seg;
+
+} odp_dma_transfer_param_t;
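
A synchronous single-segment copy sketch using the defaults above (one source and one destination segment in ODP_DMA_FORMAT_ADDR format). It assumes odp_dma_transfer() returns a positive value on success and accepts a NULL result pointer:

#include <string.h>

static int copy_sync(odp_dma_t dma, void *dst, const void *src, uint32_t len)
{
	odp_dma_transfer_param_t trs_param;
	odp_dma_seg_t src_seg, dst_seg;

	/* Zero the segments, including the hint bits */
	memset(&src_seg, 0, sizeof(src_seg));
	memset(&dst_seg, 0, sizeof(dst_seg));
	src_seg.addr = (void *)(uintptr_t)src;
	src_seg.len = len;
	dst_seg.addr = dst;
	dst_seg.len = len;

	odp_dma_transfer_param_init(&trs_param);
	trs_param.src_seg = &src_seg;
	trs_param.dst_seg = &dst_seg;

	return odp_dma_transfer(dma, &trs_param, NULL) > 0 ? 0 : -1;
}
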
+
+/**
+ * DMA transfer completion parameters
+ */
+typedef struct odp_dma_compl_param_t {
+ /** Completion mode
+ *
+ * Select a completion mode: #ODP_DMA_COMPL_EVENT, #ODP_DMA_COMPL_POLL or
+ * #ODP_DMA_COMPL_NONE. The mode must match one of the modes selected in session creation
+ * parameters (odp_dma_param_t::compl_mode_mask).
+ *
+ * ODP_DMA_COMPL_NONE can be used to specify that completion indication is not requested.
+ * For example, the application may start a series of transfers and request completion
+ * indication only on the last one.
+ */
+ odp_dma_compl_mode_t compl_mode;
+
+ /** Transfer identifier
+ *
+ * The transfer identifier is used in ODP_DMA_COMPL_POLL mode. The application passes the
+ * same identifier here and to a later odp_dma_transfer_done() call to check the transfer
+ * completion status. Identifiers are allocated with odp_dma_transfer_id_alloc().
+ * The identifier of a completed transfer may be reused for another transfer.
+ */
+ odp_dma_transfer_id_t transfer_id;
+
+ /** Completion event
+ *
+ * When a transfer is started in ODP_DMA_COMPL_EVENT mode, this event is sent to
+ * the completion queue when the transfer is complete. The event type must be
+ * ODP_EVENT_DMA_COMPL. Use odp_dma_compl_result() to retrieve transfer results from
+ * the event.
+ */
+ odp_event_t event;
+
+ /** Completion queue
+ *
+ * The completion event is sent into this queue in ODP_DMA_COMPL_EVENT mode.
+ */
+ odp_queue_t queue;
+
+ /** User context pointer
+ *
+ * User defined context pointer which is copied to transfer results
+ * (@see odp_dma_result_t). The value does not need to represent a valid address
+ * (any intptr_t value is allowed).
+ *
+ * The default value is NULL.
+ */
+ void *user_ptr;
+
+} odp_dma_compl_param_t;
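
A poll-mode sketch built on these parameters (continuing the hypothetical 'dma' and 'trs_param' from the sketches above; odp_dma_transfer_done() is assumed to return 0 while the transfer is still in progress):

odp_dma_compl_param_t compl_param;
odp_dma_result_t result;
odp_dma_transfer_id_t id;

id = odp_dma_transfer_id_alloc(dma);

odp_dma_compl_param_init(&compl_param);
compl_param.compl_mode = ODP_DMA_COMPL_POLL;
compl_param.transfer_id = id;

if (odp_dma_transfer_start(dma, &trs_param, &compl_param) <= 0)
	/* handle error */;

while (odp_dma_transfer_done(dma, id, &result) == 0)
	; /* transfer still in progress */

odp_dma_transfer_id_free(dma, id);
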
+
+/** DMA transfer results */
+typedef struct odp_dma_result_t {
+ /** DMA transfer success
+ *
+ * true: DMA transfer was successful
+ * false: DMA transfer failed
+ */
+ odp_bool_t success;
+
+ /** User context pointer
+ *
+ * User defined context pointer value from transfer completion parameters
+ * (@see odp_dma_compl_param_t). The default value is NULL.
+ */
+ void *user_ptr;
+
+} odp_dma_result_t;
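
In ODP_DMA_COMPL_EVENT mode the results above are read from the completion event instead; a sketch, assuming completion events were allocated from an ODP_POOL_DMA_COMPL pool with odp_dma_compl_alloc() and passed in the completion parameters:

odp_event_t ev = odp_schedule(NULL, ODP_SCHED_WAIT);

if (odp_event_type(ev) == ODP_EVENT_DMA_COMPL) {
	odp_dma_compl_t compl_ev = odp_dma_compl_from_event(ev);
	odp_dma_result_t result;

	odp_dma_compl_result(compl_ev, &result);
	if (!result.success)
		/* transfer failed */;

	odp_dma_compl_free(compl_ev);
}
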
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#include <odp/visibility_end.h>
+#endif
+
diff --git a/include/odp/api/spec/event.h b/include/odp/api/spec/event.h
index 32fd37c29..dc99f35c7 100644
--- a/include/odp/api/spec/event.h
+++ b/include/odp/api/spec/event.h
@@ -63,6 +63,8 @@ extern "C" {
* - ODP_EVENT_PACKET_TX_COMPL
* - Packet Tx completion event (odp_packet_tx_compl_t) generated as a result of a Packet Tx
* completion.
+ * - ODP_EVENT_DMA_COMPL
+ * - DMA completion event (odp_dma_compl_t) indicates that a DMA transfer has finished
*/
/**
diff --git a/include/odp/api/spec/ipsec.h b/include/odp/api/spec/ipsec.h
index f66758341..11cbfe65d 100644
--- a/include/odp/api/spec/ipsec.h
+++ b/include/odp/api/spec/ipsec.h
@@ -364,6 +364,9 @@ typedef struct odp_ipsec_capability_t {
* be used for many SAs. */
uint32_t max_queues;
+ /** Support for returning completion packets as vectors */
+ odp_pktin_vector_capability_t vector;
+
/** Maximum anti-replay window size. */
uint32_t max_antireplay_ws;
@@ -455,6 +458,16 @@ typedef struct odp_ipsec_config_t {
*/
odp_bool_t stats_en;
+ /**
+ * Packet vector configuration for async and inline operations
+ *
+ * This packet vector configuration affects packets delivered to
+ * the application through the default queue and the SA destination
+ * queues. It does not affect packets delivered through pktio
+ * input queues.
+ */
+ odp_pktin_vector_config_t vector;
+
} odp_ipsec_config_t;
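
A configuration sketch for the new vector option ('vec_pool' is an assumed, previously created ODP_POOL_VECTOR pool; the size and timeout values are illustrative):

odp_ipsec_config_t config;

odp_ipsec_config_init(&config);
config.vector.enable = 1;
config.vector.pool = vec_pool;
config.vector.max_size = 32;
config.vector.max_tmo_ns = 100 * ODP_TIME_USEC_IN_NS;

if (odp_ipsec_config(&config))
	/* handle error */;
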
/**
@@ -1178,6 +1191,9 @@ void odp_ipsec_sa_param_init(odp_ipsec_sa_param_t *param);
*
* Create a new IPSEC SA according to the parameters.
*
+ * The parameter structure, as well as all key, address and other memory
+ * buffers pointed to by it, can be freed after the call.
+ *
* @param param IPSEC SA parameters
*
* @return IPSEC SA handle
diff --git a/include/odp/api/spec/packet_io.h b/include/odp/api/spec/packet_io.h
index 0d5938e9e..b2bad8621 100644
--- a/include/odp/api/spec/packet_io.h
+++ b/include/odp/api/spec/packet_io.h
@@ -181,7 +181,9 @@ typedef struct odp_pktin_vector_config_t {
*
* When true, packet input vector is enabled and configured with vector
* config parameters. Otherwise, packet input vector configuration
- * parameters are ignored.
+ * parameters are ignored. When vectors are enabled, packets may
+ * be delivered both as packet vector events and packet events.
+ * The default value is false.
*/
odp_bool_t enable;
@@ -267,7 +269,8 @@ typedef struct odp_pktin_queue_param_t {
* These are used for input queue creation in ODP_PKTIN_MODE_QUEUE
* or ODP_PKTIN_MODE_SCHED modes. Scheduler parameters are considered
* only in ODP_PKTIN_MODE_SCHED mode. Default values are defined in
- * odp_queue_param_t documentation.
+ * odp_queue_param_t documentation. The type field is ignored
+ * and the queue type is deduced from the pktio input mode.
* When classifier is enabled in odp_pktin_queue_config() this
* value is ignored. */
odp_queue_param_t queue_param;
diff --git a/include/odp/api/spec/pool_types.h b/include/odp/api/spec/pool_types.h
index 583a81b67..b0d5b37c9 100644
--- a/include/odp/api/spec/pool_types.h
+++ b/include/odp/api/spec/pool_types.h
@@ -19,6 +19,7 @@ extern "C" {
#endif
#include <odp/api/std_types.h>
+#include <odp/api/dma_types.h>
/** @addtogroup odp_pool
* @{
@@ -324,7 +325,10 @@ typedef enum odp_pool_type_t {
* Each vector holds an array of generic types of the same type.
* @see ODP_EVENT_PACKET_VECTOR
*/
- ODP_POOL_VECTOR = (ODP_POOL_TIMEOUT + 1)
+ ODP_POOL_VECTOR,
+
+ /** DMA completion event pool */
+ ODP_POOL_DMA_COMPL
} odp_pool_type_t;
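
DMA completion event pools are created with the DMA API rather than odp_pool_create(); a sketch (pool name and event count are illustrative):

odp_dma_pool_param_t pool_param;
odp_pool_t pool;

odp_dma_pool_param_init(&pool_param);
pool_param.num = 16; /* number of completion events */

pool = odp_dma_pool_create("dma-compl-pool", &pool_param);
if (pool == ODP_POOL_INVALID)
	/* handle error */;
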
@@ -710,6 +714,9 @@ typedef struct odp_pool_ext_param_t {
* Used to get information about a pool.
*/
typedef struct odp_pool_info_t {
+ /** Pool type */
+ odp_pool_type_t type;
+
/** Pool name */
const char *name;
@@ -727,6 +734,9 @@ typedef struct odp_pool_info_t {
/** Copy of external memory pool parameters. This is set when pool_ext is 1. */
odp_pool_ext_param_t pool_ext_param;
+
+ /** Copy of pool parameters when pool type is ODP_POOL_DMA_COMPL. */
+ odp_dma_pool_param_t dma_pool_param;
};
/** Additional info for packet pools */
diff --git a/include/odp/api/spec/random.h b/include/odp/api/spec/random.h
index 80f71c473..3cd297249 100644
--- a/include/odp/api/spec/random.h
+++ b/include/odp/api/spec/random.h
@@ -18,6 +18,8 @@
extern "C" {
#endif
+#include <odp/api/std_types.h>
+
/** @defgroup odp_random ODP RANDOM
* Random number generation.
* @{
diff --git a/include/odp/api/spec/shared_memory.h b/include/odp/api/spec/shared_memory.h
index 6d4066f15..6ba6f7fc2 100644
--- a/include/odp/api/spec/shared_memory.h
+++ b/include/odp/api/spec/shared_memory.h
@@ -1,5 +1,5 @@
-/* Copyright (c) 2019, Nokia
- * Copyright (c) 2013-2018, Linaro Limited
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2019-2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -92,12 +92,20 @@ extern "C" {
* When set, this flag guarantees that the reserved memory is accessible
* by both CPUs and HW accelerators of the device. This may require e.g. that
* the odp_shm_reserve() call configures the memory to be accessible through
- * an Input-Output Memory Management Unit (IOMMU). The reserve call will return
- * failure if such configuration is not supported.
+ * an Input-Output Memory Management Unit (IOMMU).
*/
#define ODP_SHM_HW_ACCESS 0x20
/**
+ * Don't use huge pages
+ *
+ * When set, this flag guarantees that the memory reserved by odp_shm_reserve()
+ * is not allocated from huge pages. This flag must not be combined with
+ * ODP_SHM_HP.
+ */
+#define ODP_SHM_NO_HP 0x40
+
+/**
* Shared memory block info
*/
typedef struct odp_shm_info_t {
@@ -139,6 +147,14 @@ typedef struct odp_shm_capability_t {
* available memory size. */
uint64_t max_align;
+ /** Supported shared memory flags
+ *
+	 * A bit mask of supported ODP_SHM_* flags. Depending on the
+	 * implementation, some flag combinations may not be supported. In that
+	 * case odp_shm_reserve() will fail.
+ */
+ uint32_t flags;
+
} odp_shm_capability_t;
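
The new 'flags' capability field can be checked before using ODP_SHM_NO_HP; a sketch (name, size and alignment are illustrative):

odp_shm_capability_t capa;
odp_shm_t shm = ODP_SHM_INVALID;

if (odp_shm_capability(&capa) == 0 && (capa.flags & ODP_SHM_NO_HP))
	shm = odp_shm_reserve("no-hp-region", 4096, ODP_CACHE_LINE_SIZE,
			      ODP_SHM_NO_HP);
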
/**
diff --git a/include/odp/api/spec/std_types.h b/include/odp/api/spec/std_types.h
index 41f436065..accbd81d8 100644
--- a/include/odp/api/spec/std_types.h
+++ b/include/odp/api/spec/std_types.h
@@ -134,6 +134,9 @@ typedef union odp_feature_t {
/** Crypto APIs, e.g., odp_crypto_xxx() */
uint32_t crypto:1;
+ /** DMA APIs, e.g., odp_dma_xxx() */
+ uint32_t dma:1;
+
/** IPsec APIs, e.g., odp_ipsec_xxx() */
uint32_t ipsec:1;
diff --git a/include/odp/api/spec/timer.h b/include/odp/api/spec/timer.h
index 319f5e029..c16ac6d64 100644
--- a/include/odp/api/spec/timer.h
+++ b/include/odp/api/spec/timer.h
@@ -20,258 +20,14 @@
extern "C" {
#endif
-/** @defgroup odp_timer ODP TIMER
- * Timer generating timeout events.
- * @{
- */
-
-/**
- * @typedef odp_timer_pool_t
- * ODP timer pool handle
- */
-
-/**
- * @def ODP_TIMER_POOL_INVALID
- * Invalid timer pool handle
- */
-
-/**
- * Clock sources for timer pools
- *
- * ODP_CLOCK_DEFAULT is the default clock source and it is supported always. It is implementation
- * defined which other clock sources are supported. See from implementation documentation how the
- * supported clock sources are mapped into these enumerations.
- */
-typedef enum {
- /** Clock source number 0 */
- ODP_CLOCK_SRC_0,
-
- /** Clock source number 1 */
- ODP_CLOCK_SRC_1,
-
- /** Clock source number 2 */
- ODP_CLOCK_SRC_2,
-
- /** Clock source number 3 */
- ODP_CLOCK_SRC_3,
-
- /** Clock source number 4 */
- ODP_CLOCK_SRC_4,
-
- /** Clock source number 5 */
- ODP_CLOCK_SRC_5,
-
- /** Number of clock source enumerations */
- ODP_CLOCK_NUM_SRC
-
-} odp_timer_clk_src_t;
-
-/** The default clock source */
-#define ODP_CLOCK_DEFAULT ODP_CLOCK_SRC_0
-
-/** For backwards compatibility, ODP_CLOCK_CPU is synonym of ODP_CLOCK_DEFAULT.
- * This will be deprecated in the future. */
-#define ODP_CLOCK_CPU ODP_CLOCK_DEFAULT
-
-/** For backwards compatibility, ODP_CLOCK_EXT is synonym of ODP_CLOCK_SRC_1.
- * This will be deprecated in the future. */
-#define ODP_CLOCK_EXT ODP_CLOCK_SRC_1
-
-/**
- * @typedef odp_timer_t
- * ODP timer handle
- */
-
-/**
- * @def ODP_TIMER_INVALID
- * Invalid timer handle
- */
-
-/**
- * @typedef odp_timeout_t
- * ODP timeout handle
- */
-
-/**
- * @def ODP_TIMEOUT_INVALID
- * Invalid timeout handle
- */
-
-/**
- * Return values of timer set calls.
- */
-typedef enum {
- /** Timer set operation succeeded */
- ODP_TIMER_SUCCESS = 0,
+#include <odp/api/timer_types.h>
+#include <odp/api/event.h>
+#include <odp/api/queue_types.h>
+#include <odp/api/pool.h>
- /** Timer set operation failed because expiration time is too near to
- * the current time. */
- ODP_TIMER_TOO_NEAR = -1,
-
- /** Timer set operation failed because expiration time is too far from
- * the current time. */
- ODP_TIMER_TOO_FAR = -2,
-
- /** Timer set operation failed */
- ODP_TIMER_FAIL = -3
-
-} odp_timer_set_t;
-
-/** For backwards compatibility, ODP_TIMER_TOOEARLY is synonym of ODP_TIMER_TOO_NEAR.
- * This will be deprecated in the future. */
-#define ODP_TIMER_TOOEARLY ODP_TIMER_TOO_NEAR
-
-/** For backwards compatibility, ODP_TIMER_TOOLATE is synonym of ODP_TIMER_TOO_FAR.
- * This will be deprecated in the future. */
-#define ODP_TIMER_TOOLATE ODP_TIMER_TOO_FAR
-
-/** For backwards compatibility, ODP_TIMER_NOEVENT is synonym of ODP_TIMER_FAIL.
- * This will be deprecated in the future. */
-#define ODP_TIMER_NOEVENT ODP_TIMER_FAIL
-
-/**
- * @def ODP_TIMER_POOL_NAME_LEN
- * Maximum timer pool name length in chars including null char
- */
-
-/**
- * Timer pool parameters
- */
-typedef struct {
- /** Timeout resolution in nanoseconds. Timer pool must serve timeouts
- * with this or higher resolution. The minimum valid value (highest
- * resolution) is defined by timer resolution capability. When this
- * parameter is used, set 'res_hz' to zero. */
- uint64_t res_ns;
-
- /** Timeout resolution in hertz. This may be used to specify the highest
- * required resolution in hertz instead of nanoseconds. When this
- * parameter is used, set 'res_ns' to zero. */
- uint64_t res_hz;
-
- /** Minimum relative timeout in nanoseconds. All requested timeouts
- * will be at least this many nanoseconds after the current
- * time of the timer pool. Timer set functions return an error, if too
- * short timeout was requested. The value may be also smaller than
- * the requested resolution. */
- uint64_t min_tmo;
-
- /** Maximum relative timeout in nanoseconds. All requested timeouts
- * will be at most this many nanoseconds after the current
- * time of the timer pool. Timer set functions return an error, if too
- * long timeout was requested. */
- uint64_t max_tmo;
-
- /** Number of timers needed. Application will create in maximum this
- * many concurrent timers from the timer pool. */
- uint32_t num_timers;
-
- /** Thread private timer pool. When zero, multiple thread may use the
- * timer pool concurrently. When non-zero, only single thread uses the
- * timer pool (concurrently). */
- int priv;
-
- /** Clock source for timers */
- odp_timer_clk_src_t clk_src;
-
-} odp_timer_pool_param_t;
-
-/**
- * Timer resolution capability
+/** @addtogroup odp_timer
+ * @{
*/
-typedef struct {
- /** Timeout resolution in nanoseconds */
- uint64_t res_ns;
-
- /** Timeout resolution in hertz */
- uint64_t res_hz;
-
- /** Minimum relative timeout in nanoseconds */
- uint64_t min_tmo;
-
- /** Maximum relative timeout in nanoseconds */
- uint64_t max_tmo;
-
-} odp_timer_res_capability_t;
-
-/**
- * Timer capability
- */
-typedef struct {
- /** Maximum number of timer pools over all clock sources
- *
- * The total number of timer pools that can be created combining
- * different clock sources.
- */
- uint32_t max_pools_combined;
-
- /** Maximum number of timer pools for the requested clock source */
- uint32_t max_pools;
-
- /** Maximum number of timers in a pool
- *
- * The value of zero means that limited only by the available
- * memory size for the pool. */
- uint32_t max_timers;
-
- /** Highest timer resolution in nanoseconds.
- *
- * This defines the highest resolution supported by a timer.
- * It's the minimum valid value for 'res_ns' timer pool
- * parameter.
- *
- * This value is equal to 'max_res.res_ns' capability.
- */
- uint64_t highest_res_ns;
-
- /**
- * Maximum resolution
- *
- * This defines the highest resolution supported by a timer, with
- * limits to min/max timeout values. The highest resolution for a timer
- * pool is defined by 'max_res.res_ns' in nanoseconds and
- * 'max_res.res_hz' in hertz.
- * When this resolution is used:
- * - 'min_tmo' parameter value must be in minimum 'max_res.min_tmo'
- * - 'max_tmo' parameter value must be in maximum 'max_res.max_tmo'
- */
- odp_timer_res_capability_t max_res;
-
- /**
- * Maximum timeout length
- *
- * This defines the maximum relative timeout value supported by a timer,
- * with limits to min timeout and max resolution values. The maximum
- * value for 'max_tmo' timer pool parameter is defined by
- * 'max_tmo.max_tmo'. When this max timeout value is used:
- * - 'min_tmo' parameter value must be in minimum 'max_tmo.min_tmo'
- * - 'res_ns' parameter value must be in minimum 'max_tmo.res_ns' or
- * - 'res_hz' parameter value must be in maximum 'max_tmo.res_hz'
- */
- odp_timer_res_capability_t max_tmo;
-
- /**
- * Scheduled queue destination support
- *
- * This defines whether schedule queues are supported as timeout
- * destination queues.
- * 0: Scheduled queues are not supported as timeout destination queues
- * 1: Scheduled queues are supported as timeout destination queues
- * @see odp_timer_alloc()
- */
- odp_bool_t queue_type_sched;
-
- /**
- * Plain queue destination support
- *
- * This defines whether plain queues are supported as timeout
- * destination queues.
- * 0: Plain queues are not supported as timeout destination queues
- * 1: Plain queues are supported as timeout destination queues
- * @see odp_timer_alloc()
- */
- odp_bool_t queue_type_plain;
-} odp_timer_capability_t;
/**
* Query timer capabilities per clock source
@@ -311,9 +67,19 @@ int odp_timer_res_capability(odp_timer_clk_src_t clk_src,
odp_timer_res_capability_t *res_capa);
/**
+ * Initialize timer pool parameters
+ *
+ * Initialize all fields of an odp_timer_pool_param_t structure to their default values.
+ *
+ * @param[out] param Pointer to the odp_timer_pool_param_t structure to be initialized
+ */
+void odp_timer_pool_param_init(odp_timer_pool_param_t *param);
+
+/**
* Create a timer pool
*
- * The use of pool name is optional. Unique names are not required.
+ * The use of pool name is optional. Unique names are not required. Use odp_timer_pool_param_init()
+ * to initialize timer pool parameters to their default values.
*
* @param name Name of the timer pool or NULL. Maximum string length is
* ODP_TIMER_POOL_NAME_LEN.
@@ -375,75 +141,6 @@ uint64_t odp_timer_ns_to_tick(odp_timer_pool_t timer_pool, uint64_t ns);
uint64_t odp_timer_current_tick(odp_timer_pool_t timer_pool);
/**
- * Timer tick information
- */
-typedef struct odp_timer_tick_info_t {
- /**
- * Timer tick frequency in hertz
- *
- * Timer tick frequency expressed as a fractional number. The integer part contains
- * full hertz. The fraction part (numerator / denominator) contains parts of
- * a hertz to be added with the integer.
- *
- * For example, a timer tick frequency of 333 333 and 1/3 Hz could be presented with
- * these values: integer = 333 333, numer = 1, denom = 3. Implementation may choose numer
- * and denom values freely.
- */
- odp_fract_u64_t freq;
-
- /**
- * One timer tick in nanoseconds
- *
- * Nanoseconds per tick is expressed as a fractional number. The integer part contains
- * full nanoseconds. The fraction part (numerator / denominator) contains parts of
- * a nanosecond to be added with the integer.
- *
- * For example, a timer tick period of 3.125 nanoseconds (320MHz) could be presented with
- * these values: integer = 3, numer = 125 000 000, denom = 1 000 000 000. Implementation
- * may choose numer and denom values freely.
- */
- odp_fract_u64_t nsec;
-
- /**
- * One timer tick in source clock cycles
- *
- * The clock cycle count is expressed as a fractional number. The integer part contains
- * full clock cycles. The fraction part (numerator / denominator) contains parts of
- * a clock cycle to be added with the integer.
- *
- * For example, a timer tick period of 42 and 1/3 source clock cycles could be presented
- * with these values: integer = 42, numer = 1, denom = 3. Implementation may choose numer
- * and denom values freely.
- *
- * The value is zero, when there is no direct connection between tick and the source
- * clock signal.
- */
- odp_fract_u64_t clk_cycle;
-
-} odp_timer_tick_info_t;
-
-/**
- * ODP timer pool information and configuration
- */
-typedef struct {
- /** Parameters specified at creation */
- odp_timer_pool_param_t param;
-
- /** Number of currently allocated timers */
- uint32_t cur_timers;
-
- /** High watermark of allocated timers */
- uint32_t hwm_timers;
-
- /** Name of timer pool */
- const char *name;
-
- /** Timer pool tick information */
- odp_timer_tick_info_t tick_info;
-
-} odp_timer_pool_info_t;
-
-/**
* Query timer pool configuration and current state
*
* @param timer_pool Timer pool
diff --git a/include/odp/api/spec/timer_types.h b/include/odp/api/spec/timer_types.h
new file mode 100644
index 000000000..2fad1b372
--- /dev/null
+++ b/include/odp/api/spec/timer_types.h
@@ -0,0 +1,359 @@
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2019-2021, Nokia
+ *
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP timer API type definitions
+ */
+
+#ifndef ODP_API_SPEC_TIMER_TYPES_H_
+#define ODP_API_SPEC_TIMER_TYPES_H_
+#include <odp/visibility_begin.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/std_types.h>
+
+/** @defgroup odp_timer ODP TIMER
+ * Timer generating timeout events.
+ * @{
+ */
+
+/**
+ * @typedef odp_timer_pool_t
+ * ODP timer pool handle
+ */
+
+/**
+ * @def ODP_TIMER_POOL_INVALID
+ * Invalid timer pool handle
+ */
+
+/**
+ * @typedef odp_timer_t
+ * ODP timer handle
+ */
+
+/**
+ * @def ODP_TIMER_INVALID
+ * Invalid timer handle
+ */
+
+/**
+ * @typedef odp_timeout_t
+ * ODP timeout handle
+ */
+
+/**
+ * @def ODP_TIMEOUT_INVALID
+ * Invalid timeout handle
+ */
+
+/**
+ * @def ODP_TIMER_POOL_NAME_LEN
+ * Maximum timer pool name length in chars including null char
+ */
+
+/**
+ * Timer resolution capability
+ */
+typedef struct {
+ /** Timeout resolution in nanoseconds */
+ uint64_t res_ns;
+
+ /** Timeout resolution in hertz */
+ uint64_t res_hz;
+
+ /** Minimum relative timeout in nanoseconds */
+ uint64_t min_tmo;
+
+ /** Maximum relative timeout in nanoseconds */
+ uint64_t max_tmo;
+
+} odp_timer_res_capability_t;
+
+/**
+ * Timer capability
+ */
+typedef struct {
+ /** Maximum number of timer pools over all clock sources
+ *
+ * The total number of timer pools that can be created combining
+ * different clock sources.
+ */
+ uint32_t max_pools_combined;
+
+ /** Maximum number of timer pools for the requested clock source */
+ uint32_t max_pools;
+
+ /** Maximum number of timers in a pool
+ *
+	 * The value of zero means that the number of timers is limited only
+	 * by the available memory size for the pool. */
+ uint32_t max_timers;
+
+ /** Highest timer resolution in nanoseconds.
+ *
+ * This defines the highest resolution supported by a timer.
+	 * It is the minimum valid value for the 'res_ns' timer pool
+	 * parameter.
+ *
+ * This value is equal to 'max_res.res_ns' capability.
+ */
+ uint64_t highest_res_ns;
+
+ /**
+ * Maximum resolution
+ *
+ * This defines the highest resolution supported by a timer, with
+ * limits to min/max timeout values. The highest resolution for a timer
+ * pool is defined by 'max_res.res_ns' in nanoseconds and
+ * 'max_res.res_hz' in hertz.
+ * When this resolution is used:
+	 * - 'min_tmo' parameter value must be at least 'max_res.min_tmo'
+	 * - 'max_tmo' parameter value must be at most 'max_res.max_tmo'
+ */
+ odp_timer_res_capability_t max_res;
+
+ /**
+ * Maximum timeout length
+ *
+ * This defines the maximum relative timeout value supported by a timer,
+ * with limits to min timeout and max resolution values. The maximum
+ * value for 'max_tmo' timer pool parameter is defined by
+ * 'max_tmo.max_tmo'. When this max timeout value is used:
+	 * - 'min_tmo' parameter value must be at least 'max_tmo.min_tmo'
+	 * - 'res_ns' parameter value must be at least 'max_tmo.res_ns' or
+	 * - 'res_hz' parameter value must be at most 'max_tmo.res_hz'
+ */
+ odp_timer_res_capability_t max_tmo;
+
+ /**
+ * Scheduled queue destination support
+ *
+	 * This defines whether scheduled queues are supported as timeout
+ * destination queues.
+ * 0: Scheduled queues are not supported as timeout destination queues
+ * 1: Scheduled queues are supported as timeout destination queues
+ * @see odp_timer_alloc()
+ */
+ odp_bool_t queue_type_sched;
+
+ /**
+ * Plain queue destination support
+ *
+ * This defines whether plain queues are supported as timeout
+ * destination queues.
+ * 0: Plain queues are not supported as timeout destination queues
+ * 1: Plain queues are supported as timeout destination queues
+ * @see odp_timer_alloc()
+ */
+ odp_bool_t queue_type_plain;
+
+} odp_timer_capability_t;
+
+/**
+ * Clock sources for timer pools
+ *
+ * ODP_CLOCK_DEFAULT is the default clock source and is always supported. It is implementation
+ * defined which other clock sources are supported. See the implementation documentation for how
+ * the supported clock sources are mapped to these enumerations.
+ */
+typedef enum {
+ /** Clock source number 0 */
+ ODP_CLOCK_SRC_0,
+
+ /** Clock source number 1 */
+ ODP_CLOCK_SRC_1,
+
+ /** Clock source number 2 */
+ ODP_CLOCK_SRC_2,
+
+ /** Clock source number 3 */
+ ODP_CLOCK_SRC_3,
+
+ /** Clock source number 4 */
+ ODP_CLOCK_SRC_4,
+
+ /** Clock source number 5 */
+ ODP_CLOCK_SRC_5,
+
+ /** Number of clock source enumerations */
+ ODP_CLOCK_NUM_SRC
+
+} odp_timer_clk_src_t;
+
+/** The default clock source */
+#define ODP_CLOCK_DEFAULT ODP_CLOCK_SRC_0
+
+/** For backwards compatibility, ODP_CLOCK_CPU is a synonym for ODP_CLOCK_DEFAULT.
+ * This will be deprecated in the future. */
+#define ODP_CLOCK_CPU ODP_CLOCK_DEFAULT
+
+/** For backwards compatibility, ODP_CLOCK_EXT is a synonym for ODP_CLOCK_SRC_1.
+ * This will be deprecated in the future. */
+#define ODP_CLOCK_EXT ODP_CLOCK_SRC_1
+
+/**
+ * Timer pool parameters
+ */
+typedef struct {
+ /** Timeout resolution in nanoseconds. Timer pool must serve timeouts
+ * with this or higher resolution. The minimum valid value (highest
+ * resolution) is defined by timer resolution capability. When this
+ * parameter is used, set 'res_hz' to zero. The default value is zero. */
+ uint64_t res_ns;
+
+ /** Timeout resolution in hertz. This may be used to specify the highest
+ * required resolution in hertz instead of nanoseconds. When this
+ * parameter is used, set 'res_ns' to zero. The default value is zero. */
+ uint64_t res_hz;
+
+ /** Minimum relative timeout in nanoseconds. All requested timeouts
+ * will be at least this many nanoseconds after the current
+	 * time of the timer pool. Timer set functions return an error if the
+	 * requested timeout is too short. The value may also be smaller than
+	 * the requested resolution. The default value is zero. */
+ uint64_t min_tmo;
+
+ /** Maximum relative timeout in nanoseconds. All requested timeouts
+ * will be at most this many nanoseconds after the current
+	 * time of the timer pool. Timer set functions return an error if the
+	 * requested timeout is too long. */
+ uint64_t max_tmo;
+
+	/** Number of timers needed. The application will create at most this
+	 * many concurrent timers from the timer pool. */
+ uint32_t num_timers;
+
+	/** Thread private timer pool. When zero, multiple threads may use the
+	 * timer pool concurrently. When non-zero, only a single thread uses the
+	 * timer pool. The default value is zero. */
+ int priv;
+
+ /** Clock source for timers
+ *
+ * The default value is ODP_CLOCK_DEFAULT. */
+ odp_timer_clk_src_t clk_src;
+
+} odp_timer_pool_param_t;
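
A creation sketch using the new odp_timer_pool_param_init() and the parameters above (pool name, timeout range and timer count are illustrative):

odp_timer_pool_param_t tp_param;
odp_timer_pool_t tp;

odp_timer_pool_param_init(&tp_param);
tp_param.res_ns = ODP_TIME_MSEC_IN_NS;
tp_param.min_tmo = 10 * ODP_TIME_MSEC_IN_NS;
tp_param.max_tmo = 10 * ODP_TIME_SEC_IN_NS;
tp_param.num_timers = 1024;

tp = odp_timer_pool_create("tmo-pool", &tp_param);
if (tp == ODP_TIMER_POOL_INVALID)
	/* handle error */;

odp_timer_pool_start();
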
+
+/**
+ * Return values of timer set calls.
+ */
+typedef enum {
+ /** Timer set operation succeeded */
+ ODP_TIMER_SUCCESS = 0,
+
+ /** Timer set operation failed because expiration time is too near to
+ * the current time. */
+ ODP_TIMER_TOO_NEAR = -1,
+
+ /** Timer set operation failed because expiration time is too far from
+ * the current time. */
+ ODP_TIMER_TOO_FAR = -2,
+
+ /** Timer set operation failed */
+ ODP_TIMER_FAIL = -3
+
+} odp_timer_set_t;
+
+/** For backwards compatibility, ODP_TIMER_TOOEARLY is a synonym for ODP_TIMER_TOO_NEAR.
+ * This will be deprecated in the future. */
+#define ODP_TIMER_TOOEARLY ODP_TIMER_TOO_NEAR
+
+/** For backwards compatibility, ODP_TIMER_TOOLATE is a synonym for ODP_TIMER_TOO_FAR.
+ * This will be deprecated in the future. */
+#define ODP_TIMER_TOOLATE ODP_TIMER_TOO_FAR
+
+/** For backwards compatibility, ODP_TIMER_NOEVENT is a synonym for ODP_TIMER_FAIL.
+ * This will be deprecated in the future. */
+#define ODP_TIMER_NOEVENT ODP_TIMER_FAIL
+
+/**
+ * Timer tick information
+ */
+typedef struct odp_timer_tick_info_t {
+ /**
+ * Timer tick frequency in hertz
+ *
+ * Timer tick frequency expressed as a fractional number. The integer part contains
+	 * full hertz. The fraction part (numerator / denominator) contains parts of
+	 * a hertz to be added to the integer part.
+ *
+ * For example, a timer tick frequency of 333 333 and 1/3 Hz could be presented with
+ * these values: integer = 333 333, numer = 1, denom = 3. Implementation may choose numer
+ * and denom values freely.
+ */
+ odp_fract_u64_t freq;
+
+ /**
+ * One timer tick in nanoseconds
+ *
+ * Nanoseconds per tick is expressed as a fractional number. The integer part contains
+	 * full nanoseconds. The fraction part (numerator / denominator) contains parts of
+	 * a nanosecond to be added to the integer part.
+	 *
+	 * For example, a timer tick period of 3.125 nanoseconds (320 MHz) could be presented with
+ * these values: integer = 3, numer = 125 000 000, denom = 1 000 000 000. Implementation
+ * may choose numer and denom values freely.
+ */
+ odp_fract_u64_t nsec;
+
+ /**
+ * One timer tick in source clock cycles
+ *
+ * The clock cycle count is expressed as a fractional number. The integer part contains
+	 * full clock cycles. The fraction part (numerator / denominator) contains parts of
+	 * a clock cycle to be added to the integer part.
+ *
+ * For example, a timer tick period of 42 and 1/3 source clock cycles could be presented
+ * with these values: integer = 42, numer = 1, denom = 3. Implementation may choose numer
+ * and denom values freely.
+ *
+	 * The value is zero when there is no direct connection between tick and the source
+ * clock signal.
+ */
+ odp_fract_u64_t clk_cycle;
+
+} odp_timer_tick_info_t;
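
Each fraction above converts to a plain floating point value as integer + numer / denom; a conversion sketch (the std_types helper odp_fract_u64_to_dbl() should perform the same conversion, where available):

static double fract_to_dbl(const odp_fract_u64_t *fract)
{
	double value = (double)fract->integer;

	/* Zero denominator means no fraction part */
	if (fract->denom)
		value += (double)fract->numer / (double)fract->denom;

	return value;
}

/* E.g. tick frequency in hertz: fract_to_dbl(&pool_info.tick_info.freq) */
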
+
+/**
+ * ODP timer pool information and configuration
+ */
+typedef struct {
+ /** Parameters specified at creation */
+ odp_timer_pool_param_t param;
+
+ /** Number of currently allocated timers */
+ uint32_t cur_timers;
+
+ /** High watermark of allocated timers */
+ uint32_t hwm_timers;
+
+ /** Name of timer pool */
+ const char *name;
+
+ /** Timer pool tick information */
+ odp_timer_tick_info_t tick_info;
+
+} odp_timer_pool_info_t;
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#include <odp/visibility_end.h>
+#endif
diff --git a/include/odp/api/timer.h b/include/odp/api/timer.h
index 7c0dd95b6..3041594f5 100644
--- a/include/odp/api/timer.h
+++ b/include/odp/api/timer.h
@@ -17,12 +17,6 @@
extern "C" {
#endif
-#include <odp/api/std_types.h>
-#include <odp/api/abi/event.h>
-#include <odp/api/abi/pool.h>
-#include <odp/api/abi/queue_types.h>
-#include <odp/api/abi/timer.h>
-
#include <odp/api/spec/timer.h>
#ifdef __cplusplus
diff --git a/include/odp/api/timer_types.h b/include/odp/api/timer_types.h
new file mode 100644
index 000000000..8d3385594
--- /dev/null
+++ b/include/odp/api/timer_types.h
@@ -0,0 +1,28 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP timer service
+ */
+
+#ifndef ODP_API_TIMER_TYPES_H_
+#define ODP_API_TIMER_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/abi/timer_types.h>
+
+#include <odp/api/spec/timer_types.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/include/odp/arch/arm32-linux/odp/api/abi/dma_types.h b/include/odp/arch/arm32-linux/odp/api/abi/dma_types.h
new file mode 100644
index 000000000..76ccd895d
--- /dev/null
+++ b/include/odp/arch/arm32-linux/odp/api/abi/dma_types.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/dma_types.h>
diff --git a/include/odp/arch/mips64-linux/odp/api/abi/timer.h b/include/odp/arch/arm32-linux/odp/api/abi/timer_types.h
index 1a5b5bb04..cd384c2bc 100644
--- a/include/odp/arch/mips64-linux/odp/api/abi/timer.h
+++ b/include/odp/arch/arm32-linux/odp/api/abi/timer_types.h
@@ -4,4 +4,4 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <odp/api/abi-default/timer.h>
+#include <odp/api/abi-default/timer_types.h>
diff --git a/include/odp/arch/arm64-linux/odp/api/abi/dma_types.h b/include/odp/arch/arm64-linux/odp/api/abi/dma_types.h
new file mode 100644
index 000000000..76ccd895d
--- /dev/null
+++ b/include/odp/arch/arm64-linux/odp/api/abi/dma_types.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/dma_types.h>
diff --git a/include/odp/arch/arm32-linux/odp/api/abi/timer.h b/include/odp/arch/arm64-linux/odp/api/abi/timer_types.h
index 1a5b5bb04..cd384c2bc 100644
--- a/include/odp/arch/arm32-linux/odp/api/abi/timer.h
+++ b/include/odp/arch/arm64-linux/odp/api/abi/timer_types.h
@@ -4,4 +4,4 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <odp/api/abi-default/timer.h>
+#include <odp/api/abi-default/timer_types.h>
diff --git a/include/odp/arch/default-linux/odp/api/abi/dma_types.h b/include/odp/arch/default-linux/odp/api/abi/dma_types.h
new file mode 100644
index 000000000..76ccd895d
--- /dev/null
+++ b/include/odp/arch/default-linux/odp/api/abi/dma_types.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/dma_types.h>
diff --git a/include/odp/arch/default-linux/odp/api/abi/timer.h b/include/odp/arch/default-linux/odp/api/abi/timer_types.h
index 7ba2115ac..3050e4a61 100644
--- a/include/odp/arch/default-linux/odp/api/abi/timer.h
+++ b/include/odp/arch/default-linux/odp/api/abi/timer_types.h
@@ -4,4 +4,4 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <odp/api/abi-default/timer.h>
+#include <odp/api/abi-default/timer_types.h>
diff --git a/include/odp/arch/mips64-linux/odp/api/abi/dma_types.h b/include/odp/arch/mips64-linux/odp/api/abi/dma_types.h
new file mode 100644
index 000000000..76ccd895d
--- /dev/null
+++ b/include/odp/arch/mips64-linux/odp/api/abi/dma_types.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/dma_types.h>
diff --git a/include/odp/arch/arm64-linux/odp/api/abi/timer.h b/include/odp/arch/mips64-linux/odp/api/abi/timer_types.h
index 1a5b5bb04..cd384c2bc 100644
--- a/include/odp/arch/arm64-linux/odp/api/abi/timer.h
+++ b/include/odp/arch/mips64-linux/odp/api/abi/timer_types.h
@@ -4,4 +4,4 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <odp/api/abi-default/timer.h>
+#include <odp/api/abi-default/timer_types.h>
diff --git a/include/odp/arch/power64-linux/odp/api/abi/dma_types.h b/include/odp/arch/power64-linux/odp/api/abi/dma_types.h
new file mode 100644
index 000000000..76ccd895d
--- /dev/null
+++ b/include/odp/arch/power64-linux/odp/api/abi/dma_types.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/dma_types.h>
diff --git a/include/odp/arch/power64-linux/odp/api/abi/timer.h b/include/odp/arch/power64-linux/odp/api/abi/timer_types.h
index 1a5b5bb04..cd384c2bc 100644
--- a/include/odp/arch/power64-linux/odp/api/abi/timer.h
+++ b/include/odp/arch/power64-linux/odp/api/abi/timer_types.h
@@ -4,4 +4,4 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <odp/api/abi-default/timer.h>
+#include <odp/api/abi-default/timer_types.h>
diff --git a/include/odp/arch/x86_32-linux/odp/api/abi/dma_types.h b/include/odp/arch/x86_32-linux/odp/api/abi/dma_types.h
new file mode 100644
index 000000000..76ccd895d
--- /dev/null
+++ b/include/odp/arch/x86_32-linux/odp/api/abi/dma_types.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/dma_types.h>
diff --git a/include/odp/arch/x86_32-linux/odp/api/abi/timer.h b/include/odp/arch/x86_32-linux/odp/api/abi/timer.h
deleted file mode 100644
index 1a5b5bb04..000000000
--- a/include/odp/arch/x86_32-linux/odp/api/abi/timer.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* Copyright (c) 2017-2018, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp/api/abi-default/timer.h>
diff --git a/include/odp/arch/x86_32-linux/odp/api/abi/timer_types.h b/include/odp/arch/x86_32-linux/odp/api/abi/timer_types.h
new file mode 100644
index 000000000..cd384c2bc
--- /dev/null
+++ b/include/odp/arch/x86_32-linux/odp/api/abi/timer_types.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/timer_types.h>
diff --git a/include/odp/arch/x86_64-linux/odp/api/abi/dma_types.h b/include/odp/arch/x86_64-linux/odp/api/abi/dma_types.h
new file mode 100644
index 000000000..76ccd895d
--- /dev/null
+++ b/include/odp/arch/x86_64-linux/odp/api/abi/dma_types.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/dma_types.h>
diff --git a/include/odp/arch/x86_64-linux/odp/api/abi/timer.h b/include/odp/arch/x86_64-linux/odp/api/abi/timer.h
deleted file mode 100644
index 1a5b5bb04..000000000
--- a/include/odp/arch/x86_64-linux/odp/api/abi/timer.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* Copyright (c) 2017-2018, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp/api/abi-default/timer.h>
diff --git a/include/odp/arch/x86_64-linux/odp/api/abi/timer_types.h b/include/odp/arch/x86_64-linux/odp/api/abi/timer_types.h
new file mode 100644
index 000000000..cd384c2bc
--- /dev/null
+++ b/include/odp/arch/x86_64-linux/odp/api/abi/timer_types.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/timer_types.h>
diff --git a/include/odp/autoheader_internal.h.in b/include/odp/autoheader_internal.h.in
index b9766e33e..952675fb5 100644
--- a/include/odp/autoheader_internal.h.in
+++ b/include/odp/autoheader_internal.h.in
@@ -29,4 +29,7 @@
/* Define to 1 to enable OpenSSL support */
#undef _ODP_OPENSSL
+/* Define to 1 to enable OpenSSL random data */
+#undef _ODP_OPENSSL_RAND
+
#endif
diff --git a/include/odp_api.h b/include/odp_api.h
index 8b129f939..00d2c243a 100644
--- a/include/odp_api.h
+++ b/include/odp_api.h
@@ -63,6 +63,7 @@ extern "C" {
#include <odp/api/ipsec.h>
#include <odp/api/stash.h>
#include <odp/api/reassembly.h>
+#include <odp/api/dma.h>
#ifdef __cplusplus
}
diff --git a/platform/linux-dpdk/Makefile.am b/platform/linux-dpdk/Makefile.am
index da1775993..2f257044e 100644
--- a/platform/linux-dpdk/Makefile.am
+++ b/platform/linux-dpdk/Makefile.am
@@ -61,6 +61,7 @@ odpapiabiarchinclude_HEADERS += \
include-abi/odp/api/abi/cpumask.h \
include-abi/odp/api/abi/crypto.h \
include-abi/odp/api/abi/debug.h \
+ include-abi/odp/api/abi/dma_types.h \
include-abi/odp/api/abi/errno.h \
include-abi/odp/api/abi/event.h \
include-abi/odp/api/abi/hash.h \
@@ -90,7 +91,7 @@ odpapiabiarchinclude_HEADERS += \
include-abi/odp/api/abi/thrmask.h \
include-abi/odp/api/abi/ticketlock.h \
include-abi/odp/api/abi/time.h \
- include-abi/odp/api/abi/timer.h \
+ include-abi/odp/api/abi/timer_types.h \
include-abi/odp/api/abi/traffic_mngr.h \
include-abi/odp/api/abi/version.h
endif
@@ -118,6 +119,7 @@ noinst_HEADERS = \
${top_srcdir}/platform/linux-generic/include/odp_name_table_internal.h \
include/odp_packet_io_internal.h \
include/odp_errno_define.h \
+ include/odp_event_internal.h \
include/odp_packet_dpdk.h \
${top_srcdir}/platform/linux-generic/include/odp_pcapng.h \
${top_srcdir}/platform/linux-generic/include/odp_pkt_queue_internal.h \
@@ -145,7 +147,7 @@ noinst_HEADERS = \
${top_srcdir}/platform/linux-generic/include/odp_timer_internal.h \
${top_srcdir}/platform/linux-generic/include/odp_timer_wheel_internal.h \
${top_srcdir}/platform/linux-generic/include/odp_traffic_mngr_internal.h \
- ${top_srcdir}/platform/linux-generic/include/odp_event_vector_internal.h \
+ include/odp_event_vector_internal.h \
include/protocols/eth.h \
include/protocols/ip.h \
include/protocols/ipsec.h \
@@ -169,6 +171,7 @@ __LIB__libodp_dpdk_la_SOURCES = \
../linux-generic/miniz/miniz_tinfl.c ../linux-generic/miniz/miniz_tinfl.h \
../linux-generic/odp_cpumask.c \
../linux-generic/odp_cpumask_task.c \
+ ../linux-generic/odp_dma.c \
odp_crypto.c \
odp_errno.c \
../linux-generic/odp_event.c \
@@ -245,6 +248,7 @@ __LIB__libodp_dpdk_la_SOURCES += arch/default/odp_atomic.c \
arch/default/odp_cpu_cycles.c \
arch/default/odp_global_time.c \
arch/default/odp_hash_crc32.c \
+ arch/default/odp_random.c \
arch/arm/odp_sysinfo_parse.c
odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/cpu_time.h \
arch/default/odp/api/abi/hash_crc32.h
@@ -261,7 +265,9 @@ noinst_HEADERS += arch/arm/odp_atomic.h \
arch/arm/odp_llsc.h \
arch/default/odp_atomic.h \
arch/default/odp_cpu.h \
- arch/default/odp_cpu_idling.h
+ arch/default/odp_cpu_idling.h \
+ arch/default/odp_random.h
+
endif
if ARCH_IS_AARCH64
__LIB__libodp_dpdk_la_SOURCES += arch/aarch64/odp_atomic.c \
@@ -269,6 +275,7 @@ __LIB__libodp_dpdk_la_SOURCES += arch/aarch64/odp_atomic.c \
arch/aarch64/cpu_flags.c \
arch/aarch64/odp_global_time.c \
arch/default/odp_hash_crc32.c \
+ arch/default/odp_random.c \
arch/aarch64/odp_sysinfo_parse.c
odpapiabiarchinclude_HEADERS += arch/aarch64/odp/api/abi/cpu_time.h \
arch/aarch64/odp/api/abi/hash_crc32.h
@@ -284,13 +291,15 @@ noinst_HEADERS += arch/aarch64/odp_atomic.h \
arch/aarch64/odp_cpu.h \
arch/aarch64/cpu_flags.h \
arch/aarch64/odp_cpu_idling.h \
- arch/aarch64/odp_llsc.h
+ arch/aarch64/odp_llsc.h \
+ arch/default/odp_random.h
endif
if ARCH_IS_DEFAULT
__LIB__libodp_dpdk_la_SOURCES += arch/default/odp_atomic.c \
arch/default/odp_cpu_cycles.c \
arch/default/odp_global_time.c \
arch/default/odp_hash_crc32.c \
+ arch/default/odp_random.c \
arch/default/odp_sysinfo_parse.c
odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/cpu_time.h \
arch/default/odp/api/abi/hash_crc32.h
@@ -303,12 +312,14 @@ odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/atomic_generic.h \
endif
noinst_HEADERS += arch/default/odp_atomic.h \
arch/default/odp_cpu.h \
- arch/default/odp_cpu_idling.h
+ arch/default/odp_cpu_idling.h \
+ arch/default/odp_random.h
endif
if ARCH_IS_MIPS64
__LIB__libodp_dpdk_la_SOURCES += arch/default/odp_atomic.c \
arch/default/odp_global_time.c \
arch/default/odp_hash_crc32.c \
+ arch/default/odp_random.c \
arch/mips64/odp_sysinfo_parse.c
odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/cpu_time.h \
arch/default/odp/api/abi/hash_crc32.h
@@ -321,13 +332,15 @@ odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/atomic_generic.h \
endif
noinst_HEADERS += arch/default/odp_atomic.h \
arch/default/odp_cpu.h \
- arch/default/odp_cpu_idling.h
+ arch/default/odp_cpu_idling.h \
+ arch/default/odp_random.h
endif
if ARCH_IS_POWERPC
__LIB__libodp_dpdk_la_SOURCES += arch/default/odp_atomic.c \
arch/default/odp_cpu_cycles.c \
arch/default/odp_global_time.c \
arch/default/odp_hash_crc32.c \
+ arch/default/odp_random.c \
arch/powerpc/odp_sysinfo_parse.c
odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/cpu_time.h \
arch/default/odp/api/abi/hash_crc32.h
@@ -340,7 +353,8 @@ odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/atomic_generic.h \
endif
noinst_HEADERS += arch/default/odp_atomic.h \
arch/default/odp_cpu.h \
- arch/default/odp_cpu_idling.h
+ arch/default/odp_cpu_idling.h \
+ arch/default/odp_random.h
endif
if ARCH_IS_X86
__LIB__libodp_dpdk_la_SOURCES += arch/default/odp_atomic.c \
@@ -348,6 +362,7 @@ __LIB__libodp_dpdk_la_SOURCES += arch/default/odp_atomic.c \
arch/x86/odp_cpu_cycles.c \
arch/x86/odp_global_time.c \
arch/default/odp_hash_crc32.c \
+ arch/default/odp_random.c \
arch/x86/odp_sysinfo_parse.c
odpapiabiarchinclude_HEADERS += arch/x86/odp/api/abi/cpu_rdtsc.h \
arch/x86/odp/api/abi/cpu_time.h \
@@ -360,6 +375,7 @@ odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/atomic_generic.h \
endif
noinst_HEADERS += arch/x86/cpu_flags.h \
arch/x86/odp_cpu.h \
+ arch/x86/odp_random.h \
arch/default/odp_atomic.h \
arch/default/odp_cpu.h \
arch/default/odp_cpu_idling.h
diff --git a/platform/linux-dpdk/arch/default/odp_random.c b/platform/linux-dpdk/arch/default/odp_random.c
new file mode 120000
index 000000000..a1889b546
--- /dev/null
+++ b/platform/linux-dpdk/arch/default/odp_random.c
@@ -0,0 +1 @@
+../../../linux-generic/arch/default/odp_random.c \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/default/odp_random.h b/platform/linux-dpdk/arch/default/odp_random.h
new file mode 120000
index 000000000..232858671
--- /dev/null
+++ b/platform/linux-dpdk/arch/default/odp_random.h
@@ -0,0 +1 @@
+../../../linux-generic/arch/default/odp_random.h \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/x86/odp_random.h b/platform/linux-dpdk/arch/x86/odp_random.h
new file mode 120000
index 000000000..50a20427d
--- /dev/null
+++ b/platform/linux-dpdk/arch/x86/odp_random.h
@@ -0,0 +1 @@
+../../../linux-generic/arch/x86/odp_random.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/dma_types.h b/platform/linux-dpdk/include-abi/odp/api/abi/dma_types.h
new file mode 120000
index 000000000..40558269d
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/dma_types.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/dma_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/timer.h b/platform/linux-dpdk/include-abi/odp/api/abi/timer.h
deleted file mode 120000
index e65d4faf5..000000000
--- a/platform/linux-dpdk/include-abi/odp/api/abi/timer.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../linux-generic/include-abi/odp/api/abi/timer.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/timer_types.h b/platform/linux-dpdk/include-abi/odp/api/abi/timer_types.h
new file mode 120000
index 000000000..4b815a27b
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/timer_types.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/timer_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp_buffer_internal.h b/platform/linux-dpdk/include/odp_buffer_internal.h
index 674c6d716..42b686a2a 100644
--- a/platform/linux-dpdk/include/odp_buffer_internal.h
+++ b/platform/linux-dpdk/include/odp_buffer_internal.h
@@ -1,4 +1,5 @@
/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -17,18 +18,20 @@
extern "C" {
#endif
-#include <odp/api/std_types.h>
-#include <odp/api/pool.h>
+#include <odp/api/align.h>
#include <odp/api/buffer.h>
+#include <odp/api/byteorder.h>
#include <odp/api/debug.h>
-#include <odp/api/align.h>
+#include <odp/api/event.h>
+#include <odp/api/pool.h>
+#include <odp/api/std_types.h>
+#include <odp/api/thread.h>
+
#include <odp_align_internal.h>
#include <odp_config_internal.h>
-#include <odp/api/byteorder.h>
-#include <odp/api/thread.h>
+#include <odp_event_internal.h>
+
#include <sys/types.h>
-#include <odp/api/event.h>
-#include <odp_forward_typedefs_internal.h>
#include <stddef.h>
/* DPDK */
@@ -45,36 +48,12 @@ extern "C" {
/* Type size limits number of flow IDs supported */
#define BUF_HDR_MAX_FLOW_ID 255
-struct odp_buffer_hdr_t {
- /* Underlying DPDK rte_mbuf */
- struct rte_mbuf mb;
-
- /* Buffer index in the pool */
- uint32_t index;
-
- /* Total size of all allocated segs */
- uint32_t totsize;
-
- /* Pool type */
- int8_t type;
+/* Internal buffer header */
+typedef struct ODP_ALIGNED_CACHE odp_buffer_hdr_t {
+ /* Common event header */
+ _odp_event_hdr_t event_hdr;
- /* Event type. Maybe different than pool type (crypto compl event) */
- int8_t event_type;
-
- /* Event flow id */
- uint8_t flow_id;
-
- /* --- Mostly read only data --- */
-
- /* User pointer */
- const void *user_ptr;
-
- /* Pool pointer */
- void *pool_ptr;
-
- /* User area pointer */
- void *uarea_addr;
-};
+} odp_buffer_hdr_t;
/*
* Buffer type
@@ -94,48 +73,28 @@ int _odp_buffer_type(odp_buffer_t buf);
*/
void _odp_buffer_type_set(odp_buffer_t buf, int type);
-static inline struct rte_mbuf *buf_to_mbuf(odp_buffer_t buf)
+static inline struct rte_mbuf *_odp_buf_to_mbuf(odp_buffer_t buf)
{
return (struct rte_mbuf *)(uintptr_t)buf;
}
-static inline odp_buffer_hdr_t *mbuf_to_buf_hdr(struct rte_mbuf *mbuf)
-{
- return (odp_buffer_hdr_t *)(uintptr_t)mbuf;
-}
-
-static inline odp_buffer_t buf_from_buf_hdr(odp_buffer_hdr_t *hdr)
-{
- return (odp_buffer_t)hdr;
-}
-
-static inline odp_buffer_hdr_t *buf_hdl_to_hdr(odp_buffer_t buf)
+static inline odp_buffer_hdr_t *_odp_buf_hdr(odp_buffer_t buf)
{
return (odp_buffer_hdr_t *)(uintptr_t)buf;
}
-static inline odp_event_type_t _odp_buffer_event_type(odp_buffer_t buf)
-{
- return buf_hdl_to_hdr(buf)->event_type;
-}
-
-static inline void _odp_buffer_event_type_set(odp_buffer_t buf, int ev)
-{
- buf_hdl_to_hdr(buf)->event_type = ev;
-}
-
static inline uint32_t event_flow_id(odp_event_t ev)
{
odp_buffer_hdr_t *buf_hdr = (odp_buffer_hdr_t *)(uintptr_t)ev;
- return buf_hdr->flow_id;
+ return buf_hdr->event_hdr.flow_id;
}
static inline void event_flow_id_set(odp_event_t ev, uint32_t flow_id)
{
odp_buffer_hdr_t *buf_hdr = (odp_buffer_hdr_t *)(uintptr_t)ev;
- buf_hdr->flow_id = flow_id;
+ buf_hdr->event_hdr.flow_id = flow_id;
}
#ifdef __cplusplus
diff --git a/platform/linux-dpdk/include/odp_config_internal.h b/platform/linux-dpdk/include/odp_config_internal.h
index 0bbda1a4e..18754184e 100644
--- a/platform/linux-dpdk/include/odp_config_internal.h
+++ b/platform/linux-dpdk/include/odp_config_internal.h
@@ -58,6 +58,16 @@ extern "C" {
#define CONFIG_QUEUE_MAX_ORD_LOCKS 2
/*
+ * Maximum number of DMA sessions
+ */
+#define CONFIG_MAX_DMA_SESSIONS 32
+
+/*
+ * Stashes reserved for internal usage
+ */
+#define CONFIG_INTERNAL_STASHES CONFIG_MAX_DMA_SESSIONS
+
+/*
* Maximum number of stashes
*/
#define CONFIG_MAX_STASHES 128
diff --git a/platform/linux-dpdk/include/odp_event_internal.h b/platform/linux-dpdk/include/odp_event_internal.h
new file mode 100644
index 000000000..9221def98
--- /dev/null
+++ b/platform/linux-dpdk/include/odp_event_internal.h
@@ -0,0 +1,102 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP event descriptor - implementation internal
+ */
+
+#ifndef ODP_EVENT_INTERNAL_H_
+#define ODP_EVENT_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/event.h>
+
+#include <stdint.h>
+
+/* DPDK */
+#include <rte_config.h>
+#if defined(__clang__)
+#undef RTE_TOOLCHAIN_GCC
+#endif
+#include <rte_mbuf.h>
+/* ppc64 rte_memcpy.h (included through rte_mbuf.h) may define vector */
+#if defined(__PPC64__) && defined(vector)
+ #undef vector
+#endif
+
+/* Common header for all event types. */
+typedef struct _odp_event_hdr_t {
+ /* Underlying DPDK rte_mbuf */
+ struct rte_mbuf mb;
+
+ /* Buffer index in the pool */
+ uint32_t index;
+
+ /* Total size of all allocated segs */
+ uint32_t totsize;
+
+ /* Pool type */
+ int8_t type;
+
+	/* Event type. May be different from the pool type (e.g. crypto compl event) */
+ int8_t event_type;
+
+ /* Event flow id */
+ uint8_t flow_id;
+
+ /* --- Mostly read only data --- */
+
+ /* User pointer */
+ const void *user_ptr;
+
+ /* Pool pointer */
+ void *pool_ptr;
+
+ /* User area pointer */
+ void *uarea_addr;
+
+} _odp_event_hdr_t;
+
+static inline odp_event_t _odp_event_from_hdr(_odp_event_hdr_t *hdr)
+{
+ return (odp_event_t)hdr;
+}
+
+static inline _odp_event_hdr_t *_odp_event_hdr(odp_event_t event)
+{
+ return (_odp_event_hdr_t *)(uintptr_t)event;
+}
+
+static inline odp_event_t _odp_event_from_mbuf(struct rte_mbuf *mbuf)
+{
+ return (odp_event_t)(uintptr_t)mbuf;
+}
+
+static inline struct rte_mbuf *_odp_event_to_mbuf(odp_event_t event)
+{
+ return (struct rte_mbuf *)(uintptr_t)event;
+}
+
+static inline odp_event_type_t _odp_event_type(odp_event_t event)
+{
+ return _odp_event_hdr(event)->event_type;
+}
+
+static inline void _odp_event_type_set(odp_event_t event, int ev)
+{
+ _odp_event_hdr(event)->event_type = ev;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-dpdk/include/odp_event_vector_internal.h b/platform/linux-dpdk/include/odp_event_vector_internal.h
new file mode 100644
index 000000000..c866d9036
--- /dev/null
+++ b/platform/linux-dpdk/include/odp_event_vector_internal.h
@@ -0,0 +1,60 @@
+/* Copyright (c) 2020-2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP event vector descriptor - implementation internal
+ */
+
+#ifndef ODP_EVENT_VECTOR_INTERNAL_H_
+#define ODP_EVENT_VECTOR_INTERNAL_H_
+
+#include <odp/api/align.h>
+#include <odp/api/debug.h>
+#include <odp/api/packet.h>
+
+#include <odp_event_internal.h>
+
+#include <stdint.h>
+
+/**
+ * Internal event vector header
+ */
+typedef struct ODP_ALIGNED_CACHE odp_event_vector_hdr_t {
+ /* Common event header */
+ _odp_event_hdr_t event_hdr;
+
+ /* Event vector size */
+ uint32_t size;
+
+ /* Vector of packet handles */
+ odp_packet_t packet[];
+
+} odp_event_vector_hdr_t;
+
+/**
+ * Return the vector header
+ */
+static inline odp_event_vector_hdr_t *_odp_packet_vector_hdr(odp_packet_vector_t pktv)
+{
+ return (odp_event_vector_hdr_t *)(uintptr_t)pktv;
+}
+
+/**
+ * Free packet vector and contained packets
+ */
+static inline void _odp_packet_vector_free_full(odp_packet_vector_t pktv)
+{
+ odp_event_vector_hdr_t *pktv_hdr = _odp_packet_vector_hdr(pktv);
+
+ if (pktv_hdr->size)
+ odp_packet_free_multi(pktv_hdr->packet, pktv_hdr->size);
+
+ odp_packet_vector_free(pktv);
+}
+
+#endif /* ODP_EVENT_VECTOR_INTERNAL_H_ */
diff --git a/platform/linux-dpdk/include/odp_packet_internal.h b/platform/linux-dpdk/include/odp_packet_internal.h
index ca789a3a1..a3e806f54 100644
--- a/platform/linux-dpdk/include/odp_packet_internal.h
+++ b/platform/linux-dpdk/include/odp_packet_internal.h
@@ -1,4 +1,5 @@
/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -18,20 +19,19 @@ extern "C" {
#endif
#include <odp/api/align.h>
-#include <odp_debug_internal.h>
#include <odp/api/debug.h>
-#include <odp_buffer_internal.h>
-#include <odp_pool_internal.h>
#include <odp/api/packet.h>
#include <odp/api/plat/packet_inline_types.h>
#include <odp/api/packet_io.h>
#include <odp/api/crypto.h>
#include <odp/api/comp.h>
-#include <odp_ipsec_internal.h>
#include <odp/api/abi/packet.h>
-#include <protocols/eth.h>
-#include <odp_queue_if.h>
+
#include <odp_config_internal.h>
+#include <odp_event_internal.h>
+#include <odp_pool_internal.h>
+
+#include <protocols/eth.h>
#include <rte_config.h>
#if defined(__clang__)
@@ -112,9 +112,9 @@ ODP_STATIC_ASSERT(CONFIG_PACKET_MAX_SEG_LEN <= UINT16_MAX,
* packet_init(). Because of this any new fields added must be reviewed for
* initialization requirements.
*/
-typedef struct {
- /* common buffer header */
- odp_buffer_hdr_t buf_hdr;
+typedef struct odp_packet_hdr_t {
+ /* Common event header */
+ _odp_event_hdr_t event_hdr;
packet_parser_t p;
@@ -179,19 +179,19 @@ static inline odp_packet_t packet_handle(odp_packet_hdr_t *pkt_hdr)
return (odp_packet_t)pkt_hdr;
}
-static inline struct rte_mbuf *pkt_to_mbuf(odp_packet_t pkt)
+static inline _odp_event_hdr_t *packet_to_event_hdr(odp_packet_t pkt)
{
- return (struct rte_mbuf *)(uintptr_t)pkt;
+ return (_odp_event_hdr_t *)(uintptr_t)&packet_hdr(pkt)->event_hdr;
}
-static inline odp_buffer_hdr_t *packet_to_buf_hdr(odp_packet_t pkt)
+static inline odp_packet_t packet_from_event_hdr(_odp_event_hdr_t *event_hdr)
{
- return &packet_hdr(pkt)->buf_hdr;
+ return (odp_packet_t)(uintptr_t)event_hdr;
}
-static inline odp_packet_t packet_from_buf_hdr(odp_buffer_hdr_t *buf_hdr)
+static inline struct rte_mbuf *pkt_to_mbuf(odp_packet_t pkt)
{
- return (odp_packet_t)(odp_packet_hdr_t *)buf_hdr;
+ return (struct rte_mbuf *)(uintptr_t)pkt;
}
static inline void packet_subtype_set(odp_packet_t pkt, int ev)
@@ -236,12 +236,12 @@ static inline void copy_packet_cls_metadata(odp_packet_hdr_t *src_hdr,
static inline uint32_t packet_len(odp_packet_hdr_t *pkt_hdr)
{
- return rte_pktmbuf_pkt_len(&pkt_hdr->buf_hdr.mb);
+ return rte_pktmbuf_pkt_len(&pkt_hdr->event_hdr.mb);
}
static inline void packet_set_len(odp_packet_hdr_t *pkt_hdr, uint32_t len)
{
- rte_pktmbuf_pkt_len(&pkt_hdr->buf_hdr.mb) = len;
+ rte_pktmbuf_pkt_len(&pkt_hdr->event_hdr.mb) = len;
}
/* Forward declarations */
diff --git a/platform/linux-dpdk/include/odp_packet_io_internal.h b/platform/linux-dpdk/include/odp_packet_io_internal.h
index 898709008..85900e791 100644
--- a/platform/linux-dpdk/include/odp_packet_io_internal.h
+++ b/platform/linux-dpdk/include/odp_packet_io_internal.h
@@ -18,18 +18,18 @@
extern "C" {
#endif
+#include <odp/api/hints.h>
#include <odp/api/packet_io.h>
#include <odp/api/plat/pktio_inlines.h>
#include <odp/api/spinlock.h>
#include <odp/api/ticketlock.h>
-#include <odp_classification_datamodel.h>
+
#include <odp_align_internal.h>
+#include <odp_classification_datamodel.h>
+#include <odp_config_internal.h>
#include <odp_debug_internal.h>
#include <odp_queue_if.h>
-#include <odp_config_internal.h>
-#include <odp/api/hints.h>
-
#include <linux/if_ether.h>
#include <sys/select.h>
#include <inttypes.h>
diff --git a/platform/linux-dpdk/include/odp_pool_internal.h b/platform/linux-dpdk/include/odp_pool_internal.h
index 39ca0f002..107ba26db 100644
--- a/platform/linux-dpdk/include/odp_pool_internal.h
+++ b/platform/linux-dpdk/include/odp_pool_internal.h
@@ -20,7 +20,7 @@ extern "C" {
#include <odp/api/std_types.h>
#include <odp/api/pool.h>
-#include <odp_buffer_internal.h>
+#include <odp_event_internal.h>
#include <odp/api/packet_io.h>
#include <odp/api/align.h>
#include <odp/api/hints.h>
@@ -72,6 +72,7 @@ typedef struct ODP_ALIGNED_CACHE {
uint32_t hdr_size;
uint32_t num;
uint32_t num_populated;
+ odp_pool_type_t type_2;
uint8_t type;
uint8_t pool_ext;
odp_pool_param_t params;
@@ -106,8 +107,7 @@ static inline pool_t *pool_entry_from_hdl(odp_pool_t pool_hdl)
return &_odp_pool_glb->pool[_odp_typeval(pool_hdl) - 1];
}
-static inline int _odp_buffer_alloc_multi(pool_t *pool,
- odp_buffer_hdr_t *buf_hdr[], int num)
+static inline int _odp_event_alloc_multi(pool_t *pool, _odp_event_hdr_t *event_hdr[], int num)
{
int i;
struct rte_mempool *mp = pool->rte_mempool;
@@ -119,21 +119,41 @@ static inline int _odp_buffer_alloc_multi(pool_t *pool,
if (odp_unlikely(mbuf == NULL))
return i;
- buf_hdr[i] = mbuf_to_buf_hdr(mbuf);
+ event_hdr[i] = _odp_event_hdr(_odp_event_from_mbuf(mbuf));
}
return i;
}
-static inline void _odp_buffer_free_multi(odp_buffer_hdr_t *buf_hdr[], int num)
+static inline odp_event_t _odp_event_alloc(pool_t *pool)
+{
+ struct rte_mbuf *mbuf;
+ struct rte_mempool *mp = pool->rte_mempool;
+
+ mbuf = rte_mbuf_raw_alloc(mp);
+ if (odp_unlikely(mbuf == NULL))
+ return ODP_EVENT_INVALID;
+
+ return _odp_event_from_mbuf(mbuf);
+}
+
+static inline void _odp_event_free_multi(_odp_event_hdr_t *event_hdr[], int num_free)
{
int i;
- for (i = 0; i < num; i++)
- rte_mbuf_raw_free((struct rte_mbuf *)(uintptr_t)buf_hdr[i]);
+ for (i = 0; i < num_free; i++)
+ rte_mbuf_raw_free(_odp_event_to_mbuf(_odp_event_from_hdr(event_hdr[i])));
}
-int _odp_buffer_is_valid(odp_buffer_t buf);
+static inline void _odp_event_free(odp_event_t event)
+{
+ rte_mbuf_raw_free(_odp_event_to_mbuf(event));
+}
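+
+/*
+ * Illustrative sketch (editor's example, using only the helpers above):
+ * single-event allocation with the usual failure check, pairing
+ * _odp_event_alloc() with _odp_event_free():
+ *
+ *   odp_event_t ev = _odp_event_alloc(pool);
+ *
+ *   if (odp_unlikely(ev == ODP_EVENT_INVALID))
+ *           return -1;
+ *   ...
+ *   _odp_event_free(ev);
+ */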
+
+int _odp_event_is_valid(odp_event_t event);
+
+odp_pool_t _odp_pool_create(const char *name, const odp_pool_param_t *params,
+ odp_pool_type_t type_2);
#ifdef __cplusplus
}
diff --git a/platform/linux-dpdk/m4/configure.m4 b/platform/linux-dpdk/m4/configure.m4
index aa369415c..90d55b6f6 100644
--- a/platform/linux-dpdk/m4/configure.m4
+++ b/platform/linux-dpdk/m4/configure.m4
@@ -6,6 +6,7 @@ ODP_ATOMIC
m4_include([platform/linux-dpdk/m4/odp_cpu.m4])
m4_include([platform/linux-dpdk/m4/odp_libconfig.m4])
+m4_include([platform/linux-dpdk/m4/odp_openssl.m4])
m4_include([platform/linux-dpdk/m4/odp_pcapng.m4])
m4_include([platform/linux-dpdk/m4/odp_scheduler.m4])
@@ -67,6 +68,8 @@ AS_VAR_APPEND([PLAT_DEP_LIBS], ["${ATOMIC_LIBS} ${LIBCONFIG_LIBS} ${OPENSSL_LIBS
# Add text to the end of configure with platform specific settings.
# Make sure it's aligned same as other lines in configure.ac.
AS_VAR_APPEND([PLAT_CFG_TEXT], ["
+ openssl: ${with_openssl}
+ openssl_rand: ${openssl_rand}
pcap: ${have_pmd_pcap}
pcapng: ${have_pcapng}
default_config_path: ${default_config_path}"])
diff --git a/platform/linux-dpdk/m4/odp_pthread.m4 b/platform/linux-dpdk/m4/odp_pthread.m4
deleted file mode 120000
index e24304ae3..000000000
--- a/platform/linux-dpdk/m4/odp_pthread.m4
+++ /dev/null
@@ -1 +0,0 @@
-../../linux-generic/m4/odp_pthread.m4
\ No newline at end of file
diff --git a/platform/linux-dpdk/m4/odp_timer.m4 b/platform/linux-dpdk/m4/odp_timer.m4
deleted file mode 120000
index 2a909824b..000000000
--- a/platform/linux-dpdk/m4/odp_timer.m4
+++ /dev/null
@@ -1 +0,0 @@
-../../linux-generic/m4/odp_timer.m4
\ No newline at end of file
diff --git a/platform/linux-dpdk/odp_buffer.c b/platform/linux-dpdk/odp_buffer.c
index b8bccd3ba..21956be2f 100644
--- a/platform/linux-dpdk/odp_buffer.c
+++ b/platform/linux-dpdk/odp_buffer.c
@@ -1,4 +1,5 @@
/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -18,36 +19,36 @@
/* Fill in buffer header field offsets for inline functions */
const _odp_buffer_inline_offset_t _odp_buffer_inline_offset ODP_ALIGNED_CACHE = {
- .event_type = offsetof(odp_buffer_hdr_t, event_type),
- .base_data = offsetof(odp_buffer_hdr_t, mb.buf_addr)
+ .event_type = offsetof(odp_buffer_hdr_t, event_hdr.event_type),
+ .base_data = offsetof(odp_buffer_hdr_t, event_hdr.mb.buf_addr)
};
#include <odp/visibility_end.h>
uint32_t odp_buffer_size(odp_buffer_t buf)
{
- struct rte_mbuf *mbuf = buf_to_mbuf(buf);
+ struct rte_mbuf *mbuf = _odp_buf_to_mbuf(buf);
return mbuf->buf_len;
}
int _odp_buffer_type(odp_buffer_t buf)
{
- odp_buffer_hdr_t *hdr = buf_hdl_to_hdr(buf);
+ odp_buffer_hdr_t *hdr = _odp_buf_hdr(buf);
- return hdr->type;
+ return hdr->event_hdr.type;
}
void _odp_buffer_type_set(odp_buffer_t buf, int type)
{
- odp_buffer_hdr_t *hdr = buf_hdl_to_hdr(buf);
+ odp_buffer_hdr_t *hdr = _odp_buf_hdr(buf);
- hdr->type = type;
+ hdr->event_hdr.type = type;
}
int odp_buffer_is_valid(odp_buffer_t buf)
{
- if (_odp_buffer_is_valid(buf) == 0)
+ if (odp_event_is_valid(odp_buffer_to_event(buf)) == 0)
return 0;
if (odp_event_type(odp_buffer_to_event(buf)) != ODP_EVENT_BUFFER)
@@ -70,12 +71,12 @@ void odp_buffer_print(odp_buffer_t buf)
return;
}
- hdr = buf_hdl_to_hdr(buf);
- pool = hdr->pool_ptr;
+ hdr = _odp_buf_hdr(buf);
+ pool = hdr->event_hdr.pool_ptr;
len += snprintf(&str[len], n - len, "Buffer\n------\n");
len += snprintf(&str[len], n - len, " pool index %u\n", pool->pool_idx);
- len += snprintf(&str[len], n - len, " buffer index %u\n", hdr->index);
+ len += snprintf(&str[len], n - len, " buffer index %u\n", hdr->event_hdr.index);
len += snprintf(&str[len], n - len, " addr %p\n", odp_buffer_addr(buf));
len += snprintf(&str[len], n - len, " size %u\n", odp_buffer_size(buf));
str[len] = 0;
diff --git a/platform/linux-dpdk/odp_crypto.c b/platform/linux-dpdk/odp_crypto.c
index c15e75f22..02c06be0e 100644
--- a/platform/linux-dpdk/odp_crypto.c
+++ b/platform/linux-dpdk/odp_crypto.c
@@ -1745,7 +1745,7 @@ static uint8_t *crypto_prepare_digest(crypto_session_entry_t *session,
_odp_packet_set_data(pkt, param->hash_result_offset, 0,
session->p.auth_digest_len);
data = pkt_hdr->crypto_digest_buf;
- mb = &pkt_hdr->buf_hdr.mb;
+ mb = &pkt_hdr->event_hdr.mb;
*phys_addr =
rte_pktmbuf_iova_offset(mb, data -
rte_pktmbuf_mtod(mb, uint8_t *));
@@ -1782,9 +1782,9 @@ static void crypto_fill_aead_param(crypto_session_entry_t *session,
aead_xform->aead.aad_length);
op->sym->aead.aad.data = pkt_hdr->crypto_aad_buf;
op->sym->aead.aad.phys_addr =
- rte_pktmbuf_iova_offset(&pkt_hdr->buf_hdr.mb,
+ rte_pktmbuf_iova_offset(&pkt_hdr->event_hdr.mb,
op->sym->aead.aad.data -
- rte_pktmbuf_mtod(&pkt_hdr->buf_hdr.mb,
+ rte_pktmbuf_mtod(&pkt_hdr->event_hdr.mb,
uint8_t *));
iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
if (aead_xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM) {
diff --git a/platform/linux-dpdk/odp_init.c b/platform/linux-dpdk/odp_init.c
index 546203a5d..a69aabff2 100644
--- a/platform/linux-dpdk/odp_init.c
+++ b/platform/linux-dpdk/odp_init.c
@@ -52,16 +52,21 @@ enum init_stage {
IPSEC_EVENTS_INIT,
IPSEC_SAD_INIT,
IPSEC_INIT,
+ DMA_INIT,
ALL_INIT /* All init stages completed */
};
odp_global_data_ro_t odp_global_ro;
odp_global_data_rw_t *odp_global_rw;
+/* odp_init_local() call status */
+static __thread uint8_t init_local_called;
+
static void disable_features(odp_global_data_ro_t *global_ro,
const odp_init_t *init_param)
{
int disable_ipsec, disable_crypto;
+ int disable_dma;
if (init_param == NULL)
return;
@@ -74,7 +79,13 @@ static void disable_features(odp_global_data_ro_t *global_ro,
if (disable_ipsec && disable_crypto)
global_ro->disable.crypto = 1;
- global_ro->disable.stash = init_param->not_used.feat.stash;
+ disable_dma = init_param->not_used.feat.dma;
+ global_ro->disable.dma = disable_dma;
+
+ /* DMA uses stash. Disable stash only when both are disabled. */
+ if (disable_dma && init_param->not_used.feat.stash)
+ global_ro->disable.stash = 1;
+
global_ro->disable.traffic_mngr = init_param->not_used.feat.tm;
global_ro->disable.compress = init_param->not_used.feat.compress;
}
@@ -305,6 +316,13 @@ static int term_global(enum init_stage stage)
switch (stage) {
case ALL_INIT:
+ case DMA_INIT:
+ if (_odp_dma_term_global()) {
+ ODP_ERR("ODP DMA term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
case IPSEC_INIT:
if (_odp_ipsec_term_global()) {
ODP_ERR("ODP IPsec term failed.\n");
@@ -639,6 +657,12 @@ int odp_init_global(odp_instance_t *instance,
}
stage = IPSEC_INIT;
+ if (_odp_dma_init_global()) {
+ ODP_ERR("ODP DMA init failed.\n");
+ goto init_failed;
+ }
+ stage = DMA_INIT;
+
/* Dummy support for single instance */
*instance = (odp_instance_t)odp_global_ro.main_pid;
@@ -742,6 +766,13 @@ int odp_init_local(odp_instance_t instance, odp_thread_type_t thr_type)
goto init_fail;
}
+ /* Detect if odp_init_local() has already been called from this thread */
+ if (getpid() == odp_global_ro.main_pid && init_local_called) {
+ ODP_ERR("%s() called multiple times by the same thread\n", __func__);
+ goto init_fail;
+ }
+ init_local_called = 1;
+
if (_odp_shm_init_local()) {
ODP_ERR("ODP shm local init failed.\n");
goto init_fail;
@@ -805,6 +836,13 @@ init_fail:
int odp_term_local(void)
{
+ /* Check that odp_init_local() has been called by this thread */
+ if (!init_local_called) {
+ ODP_ERR("%s() called by a non-initialized thread\n", __func__);
+ return -1;
+ }
+ init_local_called = 0;
+
return term_local(ALL_INIT);
}
diff --git a/platform/linux-dpdk/odp_packet.c b/platform/linux-dpdk/odp_packet.c
index 5b16428db..fa84e3b4c 100644
--- a/platform/linux-dpdk/odp_packet.c
+++ b/platform/linux-dpdk/odp_packet.c
@@ -7,18 +7,21 @@
#include <odp/api/packet.h>
#include <odp/api/plat/packet_inlines.h>
-#include <odp_packet_internal.h>
-#include <odp_debug_internal.h>
-#include <odp_macros_internal.h>
-#include <odp_chksum_internal.h>
#include <odp/api/hints.h>
#include <odp/api/byteorder.h>
#include <odp/api/plat/byteorder_inlines.h>
#include <odp/api/packet_io.h>
#include <odp/api/plat/pktio_inlines.h>
-#include <odp_errno_define.h>
#include <odp/api/proto_stats.h>
+#include <odp_align_internal.h>
+#include <odp_chksum_internal.h>
+#include <odp_debug_internal.h>
+#include <odp_errno_define.h>
+#include <odp_event_internal.h>
+#include <odp_packet_internal.h>
+#include <odp_macros_internal.h>
+
/* Inlined API functions */
#include <odp/api/plat/event_inlines.h>
@@ -38,10 +41,10 @@
/* Fill in packet header field offsets for inline functions */
const _odp_packet_inline_offset_t _odp_packet_inline ODP_ALIGNED_CACHE = {
- .mb = offsetof(odp_packet_hdr_t, buf_hdr.mb),
- .pool = offsetof(odp_packet_hdr_t, buf_hdr.pool_ptr),
+ .mb = offsetof(odp_packet_hdr_t, event_hdr.mb),
+ .pool = offsetof(odp_packet_hdr_t, event_hdr.pool_ptr),
.input = offsetof(odp_packet_hdr_t, input),
- .user_ptr = offsetof(odp_packet_hdr_t, buf_hdr.user_ptr),
+ .user_ptr = offsetof(odp_packet_hdr_t, event_hdr.user_ptr),
.l2_offset = offsetof(odp_packet_hdr_t, p.l2_offset),
.l3_offset = offsetof(odp_packet_hdr_t, p.l3_offset),
.l4_offset = offsetof(odp_packet_hdr_t, p.l4_offset),
@@ -49,14 +52,14 @@ const _odp_packet_inline_offset_t _odp_packet_inline ODP_ALIGNED_CACHE = {
.input_flags = offsetof(odp_packet_hdr_t, p.input_flags),
.flags = offsetof(odp_packet_hdr_t, p.flags),
.subtype = offsetof(odp_packet_hdr_t, subtype),
- .buf_addr = offsetof(odp_packet_hdr_t, buf_hdr.mb.buf_addr),
- .data = offsetof(odp_packet_hdr_t, buf_hdr.mb.data_off),
- .pkt_len = offsetof(odp_packet_hdr_t, buf_hdr.mb.pkt_len),
- .seg_len = offsetof(odp_packet_hdr_t, buf_hdr.mb.data_len),
- .nb_segs = offsetof(odp_packet_hdr_t, buf_hdr.mb.nb_segs),
- .user_area = offsetof(odp_packet_hdr_t, buf_hdr.uarea_addr),
- .rss = offsetof(odp_packet_hdr_t, buf_hdr.mb.hash.rss),
- .ol_flags = offsetof(odp_packet_hdr_t, buf_hdr.mb.ol_flags),
+ .buf_addr = offsetof(odp_packet_hdr_t, event_hdr.mb.buf_addr),
+ .data = offsetof(odp_packet_hdr_t, event_hdr.mb.data_off),
+ .pkt_len = offsetof(odp_packet_hdr_t, event_hdr.mb.pkt_len),
+ .seg_len = offsetof(odp_packet_hdr_t, event_hdr.mb.data_len),
+ .nb_segs = offsetof(odp_packet_hdr_t, event_hdr.mb.nb_segs),
+ .user_area = offsetof(odp_packet_hdr_t, event_hdr.uarea_addr),
+ .rss = offsetof(odp_packet_hdr_t, event_hdr.mb.hash.rss),
+ .ol_flags = offsetof(odp_packet_hdr_t, event_hdr.mb.ol_flags),
.rss_flag = PKT_RX_RSS_HASH
};
@@ -95,11 +98,6 @@ ODP_STATIC_ASSERT(ODP_TIMEOUT_INVALID == 0, "Timeout invalid not 0");
#pragma GCC diagnostic pop
#endif
-static inline odp_buffer_t packet_to_buffer(odp_packet_t pkt)
-{
- return (odp_buffer_t)pkt;
-}
-
/* Calculate the number of segments */
static inline int num_segments(uint32_t len, uint32_t seg_len)
{
@@ -190,7 +188,7 @@ static odp_packet_t packet_alloc(pool_t *pool, uint32_t len)
}
}
- pkt_hdr->buf_hdr.totsize = seg_len * num_seg;
+ pkt_hdr->event_hdr.totsize = seg_len * num_seg;
pkt = packet_handle(pkt_hdr);
odp_packet_reset(pkt, len);
@@ -238,7 +236,7 @@ int odp_packet_alloc_multi(odp_pool_t pool_hdl, uint32_t len,
int odp_packet_reset(odp_packet_t pkt, uint32_t len)
{
odp_packet_hdr_t *const pkt_hdr = packet_hdr(pkt);
- struct rte_mbuf *ms, *mb = &pkt_hdr->buf_hdr.mb;
+ struct rte_mbuf *ms, *mb = &pkt_hdr->event_hdr.mb;
uint8_t nb_segs = 0;
int32_t lenleft = len;
@@ -310,12 +308,12 @@ int odp_event_filter_packet(const odp_event_t event[],
uint32_t odp_packet_buf_len(odp_packet_t pkt)
{
- return packet_hdr(pkt)->buf_hdr.totsize;
+ return packet_hdr(pkt)->event_hdr.totsize;
}
void *odp_packet_tail(odp_packet_t pkt)
{
- struct rte_mbuf *mb = &(packet_hdr(pkt)->buf_hdr.mb);
+ struct rte_mbuf *mb = &(packet_hdr(pkt)->event_hdr.mb);
mb = rte_pktmbuf_lastseg(mb);
return (void *)(rte_pktmbuf_mtod(mb, char *) + mb->data_len);
@@ -323,7 +321,7 @@ void *odp_packet_tail(odp_packet_t pkt)
void *odp_packet_push_head(odp_packet_t pkt, uint32_t len)
{
- struct rte_mbuf *mb = &(packet_hdr(pkt)->buf_hdr.mb);
+ struct rte_mbuf *mb = &(packet_hdr(pkt)->event_hdr.mb);
return (void *)rte_pktmbuf_prepend(mb, len);
}
@@ -339,7 +337,7 @@ static void _copy_head_metadata(struct rte_mbuf *newhead,
int odp_packet_extend_head(odp_packet_t *pkt, uint32_t len, void **data_ptr,
uint32_t *seg_len)
{
- struct rte_mbuf *mb = &(packet_hdr(*pkt)->buf_hdr.mb);
+ struct rte_mbuf *mb = &(packet_hdr(*pkt)->event_hdr.mb);
int addheadsize = len - rte_pktmbuf_headroom(mb);
if (addheadsize > 0) {
@@ -380,7 +378,7 @@ int odp_packet_extend_head(odp_packet_t *pkt, uint32_t len, void **data_ptr,
_copy_head_metadata(newhead, mb);
mb = newhead;
*pkt = (odp_packet_t)newhead;
- packet_hdr(*pkt)->buf_hdr.totsize += totsize_change;
+ packet_hdr(*pkt)->event_hdr.totsize += totsize_change;
} else {
rte_pktmbuf_prepend(mb, len);
}
@@ -434,7 +432,7 @@ int odp_packet_trunc_head(odp_packet_t *pkt, uint32_t len, void **data_ptr,
rte_pktmbuf_free(mb);
mb = newhead;
*pkt = (odp_packet_t)newhead;
- packet_hdr(*pkt)->buf_hdr.totsize -= totsize_change;
+ packet_hdr(*pkt)->event_hdr.totsize -= totsize_change;
} else {
rte_pktmbuf_adj(mb, len);
}
@@ -449,7 +447,7 @@ int odp_packet_trunc_head(odp_packet_t *pkt, uint32_t len, void **data_ptr,
void *odp_packet_push_tail(odp_packet_t pkt, uint32_t len)
{
- struct rte_mbuf *mb = &(packet_hdr(pkt)->buf_hdr.mb);
+ struct rte_mbuf *mb = &(packet_hdr(pkt)->event_hdr.mb);
return (void *)rte_pktmbuf_append(mb, len);
}
@@ -457,7 +455,7 @@ void *odp_packet_push_tail(odp_packet_t pkt, uint32_t len)
int odp_packet_extend_tail(odp_packet_t *pkt, uint32_t len, void **data_ptr,
uint32_t *seg_len)
{
- struct rte_mbuf *mb = &(packet_hdr(*pkt)->buf_hdr.mb);
+ struct rte_mbuf *mb = &(packet_hdr(*pkt)->event_hdr.mb);
int newtailsize = len - odp_packet_tailroom(*pkt);
uint32_t old_pkt_len = odp_packet_len(*pkt);
@@ -502,7 +500,7 @@ int odp_packet_extend_tail(odp_packet_t *pkt, uint32_t len, void **data_ptr,
/* Expand the original tail */
m_last->data_len = m_last->buf_len - m_last->data_off;
mb->pkt_len += len - newtailsize;
- packet_hdr(*pkt)->buf_hdr.totsize +=
+ packet_hdr(*pkt)->event_hdr.totsize +=
newtail->nb_segs * newtail->buf_len;
} else {
rte_pktmbuf_append(mb, len);
@@ -595,7 +593,7 @@ void odp_packet_user_ptr_set(odp_packet_t pkt, const void *ptr)
return;
}
- pkt_hdr->buf_hdr.user_ptr = ptr;
+ pkt_hdr->event_hdr.user_ptr = ptr;
pkt_hdr->p.flags.user_ptr_set = 1;
}
@@ -709,7 +707,7 @@ int odp_packet_add_data(odp_packet_t *pkt_ptr, uint32_t offset, uint32_t len)
odp_packet_t pkt = *pkt_ptr;
odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
uint32_t pktlen = odp_packet_len(pkt);
- pool_t *pool = pkt_hdr->buf_hdr.pool_ptr;
+ pool_t *pool = pkt_hdr->event_hdr.pool_ptr;
odp_packet_t newpkt;
if (offset > pktlen)
@@ -739,7 +737,7 @@ int odp_packet_rem_data(odp_packet_t *pkt_ptr, uint32_t offset, uint32_t len)
odp_packet_t pkt = *pkt_ptr;
odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
uint32_t pktlen = odp_packet_len(pkt);
- pool_t *pool = pkt_hdr->buf_hdr.pool_ptr;
+ pool_t *pool = pkt_hdr->event_hdr.pool_ptr;
odp_packet_t newpkt;
if (odp_unlikely(offset + len >= pktlen))
@@ -817,7 +815,7 @@ int odp_packet_concat(odp_packet_t *dst, odp_packet_t src)
uint32_t src_len;
if (odp_likely(!rte_pktmbuf_chain(mb_dst, mb_src))) {
- dst_hdr->buf_hdr.totsize += src_hdr->buf_hdr.totsize;
+ dst_hdr->event_hdr.totsize += src_hdr->event_hdr.totsize;
return 0;
}
@@ -1063,11 +1061,11 @@ void odp_packet_print(odp_packet_t pkt)
int len = 0;
int n = max_len - 1;
odp_packet_hdr_t *hdr = packet_hdr(pkt);
- pool_t *pool = hdr->buf_hdr.pool_ptr;
+ pool_t *pool = hdr->event_hdr.pool_ptr;
len += snprintf(&str[len], n - len, "Packet\n------\n");
len += snprintf(&str[len], n - len, " pool index %u\n", pool->pool_idx);
- len += snprintf(&str[len], n - len, " buf index %u\n", hdr->buf_hdr.index);
+ len += snprintf(&str[len], n - len, " buf index %u\n", hdr->event_hdr.index);
len += snprintf(&str[len], n - len, " ev subtype %i\n", hdr->subtype);
len += snprintf(&str[len], n - len, " input_flags 0x%" PRIx64 "\n",
hdr->p.input_flags.all);
@@ -1088,7 +1086,7 @@ void odp_packet_print(odp_packet_t pkt)
" l4_offset %" PRIu32 "\n", hdr->p.l4_offset);
len += snprintf(&str[len], n - len,
" frame_len %" PRIu32 "\n",
- hdr->buf_hdr.mb.pkt_len);
+ hdr->event_hdr.mb.pkt_len);
len += snprintf(&str[len], n - len,
" input %" PRIu64 "\n",
odp_pktio_to_u64(hdr->input));
@@ -1128,16 +1126,16 @@ void odp_packet_print_data(odp_packet_t pkt, uint32_t offset,
int len = 0;
int n = max_len - 1;
uint32_t data_len = odp_packet_len(pkt);
- pool_t *pool = hdr->buf_hdr.pool_ptr;
+ pool_t *pool = hdr->event_hdr.pool_ptr;
len += snprintf(&str[len], n - len, "Packet\n------\n");
len += snprintf(&str[len], n - len,
" pool name %s\n", pool->name);
len += snprintf(&str[len], n - len,
- " buf index %" PRIu32 "\n", hdr->buf_hdr.index);
+ " buf index %" PRIu32 "\n", hdr->event_hdr.index);
len += snprintf(&str[len], n - len,
" segcount %" PRIu8 "\n",
- hdr->buf_hdr.mb.nb_segs);
+ hdr->event_hdr.mb.nb_segs);
len += snprintf(&str[len], n - len,
" data len %" PRIu32 "\n", data_len);
len += snprintf(&str[len], n - len,
@@ -1186,11 +1184,11 @@ int odp_packet_is_valid(odp_packet_t pkt)
if (pkt == ODP_PACKET_INVALID)
return 0;
- if (_odp_buffer_is_valid(packet_to_buffer(pkt)) == 0)
- return 0;
-
ev = odp_packet_to_event(pkt);
+ if (_odp_event_is_valid(ev) == 0)
+ return 0;
+
if (odp_event_type(ev) != ODP_EVENT_PACKET)
return 0;
@@ -1230,15 +1228,15 @@ int _odp_packet_copy_md_to_packet(odp_packet_t srcpkt, odp_packet_t dstpkt)
dsthdr->cos = srchdr->cos;
dsthdr->cls_mark = srchdr->cls_mark;
- dsthdr->buf_hdr.user_ptr = srchdr->buf_hdr.user_ptr;
- dsthdr->buf_hdr.mb.port = srchdr->buf_hdr.mb.port;
- dsthdr->buf_hdr.mb.ol_flags = srchdr->buf_hdr.mb.ol_flags;
- dsthdr->buf_hdr.mb.packet_type = srchdr->buf_hdr.mb.packet_type;
- dsthdr->buf_hdr.mb.vlan_tci = srchdr->buf_hdr.mb.vlan_tci;
- dsthdr->buf_hdr.mb.hash.rss = srchdr->buf_hdr.mb.hash.rss;
- dsthdr->buf_hdr.mb.hash = srchdr->buf_hdr.mb.hash;
- dsthdr->buf_hdr.mb.vlan_tci_outer = srchdr->buf_hdr.mb.vlan_tci_outer;
- dsthdr->buf_hdr.mb.tx_offload = srchdr->buf_hdr.mb.tx_offload;
+ dsthdr->event_hdr.user_ptr = srchdr->event_hdr.user_ptr;
+ dsthdr->event_hdr.mb.port = srchdr->event_hdr.mb.port;
+ dsthdr->event_hdr.mb.ol_flags = srchdr->event_hdr.mb.ol_flags;
+ dsthdr->event_hdr.mb.packet_type = srchdr->event_hdr.mb.packet_type;
+ dsthdr->event_hdr.mb.vlan_tci = srchdr->event_hdr.mb.vlan_tci;
+ dsthdr->event_hdr.mb.hash.rss = srchdr->event_hdr.mb.hash.rss;
+ dsthdr->event_hdr.mb.hash = srchdr->event_hdr.mb.hash;
+ dsthdr->event_hdr.mb.vlan_tci_outer = srchdr->event_hdr.mb.vlan_tci_outer;
+ dsthdr->event_hdr.mb.tx_offload = srchdr->event_hdr.mb.tx_offload;
if (dst_size != 0)
memcpy(odp_packet_user_area(dstpkt),
@@ -2145,7 +2143,7 @@ int odp_packet_parse(odp_packet_t pkt, uint32_t offset,
* packet data range. Copy enough data to a temporary buffer for
* parsing if necessary.
*/
- if (odp_unlikely(pkt_hdr->buf_hdr.mb.nb_segs > 1) &&
+ if (odp_unlikely(pkt_hdr->event_hdr.mb.nb_segs > 1) &&
odp_unlikely(seg_len < min_seglen)) {
seg_len = min_seglen;
if (seg_len > packet_len - offset)
@@ -2516,7 +2514,7 @@ static inline odp_packet_hdr_t *packet_buf_to_hdr(odp_packet_buf_t pkt_buf)
void *odp_packet_buf_head(odp_packet_buf_t pkt_buf)
{
odp_packet_hdr_t *pkt_hdr = packet_buf_to_hdr(pkt_buf);
- pool_t *pool = pkt_hdr->buf_hdr.pool_ptr;
+ pool_t *pool = pkt_hdr->event_hdr.pool_ptr;
if (odp_unlikely(pool->pool_ext == 0)) {
ODP_ERR("Not an external memory pool\n");
@@ -2529,7 +2527,7 @@ void *odp_packet_buf_head(odp_packet_buf_t pkt_buf)
uint32_t odp_packet_buf_size(odp_packet_buf_t pkt_buf)
{
odp_packet_hdr_t *pkt_hdr = packet_buf_to_hdr(pkt_buf);
- pool_t *pool = pkt_hdr->buf_hdr.pool_ptr;
+ pool_t *pool = pkt_hdr->event_hdr.pool_ptr;
return pool->seg_len;
}
@@ -2554,8 +2552,8 @@ void odp_packet_buf_data_set(odp_packet_buf_t pkt_buf, uint32_t data_offset,
{
odp_packet_hdr_t *pkt_hdr = packet_buf_to_hdr(pkt_buf);
- pkt_hdr->buf_hdr.mb.data_off = data_offset;
- pkt_hdr->buf_hdr.mb.data_len = data_len;
+ pkt_hdr->event_hdr.mb.data_off = data_offset;
+ pkt_hdr->event_hdr.mb.data_len = data_len;
}
odp_packet_buf_t odp_packet_buf_from_head(odp_pool_t pool_hdl, void *head)
@@ -2581,7 +2579,7 @@ uint32_t odp_packet_disassemble(odp_packet_t pkt, odp_packet_buf_t pkt_buf[],
uint32_t i;
odp_packet_seg_t seg;
odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
- pool_t *pool = pkt_hdr->buf_hdr.pool_ptr;
+ pool_t *pool = pkt_hdr->event_hdr.pool_ptr;
uint32_t num_segs = odp_packet_num_segs(pkt);
if (odp_unlikely(pool->type != ODP_POOL_PACKET)) {
@@ -2645,15 +2643,15 @@ odp_packet_t odp_packet_reassemble(odp_pool_t pool_hdl,
if (i < num - 1)
next_seg = (odp_packet_hdr_t *)(uintptr_t)pkt_buf[i + 1];
- data_len += cur_seg->buf_hdr.mb.data_len;
+ data_len += cur_seg->event_hdr.mb.data_len;
mb = (struct rte_mbuf *)(uintptr_t)cur_seg;
mb->next = (struct rte_mbuf *)next_seg;
cur_seg = next_seg;
}
- pkt_hdr->buf_hdr.mb.nb_segs = num;
- pkt_hdr->buf_hdr.mb.pkt_len = data_len;
- pkt_hdr->buf_hdr.mb.data_off = headroom;
+ pkt_hdr->event_hdr.mb.nb_segs = num;
+ pkt_hdr->event_hdr.mb.pkt_len = data_len;
+ pkt_hdr->event_hdr.mb.data_off = headroom;
/* Reset metadata */
pkt_hdr->subtype = ODP_EVENT_PACKET_BASIC;
diff --git a/platform/linux-dpdk/odp_pool.c b/platform/linux-dpdk/odp_pool.c
index 219f8378b..318e0071a 100644
--- a/platform/linux-dpdk/odp_pool.c
+++ b/platform/linux-dpdk/odp_pool.c
@@ -100,7 +100,7 @@ static void ptr_from_mempool(struct rte_mempool *mp ODP_UNUSED, void *opaque,
args->match = true;
}
-static pool_t *find_pool(odp_buffer_hdr_t *buf_hdr)
+static pool_t *find_pool(_odp_event_hdr_t *event_hdr)
{
int i;
@@ -111,7 +111,7 @@ static pool_t *find_pool(odp_buffer_hdr_t *buf_hdr)
if (pool->rte_mempool == NULL)
continue;
- args.addr = (uint8_t *)buf_hdr;
+ args.addr = (uint8_t *)event_hdr;
args.match = false;
rte_mempool_mem_iter(pool->rte_mempool, ptr_from_mempool, &args);
@@ -179,6 +179,7 @@ int _odp_pool_init_global(void)
}
ODP_DBG("\nPool init global\n");
+ ODP_DBG(" event_hdr_t size: %zu\n", sizeof(_odp_event_hdr_t));
ODP_DBG(" odp_buffer_hdr_t size: %zu\n", sizeof(odp_buffer_hdr_t));
ODP_DBG(" odp_packet_hdr_t size: %zu\n", sizeof(odp_packet_hdr_t));
ODP_DBG(" odp_timeout_hdr_t size: %zu\n", sizeof(odp_timeout_hdr_t));
@@ -213,23 +214,23 @@ int _odp_pool_term_local(void)
return 0;
}
-int _odp_buffer_is_valid(odp_buffer_t buf)
+int _odp_event_is_valid(odp_event_t event)
{
pool_t *pool;
- odp_buffer_hdr_t *buf_hdr = buf_hdl_to_hdr(buf);
+ _odp_event_hdr_t *event_hdr = _odp_event_hdr(event);
- if (buf == ODP_BUFFER_INVALID)
+ if (event == ODP_EVENT_INVALID)
return 0;
/* Check that the event header is from a known pool */
- pool = find_pool(buf_hdr);
+ pool = find_pool(event_hdr);
if (pool == NULL)
return 0;
- if (pool != buf_hdr->pool_ptr)
+ if (pool != event_hdr->pool_ptr)
return 0;
- if (buf_hdr->index >= pool->rte_mempool->size)
+ if (event_hdr->index >= pool->rte_mempool->size)
return 0;
return 1;
@@ -311,7 +312,7 @@ odp_dpdk_mbuf_ctor(struct rte_mempool *mp,
{
struct mbuf_ctor_arg *mb_ctor_arg;
struct rte_mbuf *mb = raw_mbuf;
- struct odp_buffer_hdr_t *buf_hdr;
+ _odp_event_hdr_t *event_hdr;
/* The rte_mbuf is at the beginning in all cases */
mb_ctor_arg = (struct mbuf_ctor_arg *)opaque_arg;
@@ -356,12 +357,12 @@ odp_dpdk_mbuf_ctor(struct rte_mempool *mp,
mb->next = NULL;
/* Save index, might be useful for debugging purposes */
- buf_hdr = (struct odp_buffer_hdr_t *)raw_mbuf;
- buf_hdr->index = i;
- buf_hdr->pool_ptr = mb_ctor_arg->pool;
- buf_hdr->type = mb_ctor_arg->type;
- buf_hdr->event_type = mb_ctor_arg->event_type;
- buf_hdr->uarea_addr = mb_ctor_arg->pool->uarea_base_addr +
+ event_hdr = (_odp_event_hdr_t *)raw_mbuf;
+ event_hdr->index = i;
+ event_hdr->pool_ptr = mb_ctor_arg->pool;
+ event_hdr->type = mb_ctor_arg->type;
+ event_hdr->event_type = mb_ctor_arg->event_type;
+ event_hdr->uarea_addr = mb_ctor_arg->pool->uarea_base_addr +
i * mb_ctor_arg->pool->uarea_size;
/* Initialize event vector metadata */
@@ -598,7 +599,6 @@ static int reserve_uarea(pool_t *pool, uint32_t uarea_size, uint32_t num_pkt)
{
odp_shm_t shm;
char uarea_name[ODP_SHM_NAME_LEN];
- uint32_t shm_flags = 0;
pool->uarea_shm = ODP_SHM_INVALID;
@@ -615,11 +615,7 @@ static int reserve_uarea(pool_t *pool, uint32_t uarea_size, uint32_t num_pkt)
pool->uarea_size = ROUNDUP_CACHE_LINE(uarea_size);
pool->uarea_shm_size = num_pkt * (uint64_t)pool->uarea_size;
- if (odp_global_ro.shm_single_va)
- shm_flags |= ODP_SHM_SINGLE_VA;
-
- shm = odp_shm_reserve(uarea_name, pool->uarea_shm_size, ODP_PAGE_SIZE,
- shm_flags);
+ shm = odp_shm_reserve(uarea_name, pool->uarea_shm_size, ODP_PAGE_SIZE, 0);
if (shm == ODP_SHM_INVALID)
return -1;
@@ -629,7 +625,10 @@ static int reserve_uarea(pool_t *pool, uint32_t uarea_size, uint32_t num_pkt)
return 0;
}
-odp_pool_t odp_pool_create(const char *name, const odp_pool_param_t *params)
+/* Create a pool according to params. The actual type of the pool is type_2, which is recorded
+ * for pool info calls. */
+odp_pool_t _odp_pool_create(const char *name, const odp_pool_param_t *params,
+ odp_pool_type_t type_2)
{
struct rte_pktmbuf_pool_private mbp_ctor_arg;
struct mbuf_ctor_arg mb_ctor_arg;
@@ -645,9 +644,6 @@ odp_pool_t odp_pool_create(const char *name, const odp_pool_param_t *params)
char pool_name[ODP_POOL_NAME_LEN];
char rte_name[RTE_MEMPOOL_NAMESIZE];
- if (check_params(params))
- return ODP_POOL_INVALID;
-
if (name == NULL) {
pool_name[0] = 0;
} else {
@@ -820,6 +816,7 @@ odp_pool_t odp_pool_create(const char *name, const odp_pool_param_t *params)
}
pool->type = type;
+ pool->type_2 = type_2;
pool->params = *params;
if (reserve_uarea(pool, uarea_size, num)) {
@@ -846,6 +843,14 @@ odp_pool_t odp_pool_create(const char *name, const odp_pool_param_t *params)
return pool_hdl;
}
+odp_pool_t odp_pool_create(const char *name, const odp_pool_param_t *params)
+{
+ if (check_params(params))
+ return ODP_POOL_INVALID;
+
+ return _odp_pool_create(name, params, params->type);
+}
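+
+/*
+ * Illustrative sketch (editor's example): an internal caller, such as a DMA
+ * completion event pool, could back itself with buffers while recording its
+ * own type for odp_pool_info():
+ *
+ *   odp_pool_param_t param;
+ *
+ *   odp_pool_param_init(&param);
+ *   param.type = ODP_POOL_BUFFER;
+ *   param.buf.num = num;
+ *   pool = _odp_pool_create(name, &param, ODP_POOL_DMA_COMPL);
+ */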
+
odp_pool_t odp_pool_lookup(const char *name)
{
uint32_t i;
@@ -878,7 +883,7 @@ odp_buffer_t odp_buffer_alloc(odp_pool_t pool_hdl)
ODP_ASSERT(pool->type == ODP_POOL_BUFFER);
- ret = _odp_buffer_alloc_multi(pool, (odp_buffer_hdr_t **)&buf, 1);
+ ret = _odp_event_alloc_multi(pool, (_odp_event_hdr_t **)&buf, 1);
if (odp_likely(ret == 1))
return buf;
@@ -896,17 +901,17 @@ int odp_buffer_alloc_multi(odp_pool_t pool_hdl, odp_buffer_t buf[], int num)
ODP_ASSERT(pool->type == ODP_POOL_BUFFER);
- return _odp_buffer_alloc_multi(pool, (odp_buffer_hdr_t **)buf, num);
+ return _odp_event_alloc_multi(pool, (_odp_event_hdr_t **)buf, num);
}
void odp_buffer_free(odp_buffer_t buf)
{
- rte_mbuf_raw_free(buf_to_mbuf(buf));
+ _odp_event_free(odp_buffer_to_event(buf));
}
void odp_buffer_free_multi(const odp_buffer_t buf[], int num)
{
- _odp_buffer_free_multi((odp_buffer_hdr_t **)(uintptr_t)buf, num);
+ _odp_event_free_multi((_odp_event_hdr_t **)(uintptr_t)buf, num);
}
void odp_pool_print(odp_pool_t pool_hdl)
@@ -990,11 +995,17 @@ int odp_pool_info(odp_pool_t pool_hdl, odp_pool_info_t *info)
memset(info, 0, sizeof(odp_pool_info_t));
+ info->type = pool->type_2;
info->name = pool->name;
if (pool->pool_ext) {
info->pool_ext = 1;
info->pool_ext_param = pool->ext_param;
+
+ } else if (pool->type_2 == ODP_POOL_DMA_COMPL) {
+ info->dma_pool_param.num = pool->params.buf.num;
+ info->dma_pool_param.cache_size = pool->params.buf.cache_size;
+
} else {
info->params = pool->params;
}
@@ -1036,7 +1047,7 @@ int odp_pool_destroy(odp_pool_t pool_hdl)
odp_pool_t odp_buffer_pool(odp_buffer_t buf)
{
- pool_t *pool = buf_hdl_to_hdr(buf)->pool_ptr;
+ pool_t *pool = _odp_buf_hdr(buf)->event_hdr.pool_ptr;
return pool->pool_hdl;
}
diff --git a/platform/linux-dpdk/odp_queue_basic.c b/platform/linux-dpdk/odp_queue_basic.c
index e3c3241c4..bb57475a3 100644
--- a/platform/linux-dpdk/odp_queue_basic.c
+++ b/platform/linux-dpdk/odp_queue_basic.c
@@ -10,8 +10,6 @@
#include <odp_queue_if.h>
#include <odp/api/std_types.h>
#include <odp/api/align.h>
-#include <odp/api/buffer.h>
-#include <odp_buffer_internal.h>
#include <odp_pool_internal.h>
#include <odp_init_internal.h>
#include <odp/api/shared_memory.h>
@@ -28,6 +26,8 @@
#include <odp_timer_internal.h>
#include <odp/api/plat/queue_inline_types.h>
#include <odp_global_data.h>
+#include <odp_queue_basic_internal.h>
+#include <odp_event_internal.h>
#include <odp/api/plat/ticketlock_inlines.h>
#define LOCK(queue_ptr) odp_ticketlock_lock(&((queue_ptr)->s.lock))
@@ -465,7 +465,7 @@ static odp_queue_t queue_lookup(const char *name)
}
static inline int _plain_queue_enq_multi(odp_queue_t handle,
- odp_buffer_hdr_t *buf_hdr[], int num)
+ _odp_event_hdr_t *event_hdr[], int num)
{
queue_entry_t *queue;
int ret, num_enq;
@@ -474,16 +474,16 @@ static inline int _plain_queue_enq_multi(odp_queue_t handle,
queue = qentry_from_handle(handle);
ring_mpmc = queue->s.ring_mpmc;
- if (_odp_sched_fn->ord_enq_multi(handle, (void **)buf_hdr, num, &ret))
+ if (_odp_sched_fn->ord_enq_multi(handle, (void **)event_hdr, num, &ret))
return ret;
- num_enq = ring_mpmc_enq_multi(ring_mpmc, (void **)buf_hdr, num);
+ num_enq = ring_mpmc_enq_multi(ring_mpmc, (void **)event_hdr, num);
return num_enq;
}
static inline int _plain_queue_deq_multi(odp_queue_t handle,
- odp_buffer_hdr_t *buf_hdr[], int num)
+ _odp_event_hdr_t *event_hdr[], int num)
{
int num_deq;
queue_entry_t *queue;
@@ -492,22 +492,22 @@ static inline int _plain_queue_deq_multi(odp_queue_t handle,
queue = qentry_from_handle(handle);
ring_mpmc = queue->s.ring_mpmc;
- num_deq = ring_mpmc_deq_multi(ring_mpmc, (void **)buf_hdr, num);
+ num_deq = ring_mpmc_deq_multi(ring_mpmc, (void **)event_hdr, num);
return num_deq;
}
static int plain_queue_enq_multi(odp_queue_t handle,
- odp_buffer_hdr_t *buf_hdr[], int num)
+ _odp_event_hdr_t *event_hdr[], int num)
{
- return _plain_queue_enq_multi(handle, buf_hdr, num);
+ return _plain_queue_enq_multi(handle, event_hdr, num);
}
-static int plain_queue_enq(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr)
+static int plain_queue_enq(odp_queue_t handle, _odp_event_hdr_t *event_hdr)
{
int ret;
- ret = _plain_queue_enq_multi(handle, &buf_hdr, 1);
+ ret = _plain_queue_enq_multi(handle, &event_hdr, 1);
if (ret == 1)
return 0;
@@ -516,27 +516,27 @@ static int plain_queue_enq(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr)
}
static int plain_queue_deq_multi(odp_queue_t handle,
- odp_buffer_hdr_t *buf_hdr[], int num)
+ _odp_event_hdr_t *event_hdr[], int num)
{
- return _plain_queue_deq_multi(handle, buf_hdr, num);
+ return _plain_queue_deq_multi(handle, event_hdr, num);
}
-static odp_buffer_hdr_t *plain_queue_deq(odp_queue_t handle)
+static _odp_event_hdr_t *plain_queue_deq(odp_queue_t handle)
{
- odp_buffer_hdr_t *buf_hdr = NULL;
+ _odp_event_hdr_t *event_hdr = NULL;
int ret;
- ret = _plain_queue_deq_multi(handle, &buf_hdr, 1);
+ ret = _plain_queue_deq_multi(handle, &event_hdr, 1);
if (ret == 1)
- return buf_hdr;
+ return event_hdr;
else
return NULL;
}
-static int error_enqueue(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr)
+static int error_enqueue(odp_queue_t handle, _odp_event_hdr_t *event_hdr)
{
- (void)buf_hdr;
+ (void)event_hdr;
ODP_ERR("Enqueue not supported (0x%" PRIx64 ")\n",
odp_queue_to_u64(handle));
@@ -545,10 +545,10 @@ static int error_enqueue(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr)
}
static int error_enqueue_multi(odp_queue_t handle,
- odp_buffer_hdr_t *buf_hdr[], int num)
+ _odp_event_hdr_t *event_hdr[], int num)
{
- (void)buf_hdr;
+ (void)event_hdr;
(void)num;
ODP_ERR("Enqueue multi not supported (0x%" PRIx64 ")\n",
@@ -557,7 +557,7 @@ static int error_enqueue_multi(odp_queue_t handle,
return -1;
}
-static odp_buffer_hdr_t *error_dequeue(odp_queue_t handle)
+static _odp_event_hdr_t *error_dequeue(odp_queue_t handle)
{
ODP_ERR("Dequeue not supported (0x%" PRIx64 ")\n",
odp_queue_to_u64(handle));
@@ -566,9 +566,9 @@ static odp_buffer_hdr_t *error_dequeue(odp_queue_t handle)
}
static int error_dequeue_multi(odp_queue_t handle,
- odp_buffer_hdr_t *buf_hdr[], int num)
+ _odp_event_hdr_t *event_hdr[], int num)
{
- (void)buf_hdr;
+ (void)event_hdr;
(void)num;
ODP_ERR("Dequeue multi not supported (0x%" PRIx64 ")\n",
@@ -841,7 +841,7 @@ static void queue_print_all(void)
}
static inline int _sched_queue_enq_multi(odp_queue_t handle,
- odp_buffer_hdr_t *buf_hdr[], int num)
+ _odp_event_hdr_t *event_hdr[], int num)
{
int sched = 0;
int ret;
@@ -852,12 +852,12 @@ static inline int _sched_queue_enq_multi(odp_queue_t handle,
queue = qentry_from_handle(handle);
ring_st = queue->s.ring_st;
- if (_odp_sched_fn->ord_enq_multi(handle, (void **)buf_hdr, num, &ret))
+ if (_odp_sched_fn->ord_enq_multi(handle, (void **)event_hdr, num, &ret))
return ret;
LOCK(queue);
- num_enq = ring_st_enq_multi(ring_st, (void **)buf_hdr, num);
+ num_enq = ring_st_enq_multi(ring_st, (void **)event_hdr, num);
if (odp_unlikely(num_enq == 0)) {
UNLOCK(queue);
@@ -921,16 +921,16 @@ int _odp_sched_queue_deq(uint32_t queue_index, odp_event_t ev[], int max_num,
}
static int sched_queue_enq_multi(odp_queue_t handle,
- odp_buffer_hdr_t *buf_hdr[], int num)
+ _odp_event_hdr_t *event_hdr[], int num)
{
- return _sched_queue_enq_multi(handle, buf_hdr, num);
+ return _sched_queue_enq_multi(handle, event_hdr, num);
}
-static int sched_queue_enq(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr)
+static int sched_queue_enq(odp_queue_t handle, _odp_event_hdr_t *event_hdr)
{
int ret;
- ret = _sched_queue_enq_multi(handle, &buf_hdr, 1);
+ ret = _sched_queue_enq_multi(handle, &event_hdr, 1);
if (ret == 1)
return 0;
@@ -1116,11 +1116,11 @@ static void queue_set_enq_deq_func(odp_queue_t handle,
}
static int queue_orig_multi(odp_queue_t handle,
- odp_buffer_hdr_t **buf_hdr, int num)
+ _odp_event_hdr_t **event_hdr, int num)
{
queue_entry_t *queue = qentry_from_handle(handle);
- return queue->s.orig_dequeue_multi(handle, buf_hdr, num);
+ return queue->s.orig_dequeue_multi(handle, event_hdr, num);
}
static int queue_api_enq_multi(odp_queue_t handle,
@@ -1135,7 +1135,7 @@ static int queue_api_enq_multi(odp_queue_t handle,
num = QUEUE_MULTI_MAX;
return queue->s.enqueue_multi(handle,
- (odp_buffer_hdr_t **)(uintptr_t)ev, num);
+ (_odp_event_hdr_t **)(uintptr_t)ev, num);
}
static void queue_timer_add(odp_queue_t handle)
@@ -1157,7 +1157,7 @@ static int queue_api_enq(odp_queue_t handle, odp_event_t ev)
queue_entry_t *queue = qentry_from_handle(handle);
return queue->s.enqueue(handle,
- (odp_buffer_hdr_t *)(uintptr_t)ev);
+ (_odp_event_hdr_t *)(uintptr_t)ev);
}
static int queue_api_deq_multi(odp_queue_t handle, odp_event_t ev[], int num)
@@ -1168,7 +1168,7 @@ static int queue_api_deq_multi(odp_queue_t handle, odp_event_t ev[], int num)
if (num > QUEUE_MULTI_MAX)
num = QUEUE_MULTI_MAX;
- ret = queue->s.dequeue_multi(handle, (odp_buffer_hdr_t **)ev, num);
+ ret = queue->s.dequeue_multi(handle, (_odp_event_hdr_t **)ev, num);
if (odp_global_rw->inline_timers &&
odp_atomic_load_u64(&queue->s.num_timers))
diff --git a/platform/linux-dpdk/odp_queue_eventdev.c b/platform/linux-dpdk/odp_queue_eventdev.c
index b650874ec..b474a515c 100644
--- a/platform/linux-dpdk/odp_queue_eventdev.c
+++ b/platform/linux-dpdk/odp_queue_eventdev.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2019, Nokia
+/* Copyright (c) 2019-2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -13,7 +13,7 @@
#include <odp/api/plat/queue_inline_types.h>
#include <odp/api/plat/ticketlock_inlines.h>
#include <odp_config_internal.h>
-#include <odp_buffer_internal.h>
+#include <odp_event_internal.h>
#include <odp_debug_internal.h>
#include <odp_libconfig_internal.h>
#include <odp_queue_if.h>
@@ -822,7 +822,7 @@ static odp_queue_t queue_lookup(const char *name)
}
static inline int _plain_queue_enq_multi(odp_queue_t handle,
- odp_buffer_hdr_t *buf_hdr[], int num)
+ _odp_event_hdr_t *event_hdr[], int num)
{
queue_entry_t *queue;
int num_enq;
@@ -831,13 +831,13 @@ static inline int _plain_queue_enq_multi(odp_queue_t handle,
queue = qentry_from_handle(handle);
ring_mpmc = queue->s.ring_mpmc;
- num_enq = ring_mpmc_enq_multi(ring_mpmc, (void **)buf_hdr, num);
+ num_enq = ring_mpmc_enq_multi(ring_mpmc, (void **)event_hdr, num);
return num_enq;
}
static inline int _plain_queue_deq_multi(odp_queue_t handle,
- odp_buffer_hdr_t *buf_hdr[], int num)
+ _odp_event_hdr_t *event_hdr[], int num)
{
int num_deq;
queue_entry_t *queue;
@@ -846,22 +846,22 @@ static inline int _plain_queue_deq_multi(odp_queue_t handle,
queue = qentry_from_handle(handle);
ring_mpmc = queue->s.ring_mpmc;
- num_deq = ring_mpmc_deq_multi(ring_mpmc, (void **)buf_hdr, num);
+ num_deq = ring_mpmc_deq_multi(ring_mpmc, (void **)event_hdr, num);
return num_deq;
}
static int plain_queue_enq_multi(odp_queue_t handle,
- odp_buffer_hdr_t *buf_hdr[], int num)
+ _odp_event_hdr_t *event_hdr[], int num)
{
- return _plain_queue_enq_multi(handle, buf_hdr, num);
+ return _plain_queue_enq_multi(handle, event_hdr, num);
}
-static int plain_queue_enq(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr)
+static int plain_queue_enq(odp_queue_t handle, _odp_event_hdr_t *event_hdr)
{
int ret;
- ret = _plain_queue_enq_multi(handle, &buf_hdr, 1);
+ ret = _plain_queue_enq_multi(handle, &event_hdr, 1);
if (ret == 1)
return 0;
@@ -870,27 +870,27 @@ static int plain_queue_enq(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr)
}
static int plain_queue_deq_multi(odp_queue_t handle,
- odp_buffer_hdr_t *buf_hdr[], int num)
+ _odp_event_hdr_t *event_hdr[], int num)
{
- return _plain_queue_deq_multi(handle, buf_hdr, num);
+ return _plain_queue_deq_multi(handle, event_hdr, num);
}
-static odp_buffer_hdr_t *plain_queue_deq(odp_queue_t handle)
+static _odp_event_hdr_t *plain_queue_deq(odp_queue_t handle)
{
- odp_buffer_hdr_t *buf_hdr = NULL;
+ _odp_event_hdr_t *event_hdr = NULL;
int ret;
- ret = _plain_queue_deq_multi(handle, &buf_hdr, 1);
+ ret = _plain_queue_deq_multi(handle, &event_hdr, 1);
if (ret == 1)
- return buf_hdr;
+ return event_hdr;
else
return NULL;
}
-static int error_enqueue(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr)
+static int error_enqueue(odp_queue_t handle, _odp_event_hdr_t *event_hdr)
{
- (void)buf_hdr;
+ (void)event_hdr;
ODP_ERR("Enqueue not supported (0x%" PRIx64 ")\n",
odp_queue_to_u64(handle));
@@ -899,10 +899,10 @@ static int error_enqueue(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr)
}
static int error_enqueue_multi(odp_queue_t handle,
- odp_buffer_hdr_t *buf_hdr[], int num)
+ _odp_event_hdr_t *event_hdr[], int num)
{
- (void)buf_hdr;
+ (void)event_hdr;
(void)num;
ODP_ERR("Enqueue multi not supported (0x%" PRIx64 ")\n",
@@ -911,7 +911,7 @@ static int error_enqueue_multi(odp_queue_t handle,
return -1;
}
-static odp_buffer_hdr_t *error_dequeue(odp_queue_t handle)
+static _odp_event_hdr_t *error_dequeue(odp_queue_t handle)
{
ODP_ERR("Dequeue not supported (0x%" PRIx64 ")\n",
odp_queue_to_u64(handle));
@@ -920,9 +920,9 @@ static odp_buffer_hdr_t *error_dequeue(odp_queue_t handle)
}
static int error_dequeue_multi(odp_queue_t handle,
- odp_buffer_hdr_t *buf_hdr[], int num)
+ _odp_event_hdr_t *event_hdr[], int num)
{
- (void)buf_hdr;
+ (void)event_hdr;
(void)num;
ODP_ERR("Dequeue multi not supported (0x%" PRIx64 ")\n",
@@ -1068,7 +1068,7 @@ static void queue_print(odp_queue_t handle)
}
static inline int _sched_queue_enq_multi(odp_queue_t handle,
- odp_buffer_hdr_t *buf_hdr[], int num)
+ _odp_event_hdr_t *event_hdr[], int num)
{
queue_entry_t *queue;
struct rte_event ev[CONFIG_BURST_SIZE];
@@ -1110,7 +1110,7 @@ static inline int _sched_queue_enq_multi(odp_queue_t handle,
ev[i].event_type = RTE_EVENT_TYPE_CPU;
ev[i].sub_event_type = 0;
ev[i].priority = priority;
- ev[i].mbuf = &buf_hdr[i]->mb;
+ ev[i].mbuf = &event_hdr[i]->mb;
}
num_enq = rte_event_enqueue_new_burst(dev_id, port_id, ev, num);
@@ -1119,16 +1119,16 @@ static inline int _sched_queue_enq_multi(odp_queue_t handle,
}
static int sched_queue_enq_multi(odp_queue_t handle,
- odp_buffer_hdr_t *buf_hdr[], int num)
+ _odp_event_hdr_t *event_hdr[], int num)
{
- return _sched_queue_enq_multi(handle, buf_hdr, num);
+ return _sched_queue_enq_multi(handle, event_hdr, num);
}
-static int sched_queue_enq(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr)
+static int sched_queue_enq(odp_queue_t handle, _odp_event_hdr_t *event_hdr)
{
int ret;
- ret = _sched_queue_enq_multi(handle, &buf_hdr, 1);
+ ret = _sched_queue_enq_multi(handle, &event_hdr, 1);
if (ret == 1)
return 0;
@@ -1271,11 +1271,11 @@ static void queue_set_enq_deq_func(odp_queue_t handle,
}
static int queue_orig_multi(odp_queue_t handle,
- odp_buffer_hdr_t **buf_hdr, int num)
+ _odp_event_hdr_t **event_hdr, int num)
{
queue_entry_t *queue = qentry_from_handle(handle);
- return queue->s.orig_dequeue_multi(handle, buf_hdr, num);
+ return queue->s.orig_dequeue_multi(handle, event_hdr, num);
}
static int queue_api_enq_multi(odp_queue_t handle,
@@ -1290,7 +1290,7 @@ static int queue_api_enq_multi(odp_queue_t handle,
num = QUEUE_MULTI_MAX;
return queue->s.enqueue_multi(handle,
- (odp_buffer_hdr_t **)(uintptr_t)ev, num);
+ (_odp_event_hdr_t **)(uintptr_t)ev, num);
}
static void queue_timer_add(odp_queue_t handle)
@@ -1312,7 +1312,7 @@ static int queue_api_enq(odp_queue_t handle, odp_event_t ev)
queue_entry_t *queue = qentry_from_handle(handle);
return queue->s.enqueue(handle,
- (odp_buffer_hdr_t *)(uintptr_t)ev);
+ (_odp_event_hdr_t *)(uintptr_t)ev);
}
static int queue_api_deq_multi(odp_queue_t handle, odp_event_t ev[], int num)
@@ -1323,7 +1323,7 @@ static int queue_api_deq_multi(odp_queue_t handle, odp_event_t ev[], int num)
if (num > QUEUE_MULTI_MAX)
num = QUEUE_MULTI_MAX;
- ret = queue->s.dequeue_multi(handle, (odp_buffer_hdr_t **)ev, num);
+ ret = queue->s.dequeue_multi(handle, (_odp_event_hdr_t **)ev, num);
if (odp_global_rw->inline_timers &&
odp_atomic_load_u64(&queue->s.num_timers))
diff --git a/platform/linux-dpdk/odp_queue_spsc.c b/platform/linux-dpdk/odp_queue_spsc.c
index d07451042..dc74c9595 100644
--- a/platform/linux-dpdk/odp_queue_spsc.c
+++ b/platform/linux-dpdk/odp_queue_spsc.c
@@ -1,15 +1,17 @@
/* Copyright (c) 2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <odp/api/hints.h>
-#include <odp_queue_basic_internal.h>
#include <odp_debug_internal.h>
+#include <odp_event_internal.h>
+#include <odp_queue_basic_internal.h>
static inline int spsc_enq_multi(odp_queue_t handle,
- odp_buffer_hdr_t *buf_hdr[], int num)
+ _odp_event_hdr_t *event_hdr[], int num)
{
queue_entry_t *queue;
ring_spsc_t ring_spsc;
@@ -22,11 +24,11 @@ static inline int spsc_enq_multi(odp_queue_t handle,
return -1;
}
- return ring_spsc_enq_multi(ring_spsc, (void **)buf_hdr, num);
+ return ring_spsc_enq_multi(ring_spsc, (void **)event_hdr, num);
}
static inline int spsc_deq_multi(odp_queue_t handle,
- odp_buffer_hdr_t *buf_hdr[], int num)
+ _odp_event_hdr_t *event_hdr[], int num)
{
queue_entry_t *queue;
ring_spsc_t ring_spsc;
@@ -39,20 +41,20 @@ static inline int spsc_deq_multi(odp_queue_t handle,
return -1;
}
- return ring_spsc_deq_multi(ring_spsc, (void **)buf_hdr, num);
+ return ring_spsc_deq_multi(ring_spsc, (void **)event_hdr, num);
}
-static int queue_spsc_enq_multi(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr[],
+static int queue_spsc_enq_multi(odp_queue_t handle, _odp_event_hdr_t *event_hdr[],
int num)
{
- return spsc_enq_multi(handle, buf_hdr, num);
+ return spsc_enq_multi(handle, event_hdr, num);
}
-static int queue_spsc_enq(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr)
+static int queue_spsc_enq(odp_queue_t handle, _odp_event_hdr_t *event_hdr)
{
int ret;
- ret = spsc_enq_multi(handle, &buf_hdr, 1);
+ ret = spsc_enq_multi(handle, &event_hdr, 1);
if (ret == 1)
return 0;
@@ -60,21 +62,21 @@ static int queue_spsc_enq(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr)
return -1;
}
-static int queue_spsc_deq_multi(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr[],
+static int queue_spsc_deq_multi(odp_queue_t handle, _odp_event_hdr_t *event_hdr[],
int num)
{
- return spsc_deq_multi(handle, buf_hdr, num);
+ return spsc_deq_multi(handle, event_hdr, num);
}
-static odp_buffer_hdr_t *queue_spsc_deq(odp_queue_t handle)
+static _odp_event_hdr_t *queue_spsc_deq(odp_queue_t handle)
{
- odp_buffer_hdr_t *buf_hdr = NULL;
+ _odp_event_hdr_t *event_hdr = NULL;
int ret;
- ret = spsc_deq_multi(handle, &buf_hdr, 1);
+ ret = spsc_deq_multi(handle, &event_hdr, 1);
if (ret == 1)
- return buf_hdr;
+ return event_hdr;
else
return NULL;
}
diff --git a/platform/linux-dpdk/odp_shared_memory.c b/platform/linux-dpdk/odp_shared_memory.c
index 645bb8847..c18c9acca 100644
--- a/platform/linux-dpdk/odp_shared_memory.c
+++ b/platform/linux-dpdk/odp_shared_memory.c
@@ -6,14 +6,17 @@
*/
#include <odp_posix_extensions.h>
-#include <odp_align_internal.h>
-#include <odp_config_internal.h>
+
#include <odp/api/debug.h>
-#include <odp_debug_internal.h>
+#include <odp/api/plat/strong_types.h>
#include <odp/api/shared_memory.h>
#include <odp/api/spinlock.h>
-#include <odp/api/plat/strong_types.h>
+
+#include <odp_align_internal.h>
+#include <odp_config_internal.h>
+#include <odp_debug_internal.h>
#include <odp_shm_internal.h>
+
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
@@ -24,6 +27,9 @@
#include <rte_lcore.h>
#include <rte_memzone.h>
+/* Supported ODP_SHM_* flags */
+#define SUPPORTED_SHM_FLAGS (ODP_SHM_SW_ONLY | ODP_SHM_EXPORT | ODP_SHM_HP)
+
#define SHM_MAX_ALIGN (0x80000000)
#define SHM_BLOCK_NAME "%" PRIu64 "-%d-%s"
#define SHM_MAX_NB_BLOCKS (CONFIG_INTERNAL_SHM_BLOCKS + CONFIG_SHM_BLOCKS)
@@ -226,6 +232,7 @@ int odp_shm_capability(odp_shm_capability_t *capa)
capa->max_blocks = CONFIG_SHM_BLOCKS;
capa->max_size = 0;
capa->max_align = SHM_MAX_ALIGN;
+ capa->flags = SUPPORTED_SHM_FLAGS;
return 0;
}
@@ -238,6 +245,12 @@ odp_shm_t odp_shm_reserve(const char *name, uint64_t size, uint64_t align,
char mz_name[RTE_MEMZONE_NAMESIZE];
uint32_t mz_flags = RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY;
int idx;
+ uint32_t supported_flags = SUPPORTED_SHM_FLAGS;
+
+ if (flags & ~supported_flags) {
+ ODP_ERR("Unsupported SHM flag\n");
+ return ODP_SHM_INVALID;
+ }
if (align > SHM_MAX_ALIGN) {
ODP_ERR("Align too large: %" PRIu64 "\n", align);
diff --git a/platform/linux-dpdk/odp_timer.c b/platform/linux-dpdk/odp_timer.c
index 96df1bf79..d0ebd0763 100644
--- a/platform/linux-dpdk/odp_timer.c
+++ b/platform/linux-dpdk/odp_timer.c
@@ -462,6 +462,12 @@ int odp_timer_res_capability(odp_timer_clk_src_t clk_src,
return 0;
}
+void odp_timer_pool_param_init(odp_timer_pool_param_t *param)
+{
+ memset(param, 0, sizeof(odp_timer_pool_param_t));
+ param->clk_src = ODP_CLOCK_DEFAULT;
+}
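+
+/*
+ * Illustrative sketch (editor's example): typical usage of the new
+ * initializer, which zeroes the params and selects ODP_CLOCK_DEFAULT:
+ *
+ *   odp_timer_pool_param_t param;
+ *
+ *   odp_timer_pool_param_init(&param);
+ *   param.res_ns     = ODP_TIME_MSEC_IN_NS;
+ *   param.min_tmo    = ODP_TIME_MSEC_IN_NS;
+ *   param.max_tmo    = ODP_TIME_SEC_IN_NS;
+ *   param.num_timers = 32;
+ *   tp = odp_timer_pool_create("example", &param);
+ */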
+
odp_timer_pool_t odp_timer_pool_create(const char *name,
const odp_timer_pool_param_t *param)
{
@@ -959,9 +965,8 @@ void *odp_timeout_user_ptr(odp_timeout_t tmo)
odp_timeout_t odp_timeout_alloc(odp_pool_t pool_hdl)
{
- odp_timeout_t tmo;
+ odp_event_t event;
pool_t *pool;
- int ret;
ODP_ASSERT(pool_hdl != ODP_POOL_INVALID);
@@ -969,17 +974,16 @@ odp_timeout_t odp_timeout_alloc(odp_pool_t pool_hdl)
ODP_ASSERT(pool->type == ODP_POOL_TIMEOUT);
- ret = _odp_buffer_alloc_multi(pool, (odp_buffer_hdr_t **)&tmo, 1);
-
- if (odp_likely(ret == 1))
- return tmo;
+ event = _odp_event_alloc(pool);
+ if (odp_unlikely(event == ODP_EVENT_INVALID))
+ return ODP_TIMEOUT_INVALID;
- return ODP_TIMEOUT_INVALID;
+ return odp_timeout_from_event(event);
}
void odp_timeout_free(odp_timeout_t tmo)
{
- _odp_buffer_free_multi((odp_buffer_hdr_t **)&tmo, 1);
+ _odp_event_free(odp_timeout_to_event(tmo));
}
void odp_timer_pool_print(odp_timer_pool_t timer_pool)
diff --git a/platform/linux-generic/Makefile.am b/platform/linux-generic/Makefile.am
index 8763606ad..64245801e 100644
--- a/platform/linux-generic/Makefile.am
+++ b/platform/linux-generic/Makefile.am
@@ -61,6 +61,7 @@ odpapiabiarchinclude_HEADERS += \
include-abi/odp/api/abi/cpumask.h \
include-abi/odp/api/abi/crypto.h \
include-abi/odp/api/abi/debug.h \
+ include-abi/odp/api/abi/dma_types.h \
include-abi/odp/api/abi/errno.h \
include-abi/odp/api/abi/event.h \
include-abi/odp/api/abi/hash.h \
@@ -90,7 +91,7 @@ odpapiabiarchinclude_HEADERS += \
include-abi/odp/api/abi/thrmask.h \
include-abi/odp/api/abi/ticketlock.h \
include-abi/odp/api/abi/time.h \
- include-abi/odp/api/abi/timer.h \
+ include-abi/odp/api/abi/timer_types.h \
include-abi/odp/api/abi/traffic_mngr.h \
include-abi/odp/api/abi/version.h
endif
@@ -106,6 +107,7 @@ noinst_HEADERS = \
include/odp_config_internal.h \
include/odp_debug_internal.h \
include/odp_errno_define.h \
+ include/odp_event_internal.h \
include/odp_fdserver_internal.h \
include/odp_forward_typedefs_internal.h \
include/odp_global_data.h \
@@ -176,6 +178,7 @@ __LIB__libodp_linux_la_SOURCES = \
miniz/miniz_tinfl.c miniz/miniz_tinfl.h \
odp_cpumask.c \
odp_cpumask_task.c \
+ odp_dma.c \
odp_errno.c \
odp_event.c \
odp_fdserver.c \
@@ -275,6 +278,7 @@ __LIB__libodp_linux_la_SOURCES += arch/default/odp_atomic.c \
arch/default/odp_cpu_cycles.c \
arch/default/odp_global_time.c \
arch/default/odp_hash_crc32.c \
+ arch/default/odp_random.c \
arch/arm/odp_sysinfo_parse.c
odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/cpu_time.h \
arch/default/odp/api/abi/hash_crc32.h
@@ -291,7 +295,8 @@ noinst_HEADERS += arch/arm/odp_atomic.h \
arch/arm/odp_llsc.h \
arch/default/odp_atomic.h \
arch/default/odp_cpu.h \
- arch/default/odp_cpu_idling.h
+ arch/default/odp_cpu_idling.h \
+ arch/default/odp_random.h
endif
if ARCH_IS_AARCH64
__LIB__libodp_linux_la_SOURCES += arch/aarch64/odp_atomic.c \
@@ -299,6 +304,7 @@ __LIB__libodp_linux_la_SOURCES += arch/aarch64/odp_atomic.c \
arch/aarch64/cpu_flags.c \
arch/aarch64/odp_global_time.c \
arch/default/odp_hash_crc32.c \
+ arch/default/odp_random.c \
arch/aarch64/odp_sysinfo_parse.c
odpapiabiarchinclude_HEADERS += arch/aarch64/odp/api/abi/cpu_time.h \
arch/aarch64/odp/api/abi/hash_crc32.h
@@ -314,13 +320,15 @@ noinst_HEADERS += arch/aarch64/odp_atomic.h \
arch/aarch64/odp_cpu.h \
arch/aarch64/cpu_flags.h \
arch/aarch64/odp_cpu_idling.h \
- arch/aarch64/odp_llsc.h
+ arch/aarch64/odp_llsc.h \
+ arch/default/odp_random.h
endif
if ARCH_IS_DEFAULT
__LIB__libodp_linux_la_SOURCES += arch/default/odp_atomic.c \
arch/default/odp_cpu_cycles.c \
arch/default/odp_global_time.c \
arch/default/odp_hash_crc32.c \
+ arch/default/odp_random.c \
arch/default/odp_sysinfo_parse.c
odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/cpu_time.h \
arch/default/odp/api/abi/hash_crc32.h
@@ -333,12 +341,14 @@ odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/atomic_generic.h \
endif
noinst_HEADERS += arch/default/odp_atomic.h \
arch/default/odp_cpu.h \
- arch/default/odp_cpu_idling.h
+ arch/default/odp_cpu_idling.h \
+ arch/default/odp_random.h
endif
if ARCH_IS_MIPS64
__LIB__libodp_linux_la_SOURCES += arch/default/odp_atomic.c \
arch/default/odp_global_time.c \
arch/default/odp_hash_crc32.c \
+ arch/default/odp_random.c \
arch/mips64/odp_sysinfo_parse.c
odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/cpu_time.h \
arch/default/odp/api/abi/hash_crc32.h
@@ -351,13 +361,15 @@ odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/atomic_generic.h \
endif
noinst_HEADERS += arch/default/odp_atomic.h \
arch/default/odp_cpu.h \
- arch/default/odp_cpu_idling.h
+ arch/default/odp_cpu_idling.h \
+ arch/default/odp_random.h
endif
if ARCH_IS_POWERPC
__LIB__libodp_linux_la_SOURCES += arch/default/odp_atomic.c \
arch/default/odp_cpu_cycles.c \
arch/default/odp_global_time.c \
arch/default/odp_hash_crc32.c \
+ arch/default/odp_random.c \
arch/powerpc/odp_sysinfo_parse.c
odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/cpu_time.h \
arch/default/odp/api/abi/hash_crc32.h
@@ -370,7 +382,8 @@ odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/atomic_generic.h \
endif
noinst_HEADERS += arch/default/odp_atomic.h \
arch/default/odp_cpu.h \
- arch/default/odp_cpu_idling.h
+ arch/default/odp_cpu_idling.h \
+ arch/default/odp_random.h
endif
if ARCH_IS_X86
__LIB__libodp_linux_la_SOURCES += arch/default/odp_atomic.c \
@@ -378,6 +391,7 @@ __LIB__libodp_linux_la_SOURCES += arch/default/odp_atomic.c \
arch/x86/odp_cpu_cycles.c \
arch/x86/odp_global_time.c \
arch/default/odp_hash_crc32.c \
+ arch/default/odp_random.c \
arch/x86/odp_sysinfo_parse.c
odpapiabiarchinclude_HEADERS += arch/x86/odp/api/abi/cpu_rdtsc.h \
arch/x86/odp/api/abi/cpu_time.h \
@@ -390,6 +404,7 @@ odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/atomic_generic.h \
endif
noinst_HEADERS += arch/x86/cpu_flags.h \
arch/x86/odp_cpu.h \
+ arch/x86/odp_random.h \
arch/default/odp_atomic.h \
arch/default/odp_cpu.h \
arch/default/odp_cpu_idling.h
diff --git a/platform/linux-generic/README b/platform/linux-generic/README
index 04267909b..8f41d1d45 100644
--- a/platform/linux-generic/README
+++ b/platform/linux-generic/README
@@ -41,3 +41,21 @@ SPDX-License-Identifier: BSD-3-Clause
socket
socket_mmap
tap
+
+5. Random data
+ On x86, ODP_RANDOM_TRUE type random data is generated using rdseed [1] via
+ compiler builtin functions. If OpenSSL is not available or its use for
+ generating random data is disabled with the --disable-openssl-rand
+ configure option, ODP_RANDOM_CRYPTO type random data is generated using
+ rdrand [1].
+
+ Note that there may be issues with the quality or security of rdrand and
+ rdseed. [2]
+
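+ For illustration only (editor's sketch, not from this change), the rdrand
+ path can be exercised directly with the immintrin.h intrinsic when built
+ with -mrdrnd; _rdrand64_step() returns 1 on success:
+
+     #include <immintrin.h>
+
+     int read_rdrand64(unsigned long long *out)
+     {
+             return _rdrand64_step(out) == 1 ? 0 : -1;
+     }
+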
+6. References
+ [1] Intel Digital Random Number Generator (DRNG) Software Implementation
+ Guide. John P Mechalas, 17 October 2018.
+ https://www.intel.com/content/www/us/en/developer/articles/guide/intel-digital-random-number-generator-drng-software-implementation-guide.html
+
+ [2] RDRAND. Wikipedia, 29 September 2021.
+ https://en.wikipedia.org/wiki/RDRAND#Reception
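
For reference, the application-facing side of the above: programs should query
the strongest kind available before requesting ODP_RANDOM_CRYPTO or
ODP_RANDOM_TRUE data. A minimal sketch against the public random API (the
fallback policy shown is illustrative only):

    #include <odp_api.h>

    static int fill_random(uint8_t *buf, uint32_t len)
    {
            odp_random_kind_t kind = odp_random_max_kind();

            /* Fall back to the strongest kind this platform supports */
            if (kind > ODP_RANDOM_CRYPTO)
                    kind = ODP_RANDOM_CRYPTO;

            return odp_random_data(buf, len, kind) == (int32_t)len ? 0 : -1;
    }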
diff --git a/platform/linux-generic/arch/aarch64/odp_sysinfo_parse.c b/platform/linux-generic/arch/aarch64/odp_sysinfo_parse.c
index dbb6d43af..921bafaf0 100644
--- a/platform/linux-generic/arch/aarch64/odp_sysinfo_parse.c
+++ b/platform/linux-generic/arch/aarch64/odp_sysinfo_parse.c
@@ -120,6 +120,10 @@ static void aarch64_part_info(char *str, int maxlen, odp_cpu_arch_arm_t *cpu_isa
snprintf(str, maxlen, "Cortex-A77");
*cpu_isa = ODP_CPU_ARCH_ARMV8_2;
return;
+ case 0xd40:
+ snprintf(str, maxlen, "Neoverse V1");
+ *cpu_isa = ODP_CPU_ARCH_ARMV8_4;
+ return;
case 0xd41:
snprintf(str, maxlen, "Cortex-A78");
*cpu_isa = ODP_CPU_ARCH_ARMV8_2;
@@ -128,6 +132,18 @@ static void aarch64_part_info(char *str, int maxlen, odp_cpu_arch_arm_t *cpu_isa
snprintf(str, maxlen, "Cortex-A78AE");
*cpu_isa = ODP_CPU_ARCH_ARMV8_2;
return;
+ case 0xd46:
+ snprintf(str, maxlen, "Cortex-A510");
+ *cpu_isa = ODP_CPU_ARCH_ARMV9_0;
+ return;
+ case 0xd47:
+ snprintf(str, maxlen, "Cortex-A710");
+ *cpu_isa = ODP_CPU_ARCH_ARMV9_0;
+ return;
+ case 0xd49:
+ snprintf(str, maxlen, "Neoverse N2");
+ *cpu_isa = ODP_CPU_ARCH_ARMV9_0;
+ return;
case 0xd4a:
snprintf(str, maxlen, "Neoverse E1");
*cpu_isa = ODP_CPU_ARCH_ARMV8_2;
diff --git a/platform/linux-generic/arch/default/odp_random.c b/platform/linux-generic/arch/default/odp_random.c
new file mode 100644
index 000000000..18d2a45d2
--- /dev/null
+++ b/platform/linux-generic/arch/default/odp_random.c
@@ -0,0 +1,33 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_random.h>
+#include <odp/api/spec/random.h>
+
+#include <odp/visibility_begin.h>
+
+odp_random_kind_t _odp_random_max_kind_generic(void)
+{
+ return ODP_RANDOM_BASIC;
+}
+
+int32_t _odp_random_true_data_generic(uint8_t *buf, uint32_t len)
+{
+ (void)buf;
+ (void)len;
+
+ return -1;
+}
+
+int32_t _odp_random_crypto_data_generic(uint8_t *buf, uint32_t len)
+{
+ (void)buf;
+ (void)len;
+
+ return -1;
+}
+
+#include <odp/visibility_end.h>
diff --git a/platform/linux-generic/arch/default/odp_random.h b/platform/linux-generic/arch/default/odp_random.h
new file mode 100644
index 000000000..215eb6d93
--- /dev/null
+++ b/platform/linux-generic/arch/default/odp_random.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_DEFAULT_RANDOM_H_
+#define ODP_DEFAULT_RANDOM_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/spec/random.h>
+
+#include <stdint.h>
+
+odp_random_kind_t _odp_random_max_kind_generic(void);
+int32_t _odp_random_true_data_generic(uint8_t *buf, uint32_t len);
+int32_t _odp_random_crypto_data_generic(uint8_t *buf, uint32_t len);
+
+static inline odp_random_kind_t _odp_random_max_kind(void)
+{
+ return _odp_random_max_kind_generic();
+}
+
+static inline int32_t _odp_random_true_data(uint8_t *buf, uint32_t len)
+{
+ return _odp_random_true_data_generic(buf, len);
+}
+
+static inline int32_t _odp_random_crypto_data(uint8_t *buf, uint32_t len)
+{
+ return _odp_random_crypto_data_generic(buf, len);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/arch/x86/odp_random.h b/platform/linux-generic/arch/x86/odp_random.h
new file mode 100644
index 000000000..54628038e
--- /dev/null
+++ b/platform/linux-generic/arch/x86/odp_random.h
@@ -0,0 +1,160 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * These functions implement ODP_RANDOM_CRYPTO random data using rdrand [1],
+ * and ODP_RANDOM_TRUE random data using rdseed [1], via compiler builtin
+ * functions.
+ *
+ * Note that there may be issues with the quality or security of rdrand and
+ * rdseed. [2]
+ *
+ * [1] Intel Digital Random Number Generator (DRNG) Software Implementation
+ * Guide. John P Mechalas, 17 October 2018.
+ * https://www.intel.com/content/www/us/en/developer/articles/guide/intel-digital-random-number-generator-drng-software-implementation-guide.html
+ *
+ * [2] RDRAND. Wikipedia, 29 September 2021.
+ * https://en.wikipedia.org/wiki/RDRAND#Reception
+ */
+
+#ifndef ODP_X86_RANDOM_H_
+#define ODP_X86_RANDOM_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/spec/random.h>
+
+#include <stdint.h>
+
+odp_random_kind_t _odp_random_max_kind_generic(void);
+int32_t _odp_random_true_data_generic(uint8_t *buf, uint32_t len);
+int32_t _odp_random_crypto_data_generic(uint8_t *buf, uint32_t len);
+
+#ifdef __RDRND__
+
+static inline odp_random_kind_t _odp_random_max_kind(void)
+{
+#ifdef __RDSEED__
+ return ODP_RANDOM_TRUE;
+#else
+ return ODP_RANDOM_CRYPTO;
+#endif
+}
+
+#else
+
+static inline odp_random_kind_t _odp_random_max_kind(void)
+{
+ return _odp_random_max_kind_generic();
+}
+
+#endif
+
+#ifdef __RDSEED__
+
+static inline int32_t _odp_random_true_data(uint8_t *buf, uint32_t len)
+{
+#ifdef __x86_64__
+ for (uint32_t i = 0; i < len / 8; i++) {
+ while (!__builtin_ia32_rdseed_di_step((unsigned long long *)buf))
+ ;
+ buf += 8;
+ }
+
+ if (len & 4) {
+ while (!__builtin_ia32_rdseed_si_step((unsigned int *)buf))
+ ;
+ buf += 4;
+ }
+#else
+ for (uint32_t i = 0; i < len / 4; i++) {
+ while (!__builtin_ia32_rdseed_si_step((unsigned int *)buf))
+ ;
+ buf += 4;
+ }
+#endif
+ if (len & 2) {
+ while (!__builtin_ia32_rdseed_hi_step((unsigned short int *)buf))
+ ;
+ buf += 2;
+ }
+
+ if (len & 1) {
+ uint16_t w;
+
+ while (!__builtin_ia32_rdseed_hi_step(&w))
+ ;
+ *((uint8_t *)buf) = w & 0xff;
+ }
+
+ return len;
+}
+
+#else
+
+static inline int32_t _odp_random_true_data(uint8_t *buf, uint32_t len)
+{
+ return _odp_random_true_data_generic(buf, len);
+}
+
+#endif
+
+#ifdef __RDRND__
+
+static inline int32_t _odp_random_crypto_data(uint8_t *buf, uint32_t len)
+{
+#ifdef __x86_64__
+ for (uint32_t i = 0; i < len / 8; i++) {
+ while (!__builtin_ia32_rdrand64_step((unsigned long long *)buf))
+ ;
+ buf += 8;
+ }
+
+ if (len & 4) {
+ while (!__builtin_ia32_rdrand32_step((unsigned int *)buf))
+ ;
+ buf += 4;
+ }
+#else
+ for (uint32_t i = 0; i < len / 4; i++) {
+ while (!__builtin_ia32_rdrand32_step((unsigned int *)buf))
+ ;
+ buf += 4;
+ }
+#endif
+ if (len & 2) {
+ while (!__builtin_ia32_rdrand16_step((unsigned short int *)buf))
+ ;
+ buf += 2;
+ }
+
+ if (len & 1) {
+ uint16_t w;
+
+ while (!__builtin_ia32_rdrand16_step(&w))
+ ;
+ *((uint8_t *)buf) = w & 0xff;
+ }
+
+ return len;
+}
+
+#else
+
+static inline int32_t _odp_random_crypto_data(uint8_t *buf, uint32_t len)
+{
+ return _odp_random_crypto_data_generic(buf, len);
+}
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
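
The retry loops above follow the cited Intel guidance: rdrand/rdseed signal
success via the carry flag, so each step builtin is retried until it returns
non-zero. A standalone sketch of the same pattern using the immintrin.h
wrappers (equivalent to the raw builtins above; compile with -mrdseed):

    #include <immintrin.h>
    #include <stdint.h>

    /* Fill one 64-bit word from the DRNG, retrying while the instruction
     * reports that no entropy is available yet (step returns 0) */
    static uint64_t rdseed64(void)
    {
            unsigned long long v;

            while (!_rdseed64_step(&v))
                    ;
            return (uint64_t)v;
    }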
diff --git a/platform/linux-generic/include-abi/odp/api/abi/dma_types.h b/platform/linux-generic/include-abi/odp/api/abi/dma_types.h
new file mode 100644
index 000000000..768591b10
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/dma_types.h
@@ -0,0 +1,42 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_API_ABI_DMA_TYPES_H_
+#define ODP_API_ABI_DMA_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/plat/strong_types.h>
+
+/** @ingroup odp_dma
+ * @{
+ */
+
+typedef ODP_HANDLE_T(odp_dma_t);
+
+#define ODP_DMA_INVALID _odp_cast_scalar(odp_dma_t, 0)
+
+typedef uint32_t odp_dma_transfer_id_t;
+
+#define ODP_DMA_TRANSFER_ID_INVALID ((odp_dma_transfer_id_t)0)
+
+typedef ODP_HANDLE_T(odp_dma_compl_t);
+
+#define ODP_DMA_COMPL_INVALID _odp_cast_scalar(odp_dma_compl_t, 0)
+
+#define ODP_DMA_NAME_LEN 32
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/event.h b/platform/linux-generic/include-abi/odp/api/abi/event.h
index 1cbb81afe..6530ac2e4 100644
--- a/platform/linux-generic/include-abi/odp/api/abi/event.h
+++ b/platform/linux-generic/include-abi/odp/api/abi/event.h
@@ -35,7 +35,8 @@ typedef enum odp_event_type_t {
ODP_EVENT_CRYPTO_COMPL = 4,
ODP_EVENT_IPSEC_STATUS = 5,
ODP_EVENT_PACKET_VECTOR = 6,
- ODP_EVENT_PACKET_TX_COMPL = 7
+ ODP_EVENT_PACKET_TX_COMPL = 7,
+ ODP_EVENT_DMA_COMPL = 8,
} odp_event_type_t;
typedef enum odp_event_subtype_t {
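
With ODP_EVENT_DMA_COMPL added to the enum, an application dispatch loop can
route DMA completions next to other event types. A minimal sketch, assuming a
scheduled completion queue and using the odp_dma_compl_*() calls added later
in this patch:

    odp_event_t ev = odp_schedule(NULL, ODP_SCHED_WAIT);

    switch (odp_event_type(ev)) {
    case ODP_EVENT_DMA_COMPL: {
            odp_dma_result_t result;
            odp_dma_compl_t compl = odp_dma_compl_from_event(ev);

            if (odp_dma_compl_result(compl, &result) == 0) {
                    /* Transfer done, result.user_ptr identifies it */
            }
            odp_dma_compl_free(compl);
            break;
    }
    case ODP_EVENT_PACKET:
            odp_packet_free(odp_packet_from_event(ev));
            break;
    default:
            odp_event_free(ev);
            break;
    }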
diff --git a/platform/linux-generic/include-abi/odp/api/abi/timer.h b/platform/linux-generic/include-abi/odp/api/abi/timer_types.h
index c08da1ce3..6cfa37a36 100644
--- a/platform/linux-generic/include-abi/odp/api/abi/timer.h
+++ b/platform/linux-generic/include-abi/odp/api/abi/timer_types.h
@@ -11,8 +11,8 @@
* ODP timer service
*/
-#ifndef ODP_API_ABI_TIMER_H_
-#define ODP_API_ABI_TIMER_H_
+#ifndef ODP_API_ABI_TIMER_TYPES_H_
+#define ODP_API_ABI_TIMER_TYPES_H_
#ifdef __cplusplus
extern "C" {
diff --git a/platform/linux-generic/include/odp_buffer_internal.h b/platform/linux-generic/include/odp_buffer_internal.h
index dec85f9d3..e0be593d0 100644
--- a/platform/linux-generic/include/odp_buffer_internal.h
+++ b/platform/linux-generic/include/odp_buffer_internal.h
@@ -1,5 +1,5 @@
/* Copyright (c) 2013-2018, Linaro Limited
- * Copyright (c) 2019, Nokia
+ * Copyright (c) 2019-2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -29,88 +29,40 @@ extern "C" {
#include <odp/api/byteorder.h>
#include <odp/api/thread.h>
#include <odp/api/event.h>
-#include <odp_forward_typedefs_internal.h>
+#include <odp_event_internal.h>
#include <stddef.h>
-typedef union buffer_index_t {
- uint32_t u32;
+/* Internal buffer header */
+typedef struct ODP_ALIGNED_CACHE odp_buffer_hdr_t {
+ /* Common event header */
+ _odp_event_hdr_t event_hdr;
- struct {
- uint32_t pool :8;
- uint32_t buffer :24;
- };
-} buffer_index_t;
-
-/* Check that pool index fit into bit field */
-ODP_STATIC_ASSERT(ODP_CONFIG_POOLS <= (0xFF + 1), "TOO_MANY_POOLS");
-
-/* Check that buffer index fit into bit field */
-ODP_STATIC_ASSERT(CONFIG_POOL_MAX_NUM <= (0xFFFFFF + 1), "TOO_LARGE_POOL");
-
-/* Type size limits number of flow IDs supported */
-#define BUF_HDR_MAX_FLOW_ID 255
-
-/* Common buffer header */
-struct ODP_ALIGNED_CACHE odp_buffer_hdr_t {
- /* Initial buffer data pointer */
- uint8_t *base_data;
-
- /* Pool pointer */
- void *pool_ptr;
-
- /* --- Mostly read only data --- */
- const void *user_ptr;
-
- /* Initial buffer tail pointer */
- uint8_t *buf_end;
-
- /* User area pointer */
- void *uarea_addr;
-
- /* Combined pool and buffer index */
- buffer_index_t index;
-
- /* Reference count */
- odp_atomic_u32_t ref_cnt;
-
- /* Pool type */
- int8_t type;
-
- /* Event type. Maybe different than pool type (crypto compl event) */
- int8_t event_type;
-
- /* Event flow id */
- uint8_t flow_id;
-
- /* Data or next header */
+ /* Data */
uint8_t data[];
-};
+} odp_buffer_hdr_t;
/* Buffer header size is critical for performance. Ensure that it does not accidentally
* grow over cache line size. Note that ODP_ALIGNED_CACHE rounds up struct size to a multiple of
* ODP_CACHE_LINE_SIZE. */
ODP_STATIC_ASSERT(sizeof(odp_buffer_hdr_t) <= ODP_CACHE_LINE_SIZE, "BUFFER_HDR_SIZE_ERROR");
-odp_event_type_t _odp_buffer_event_type(odp_buffer_t buf);
-void _odp_buffer_event_type_set(odp_buffer_t buf, int ev);
-
-static inline odp_buffer_t buf_from_buf_hdr(odp_buffer_hdr_t *hdr)
+static inline odp_buffer_hdr_t *_odp_buf_hdr(odp_buffer_t buf)
{
- return (odp_buffer_t)hdr;
+ return (odp_buffer_hdr_t *)(uintptr_t)buf;
}
static inline uint32_t event_flow_id(odp_event_t ev)
{
odp_buffer_hdr_t *buf_hdr = (odp_buffer_hdr_t *)(uintptr_t)ev;
- return buf_hdr->flow_id;
+ return buf_hdr->event_hdr.flow_id;
}
static inline void event_flow_id_set(odp_event_t ev, uint32_t flow_id)
{
odp_buffer_hdr_t *buf_hdr = (odp_buffer_hdr_t *)(uintptr_t)ev;
- buf_hdr->flow_id = flow_id;
+ buf_hdr->event_hdr.flow_id = flow_id;
}
#ifdef __cplusplus
diff --git a/platform/linux-generic/include/odp_config_internal.h b/platform/linux-generic/include/odp_config_internal.h
index 899b261bd..872d6f6d5 100644
--- a/platform/linux-generic/include/odp_config_internal.h
+++ b/platform/linux-generic/include/odp_config_internal.h
@@ -54,6 +54,16 @@ extern "C" {
#define CONFIG_QUEUE_MAX_ORD_LOCKS 2
/*
+ * Maximum number of DMA sessions
+ */
+#define CONFIG_MAX_DMA_SESSIONS 32
+
+/*
+ * Stashes reserved for internal usage
+ */
+#define CONFIG_INTERNAL_STASHES CONFIG_MAX_DMA_SESSIONS
+
+/*
* Maximum number of stashes
*/
#define CONFIG_MAX_STASHES 128
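
Each DMA session reserves one stash for its transfer IDs, which is what
CONFIG_INTERNAL_STASHES accounts for. A hedged note on the arithmetic,
assuming the stash capability subtracts the reserved entries:

    /* Assumed application-visible stash count:
     * CONFIG_MAX_STASHES - CONFIG_INTERNAL_STASHES = 128 - 32 = 96 */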
diff --git a/platform/linux-generic/include/odp_event_internal.h b/platform/linux-generic/include/odp_event_internal.h
new file mode 100644
index 000000000..fa7e5f354
--- /dev/null
+++ b/platform/linux-generic/include/odp_event_internal.h
@@ -0,0 +1,103 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP event descriptor - implementation internal
+ */
+
+#ifndef ODP_EVENT_INTERNAL_H_
+#define ODP_EVENT_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/atomic.h>
+#include <odp/api/debug.h>
+#include <odp/api/event.h>
+#include <odp/api/std_types.h>
+
+#include <odp_config_internal.h>
+
+typedef union buffer_index_t {
+ uint32_t u32;
+
+ struct {
+ uint32_t pool :8;
+ uint32_t buffer :24;
+ };
+} buffer_index_t;
+
+/* Check that pool index fits into bit field */
+ODP_STATIC_ASSERT(ODP_CONFIG_POOLS <= (0xFF + 1), "TOO_MANY_POOLS");
+
+/* Check that buffer index fits into bit field */
+ODP_STATIC_ASSERT(CONFIG_POOL_MAX_NUM <= (0xFFFFFF + 1), "TOO_LARGE_POOL");
+
+/* Type size limits number of flow IDs supported */
+#define BUF_HDR_MAX_FLOW_ID 255
+
+/* Common header for all event types without alignment constraints. */
+typedef struct _odp_event_hdr_t {
+ /* Initial buffer data pointer */
+ uint8_t *base_data;
+
+ /* Pool pointer */
+ void *pool_ptr;
+
+ /* --- Mostly read only data --- */
+ const void *user_ptr;
+
+ /* Initial buffer tail pointer */
+ uint8_t *buf_end;
+
+ /* User area pointer */
+ void *uarea_addr;
+
+ /* Combined pool and buffer index */
+ buffer_index_t index;
+
+ /* Reference count */
+ odp_atomic_u32_t ref_cnt;
+
+ /* Pool type */
+ int8_t type;
+
+ /* Event type. Maybe different than pool type (crypto compl event) */
+ int8_t event_type;
+
+ /* Event flow id */
+ uint8_t flow_id;
+
+} _odp_event_hdr_t;
+
+static inline odp_event_t _odp_event_from_hdr(_odp_event_hdr_t *hdr)
+{
+ return (odp_event_t)hdr;
+}
+
+static inline _odp_event_hdr_t *_odp_event_hdr(odp_event_t event)
+{
+ return (_odp_event_hdr_t *)(uintptr_t)event;
+}
+
+static inline odp_event_type_t _odp_event_type(odp_event_t event)
+{
+ return _odp_event_hdr(event)->event_type;
+}
+
+static inline void _odp_event_type_set(odp_event_t event, int ev)
+{
+ _odp_event_hdr(event)->event_type = ev;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
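
The 8/24-bit split in buffer_index_t is what the two static asserts above
guard: at most 256 pools and 16M events per pool. A small worked example of
the packing (values are illustrative):

    buffer_index_t index;

    index.u32 = 0;
    index.pool = 3;      /* pool index, 8 bits */
    index.buffer = 4100; /* event index within the pool, 24 bits */

    /* The combined value travels as a plain uint32_t and is unpacked
     * with _odp_event_hdr_from_index_u32() on the receiving side */
    uint32_t u32 = index.u32;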
diff --git a/platform/linux-generic/include/odp_event_vector_internal.h b/platform/linux-generic/include/odp_event_vector_internal.h
index 2d51801df..33b26d711 100644
--- a/platform/linux-generic/include/odp_event_vector_internal.h
+++ b/platform/linux-generic/include/odp_event_vector_internal.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2020, Nokia
+/* Copyright (c) 2020-2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -13,18 +13,20 @@
#ifndef ODP_EVENT_VECTOR_INTERNAL_H_
#define ODP_EVENT_VECTOR_INTERNAL_H_
-#include <stdint.h>
+#include <odp/api/align.h>
+#include <odp/api/debug.h>
#include <odp/api/packet.h>
-#include <odp_buffer_internal.h>
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpedantic"
+#include <odp_event_internal.h>
+
+#include <stdint.h>
+
/**
* Internal event vector header
*/
-typedef struct {
- /* Common buffer header */
- odp_buffer_hdr_t buf_hdr;
+typedef struct ODP_ALIGNED_CACHE odp_event_vector_hdr_t {
+ /* Common event header */
+ _odp_event_hdr_t event_hdr;
/* Event vector size */
uint32_t size;
@@ -33,7 +35,11 @@ typedef struct {
odp_packet_t packet[];
} odp_event_vector_hdr_t;
-#pragma GCC diagnostic pop
+
+/* Vector header size is critical for performance. Ensure that it does not accidentally
+ * grow over cache line size. */
+ODP_STATIC_ASSERT(sizeof(odp_event_vector_hdr_t) <= ODP_CACHE_LINE_SIZE,
+ "EVENT_VECTOR_HDR_SIZE_ERROR");
/**
* Return the vector header
diff --git a/platform/linux-generic/include/odp_forward_typedefs_internal.h b/platform/linux-generic/include/odp_forward_typedefs_internal.h
index fc2d74857..d7e14b953 100644
--- a/platform/linux-generic/include/odp_forward_typedefs_internal.h
+++ b/platform/linux-generic/include/odp_forward_typedefs_internal.h
@@ -10,7 +10,7 @@
* ODP forward typedefs - implementation internal
*
* This needs to be a separate file because it is needed by both
- * odp_queue_internal.h and odp_buffer_internal.h and clang prohibits forward
+ * odp_queue_internal.h and odp_queue_lf.h and clang prohibits forward
* "redefining" typedefs. Note that this file can be extended with additional
* forward typedefs as needed.
*/
@@ -22,7 +22,6 @@
extern "C" {
#endif
-typedef struct odp_buffer_hdr_t odp_buffer_hdr_t;
typedef union queue_entry_u queue_entry_t;
#ifdef __cplusplus
diff --git a/platform/linux-generic/include/odp_global_data.h b/platform/linux-generic/include/odp_global_data.h
index 75978bd6c..c94369e9f 100644
--- a/platform/linux-generic/include/odp_global_data.h
+++ b/platform/linux-generic/include/odp_global_data.h
@@ -72,12 +72,12 @@ typedef struct odp_global_data_ro_t {
uint8_t has_config_rt;
config_t libconfig_default;
config_t libconfig_runtime;
- odp_random_kind_t ipsec_rand_kind;
/* Disabled features during global init */
struct {
uint8_t compress;
uint8_t crypto;
+ uint8_t dma;
uint8_t ipsec;
uint8_t stash;
uint8_t traffic_mngr;
diff --git a/platform/linux-generic/include/odp_init_internal.h b/platform/linux-generic/include/odp_init_internal.h
index 9754dfa31..2a1039854 100644
--- a/platform/linux-generic/include/odp_init_internal.h
+++ b/platform/linux-generic/include/odp_init_internal.h
@@ -99,6 +99,9 @@ int _odp_hash_term_global(void);
int _odp_stash_init_global(void);
int _odp_stash_term_global(void);
+int _odp_dma_init_global(void);
+int _odp_dma_term_global(void);
+
#ifdef __cplusplus
}
#endif
diff --git a/platform/linux-generic/include/odp_ipsec_internal.h b/platform/linux-generic/include/odp_ipsec_internal.h
index cc224e4cc..b7fb2cbf7 100644
--- a/platform/linux-generic/include/odp_ipsec_internal.h
+++ b/platform/linux-generic/include/odp_ipsec_internal.h
@@ -171,6 +171,7 @@ struct ipsec_sa_s {
union {
unsigned flags;
struct {
+ unsigned inbound : 1;
unsigned dec_ttl : 1;
unsigned copy_dscp : 1;
unsigned copy_df : 1;
@@ -246,7 +247,28 @@ struct ipsec_sa_s {
} stats;
uint32_t next_sa;
- odp_ipsec_sa_param_t param;
+
+ /* Data stored solely for odp_ipsec_sa_info() */
+ struct {
+ odp_cipher_alg_t cipher_alg;
+ uint32_t cipher_key_len;
+ uint32_t cipher_key_extra_len;
+
+ odp_auth_alg_t auth_alg;
+ uint32_t auth_key_len;
+ uint32_t auth_key_extra_len;
+
+ uint32_t icv_len;
+ uint32_t context_len;
+ union {
+ struct {
+ uint32_t antireplay_ws;
+ } in;
+ struct {
+ uint32_t mtu;
+ } out;
+ };
+ } sa_info;
};
/**
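
The sa_info struct above caches only the creation-time values needed to
service odp_ipsec_sa_info(), replacing the full odp_ipsec_sa_param_t copy
stored before. A minimal usage sketch (reading info.param.spi assumes the
standard odp_ipsec_sa_info_t layout):

    #include <inttypes.h>
    #include <stdio.h>

    static void print_sa_spi(odp_ipsec_sa_t sa)
    {
            odp_ipsec_sa_info_t info;

            if (odp_ipsec_sa_info(sa, &info) == 0)
                    printf("SA SPI: 0x%" PRIx32 "\n", info.param.spi);
    }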
diff --git a/platform/linux-generic/include/odp_packet_internal.h b/platform/linux-generic/include/odp_packet_internal.h
index 62f8aea25..7c9b7735e 100644
--- a/platform/linux-generic/include/odp_packet_internal.h
+++ b/platform/linux-generic/include/odp_packet_internal.h
@@ -1,5 +1,5 @@
/* Copyright (c) 2013-2018, Linaro Limited
- * Copyright (c) 2019, Nokia
+ * Copyright (c) 2019-2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -20,15 +20,16 @@ extern "C" {
#include <odp/api/align.h>
#include <odp/api/debug.h>
-#include <odp_buffer_internal.h>
-#include <odp_pool_internal.h>
#include <odp/api/packet.h>
#include <odp/api/plat/packet_inline_types.h>
#include <odp/api/packet_io.h>
#include <odp/api/crypto.h>
#include <odp/api/comp.h>
-#include <odp_ipsec_internal.h>
#include <odp/api/abi/packet.h>
+
+#include <odp_event_internal.h>
+#include <odp_ipsec_internal.h>
+#include <odp_pool_internal.h>
#include <odp_queue_if.h>
#include <stdint.h>
@@ -73,8 +74,6 @@ typedef struct {
ODP_STATIC_ASSERT(PKT_MAX_SEGS < UINT16_MAX, "PACKET_MAX_SEGS_ERROR");
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpedantic"
/**
* Internal Packet header
*
@@ -82,9 +81,9 @@ ODP_STATIC_ASSERT(PKT_MAX_SEGS < UINT16_MAX, "PACKET_MAX_SEGS_ERROR");
* packet_init(). Because of this any new fields added must be reviewed for
* initialization requirements.
*/
-typedef struct odp_packet_hdr_t {
- /* Common buffer header (cache line aligned) */
- odp_buffer_hdr_t buf_hdr;
+typedef struct ODP_ALIGNED_CACHE odp_packet_hdr_t {
+ /* Common event header */
+ _odp_event_hdr_t event_hdr;
/* Segment data start */
uint8_t *seg_data;
@@ -153,13 +152,10 @@ typedef struct odp_packet_hdr_t {
uint8_t data[];
} odp_packet_hdr_t;
-#pragma GCC diagnostic pop
/* Packet header size is critical for performance. Ensure that it does not accidentally
- * grow over 256 bytes when cache line size is 64 bytes (or less). With larger cache line sizes,
- * the struct size is larger due to the odp_buffer_hdr_t alignment requirement. */
-ODP_STATIC_ASSERT(sizeof(odp_packet_hdr_t) <= 256 || ODP_CACHE_LINE_SIZE > 64,
- "PACKET_HDR_SIZE_ERROR");
+ * grow over 256 bytes. */
+ODP_STATIC_ASSERT(sizeof(odp_packet_hdr_t) <= 256, "PACKET_HDR_SIZE_ERROR");
/**
* Return the packet header
@@ -174,14 +170,14 @@ static inline odp_packet_t packet_handle(odp_packet_hdr_t *pkt_hdr)
return (odp_packet_t)pkt_hdr;
}
-static inline odp_buffer_hdr_t *packet_to_buf_hdr(odp_packet_t pkt)
+static inline _odp_event_hdr_t *packet_to_event_hdr(odp_packet_t pkt)
{
- return &packet_hdr(pkt)->buf_hdr;
+ return (_odp_event_hdr_t *)(uintptr_t)&packet_hdr(pkt)->event_hdr;
}
-static inline odp_packet_t packet_from_buf_hdr(odp_buffer_hdr_t *buf_hdr)
+static inline odp_packet_t packet_from_event_hdr(_odp_event_hdr_t *event_hdr)
{
- return (odp_packet_t)(odp_packet_hdr_t *)buf_hdr;
+ return (odp_packet_t)(uintptr_t)event_hdr;
}
static inline odp_packet_hdr_t *packet_last_seg(odp_packet_hdr_t *hdr)
@@ -202,7 +198,7 @@ static inline void packet_subtype_set(odp_packet_t pkt, int ev)
*/
static inline void packet_init(odp_packet_hdr_t *pkt_hdr, uint32_t len)
{
- pool_t *pool = pkt_hdr->buf_hdr.pool_ptr;
+ pool_t *pool = pkt_hdr->event_hdr.pool_ptr;
uint32_t seg_len;
int num = pkt_hdr->seg_count;
diff --git a/platform/linux-generic/include/odp_pool_internal.h b/platform/linux-generic/include/odp_pool_internal.h
index 001bdfc37..c9bae7142 100644
--- a/platform/linux-generic/include/odp_pool_internal.h
+++ b/platform/linux-generic/include/odp_pool_internal.h
@@ -23,6 +23,7 @@ extern "C" {
#include <odp/api/align.h>
#include <odp_buffer_internal.h>
+#include <odp_event_internal.h>
#include <odp_config_internal.h>
#include <odp_ring_ptr_internal.h>
#include <odp/api/plat/strong_types.h>
@@ -31,22 +32,22 @@ typedef struct ODP_ALIGNED_CACHE pool_cache_t {
/* Number of buffers in cache */
uint32_t cache_num;
/* Cached buffers */
- odp_buffer_hdr_t *buf_hdr[CONFIG_POOL_CACHE_MAX_SIZE];
+ _odp_event_hdr_t *event_hdr[CONFIG_POOL_CACHE_MAX_SIZE];
} pool_cache_t;
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpedantic"
-/* Buffer header ring */
+/* Event header ring */
typedef struct ODP_ALIGNED_CACHE {
/* Ring header */
ring_ptr_t hdr;
/* Ring data: buffer handles */
- odp_buffer_hdr_t *buf_hdr[CONFIG_POOL_MAX_NUM + 1];
+ _odp_event_hdr_t *event_hdr[CONFIG_POOL_MAX_NUM + 1];
/* Index to pointer look-up table for external memory pool */
- odp_buffer_hdr_t *buf_hdr_by_index[];
+ _odp_event_hdr_t *event_hdr_by_index[];
} pool_ring_t;
#pragma GCC diagnostic pop
@@ -88,6 +89,7 @@ typedef struct pool_t {
uint8_t *base_addr;
uint8_t *max_addr;
uint8_t *uarea_base_addr;
+ odp_pool_type_t type_2;
odp_pool_ext_param_t ext_param;
/* Used by DPDK zero-copy pktio */
@@ -140,27 +142,22 @@ static inline pool_t *pool_entry_from_hdl(odp_pool_t pool_hdl)
return &_odp_pool_glb->pool[_odp_typeval(pool_hdl) - 1];
}
-static inline odp_buffer_hdr_t *buf_hdl_to_hdr(odp_buffer_t buf)
-{
- return (odp_buffer_hdr_t *)(uintptr_t)buf;
-}
-
-static inline odp_buffer_hdr_t *buf_hdr_from_index(pool_t *pool,
- uint32_t buffer_idx)
+static inline _odp_event_hdr_t *event_hdr_from_index(pool_t *pool,
+ uint32_t event_idx)
{
uint64_t block_offset;
- odp_buffer_hdr_t *buf_hdr;
+ _odp_event_hdr_t *event_hdr;
- block_offset = (buffer_idx * (uint64_t)pool->block_size) +
+ block_offset = (event_idx * (uint64_t)pool->block_size) +
pool->block_offset;
/* clang requires cast to uintptr_t */
- buf_hdr = (odp_buffer_hdr_t *)(uintptr_t)&pool->base_addr[block_offset];
+ event_hdr = (_odp_event_hdr_t *)(uintptr_t)&pool->base_addr[block_offset];
- return buf_hdr;
+ return event_hdr;
}
-static inline odp_buffer_hdr_t *buf_hdr_from_index_u32(uint32_t u32)
+static inline _odp_event_hdr_t *_odp_event_hdr_from_index_u32(uint32_t u32)
{
buffer_index_t index;
uint32_t pool_idx, buffer_idx;
@@ -171,12 +168,21 @@ static inline odp_buffer_hdr_t *buf_hdr_from_index_u32(uint32_t u32)
buffer_idx = index.buffer;
pool = pool_entry(pool_idx);
- return buf_hdr_from_index(pool, buffer_idx);
+ return event_hdr_from_index(pool, buffer_idx);
+}
+
+odp_event_t _odp_event_alloc(pool_t *pool);
+int _odp_event_alloc_multi(pool_t *pool, _odp_event_hdr_t *event_hdr[], int num);
+void _odp_event_free_multi(_odp_event_hdr_t *event_hdr[], int num_free);
+int _odp_event_is_valid(odp_event_t event);
+
+static inline void _odp_event_free(odp_event_t event)
+{
+ _odp_event_free_multi((_odp_event_hdr_t **)&event, 1);
}
-int _odp_buffer_alloc_multi(pool_t *pool, odp_buffer_hdr_t *buf_hdr[], int num);
-void _odp_buffer_free_multi(odp_buffer_hdr_t *buf_hdr[], int num_free);
-int _odp_buffer_is_valid(odp_buffer_t buf);
+odp_pool_t _odp_pool_create(const char *name, const odp_pool_param_t *params,
+ odp_pool_type_t type_2);
#ifdef __cplusplus
}
diff --git a/platform/linux-generic/include/odp_queue_if.h b/platform/linux-generic/include/odp_queue_if.h
index fa92a4171..ed4ec4e61 100644
--- a/platform/linux-generic/include/odp_queue_if.h
+++ b/platform/linux-generic/include/odp_queue_if.h
@@ -1,4 +1,5 @@
/* Copyright (c) 2017, ARM Limited
+ * Copyright (c) 2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -14,6 +15,8 @@ extern "C" {
#include <odp/api/queue.h>
#include <odp/api/schedule.h>
#include <odp/api/packet_io.h>
+
+#include <odp_event_internal.h>
#include <odp_forward_typedefs_internal.h>
#define QUEUE_MULTI_MAX CONFIG_BURST_SIZE
@@ -22,12 +25,12 @@ typedef int (*queue_init_global_fn_t)(void);
typedef int (*queue_term_global_fn_t)(void);
typedef int (*queue_init_local_fn_t)(void);
typedef int (*queue_term_local_fn_t)(void);
-typedef int (*queue_enq_fn_t)(odp_queue_t queue, odp_buffer_hdr_t *buf_hdr);
+typedef int (*queue_enq_fn_t)(odp_queue_t queue, _odp_event_hdr_t *event_hdr);
typedef int (*queue_enq_multi_fn_t)(odp_queue_t queue,
- odp_buffer_hdr_t **buf_hdr, int num);
-typedef odp_buffer_hdr_t *(*queue_deq_fn_t)(odp_queue_t queue);
+ _odp_event_hdr_t **event_hdr, int num);
+typedef _odp_event_hdr_t *(*queue_deq_fn_t)(odp_queue_t queue);
typedef int (*queue_deq_multi_fn_t)(odp_queue_t queue,
- odp_buffer_hdr_t **buf_hdr, int num);
+ _odp_event_hdr_t **event_hdr, int num);
typedef odp_pktout_queue_t (*queue_get_pktout_fn_t)(odp_queue_t queue);
typedef void (*queue_set_pktout_fn_t)(odp_queue_t queue, odp_pktio_t pktio,
int index);
diff --git a/platform/linux-generic/include/odp_queue_scalable_internal.h b/platform/linux-generic/include/odp_queue_scalable_internal.h
index 9f326a9ee..6f9b85c85 100644
--- a/platform/linux-generic/include/odp_queue_scalable_internal.h
+++ b/platform/linux-generic/include/odp_queue_scalable_internal.h
@@ -17,7 +17,7 @@ extern "C" {
#include <odp/api/queue.h>
#include <odp_forward_typedefs_internal.h>
#include <odp_queue_if.h>
-#include <odp_buffer_internal.h>
+#include <odp_event_internal.h>
#include <odp_align_internal.h>
#include <odp/api/packet_io.h>
#include <odp/api/align.h>
@@ -58,10 +58,10 @@ union queue_entry_u {
uint8_t pad[ROUNDUP_CACHE_LINE(sizeof(struct queue_entry_s))];
};
-int _odp_queue_deq(sched_elem_t *q, odp_buffer_hdr_t *buf_hdr[], int num);
+int _odp_queue_deq(sched_elem_t *q, _odp_event_hdr_t *event_hdr[], int num);
int _odp_queue_deq_sc(sched_elem_t *q, odp_event_t *evp, int num);
int _odp_queue_deq_mc(sched_elem_t *q, odp_event_t *evp, int num);
-int _odp_queue_enq_sp(sched_elem_t *q, odp_buffer_hdr_t *buf_hdr[], int num);
+int _odp_queue_enq_sp(sched_elem_t *q, _odp_event_hdr_t *event_hdr[], int num);
queue_entry_t *_odp_qentry_from_ext(odp_queue_t handle);
/* Round up memory size to next cache line size to
diff --git a/platform/linux-generic/include/odp_random_openssl_internal.h b/platform/linux-generic/include/odp_random_openssl_internal.h
index 3205a2c32..5cb9006d1 100644
--- a/platform/linux-generic/include/odp_random_openssl_internal.h
+++ b/platform/linux-generic/include/odp_random_openssl_internal.h
@@ -13,10 +13,7 @@ extern "C" {
#include <stdint.h>
-#include <odp/api/random.h>
-
-odp_random_kind_t _odp_random_openssl_max_kind(void);
-int32_t _odp_random_openssl_data(uint8_t *buf, uint32_t len, odp_random_kind_t kind);
+int32_t _odp_random_openssl_data(uint8_t *buf, uint32_t len);
int _odp_random_openssl_init_local(void);
int _odp_random_openssl_term_local(void);
diff --git a/platform/linux-generic/include/odp_random_std_internal.h b/platform/linux-generic/include/odp_random_std_internal.h
index 69f8b6d85..fb350fd22 100644
--- a/platform/linux-generic/include/odp_random_std_internal.h
+++ b/platform/linux-generic/include/odp_random_std_internal.h
@@ -13,11 +13,8 @@ extern "C" {
#include <stdint.h>
-#include <odp/api/random.h>
-
-odp_random_kind_t _odp_random_std_max_kind(void);
int32_t _odp_random_std_test_data(uint8_t *buf, uint32_t len, uint64_t *seed);
-int32_t _odp_random_std_data(uint8_t *buf, uint32_t len, odp_random_kind_t kind);
+int32_t _odp_random_std_data(uint8_t *buf, uint32_t len);
int _odp_random_std_init_local(void);
int _odp_random_std_term_local(void);
diff --git a/platform/linux-generic/include/odp_schedule_if.h b/platform/linux-generic/include/odp_schedule_if.h
index a804f8c95..dddd2182d 100644
--- a/platform/linux-generic/include/odp_schedule_if.h
+++ b/platform/linux-generic/include/odp_schedule_if.h
@@ -13,9 +13,10 @@ extern "C" {
#endif
#include <odp/api/queue.h>
-#include <odp_queue_if.h>
#include <odp/api/schedule.h>
-#include <odp_forward_typedefs_internal.h>
+
+#include <odp_event_internal.h>
+#include <odp_queue_if.h>
#define _ODP_SCHED_ID_BASIC 0
#define _ODP_SCHED_ID_SP 1
@@ -45,8 +46,8 @@ typedef int (*schedule_create_queue_fn_t)(uint32_t queue_index,
typedef void (*schedule_destroy_queue_fn_t)(uint32_t queue_index);
typedef int (*schedule_sched_queue_fn_t)(uint32_t queue_index);
typedef int (*schedule_unsched_queue_fn_t)(uint32_t queue_index);
-typedef int (*schedule_ord_enq_multi_fn_t)(odp_queue_t queue,
- void *buf_hdr[], int num, int *ret);
+typedef int (*schedule_ord_enq_multi_fn_t)(odp_queue_t queue, void *event_hdr[],
+ int num, int *ret);
typedef int (*schedule_init_global_fn_t)(void);
typedef int (*schedule_term_global_fn_t)(void);
typedef int (*schedule_init_local_fn_t)(void);
@@ -87,7 +88,7 @@ extern const schedule_fn_t *_odp_sched_fn;
/* Interface for the scheduler */
int _odp_sched_cb_pktin_poll(int pktio_index, int pktin_index,
- odp_buffer_hdr_t *hdr_tbl[], int num);
+ _odp_event_hdr_t *hdr_tbl[], int num);
int _odp_sched_cb_pktin_poll_one(int pktio_index, int rx_queue, odp_event_t evts[]);
void _odp_sched_cb_pktio_stop_finalize(int pktio_index);
diff --git a/platform/linux-generic/include/odp_schedule_scalable.h b/platform/linux-generic/include/odp_schedule_scalable.h
index c5e6a2880..207573f4c 100644
--- a/platform/linux-generic/include/odp_schedule_scalable.h
+++ b/platform/linux-generic/include/odp_schedule_scalable.h
@@ -13,6 +13,7 @@
#include <odp/api/schedule.h>
#include <odp/api/ticketlock.h>
+#include <odp_event_internal.h>
#include <odp_schedule_scalable_config.h>
#include <odp_schedule_scalable_ordered.h>
#include <odp_llqueue.h>
@@ -74,13 +75,13 @@ typedef struct ODP_ALIGNED_CACHE {
ringidx_t prod_read SPLIT_PC;
ringidx_t prod_write;
ringidx_t prod_mask;
- odp_buffer_hdr_t **prod_ring;
+ _odp_event_hdr_t **prod_ring;
ringidx_t cons_write SPLIT_PC;
ringidx_t cons_read;
reorder_window_t *rwin;
void *user_ctx;
#ifdef CONFIG_SPLIT_PRODCONS
- odp_buffer_hdr_t **cons_ring;
+ _odp_event_hdr_t **cons_ring;
ringidx_t cons_mask;
uint16_t cons_type;
#else
diff --git a/platform/linux-generic/include/odp_schedule_scalable_ordered.h b/platform/linux-generic/include/odp_schedule_scalable_ordered.h
index 3fa81f750..21c89bed2 100644
--- a/platform/linux-generic/include/odp_schedule_scalable_ordered.h
+++ b/platform/linux-generic/include/odp_schedule_scalable_ordered.h
@@ -13,6 +13,7 @@
#include <odp_align_internal.h>
#include <odp_bitset.h>
+#include <odp_event_internal.h>
#include <odp_ishmpool_internal.h>
/* High level functioning of reordering
@@ -106,7 +107,7 @@ struct ODP_ALIGNED_CACHE reorder_context {
/* Number of events stored in this reorder context */
uint8_t numevts;
/* Events stored in this context */
- odp_buffer_hdr_t *events[RC_EVT_SIZE];
+ _odp_event_hdr_t *events[RC_EVT_SIZE];
queue_entry_t *destq[RC_EVT_SIZE];
};
@@ -119,6 +120,6 @@ void _odp_rwin_unreserve_sc(reorder_window_t *rwin, uint32_t sn);
void _odp_rctx_init(reorder_context_t *rctx, uint16_t idx,
reorder_window_t *rwin, uint32_t sn);
void _odp_rctx_release(reorder_context_t *rctx);
-int _odp_rctx_save(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num);
+int _odp_rctx_save(queue_entry_t *queue, _odp_event_hdr_t *event_hdr[], int num);
#endif /* ODP_SCHEDULE_SCALABLE_ORDERED_H */
diff --git a/platform/linux-generic/include/odp_sysinfo_internal.h b/platform/linux-generic/include/odp_sysinfo_internal.h
index 81bfd045f..16e4ced84 100644
--- a/platform/linux-generic/include/odp_sysinfo_internal.h
+++ b/platform/linux-generic/include/odp_sysinfo_internal.h
@@ -17,7 +17,6 @@ extern "C" {
#include <string.h>
int _odp_cpuinfo_parser(FILE *file, system_info_t *sysinfo);
-uint64_t odp_cpu_hz_current(int id);
uint64_t odp_cpu_arch_hz_current(int id);
void _odp_sys_info_print_arch(void);
diff --git a/platform/linux-generic/include/odp_timer_internal.h b/platform/linux-generic/include/odp_timer_internal.h
index 435fa8b70..2a7173d29 100644
--- a/platform/linux-generic/include/odp_timer_internal.h
+++ b/platform/linux-generic/include/odp_timer_internal.h
@@ -1,4 +1,5 @@
/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -15,19 +16,18 @@
#include <odp/api/align.h>
#include <odp/api/debug.h>
-#include <odp_buffer_internal.h>
-#include <odp_pool_internal.h>
#include <odp/api/timer.h>
+
+#include <odp_event_internal.h>
#include <odp_global_data.h>
+#include <odp_pool_internal.h>
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpedantic"
/**
* Internal Timeout header
*/
-typedef struct {
- /* common buffer header */
- odp_buffer_hdr_t buf_hdr;
+typedef struct ODP_ALIGNED_CACHE odp_timeout_hdr_t {
+ /* Common event header */
+ _odp_event_hdr_t event_hdr;
/* Requested expiration time */
uint64_t expiration;
@@ -39,7 +39,6 @@ typedef struct {
odp_timer_t timer;
} odp_timeout_hdr_t;
-#pragma GCC diagnostic pop
/* A larger decrement value should be used after receiving events compared to
* an 'empty' call. */
diff --git a/platform/linux-generic/m4/configure.m4 b/platform/linux-generic/m4/configure.m4
index ffac70414..9481deca1 100644
--- a/platform/linux-generic/m4/configure.m4
+++ b/platform/linux-generic/m4/configure.m4
@@ -21,6 +21,7 @@ AS_IF([test "x$with_pcap" != xno],
AM_CONDITIONAL([ODP_PKTIO_PCAP], [test x$have_pcap = xyes])
m4_include([platform/linux-generic/m4/odp_libconfig.m4])
+m4_include([platform/linux-generic/m4/odp_openssl.m4])
m4_include([platform/linux-generic/m4/odp_pcapng.m4])
m4_include([platform/linux-generic/m4/odp_netmap.m4])
m4_include([platform/linux-generic/m4/odp_dpdk.m4])
@@ -31,6 +32,8 @@ AS_VAR_APPEND([PLAT_DEP_LIBS], ["${ATOMIC_LIBS} ${LIBCONFIG_LIBS} ${OPENSSL_LIBS
# Add text to the end of configure with platform specific settings.
# Make sure it's aligned same as other lines in configure.ac.
AS_VAR_APPEND([PLAT_CFG_TEXT], ["
+ openssl: ${with_openssl}
+ openssl_rand: ${openssl_rand}
pcap: ${have_pcap}
pcapng: ${have_pcapng}
default_config_path: ${default_config_path}"])
diff --git a/platform/linux-generic/m4/odp_libconfig.m4 b/platform/linux-generic/m4/odp_libconfig.m4
index ccbf1d6f5..90b49d155 100644
--- a/platform/linux-generic/m4/odp_libconfig.m4
+++ b/platform/linux-generic/m4/odp_libconfig.m4
@@ -3,7 +3,7 @@
##########################################################################
m4_define([_odp_config_version_generation], [0])
m4_define([_odp_config_version_major], [1])
-m4_define([_odp_config_version_minor], [18])
+m4_define([_odp_config_version_minor], [19])
m4_define([_odp_config_version],
[_odp_config_version_generation._odp_config_version_major._odp_config_version_minor])
diff --git a/platform/linux-generic/m4/odp_openssl.m4 b/platform/linux-generic/m4/odp_openssl.m4
new file mode 100644
index 000000000..c9d584c64
--- /dev/null
+++ b/platform/linux-generic/m4/odp_openssl.m4
@@ -0,0 +1,36 @@
+##########################################################################
+# Enable/disable usage of OpenSSL library
+##########################################################################
+AC_ARG_WITH([openssl],
+ [AS_HELP_STRING([--without-openssl],
+ [compile without OpenSSL (may result in disabled crypto and random support)]
+ [[default=with] (linux-generic)])],
+ [],
+ [with_openssl=yes])
+AS_IF([test "$with_openssl" != "no"],
+ [ODP_OPENSSL
+ have_openssl=1], [have_openssl=0])
+AM_CONDITIONAL([WITH_OPENSSL], [test x$with_openssl != xno])
+AC_DEFINE_UNQUOTED([_ODP_OPENSSL], [$have_openssl],
+ [Define to 1 to enable OpenSSL support])
+
+AS_IF([test "${with_openssl}" = "no"],
+ [AC_MSG_WARN([Strong cryptography is not available without OpenSSL])])
+
+##########################################################################
+# Enable/disable usage of OpenSSL for random data
+##########################################################################
+have_openssl_rand=1
+AC_ARG_ENABLE([openssl-rand],
+ [AS_HELP_STRING([--disable-openssl-rand],
+ [disable OpenSSL random data (use arch-specific instead)]
+ [[default=enabled] (linux-generic)])],
+ [if test "x$enableval" = "xno"; then
+ have_openssl_rand=0
+ fi])
+
+AS_IF([test "$have_openssl" != "1"], [have_openssl_rand=0])
+AS_IF([test "$have_openssl_rand" = "1"], [openssl_rand=yes], [openssl_rand=no])
+
+AC_DEFINE_UNQUOTED([_ODP_OPENSSL_RAND], [$have_openssl_rand],
+ [Define to 1 to enable OpenSSL random data support])
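
Typical configure invocations exercising the options above: a plain
./configure builds with OpenSSL crypto and OpenSSL random data (the default);
./configure --disable-openssl-rand keeps OpenSSL crypto but switches random
data to the arch-specific implementation; ./configure --without-openssl drops
OpenSSL entirely, reducing crypto and random support as warned above.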
diff --git a/platform/linux-generic/odp_buffer.c b/platform/linux-generic/odp_buffer.c
index fed113923..0e606017f 100644
--- a/platform/linux-generic/odp_buffer.c
+++ b/platform/linux-generic/odp_buffer.c
@@ -20,16 +20,16 @@
/* Fill in buffer header field offsets for inline functions */
const _odp_buffer_inline_offset_t
_odp_buffer_inline_offset ODP_ALIGNED_CACHE = {
- .event_type = offsetof(odp_buffer_hdr_t, event_type),
- .base_data = offsetof(odp_buffer_hdr_t, base_data)
+ .event_type = offsetof(odp_buffer_hdr_t, event_hdr.event_type),
+ .base_data = offsetof(odp_buffer_hdr_t, event_hdr.base_data)
};
#include <odp/visibility_end.h>
uint32_t odp_buffer_size(odp_buffer_t buf)
{
- odp_buffer_hdr_t *hdr = buf_hdl_to_hdr(buf);
- pool_t *pool = hdr->pool_ptr;
+ odp_buffer_hdr_t *hdr = _odp_buf_hdr(buf);
+ pool_t *pool = hdr->event_hdr.pool_ptr;
return pool->seg_len;
}
@@ -47,12 +47,13 @@ void odp_buffer_print(odp_buffer_t buf)
return;
}
- hdr = buf_hdl_to_hdr(buf);
+ hdr = _odp_buf_hdr(buf);
len += snprintf(&str[len], n - len, "Buffer\n------\n");
- len += snprintf(&str[len], n - len, " pool index %u\n", hdr->index.pool);
- len += snprintf(&str[len], n - len, " buffer index %u\n", hdr->index.buffer);
- len += snprintf(&str[len], n - len, " addr %p\n", (void *)hdr->base_data);
+ len += snprintf(&str[len], n - len, " pool index %u\n", hdr->event_hdr.index.pool);
+ len += snprintf(&str[len], n - len, " buffer index %u\n", hdr->event_hdr.index.buffer);
+ len += snprintf(&str[len], n - len, " addr %p\n",
+ (void *)hdr->event_hdr.base_data);
len += snprintf(&str[len], n - len, " size %u\n", odp_buffer_size(buf));
str[len] = 0;
diff --git a/platform/linux-generic/odp_cpumask_task.c b/platform/linux-generic/odp_cpumask_task.c
index 70b85f1cb..ba77522c3 100644
--- a/platform/linux-generic/odp_cpumask_task.c
+++ b/platform/linux-generic/odp_cpumask_task.c
@@ -1,4 +1,5 @@
/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -47,20 +48,11 @@ int odp_cpumask_default_control(odp_cpumask_t *mask, int num)
odp_cpumask_t overlap;
int cpu, i;
- /*
- * If no user supplied number then default to one control CPU.
- */
- if (0 == num) {
- num = 1;
- } else {
- /*
- * If user supplied number is too large, then attempt
- * to use all installed control CPUs
- */
- cpu = odp_cpumask_count(&odp_global_ro.control_cpus);
- if (cpu < num)
- num = cpu;
- }
+ /* If the user supplied no number, or the number is too large, attempt
+ * to use all control CPUs. */
+ cpu = odp_cpumask_count(&odp_global_ro.control_cpus);
+ if (num == 0 || cpu < num)
+ num = cpu;
/* build the mask, allocating upwards from lowest numbered CPU */
odp_cpumask_zero(mask);
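
The new odp_dma.c below implements the DMA API in software: a transfer is
validated, flattened into a source/destination segment table, and executed
with memcpy(). A minimal caller sketch for the synchronous path (assumes ODP
is already initialized; names are illustrative):

    #include <odp_api.h>
    #include <string.h>

    static int dma_copy_sketch(void)
    {
            uint8_t src_buf[256], dst_buf[256];
            odp_dma_param_t dma_param;
            odp_dma_transfer_param_t trs_param;
            odp_dma_seg_t src_seg, dst_seg;
            odp_dma_t dma;
            int ret;

            memset(src_buf, 0xaa, sizeof(src_buf));
            memset(&src_seg, 0, sizeof(src_seg));
            memset(&dst_seg, 0, sizeof(dst_seg));

            odp_dma_param_init(&dma_param);
            dma_param.compl_mode_mask = ODP_DMA_COMPL_SYNC;

            dma = odp_dma_create("dma_example", &dma_param);
            if (dma == ODP_DMA_INVALID)
                    return -1;

            src_seg.addr = src_buf;
            src_seg.len = sizeof(src_buf);
            dst_seg.addr = dst_buf;
            dst_seg.len = sizeof(dst_buf);

            /* Init defaults to one ADDR format segment on each side */
            odp_dma_transfer_param_init(&trs_param);
            trs_param.src_seg = &src_seg;
            trs_param.dst_seg = &dst_seg;

            /* Returns 1 on success; this implementation copies with memcpy() */
            ret = (odp_dma_transfer(dma, &trs_param, NULL) == 1) ? 0 : -1;

            if (odp_dma_destroy(dma))
                    ret = -1;

            return ret;
    }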
diff --git a/platform/linux-generic/odp_dma.c b/platform/linux-generic/odp_dma.c
new file mode 100644
index 000000000..412c33bd5
--- /dev/null
+++ b/platform/linux-generic/odp_dma.c
@@ -0,0 +1,926 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/dma.h>
+#include <odp/api/shared_memory.h>
+#include <odp/api/ticketlock.h>
+#include <odp/api/align.h>
+#include <odp/api/buffer.h>
+#include <odp/api/stash.h>
+#include <odp/api/pool.h>
+#include <odp/api/plat/std_inlines.h>
+#include <odp_global_data.h>
+#include <odp_debug_internal.h>
+#include <odp_init_internal.h>
+#include <odp_event_internal.h>
+#include <odp_pool_internal.h>
+
+#include <string.h>
+#include <inttypes.h>
+
+#define MAX_SESSIONS CONFIG_MAX_DMA_SESSIONS
+#define MAX_TRANSFERS 256
+#define MAX_SEGS 16
+#define MAX_SEG_LEN (128 * 1024)
+
+typedef struct segment_t {
+ void *addr;
+ uint32_t len;
+
+} segment_t;
+
+typedef struct transfer_t {
+ void *dst;
+ void *src;
+ uint32_t len;
+
+} transfer_t;
+
+typedef struct result_t {
+ void *user_ptr;
+
+} result_t;
+
+typedef struct ODP_ALIGNED_CACHE dma_session_t {
+ odp_ticketlock_t lock;
+ odp_dma_param_t dma_param;
+ uint8_t active;
+ char name[ODP_DMA_NAME_LEN];
+ odp_stash_t stash;
+ result_t result[MAX_TRANSFERS];
+
+} dma_session_t;
+
+typedef struct dma_global_t {
+ odp_shm_t shm;
+
+ /* Buffer pool capability and default parameters */
+ odp_pool_capability_t pool_capa;
+ odp_pool_param_t pool_param;
+
+ dma_session_t session[MAX_SESSIONS];
+
+} dma_global_t;
+
+static dma_global_t *_odp_dma_glb;
+
+static inline dma_session_t *dma_session_from_handle(odp_dma_t dma)
+{
+ return (dma_session_t *)(uintptr_t)dma;
+}
+
+int odp_dma_capability(odp_dma_capability_t *capa)
+{
+ if (odp_global_ro.disable.dma) {
+ ODP_ERR("DMA is disabled\n");
+ return -1;
+ }
+
+ memset(capa, 0, sizeof(odp_dma_capability_t));
+
+ capa->max_sessions = MAX_SESSIONS;
+ capa->max_transfers = MAX_TRANSFERS;
+ capa->max_src_segs = MAX_SEGS;
+ capa->max_dst_segs = MAX_SEGS;
+ capa->max_segs = 2 * MAX_SEGS;
+ capa->max_seg_len = MAX_SEG_LEN;
+
+ capa->compl_mode_mask = ODP_DMA_COMPL_SYNC | ODP_DMA_COMPL_NONE |
+ ODP_DMA_COMPL_EVENT | ODP_DMA_COMPL_POLL;
+
+ capa->queue_type_sched = 1;
+ capa->queue_type_plain = 1;
+
+ capa->pool.max_pools = _odp_dma_glb->pool_capa.buf.max_pools;
+ capa->pool.max_num = _odp_dma_glb->pool_capa.buf.max_num;
+ capa->pool.min_cache_size = _odp_dma_glb->pool_capa.buf.min_cache_size;
+ capa->pool.max_cache_size = _odp_dma_glb->pool_capa.buf.max_cache_size;
+
+ return 0;
+}
+
+void odp_dma_param_init(odp_dma_param_t *param)
+{
+ memset(param, 0, sizeof(odp_dma_param_t));
+
+ param->direction = ODP_DMA_MAIN_TO_MAIN;
+ param->type = ODP_DMA_TYPE_COPY;
+ param->mt_mode = ODP_DMA_MT_SAFE;
+ param->order = ODP_DMA_ORDER_NONE;
+}
+
+static odp_stash_t create_stash(void)
+{
+ odp_stash_param_t stash_param;
+ odp_stash_t stash;
+ uint32_t id, tmp, i;
+ int32_t ret;
+
+ odp_stash_param_init(&stash_param);
+ stash_param.num_obj = MAX_TRANSFERS;
+ stash_param.obj_size = sizeof(uint32_t);
+ stash_param.cache_size = 0;
+
+ stash = odp_stash_create("_odp_dma_transfer_id", &stash_param);
+
+ if (stash == ODP_STASH_INVALID) {
+ ODP_ERR("Stash create failed\n");
+ return ODP_STASH_INVALID;
+ }
+
+ /* Zero is invalid ID */
+ for (id = 1; id < MAX_TRANSFERS + 1; id++) {
+ ret = odp_stash_put_u32(stash, &id, 1);
+ if (ret != 1) {
+ ODP_ERR("Stash put failed: %i, %u\n", ret, id);
+ break;
+ }
+ }
+
+ if (ret != 1) {
+ for (i = 1; i < id; i++) {
+ if (odp_stash_get_u32(stash, &tmp, 1) != 1) {
+ ODP_ERR("Stash get failed: %u\n", i);
+ break;
+ }
+ }
+
+ if (odp_stash_destroy(stash))
+ ODP_ERR("Stash destroy failed\n");
+
+ return ODP_STASH_INVALID;
+ }
+
+ return stash;
+}
+
+static int destroy_stash(odp_stash_t stash)
+{
+ uint32_t tmp;
+ int32_t num;
+ int ret = 0;
+
+ while (1) {
+ num = odp_stash_get_u32(stash, &tmp, 1);
+
+ if (num == 1)
+ continue;
+
+ if (num == 0)
+ break;
+
+ ODP_ERR("Stash get failed: %i\n", num);
+ ret = -1;
+ break;
+ }
+
+ if (odp_stash_destroy(stash)) {
+ ODP_ERR("Stash destroy failed\n");
+ ret = -1;
+ }
+
+ return ret;
+}
+
+odp_dma_t odp_dma_create(const char *name, const odp_dma_param_t *param)
+{
+ odp_dma_capability_t dma_capa;
+ int i;
+ dma_session_t *session = NULL;
+
+ if (odp_global_ro.disable.dma) {
+ ODP_ERR("DMA is disabled\n");
+ return ODP_DMA_INVALID;
+ }
+
+ if ((param->direction != ODP_DMA_MAIN_TO_MAIN) ||
+ (param->type != ODP_DMA_TYPE_COPY)) {
+ ODP_ERR("Bad DMA parameter\n");
+ return ODP_DMA_INVALID;
+ }
+
+ if (param->compl_mode_mask == 0) {
+ ODP_ERR("Empty compl mode mask\n");
+ return ODP_DMA_INVALID;
+ }
+
+ if (odp_dma_capability(&dma_capa)) {
+ ODP_ERR("DMA capa failed\n");
+ return ODP_DMA_INVALID;
+ }
+
+ if (param->compl_mode_mask & ~dma_capa.compl_mode_mask) {
+ ODP_ERR("Compl mode not supported\n");
+ return ODP_DMA_INVALID;
+ }
+
+ for (i = 0; i < MAX_SESSIONS; i++) {
+ if (_odp_dma_glb->session[i].active)
+ continue;
+
+ odp_ticketlock_lock(&_odp_dma_glb->session[i].lock);
+
+ if (_odp_dma_glb->session[i].active) {
+ odp_ticketlock_unlock(&_odp_dma_glb->session[i].lock);
+ continue;
+ }
+
+ session = &_odp_dma_glb->session[i];
+ session->active = 1;
+ odp_ticketlock_unlock(&_odp_dma_glb->session[i].lock);
+ break;
+ }
+
+ if (session == NULL) {
+ ODP_DBG("Out of DMA sessions\n");
+ return ODP_DMA_INVALID;
+ }
+
+ session->stash = ODP_STASH_INVALID;
+
+ /* Create stash for transfer IDs */
+ if (param->compl_mode_mask & ODP_DMA_COMPL_POLL) {
+ session->stash = create_stash();
+
+ if (session->stash == ODP_STASH_INVALID)
+ return ODP_DMA_INVALID;
+ }
+
+ session->name[0] = 0;
+
+ if (name) {
+ strncpy(session->name, name, ODP_DMA_NAME_LEN - 1);
+ session->name[ODP_DMA_NAME_LEN - 1] = 0;
+ }
+
+ session->dma_param = *param;
+
+ return (odp_dma_t)session;
+}
+
+int odp_dma_destroy(odp_dma_t dma)
+{
+ dma_session_t *session = dma_session_from_handle(dma);
+ int ret = 0;
+
+ if (dma == ODP_DMA_INVALID) {
+ ODP_ERR("Bad DMA handle\n");
+ return -1;
+ }
+
+ if (session->stash != ODP_STASH_INVALID)
+ if (destroy_stash(session->stash))
+ ret = -1;
+
+ odp_ticketlock_lock(&session->lock);
+
+ if (session->active == 0) {
+ ODP_ERR("Session not created\n");
+ odp_ticketlock_unlock(&session->lock);
+ return -1;
+ }
+
+ session->active = 0;
+ odp_ticketlock_unlock(&session->lock);
+
+ return ret;
+}
+
+odp_dma_t odp_dma_lookup(const char *name)
+{
+ dma_session_t *session;
+ int i;
+
+ for (i = 0; i < MAX_SESSIONS; i++) {
+ session = &_odp_dma_glb->session[i];
+
+ odp_ticketlock_lock(&session->lock);
+
+ if (session->active == 0) {
+ odp_ticketlock_unlock(&session->lock);
+ continue;
+ }
+
+ if (strcmp(session->name, name) == 0) {
+ /* found it */
+ odp_ticketlock_unlock(&session->lock);
+ return (odp_dma_t)session;
+ }
+ odp_ticketlock_unlock(&session->lock);
+ }
+
+ return ODP_DMA_INVALID;
+}
+
+void odp_dma_transfer_param_init(odp_dma_transfer_param_t *trs_param)
+{
+ memset(trs_param, 0, sizeof(odp_dma_transfer_param_t));
+
+ trs_param->src_format = ODP_DMA_FORMAT_ADDR;
+ trs_param->dst_format = ODP_DMA_FORMAT_ADDR;
+ trs_param->num_src = 1;
+ trs_param->num_dst = 1;
+}
+
+static uint32_t transfer_len(const odp_dma_transfer_param_t *trs_param)
+{
+ uint32_t i;
+ uint32_t src_len = 0;
+ uint32_t dst_len = 0;
+
+ for (i = 0; i < trs_param->num_src; i++)
+ src_len += trs_param->src_seg[i].len;
+
+ for (i = 0; i < trs_param->num_dst; i++)
+ dst_len += trs_param->dst_seg[i].len;
+
+ if (src_len != dst_len)
+ return 0;
+
+ return src_len;
+}
+
+static inline void segment_raw(segment_t seg[], int num, const odp_dma_seg_t *dma_seg)
+{
+ int i;
+
+ for (i = 0; i < num; i++) {
+ seg[i].addr = dma_seg[i].addr;
+ seg[i].len = dma_seg[i].len;
+ }
+}
+
+static inline int segment_pkt(segment_t seg[], int num_seg, const odp_dma_seg_t *dma_seg)
+{
+ odp_packet_t pkt;
+ uint32_t offset;
+ void *addr;
+ uint32_t seg_len, tot_len, len;
+ int i;
+ int num = 0;
+
+ for (i = 0; i < num_seg; i++) {
+ pkt = dma_seg[i].packet;
+ offset = dma_seg[i].offset;
+ tot_len = dma_seg[i].len;
+
+ if (odp_unlikely(offset + tot_len > odp_packet_len(pkt))) {
+ ODP_ERR("Bad packet segment len/offset (%u/%u)\n", tot_len, offset);
+ return 0;
+ }
+
+ while (tot_len) {
+ addr = odp_packet_offset(pkt, offset, &seg_len, NULL);
+
+ if (odp_unlikely(addr == NULL)) {
+ ODP_ERR("Bad packet offset %u\n", offset);
+ return 0;
+ }
+
+ seg[num].addr = addr;
+ len = tot_len;
+ if (tot_len > seg_len)
+ len = seg_len;
+
+ seg[num].len = len;
+
+ tot_len -= len;
+ offset += len;
+ num++;
+
+ if (odp_unlikely(num >= MAX_SEGS)) {
+ ODP_ERR("Too many packet segments\n");
+ return 0;
+ }
+ }
+ }
+
+ return num;
+}
+
+static int transfer_table(transfer_t *trs, const segment_t src_seg[], const segment_t dst_seg[],
+ int max_num, uint32_t tot_len)
+{
+ uint32_t len, src_len, dst_len;
+ uint8_t *src_ptr, *dst_ptr;
+ int i;
+ int src = 0;
+ int dst = 0;
+
+ src_ptr = src_seg[0].addr;
+ dst_ptr = dst_seg[0].addr;
+ src_len = src_seg[0].len;
+ dst_len = dst_seg[0].len;
+
+ len = src_len;
+ if (dst_len < src_len)
+ len = dst_len;
+
+ for (i = 0; i < max_num; i++) {
+ trs[i].src = src_ptr;
+ trs[i].dst = dst_ptr;
+ trs[i].len = len;
+ tot_len -= len;
+
+ if (tot_len == 0)
+ break;
+
+ if (dst_len < src_len) {
+ dst++;
+ dst_ptr = dst_seg[dst].addr;
+ dst_len = dst_seg[dst].len;
+ src_ptr += len;
+ src_len -= len;
+ } else if (src_len < dst_len) {
+ src++;
+ src_ptr = src_seg[src].addr;
+ src_len = src_seg[src].len;
+ dst_ptr += len;
+ dst_len -= len;
+ } else { /* equal lengths */
+ dst++;
+ src++;
+ dst_ptr = dst_seg[dst].addr;
+ dst_len = dst_seg[dst].len;
+ src_ptr = src_seg[src].addr;
+ src_len = src_seg[src].len;
+ }
+
+ len = src_len;
+ if (dst_len < src_len)
+ len = dst_len;
+ }
+
+ return i + 1;
+}
+
+int odp_dma_transfer(odp_dma_t dma, const odp_dma_transfer_param_t *transfer,
+ odp_dma_result_t *result)
+{
+ int num, i;
+ uint32_t tot_len;
+ dma_session_t *session = dma_session_from_handle(dma);
+ int num_src, num_dst;
+ const int max_num = 2 * MAX_SEGS;
+ transfer_t trs[max_num];
+ segment_t src[MAX_SEGS];
+ segment_t dst[MAX_SEGS];
+
+ if (odp_unlikely(dma == ODP_DMA_INVALID)) {
+ ODP_ERR("Bad DMA handle\n");
+ return -1;
+ }
+
+ if (odp_unlikely(session->active == 0)) {
+ ODP_ERR("Session not created\n");
+ return -1;
+ }
+
+ if (odp_unlikely(transfer->num_src == 0 || transfer->num_src > MAX_SEGS)) {
+ ODP_ERR("Bad number of src segments\n");
+ return -1;
+ }
+
+ if (odp_unlikely(transfer->num_dst == 0 || transfer->num_dst > MAX_SEGS)) {
+ ODP_ERR("Bad number of dst segments\n");
+ return -1;
+ }
+
+ tot_len = transfer_len(transfer);
+
+ if (odp_unlikely(tot_len == 0)) {
+ ODP_ERR("Bad transfer length\n");
+ return -1;
+ }
+
+ if (transfer->src_format == ODP_DMA_FORMAT_ADDR) {
+ num_src = transfer->num_src;
+ segment_raw(src, num_src, transfer->src_seg);
+ } else {
+ num_src = segment_pkt(src, transfer->num_src, transfer->src_seg);
+
+ if (odp_unlikely(num_src == 0))
+ return -1;
+ }
+
+ if (transfer->dst_format == ODP_DMA_FORMAT_ADDR) {
+ num_dst = transfer->num_dst;
+ segment_raw(dst, num_dst, transfer->dst_seg);
+ } else {
+ num_dst = segment_pkt(dst, transfer->num_dst, transfer->dst_seg);
+
+ if (odp_unlikely(num_dst == 0))
+ return -1;
+ }
+
+ num = transfer_table(trs, src, dst, max_num, tot_len);
+
+ if (odp_unlikely(num > max_num)) {
+ ODP_ERR("Segment table error\n");
+ return -1;
+ }
+
+ for (i = 0; i < num; i++)
+ memcpy(trs[i].dst, trs[i].src, trs[i].len);
+
+ if (result) {
+ memset(result, 0, sizeof(odp_dma_result_t));
+ result->success = 1;
+ }
+
+ return 1;
+}
+
+int odp_dma_transfer_multi(odp_dma_t dma, const odp_dma_transfer_param_t *trs_param[],
+ odp_dma_result_t *result[], int num)
+{
+ int i;
+ odp_dma_result_t *res = NULL;
+ int ret = 0;
+
+ if (odp_unlikely(num < 1)) {
+ ODP_ERR("Bad number of transfers\n");
+ return -1;
+ }
+
+ for (i = 0; i < num; i++) {
+ if (result)
+ res = result[i];
+
+ ret = odp_dma_transfer(dma, trs_param[i], res);
+
+ if (odp_unlikely(ret != 1))
+ break;
+ }
+
+ if (odp_unlikely(i == 0))
+ return ret;
+
+ return i;
+}
+
+void odp_dma_compl_param_init(odp_dma_compl_param_t *compl_param)
+{
+ memset(compl_param, 0, sizeof(odp_dma_compl_param_t));
+ compl_param->queue = ODP_QUEUE_INVALID;
+ compl_param->event = ODP_EVENT_INVALID;
+ compl_param->transfer_id = ODP_DMA_TRANSFER_ID_INVALID;
+}
+
+odp_dma_transfer_id_t odp_dma_transfer_id_alloc(odp_dma_t dma)
+{
+ int32_t num;
+ uint32_t id;
+ dma_session_t *session = dma_session_from_handle(dma);
+
+ num = odp_stash_get_u32(session->stash, &id, 1);
+
+ if (odp_unlikely(num != 1))
+ return ODP_DMA_TRANSFER_ID_INVALID;
+
+ return id;
+}
+
+void odp_dma_transfer_id_free(odp_dma_t dma, odp_dma_transfer_id_t transfer_id)
+{
+ int32_t num;
+ dma_session_t *session = dma_session_from_handle(dma);
+ uint32_t id = transfer_id;
+
+ num = odp_stash_put_u32(session->stash, &id, 1);
+
+ if (odp_unlikely(num != 1))
+ ODP_ERR("Stash put failed\n");
+}
+
+static inline uint32_t index_from_transfer_id(odp_dma_transfer_id_t transfer_id)
+{
+ return transfer_id - 1;
+}
+
+int odp_dma_transfer_start(odp_dma_t dma, const odp_dma_transfer_param_t *transfer,
+ const odp_dma_compl_param_t *compl)
+{
+ int ret;
+ dma_session_t *session = dma_session_from_handle(dma);
+
+ if (odp_unlikely(dma == ODP_DMA_INVALID)) {
+ ODP_ERR("Bad DMA handle\n");
+ return -1;
+ }
+
+ /* Check completion mode */
+ switch (compl->compl_mode) {
+ case ODP_DMA_COMPL_NONE:
+ break;
+ case ODP_DMA_COMPL_POLL:
+ if (compl->transfer_id == ODP_DMA_TRANSFER_ID_INVALID ||
+ compl->transfer_id > MAX_TRANSFERS) {
+ ODP_ERR("Bad transfer ID: %u\n", compl->transfer_id);
+ return -1;
+ }
+ break;
+ case ODP_DMA_COMPL_EVENT:
+ if (compl->event == ODP_EVENT_INVALID ||
+ compl->queue == ODP_QUEUE_INVALID) {
+ ODP_ERR("Bad event or queue\n");
+ return -1;
+ }
+ break;
+ default:
+ ODP_ERR("Bad completion mode %u\n", compl->compl_mode);
+ return -1;
+ }
+
+ ret = odp_dma_transfer(dma, transfer, NULL);
+
+ if (odp_unlikely(ret < 1))
+ return ret;
+
+ if (compl->compl_mode == ODP_DMA_COMPL_POLL) {
+ uint32_t index = index_from_transfer_id(compl->transfer_id);
+
+ session->result[index].user_ptr = compl->user_ptr;
+
+ } else if (compl->compl_mode == ODP_DMA_COMPL_EVENT) {
+ odp_dma_result_t *result;
+ odp_buffer_t buf = (odp_buffer_t)(uintptr_t)compl->event;
+
+ if (odp_unlikely(odp_event_type(compl->event) != ODP_EVENT_DMA_COMPL)) {
+ ODP_ERR("Bad completion event type\n");
+ return -1;
+ }
+
+ result = odp_buffer_addr(buf);
+ result->success = 1;
+ result->user_ptr = compl->user_ptr;
+
+ if (odp_unlikely(odp_queue_enq(compl->queue, compl->event))) {
+ ODP_ERR("Completion event enqueue failed %" PRIu64 "\n",
+ odp_queue_to_u64(compl->queue));
+ return -1;
+ }
+ }
+
+ return 1;
+}
+
+int odp_dma_transfer_start_multi(odp_dma_t dma, const odp_dma_transfer_param_t *trs_param[],
+ const odp_dma_compl_param_t *compl_param[], int num)
+{
+ int i;
+ int ret = 0;
+
+ if (odp_unlikely(num < 1)) {
+ ODP_ERR("Bad number of transfers\n");
+ return -1;
+ }
+
+ for (i = 0; i < num; i++) {
+ ret = odp_dma_transfer_start(dma, trs_param[i], compl_param[i]);
+
+ if (odp_unlikely(ret != 1))
+ break;
+ }
+
+ if (odp_unlikely(i == 0))
+ return ret;
+
+ return i;
+}
+
+int odp_dma_transfer_done(odp_dma_t dma, odp_dma_transfer_id_t transfer_id,
+ odp_dma_result_t *result)
+{
+ dma_session_t *session = dma_session_from_handle(dma);
+
+ if (odp_unlikely(dma == ODP_DMA_INVALID)) {
+ ODP_ERR("Bad DMA handle\n");
+ return -1;
+ }
+
+ if (odp_unlikely(transfer_id == ODP_DMA_TRANSFER_ID_INVALID ||
+ transfer_id > MAX_TRANSFERS)) {
+ ODP_ERR("Bad transfer ID: %u\n", transfer_id);
+ return -1;
+ }
+
+ if (result) {
+ uint32_t index = index_from_transfer_id(transfer_id);
+
+ result->success = 1;
+ result->user_ptr = session->result[index].user_ptr;
+ }
+
+ return 1;
+}
+
+void odp_dma_pool_param_init(odp_dma_pool_param_t *pool_param)
+{
+ memset(pool_param, 0, sizeof(odp_dma_pool_param_t));
+
+ pool_param->cache_size = _odp_dma_glb->pool_param.buf.cache_size;
+}
+
+odp_pool_t odp_dma_pool_create(const char *name, const odp_dma_pool_param_t *dma_pool_param)
+{
+ odp_pool_t pool;
+ odp_pool_param_t pool_param;
+ uint32_t num = dma_pool_param->num;
+ uint32_t cache_size = dma_pool_param->cache_size;
+
+ if (num > _odp_dma_glb->pool_capa.buf.max_num) {
+ ODP_ERR("Too many DMA completion events: %u\n", num);
+ return ODP_POOL_INVALID;
+ }
+
+ if (cache_size < _odp_dma_glb->pool_capa.buf.min_cache_size ||
+ cache_size > _odp_dma_glb->pool_capa.buf.max_cache_size) {
+ ODP_ERR("Bad cache size: %u\n", cache_size);
+ return ODP_POOL_INVALID;
+ }
+
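+	/* Completion events are buffers sized to carry an odp_dma_result_t */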
+ odp_pool_param_init(&pool_param);
+ pool_param.type = ODP_POOL_BUFFER;
+ pool_param.buf.num = num;
+ pool_param.buf.cache_size = cache_size;
+ pool_param.buf.size = sizeof(odp_dma_result_t);
+
+ pool = _odp_pool_create(name, &pool_param, ODP_POOL_DMA_COMPL);
+
+ return pool;
+}
+
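+/* Usage sketch of the event completion mode (illustrative only; 'dma',
+ * 'queue', 'compl_pool', 'trs_param' and 'result' are assumed to exist):
+ *
+ *   odp_dma_compl_t c = odp_dma_compl_alloc(compl_pool);
+ *   odp_dma_compl_param_t cp;
+ *
+ *   odp_dma_compl_param_init(&cp);
+ *   cp.compl_mode = ODP_DMA_COMPL_EVENT;
+ *   cp.event      = odp_dma_compl_to_event(c);
+ *   cp.queue      = queue;
+ *   odp_dma_transfer_start(dma, &trs_param, &cp);
+ *
+ *   odp_event_t ev = odp_schedule(NULL, ODP_SCHED_WAIT);
+ *   odp_dma_compl_result(odp_dma_compl_from_event(ev), &result);
+ */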
+odp_dma_compl_t odp_dma_compl_alloc(odp_pool_t pool)
+{
+ odp_buffer_t buf;
+ odp_event_t ev;
+ odp_dma_result_t *result;
+
+ buf = odp_buffer_alloc(pool);
+
+ if (odp_unlikely(buf == ODP_BUFFER_INVALID))
+ return ODP_DMA_COMPL_INVALID;
+
+ result = odp_buffer_addr(buf);
+ memset(result, 0, sizeof(odp_dma_result_t));
+
+ ev = odp_buffer_to_event(buf);
+ _odp_event_type_set(ev, ODP_EVENT_DMA_COMPL);
+
+ return (odp_dma_compl_t)(uintptr_t)buf;
+}
+
+void odp_dma_compl_free(odp_dma_compl_t dma_compl)
+{
+ odp_event_t ev;
+ odp_buffer_t buf = (odp_buffer_t)(uintptr_t)dma_compl;
+
+ if (odp_unlikely(dma_compl == ODP_DMA_COMPL_INVALID)) {
+ ODP_ERR("Bad DMA compl handle\n");
+ return;
+ }
+
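+	/* Restore the plain buffer event type before returning the buffer
+	 * to its pool */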
+ ev = odp_buffer_to_event(buf);
+ _odp_event_type_set(ev, ODP_EVENT_BUFFER);
+
+ odp_buffer_free(buf);
+}
+
+odp_dma_compl_t odp_dma_compl_from_event(odp_event_t ev)
+{
+ return (odp_dma_compl_t)(uintptr_t)ev;
+}
+
+odp_event_t odp_dma_compl_to_event(odp_dma_compl_t dma_compl)
+{
+ return (odp_event_t)(uintptr_t)dma_compl;
+}
+
+int odp_dma_compl_result(odp_dma_compl_t dma_compl, odp_dma_result_t *result_out)
+{
+ odp_dma_result_t *result;
+ odp_buffer_t buf = (odp_buffer_t)(uintptr_t)dma_compl;
+
+ if (odp_unlikely(dma_compl == ODP_DMA_COMPL_INVALID)) {
+ ODP_ERR("Bad DMA compl handle\n");
+ return -1;
+ }
+
+ result = odp_buffer_addr(buf);
+
+ if (result_out)
+ *result_out = *result;
+
+ return result->success ? 0 : -1;
+}
+
+uint64_t odp_dma_to_u64(odp_dma_t dma)
+{
+ return (uint64_t)(uintptr_t)dma;
+}
+
+uint64_t odp_dma_compl_to_u64(odp_dma_compl_t dma_compl)
+{
+ return (uint64_t)(uintptr_t)dma_compl;
+}
+
+void odp_dma_print(odp_dma_t dma)
+{
+ dma_session_t *session = dma_session_from_handle(dma);
+
+ if (dma == ODP_DMA_INVALID) {
+ ODP_ERR("Bad DMA handle\n");
+ return;
+ }
+
+ ODP_PRINT("\nDMA info\n");
+ ODP_PRINT("--------\n");
+ ODP_PRINT(" DMA handle 0x%" PRIx64 "\n", odp_dma_to_u64(dma));
+ ODP_PRINT(" name %s\n", session->name);
+ ODP_PRINT("\n");
+}
+
+void odp_dma_compl_print(odp_dma_compl_t dma_compl)
+{
+ odp_dma_result_t result;
+ int ret;
+
+ if (dma_compl == ODP_DMA_COMPL_INVALID) {
+ ODP_ERR("Bad DMA compl handle\n");
+ return;
+ }
+
+ ret = odp_dma_compl_result(dma_compl, &result);
+
+ ODP_PRINT("\nDMA completion\n");
+ ODP_PRINT("--------------\n");
+ ODP_PRINT(" Compl event handle: 0x%" PRIx64 "\n", (uint64_t)(uintptr_t)dma_compl);
+
+ if (ret == 0) {
+ ODP_PRINT(" Result: %s\n", result.success ? "success" : "fail");
+ ODP_PRINT(" User pointer: 0x%" PRIx64 "\n",
+ (uint64_t)(uintptr_t)result.user_ptr);
+ } else {
+ ODP_PRINT(" No result metadata\n");
+ }
+
+ ODP_PRINT("\n");
+}
+
+int _odp_dma_init_global(void)
+{
+ odp_shm_t shm;
+ int i;
+
+ if (odp_global_ro.disable.dma) {
+ ODP_PRINT("DMA is DISABLED\n");
+ return 0;
+ }
+
+ shm = odp_shm_reserve("_odp_dma_global", sizeof(dma_global_t), ODP_CACHE_LINE_SIZE, 0);
+ _odp_dma_glb = odp_shm_addr(shm);
+
+ if (_odp_dma_glb == NULL) {
+ ODP_ERR("SHM reserve failed\n");
+ return -1;
+ }
+
+ memset(_odp_dma_glb, 0, sizeof(dma_global_t));
+ _odp_dma_glb->shm = shm;
+
+ odp_pool_param_init(&_odp_dma_glb->pool_param);
+
+ if (odp_pool_capability(&_odp_dma_glb->pool_capa)) {
+ ODP_ERR("Pool capability failed\n");
+ return -1;
+ }
+
+ for (i = 0; i < MAX_SESSIONS; i++)
+ odp_ticketlock_init(&_odp_dma_glb->session[i].lock);
+
+ return 0;
+}
+
+int _odp_dma_term_global(void)
+{
+ odp_shm_t shm;
+
+ if (odp_global_ro.disable.dma)
+ return 0;
+
+ if (_odp_dma_glb == NULL)
+ return 0;
+
+ shm = _odp_dma_glb->shm;
+
+ if (odp_shm_free(shm)) {
+ ODP_ERR("SHM free failed\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/platform/linux-generic/odp_event.c b/platform/linux-generic/odp_event.c
index 5398442d6..b78881cb4 100644
--- a/platform/linux-generic/odp_event.c
+++ b/platform/linux-generic/odp_event.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2015-2018, Linaro Limited
- * Copyright (c) 2020, Nokia
+ * Copyright (c) 2020-2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -15,6 +15,7 @@
#include <odp_ipsec_internal.h>
#include <odp_debug_internal.h>
#include <odp_packet_internal.h>
+#include <odp_event_internal.h>
#include <odp_event_vector_internal.h>
/* Inlined API functions */
@@ -24,8 +25,7 @@
odp_event_subtype_t odp_event_subtype(odp_event_t event)
{
- if (_odp_buffer_event_type(odp_buffer_from_event(event)) !=
- ODP_EVENT_PACKET)
+ if (_odp_event_type(event) != ODP_EVENT_PACKET)
return ODP_EVENT_NO_SUBTYPE;
return odp_packet_subtype(odp_packet_from_event(event));
@@ -34,8 +34,7 @@ odp_event_subtype_t odp_event_subtype(odp_event_t event)
odp_event_type_t odp_event_types(odp_event_t event,
odp_event_subtype_t *subtype)
{
- odp_buffer_t buf = odp_buffer_from_event(event);
- odp_event_type_t event_type = _odp_buffer_event_type(buf);
+ odp_event_type_t event_type = _odp_event_type(event);
*subtype = event_type == ODP_EVENT_PACKET ?
odp_packet_subtype(odp_packet_from_event(event)) :
@@ -75,6 +74,9 @@ void odp_event_free(odp_event_t event)
case ODP_EVENT_IPSEC_STATUS:
_odp_ipsec_status_free(_odp_ipsec_status_from_event(event));
break;
+ case ODP_EVENT_DMA_COMPL:
+ odp_dma_compl_free(odp_dma_compl_from_event(event));
+ break;
default:
ODP_ABORT("Invalid event type: %d\n", odp_event_type(event));
}
@@ -100,13 +102,10 @@ uint64_t odp_event_to_u64(odp_event_t hdl)
int odp_event_is_valid(odp_event_t event)
{
- odp_buffer_t buf;
-
if (event == ODP_EVENT_INVALID)
return 0;
- buf = odp_buffer_from_event(event);
- if (_odp_buffer_is_valid(buf) == 0)
+ if (_odp_event_is_valid(event) == 0)
return 0;
switch (odp_event_type(event)) {
@@ -121,6 +120,8 @@ int odp_event_is_valid(odp_event_t event)
case ODP_EVENT_IPSEC_STATUS:
/* Fall through */
case ODP_EVENT_PACKET_VECTOR:
+ /* Fall through */
+ case ODP_EVENT_DMA_COMPL:
break;
default:
return 0;
diff --git a/platform/linux-generic/odp_init.c b/platform/linux-generic/odp_init.c
index 18646dc08..0c49946b0 100644
--- a/platform/linux-generic/odp_init.c
+++ b/platform/linux-generic/odp_init.c
@@ -1,4 +1,5 @@
/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -44,16 +45,21 @@ enum init_stage {
IPSEC_EVENTS_INIT,
IPSEC_SAD_INIT,
IPSEC_INIT,
+ DMA_INIT,
ALL_INIT /* All init stages completed */
};
odp_global_data_ro_t odp_global_ro;
odp_global_data_rw_t *odp_global_rw;
+/* odp_init_local() call status */
+static __thread uint8_t init_local_called;
+
static void disable_features(odp_global_data_ro_t *global_ro,
const odp_init_t *init_param)
{
int disable_ipsec, disable_crypto;
+ int disable_dma;
if (init_param == NULL)
return;
@@ -66,7 +72,13 @@ static void disable_features(odp_global_data_ro_t *global_ro,
if (disable_ipsec && disable_crypto)
global_ro->disable.crypto = 1;
- global_ro->disable.stash = init_param->not_used.feat.stash;
+ disable_dma = init_param->not_used.feat.dma;
+ global_ro->disable.dma = disable_dma;
+
+ /* DMA uses stash. Disable stash only when both are disabled. */
+ if (disable_dma && init_param->not_used.feat.stash)
+ global_ro->disable.stash = 1;
+
global_ro->disable.traffic_mngr = init_param->not_used.feat.tm;
global_ro->disable.compress = init_param->not_used.feat.compress;
}
@@ -119,6 +131,13 @@ static int term_global(enum init_stage stage)
switch (stage) {
case ALL_INIT:
+ case DMA_INIT:
+ if (_odp_dma_term_global()) {
+ ODP_ERR("ODP DMA term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
case IPSEC_INIT:
if (_odp_ipsec_term_global()) {
ODP_ERR("ODP IPsec term failed.\n");
@@ -458,6 +477,12 @@ int odp_init_global(odp_instance_t *instance,
}
stage = IPSEC_INIT;
+ if (_odp_dma_init_global()) {
+ ODP_ERR("ODP DMA init failed.\n");
+ goto init_failed;
+ }
+ stage = DMA_INIT;
+
*instance = (odp_instance_t)odp_global_ro.main_pid;
return 0;
@@ -560,6 +585,13 @@ int odp_init_local(odp_instance_t instance, odp_thread_type_t thr_type)
goto init_fail;
}
+	/* Detect if odp_init_local() has already been called from this thread */
+ if (getpid() == odp_global_ro.main_pid && init_local_called) {
+ ODP_ERR("%s() called multiple times by the same thread\n", __func__);
+ goto init_fail;
+ }
+ init_local_called = 1;
+
if (_odp_ishm_init_local()) {
ODP_ERR("ODP ishm local init failed.\n");
goto init_fail;
@@ -623,6 +655,13 @@ init_fail:
int odp_term_local(void)
{
+ /* Check that odp_init_local() has been called by this thread */
+ if (!init_local_called) {
+ ODP_ERR("%s() called by a non-initialized thread\n", __func__);
+ return -1;
+ }
+ init_local_called = 0;
+
return term_local(ALL_INIT);
}
diff --git a/platform/linux-generic/odp_ipsec.c b/platform/linux-generic/odp_ipsec.c
index 3bd524c3c..e28611849 100644
--- a/platform/linux-generic/odp_ipsec.c
+++ b/platform/linux-generic/odp_ipsec.c
@@ -186,12 +186,38 @@ int odp_ipsec_capability(odp_ipsec_capability_t *capa)
return 0;
}
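+/* Counter/nonce based ciphers generate IVs deterministically and do not
+ * depend on crypto-grade random data */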
+static int cipher_requires_randomness(odp_cipher_alg_t cipher)
+{
+ int ret;
+
+ switch (cipher) {
+ case ODP_CIPHER_ALG_NULL:
+ case ODP_CIPHER_ALG_AES_CTR:
+#if ODP_DEPRECATED_API
+ case ODP_CIPHER_ALG_AES128_GCM:
+#endif
+ case ODP_CIPHER_ALG_AES_GCM:
+ case ODP_CIPHER_ALG_AES_CCM:
+ case ODP_CIPHER_ALG_CHACHA20_POLY1305:
+ ret = 0;
+ break;
+ default:
+ ret = 1;
+ break;
+ }
+ return ret;
+}
+
int odp_ipsec_cipher_capability(odp_cipher_alg_t cipher,
odp_ipsec_cipher_capability_t capa[], int num)
{
uint32_t req_iv_len;
int rc, i, out, max_capa;
+ if (odp_random_max_kind() < ODP_RANDOM_CRYPTO &&
+ cipher_requires_randomness(cipher))
+ return 0;
+
max_capa = odp_crypto_cipher_capability(cipher, NULL, 0);
if (max_capa <= 0)
return max_capa;
@@ -1254,7 +1280,7 @@ static int ipsec_random_data(uint8_t *data, uint32_t len)
uint32_t rnd_len;
rnd_len = odp_random_data(buffer, IPSEC_RANDOM_BUF_SIZE,
- odp_global_ro.ipsec_rand_kind);
+ ODP_RANDOM_CRYPTO);
if (odp_unlikely(rnd_len != IPSEC_RANDOM_BUF_SIZE))
return -1;
memcpy(data, &buffer[0], len);
@@ -2435,10 +2461,6 @@ int _odp_ipsec_init_global(void)
memset(&default_out_opt, 0, sizeof(default_out_opt));
- odp_global_ro.ipsec_rand_kind = ODP_RANDOM_CRYPTO;
- if (odp_global_ro.ipsec_rand_kind > odp_random_max_kind())
- odp_global_ro.ipsec_rand_kind = odp_random_max_kind();
-
return 0;
}
diff --git a/platform/linux-generic/odp_ipsec_events.c b/platform/linux-generic/odp_ipsec_events.c
index a199ffdf3..f229a5cf2 100644
--- a/platform/linux-generic/odp_ipsec_events.c
+++ b/platform/linux-generic/odp_ipsec_events.c
@@ -93,7 +93,7 @@ static odp_event_t ipsec_status_to_event(ipsec_status_t status)
static ipsec_status_hdr_t *ipsec_status_hdr_from_buf(odp_buffer_t buf)
{
- return (ipsec_status_hdr_t *)(void *)buf_hdl_to_hdr(buf);
+ return (ipsec_status_hdr_t *)(void *)_odp_buf_hdr(buf);
}
static ipsec_status_hdr_t *ipsec_status_hdr(ipsec_status_t status)
@@ -110,7 +110,7 @@ static ipsec_status_t odp_ipsec_status_alloc(void)
if (odp_unlikely(buf == ODP_BUFFER_INVALID))
return ODP_IPSEC_STATUS_INVALID;
- _odp_buffer_event_type_set(buf, ODP_EVENT_IPSEC_STATUS);
+ _odp_event_type_set(odp_buffer_to_event(buf), ODP_EVENT_IPSEC_STATUS);
return _odp_ipsec_status_from_event(odp_buffer_to_event(buf));
}
diff --git a/platform/linux-generic/odp_ipsec_sad.c b/platform/linux-generic/odp_ipsec_sad.c
index 756370516..50f2bb360 100644
--- a/platform/linux-generic/odp_ipsec_sad.c
+++ b/platform/linux-generic/odp_ipsec_sad.c
@@ -480,6 +480,24 @@ static int ipsec_antireplay_init(ipsec_sa_t *ipsec_sa,
return 0;
}
+static void store_sa_info(ipsec_sa_t *ipsec_sa, const odp_ipsec_sa_param_t *p)
+{
+ ipsec_sa->sa_info.cipher_alg = p->crypto.cipher_alg;
+ ipsec_sa->sa_info.cipher_key_len = p->crypto.cipher_key.length;
+	ipsec_sa->sa_info.cipher_key_extra_len = p->crypto.cipher_key_extra.length;
+ ipsec_sa->sa_info.auth_alg = p->crypto.auth_alg;
+ ipsec_sa->sa_info.auth_key_len = p->crypto.auth_key.length;
+ ipsec_sa->sa_info.auth_key_extra_len = p->crypto.auth_key_extra.length;
+
+ ipsec_sa->sa_info.icv_len = p->crypto.icv_len;
+ ipsec_sa->sa_info.context_len = p->context_len;
+
+ if (p->dir == ODP_IPSEC_DIR_INBOUND)
+ ipsec_sa->sa_info.in.antireplay_ws = p->inbound.antireplay_ws;
+ else
+ ipsec_sa->sa_info.out.mtu = p->outbound.mtu;
+}
+
odp_ipsec_sa_t odp_ipsec_sa_create(const odp_ipsec_sa_param_t *param)
{
ipsec_sa_t *ipsec_sa;
@@ -487,12 +505,18 @@ odp_ipsec_sa_t odp_ipsec_sa_create(const odp_ipsec_sa_param_t *param)
odp_crypto_ses_create_err_t ses_create_rc;
const odp_crypto_key_t *salt_param = NULL;
+ if (!odp_ipsec_cipher_capability(param->crypto.cipher_alg, NULL, 0) ||
+ !odp_ipsec_auth_capability(param->crypto.auth_alg, NULL, 0))
+ return ODP_IPSEC_SA_INVALID;
+
ipsec_sa = ipsec_sa_reserve();
if (NULL == ipsec_sa) {
ODP_ERR("No more free SA\n");
return ODP_IPSEC_SA_INVALID;
}
+ store_sa_info(ipsec_sa, param);
+
ipsec_sa->proto = param->proto;
ipsec_sa->spi = param->spi;
ipsec_sa->context = param->context;
@@ -506,6 +530,7 @@ odp_ipsec_sa_t odp_ipsec_sa_create(const odp_ipsec_sa_param_t *param)
ipsec_sa->esn = param->opt.esn;
if (ODP_IPSEC_DIR_INBOUND == param->dir) {
+ ipsec_sa->inbound = 1;
ipsec_sa->lookup_mode = param->inbound.lookup_mode;
if (ODP_IPSEC_LOOKUP_DSTADDR_SPI == ipsec_sa->lookup_mode) {
ipsec_sa->in.lookup_ver =
@@ -551,20 +576,6 @@ odp_ipsec_sa_t odp_ipsec_sa_create(const odp_ipsec_sa_param_t *param)
odp_atomic_init_u64(&ipsec_sa->stats.post_lifetime_err_pkts, 0);
odp_atomic_init_u64(&ipsec_sa->stats.post_lifetime_err_bytes, 0);
- /* Copy application provided parameter values. */
- ipsec_sa->param = *param;
-
- /* Set all the key related pointers and ip address pointers to null. */
- ipsec_sa->param.crypto.cipher_key.data = NULL;
- ipsec_sa->param.crypto.cipher_key_extra.data = NULL;
- ipsec_sa->param.crypto.auth_key.data = NULL;
- ipsec_sa->param.crypto.auth_key_extra.data = NULL;
- ipsec_sa->param.inbound.lookup_param.dst_addr = NULL;
- ipsec_sa->param.outbound.tunnel.ipv4.src_addr = NULL;
- ipsec_sa->param.outbound.tunnel.ipv4.dst_addr = NULL;
- ipsec_sa->param.outbound.tunnel.ipv6.src_addr = NULL;
- ipsec_sa->param.outbound.tunnel.ipv6.dst_addr = NULL;
-
if (ODP_IPSEC_MODE_TUNNEL == ipsec_sa->mode &&
ODP_IPSEC_DIR_OUTBOUND == param->dir) {
if (ODP_IPSEC_TUNNEL_IPV4 == param->outbound.tunnel.type) {
@@ -1161,22 +1172,35 @@ void _odp_ipsec_sa_stats_pkts(ipsec_sa_t *sa, odp_ipsec_stats_t *stats)
static void ipsec_out_sa_info(ipsec_sa_t *ipsec_sa, odp_ipsec_sa_info_t *sa_info)
{
+ odp_ipsec_tunnel_param_t *tun_param = &sa_info->param.outbound.tunnel;
+
+ tun_param->type = ipsec_sa->tun_ipv4 ? ODP_IPSEC_TUNNEL_IPV4 :
+ ODP_IPSEC_TUNNEL_IPV6;
+ tun_param->ipv4.dscp = ipsec_sa->out.tun_ipv4.param.dscp;
+ tun_param->ipv4.df = ipsec_sa->out.tun_ipv4.param.df;
+ tun_param->ipv4.ttl = ipsec_sa->out.tun_ipv4.param.ttl;
+ tun_param->ipv6.flabel = ipsec_sa->out.tun_ipv6.param.flabel;
+ tun_param->ipv6.dscp = ipsec_sa->out.tun_ipv6.param.dscp;
+ tun_param->ipv6.hlimit = ipsec_sa->out.tun_ipv6.param.hlimit;
+
+ sa_info->param.outbound.frag_mode = ipsec_sa->out.frag_mode;
+ sa_info->param.outbound.mtu = ipsec_sa->sa_info.out.mtu;
+
sa_info->outbound.seq_num =
(uint64_t)odp_atomic_load_u64(&ipsec_sa->hot.out.seq) - 1;
- if (ipsec_sa->param.mode == ODP_IPSEC_MODE_TUNNEL) {
+ if (ipsec_sa->mode == ODP_IPSEC_MODE_TUNNEL) {
uint8_t *src, *dst;
- if (ipsec_sa->param.outbound.tunnel.type ==
- ODP_IPSEC_TUNNEL_IPV4) {
+ if (ipsec_sa->tun_ipv4) {
src = sa_info->outbound.tunnel.ipv4.src_addr;
dst = sa_info->outbound.tunnel.ipv4.dst_addr;
memcpy(src, &ipsec_sa->out.tun_ipv4.src_ip,
ODP_IPV4_ADDR_SIZE);
memcpy(dst, &ipsec_sa->out.tun_ipv4.dst_ip,
ODP_IPV4_ADDR_SIZE);
- sa_info->param.outbound.tunnel.ipv4.src_addr = src;
- sa_info->param.outbound.tunnel.ipv4.dst_addr = dst;
+ tun_param->ipv4.src_addr = src;
+ tun_param->ipv4.dst_addr = dst;
} else {
src = sa_info->outbound.tunnel.ipv6.src_addr;
dst = sa_info->outbound.tunnel.ipv6.dst_addr;
@@ -1184,8 +1208,8 @@ static void ipsec_out_sa_info(ipsec_sa_t *ipsec_sa, odp_ipsec_sa_info_t *sa_info
ODP_IPV6_ADDR_SIZE);
memcpy(dst, &ipsec_sa->out.tun_ipv6.dst_ip,
ODP_IPV6_ADDR_SIZE);
- sa_info->param.outbound.tunnel.ipv6.src_addr = src;
- sa_info->param.outbound.tunnel.ipv6.dst_addr = dst;
+ tun_param->ipv6.src_addr = src;
+ tun_param->ipv6.dst_addr = dst;
}
}
}
@@ -1194,9 +1218,16 @@ static void ipsec_in_sa_info(ipsec_sa_t *ipsec_sa, odp_ipsec_sa_info_t *sa_info)
{
uint8_t *dst = sa_info->inbound.lookup_param.dst_addr;
+ sa_info->param.inbound.lookup_mode = ipsec_sa->lookup_mode;
+ sa_info->param.inbound.lookup_param.ip_version = ipsec_sa->in.lookup_ver;
+ sa_info->param.inbound.lookup_param.dst_addr = dst;
+ sa_info->param.inbound.antireplay_ws = ipsec_sa->sa_info.in.antireplay_ws;
+ sa_info->param.inbound.pipeline = ODP_IPSEC_PIPELINE_NONE;
+ sa_info->param.inbound.dest_cos = ODP_COS_INVALID;
+ sa_info->param.inbound.reassembly_en = false;
+
if (ipsec_sa->lookup_mode == ODP_IPSEC_LOOKUP_DSTADDR_SPI) {
- if (ipsec_sa->param.inbound.lookup_param.ip_version ==
- ODP_IPSEC_IPV4)
+ if (ipsec_sa->in.lookup_ver == ODP_IPSEC_IPV4)
memcpy(dst, &ipsec_sa->in.lookup_dst_ipv4,
ODP_IPV4_ADDR_SIZE);
else
@@ -1216,6 +1247,7 @@ static void ipsec_in_sa_info(ipsec_sa_t *ipsec_sa, odp_ipsec_sa_info_t *sa_info)
int odp_ipsec_sa_info(odp_ipsec_sa_t sa, odp_ipsec_sa_info_t *sa_info)
{
ipsec_sa_t *ipsec_sa;
+ odp_ipsec_sa_param_t *param;
ipsec_sa = _odp_ipsec_sa_entry_from_hdl(sa);
@@ -1223,13 +1255,46 @@ int odp_ipsec_sa_info(odp_ipsec_sa_t sa, odp_ipsec_sa_info_t *sa_info)
ODP_ASSERT(sa_info != NULL);
memset(sa_info, 0, sizeof(*sa_info));
-
- sa_info->param = ipsec_sa->param;
-
- if (ipsec_sa->param.dir == ODP_IPSEC_DIR_OUTBOUND)
- ipsec_out_sa_info(ipsec_sa, sa_info);
- else
+ param = &sa_info->param;
+
+ param->dir = ipsec_sa->inbound ? ODP_IPSEC_DIR_INBOUND :
+ ODP_IPSEC_DIR_OUTBOUND;
+ param->proto = ipsec_sa->proto;
+ param->mode = ipsec_sa->mode;
+
+ param->crypto.cipher_alg = ipsec_sa->sa_info.cipher_alg;
+ param->crypto.cipher_key.data = NULL;
+ param->crypto.cipher_key.length = ipsec_sa->sa_info.cipher_key_len;
+ param->crypto.cipher_key_extra.data = NULL;
+ param->crypto.cipher_key_extra.length = ipsec_sa->sa_info.cipher_key_extra_len;
+ param->crypto.auth_alg = ipsec_sa->sa_info.auth_alg;
+ param->crypto.auth_key.data = NULL;
+ param->crypto.auth_key.length = ipsec_sa->sa_info.auth_key_len;
+ param->crypto.auth_key_extra.data = NULL;
+ param->crypto.auth_key_extra.length = ipsec_sa->sa_info.auth_key_extra_len;
+ param->crypto.icv_len = ipsec_sa->sa_info.icv_len;
+
+ param->opt.esn = ipsec_sa->esn;
+ param->opt.udp_encap = ipsec_sa->udp_encap;
+ param->opt.copy_dscp = ipsec_sa->copy_dscp;
+ param->opt.copy_flabel = ipsec_sa->copy_flabel;
+ param->opt.copy_df = ipsec_sa->copy_df;
+ param->opt.dec_ttl = ipsec_sa->dec_ttl;
+
+ param->lifetime.soft_limit.bytes = ipsec_sa->soft_limit_bytes;
+ param->lifetime.soft_limit.packets = ipsec_sa->soft_limit_packets;
+ param->lifetime.hard_limit.bytes = ipsec_sa->hard_limit_bytes;
+ param->lifetime.hard_limit.packets = ipsec_sa->hard_limit_packets;
+
+ param->spi = ipsec_sa->spi;
+ param->dest_queue = ipsec_sa->queue;
+ param->context = ipsec_sa->context;
+ param->context_len = ipsec_sa->sa_info.context_len;
+
+ if (ipsec_sa->inbound)
ipsec_in_sa_info(ipsec_sa, sa_info);
+ else
+ ipsec_out_sa_info(ipsec_sa, sa_info);
return 0;
}
diff --git a/platform/linux-generic/odp_ishm.c b/platform/linux-generic/odp_ishm.c
index a10b9d5df..dea4d56f0 100644
--- a/platform/linux-generic/odp_ishm.c
+++ b/platform/linux-generic/odp_ishm.c
@@ -1071,7 +1071,8 @@ int _odp_ishm_reserve(const char *name, uint64_t size, int fd,
/* Get system page sizes: page_hp_size is 0 if no huge page available */
page_sz = odp_sys_page_size();
- page_hp_size = odp_sys_huge_page_size();
+ /* Use normal pages if ODP_SHM_NO_HP was used */
+ page_hp_size = (user_flags & ODP_SHM_NO_HP) ? 0 : odp_sys_huge_page_size();
/* grab a new entry: */
for (new_index = 0; new_index < ISHM_MAX_NB_BLOCKS; new_index++) {
diff --git a/platform/linux-generic/odp_packet.c b/platform/linux-generic/odp_packet.c
index ed5d81952..a15508ca2 100644
--- a/platform/linux-generic/odp_packet.c
+++ b/platform/linux-generic/odp_packet.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2013-2018, Linaro Limited
- * Copyright (c) 2019, Nokia
+ * Copyright (c) 2019-2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -21,6 +21,8 @@
#include <odp/api/plat/pktio_inlines.h>
#include <odp/api/proto_stats.h>
+#include <odp_event_internal.h>
+
/* Inlined API functions */
#include <odp/api/plat/event_inlines.h>
@@ -45,11 +47,11 @@ const _odp_packet_inline_offset_t _odp_packet_inline ODP_ALIGNED_CACHE = {
.frame_len = offsetof(odp_packet_hdr_t, frame_len),
.headroom = offsetof(odp_packet_hdr_t, headroom),
.tailroom = offsetof(odp_packet_hdr_t, tailroom),
- .pool = offsetof(odp_packet_hdr_t, buf_hdr.pool_ptr),
+ .pool = offsetof(odp_packet_hdr_t, event_hdr.pool_ptr),
.input = offsetof(odp_packet_hdr_t, input),
.seg_count = offsetof(odp_packet_hdr_t, seg_count),
- .user_ptr = offsetof(odp_packet_hdr_t, buf_hdr.user_ptr),
- .user_area = offsetof(odp_packet_hdr_t, buf_hdr.uarea_addr),
+ .user_ptr = offsetof(odp_packet_hdr_t, event_hdr.user_ptr),
+ .user_area = offsetof(odp_packet_hdr_t, event_hdr.uarea_addr),
.l2_offset = offsetof(odp_packet_hdr_t, p.l2_offset),
.l3_offset = offsetof(odp_packet_hdr_t, p.l3_offset),
.l4_offset = offsetof(odp_packet_hdr_t, p.l4_offset),
@@ -81,11 +83,6 @@ ODP_STATIC_ASSERT(ODP_TIMEOUT_INVALID == 0, "Timeout invalid not 0");
#pragma GCC diagnostic pop
#endif
-static inline odp_buffer_t packet_to_buffer(odp_packet_t pkt)
-{
- return (odp_buffer_t)pkt;
-}
-
static inline odp_packet_hdr_t *packet_seg_to_hdr(odp_packet_seg_t seg)
{
return (odp_packet_hdr_t *)(uintptr_t)seg;
@@ -144,7 +141,7 @@ static inline void *packet_tail(odp_packet_hdr_t *pkt_hdr)
static inline uint32_t seg_headroom(odp_packet_hdr_t *pkt_seg)
{
- odp_buffer_hdr_t *hdr = &pkt_seg->buf_hdr;
+ _odp_event_hdr_t *hdr = &pkt_seg->event_hdr;
pool_t *pool = hdr->pool_ptr;
uint8_t *base = hdr->base_data;
uint8_t *head = pkt_seg->seg_data;
@@ -154,7 +151,7 @@ static inline uint32_t seg_headroom(odp_packet_hdr_t *pkt_seg)
static inline uint32_t seg_tailroom(odp_packet_hdr_t *pkt_seg)
{
- odp_buffer_hdr_t *hdr = &pkt_seg->buf_hdr;
+ _odp_event_hdr_t *hdr = &pkt_seg->event_hdr;
uint8_t *tail = pkt_seg->seg_data + pkt_seg->seg_len;
return hdr->buf_end - tail;
@@ -196,9 +193,9 @@ static inline void packet_seg_copy_md(odp_packet_hdr_t *dst,
if (src->p.flags.payload_off)
dst->payload_offset = src->payload_offset;
- /* buffer header side packet metadata */
- dst->buf_hdr.user_ptr = src->buf_hdr.user_ptr;
- dst->buf_hdr.uarea_addr = src->buf_hdr.uarea_addr;
+ /* event header side packet metadata */
+ dst->event_hdr.user_ptr = src->event_hdr.user_ptr;
+ dst->event_hdr.uarea_addr = src->event_hdr.uarea_addr;
/* segmentation data is not copied:
* seg_next
@@ -314,13 +311,13 @@ static inline void link_segments(odp_packet_hdr_t *pkt_hdr[], int num)
int cur = 0;
odp_packet_hdr_t *hdr;
odp_packet_hdr_t *head = pkt_hdr[0];
- uint32_t seg_len = ((pool_t *)(head->buf_hdr.pool_ptr))->seg_len;
+ uint32_t seg_len = ((pool_t *)(head->event_hdr.pool_ptr))->seg_len;
while (1) {
- odp_buffer_hdr_t *buf_hdr = &pkt_hdr[cur]->buf_hdr;
+ _odp_event_hdr_t *event_hdr = &pkt_hdr[cur]->event_hdr;
hdr = pkt_hdr[cur];
- hdr->seg_data = buf_hdr->base_data;
+ hdr->seg_data = event_hdr->base_data;
hdr->seg_len = seg_len;
/* init_segments() handles first seg ref_cnt init */
@@ -328,7 +325,7 @@ static inline void link_segments(odp_packet_hdr_t *pkt_hdr[], int num)
uint32_t prev_ref;
odp_atomic_u32_t *ref_cnt;
- ref_cnt = &pkt_hdr[cur]->buf_hdr.ref_cnt;
+ ref_cnt = &pkt_hdr[cur]->event_hdr.ref_cnt;
prev_ref = odp_atomic_fetch_inc_u32(ref_cnt);
ODP_ASSERT(prev_ref == 0);
@@ -353,10 +350,10 @@ static inline void init_segments(odp_packet_hdr_t *pkt_hdr[], int num)
/* First segment is the packet descriptor */
hdr = pkt_hdr[0];
- seg_len = ((pool_t *)(hdr->buf_hdr.pool_ptr))->seg_len;
+ seg_len = ((pool_t *)(hdr->event_hdr.pool_ptr))->seg_len;
/* Defaults for single segment packet */
- hdr->seg_data = hdr->buf_hdr.base_data;
+ hdr->seg_data = hdr->event_hdr.base_data;
hdr->seg_len = seg_len;
hdr->seg_next = NULL;
@@ -364,7 +361,7 @@ static inline void init_segments(odp_packet_hdr_t *pkt_hdr[], int num)
if (ODP_DEBUG == 1) {
uint32_t prev_ref =
- odp_atomic_fetch_inc_u32(&hdr->buf_hdr.ref_cnt);
+ odp_atomic_fetch_inc_u32(&hdr->event_hdr.ref_cnt);
ODP_ASSERT(prev_ref == 0);
}
@@ -377,10 +374,10 @@ static inline void init_segments(odp_packet_hdr_t *pkt_hdr[], int num)
static inline void reset_segments(odp_packet_hdr_t *pkt_hdr)
{
void *base;
- uint32_t seg_len = ((pool_t *)(pkt_hdr->buf_hdr.pool_ptr))->seg_len;
+ uint32_t seg_len = ((pool_t *)(pkt_hdr->event_hdr.pool_ptr))->seg_len;
while (pkt_hdr != NULL) {
- base = pkt_hdr->buf_hdr.base_data;
+ base = pkt_hdr->event_hdr.base_data;
pkt_hdr->seg_len = seg_len;
pkt_hdr->seg_data = base;
@@ -417,11 +414,11 @@ static inline odp_packet_hdr_t *alloc_segments(pool_t *pool, int num)
odp_packet_hdr_t *pkt_hdr[num];
int ret;
- ret = _odp_buffer_alloc_multi(pool, (odp_buffer_hdr_t **)pkt_hdr, num);
+ ret = _odp_event_alloc_multi(pool, (_odp_event_hdr_t **)pkt_hdr, num);
if (odp_unlikely(ret != num)) {
if (ret > 0)
- _odp_buffer_free_multi((odp_buffer_hdr_t **)pkt_hdr, ret);
+ _odp_event_free_multi((_odp_event_hdr_t **)pkt_hdr, ret);
return NULL;
}
@@ -477,25 +474,25 @@ static inline odp_packet_hdr_t *add_segments(odp_packet_hdr_t *pkt_hdr,
return pkt_hdr;
}
-static inline void buffer_ref_inc(odp_buffer_hdr_t *buf_hdr)
+static inline void segment_ref_inc(_odp_event_hdr_t *event_hdr)
{
- uint32_t ref_cnt = odp_atomic_load_u32(&buf_hdr->ref_cnt);
+ uint32_t ref_cnt = odp_atomic_load_u32(&event_hdr->ref_cnt);
/* First count increment after alloc */
if (odp_likely(ref_cnt == 0))
- odp_atomic_store_u32(&buf_hdr->ref_cnt, 2);
+ odp_atomic_store_u32(&event_hdr->ref_cnt, 2);
else
- odp_atomic_inc_u32(&buf_hdr->ref_cnt);
+ odp_atomic_inc_u32(&event_hdr->ref_cnt);
}
-static inline uint32_t buffer_ref_dec(odp_buffer_hdr_t *buf_hdr)
+static inline uint32_t segment_ref_dec(_odp_event_hdr_t *event_hdr)
{
- return odp_atomic_fetch_dec_u32(&buf_hdr->ref_cnt);
+ return odp_atomic_fetch_dec_u32(&event_hdr->ref_cnt);
}
-static inline uint32_t buffer_ref(odp_buffer_hdr_t *buf_hdr)
+static inline uint32_t segment_ref(_odp_event_hdr_t *event_hdr)
{
- return odp_atomic_load_u32(&buf_hdr->ref_cnt);
+ return odp_atomic_load_u32(&event_hdr->ref_cnt);
}
static inline int is_multi_ref(uint32_t ref_cnt)
@@ -503,17 +500,7 @@ static inline int is_multi_ref(uint32_t ref_cnt)
return (ref_cnt > 1);
}
-static inline void packet_ref_inc(odp_packet_hdr_t *pkt_hdr)
-{
- odp_packet_hdr_t *hdr = pkt_hdr;
-
- while (hdr != NULL) {
- buffer_ref_inc(&hdr->buf_hdr);
- hdr = hdr->seg_next;
- }
-}
-
-static inline void packet_free_multi(odp_buffer_hdr_t *hdr[], int num)
+static inline void packet_free_multi(_odp_event_hdr_t *hdr[], int num)
{
int i;
uint32_t ref_cnt;
@@ -521,10 +508,10 @@ static inline void packet_free_multi(odp_buffer_hdr_t *hdr[], int num)
for (i = 0; i < num; i++) {
/* Zero when reference API has not been used */
- ref_cnt = buffer_ref(hdr[i]);
+ ref_cnt = segment_ref(hdr[i]);
if (odp_unlikely(ref_cnt)) {
- ref_cnt = buffer_ref_dec(hdr[i]);
+ ref_cnt = segment_ref_dec(hdr[i]);
if (is_multi_ref(ref_cnt)) {
num_ref++;
@@ -540,21 +527,21 @@ static inline void packet_free_multi(odp_buffer_hdr_t *hdr[], int num)
num -= num_ref;
if (odp_likely(num))
- _odp_buffer_free_multi(hdr, num);
+ _odp_event_free_multi(hdr, num);
}
static inline void free_all_segments(odp_packet_hdr_t *pkt_hdr, int num)
{
int i;
- odp_buffer_hdr_t *buf_hdr[num];
+ _odp_event_hdr_t *event_hdr[num];
odp_packet_hdr_t *seg_hdr = pkt_hdr;
for (i = 0; i < num; i++) {
- buf_hdr[i] = &seg_hdr->buf_hdr;
+ event_hdr[i] = &seg_hdr->event_hdr;
seg_hdr = seg_hdr->seg_next;
}
- packet_free_multi(buf_hdr, num);
+ packet_free_multi(event_hdr, num);
}
static inline odp_packet_hdr_t *free_segments(odp_packet_hdr_t *pkt_hdr,
@@ -566,14 +553,14 @@ static inline odp_packet_hdr_t *free_segments(odp_packet_hdr_t *pkt_hdr,
int num_remain = pkt_hdr->seg_count - num;
odp_packet_hdr_t *hdr = pkt_hdr;
odp_packet_hdr_t *last_hdr = packet_last_seg(pkt_hdr);
- odp_buffer_hdr_t *buf_hdr[num];
+ _odp_event_hdr_t *event_hdr[num];
if (head) {
odp_packet_hdr_t *new_hdr;
for (i = 0; i < num; i++) {
seg_hdr = packet_seg_step(&hdr);
- buf_hdr[i] = &seg_hdr->buf_hdr;
+ event_hdr[i] = &seg_hdr->event_hdr;
}
/* The first remaining header is the new packet descriptor.
@@ -597,7 +584,7 @@ static inline odp_packet_hdr_t *free_segments(odp_packet_hdr_t *pkt_hdr,
pkt_hdr = new_hdr;
- packet_free_multi(buf_hdr, num);
+ packet_free_multi(event_hdr, num);
} else {
/* Free last 'num' bufs.
* First, find the last remaining header. */
@@ -608,10 +595,10 @@ static inline odp_packet_hdr_t *free_segments(odp_packet_hdr_t *pkt_hdr,
for (i = 0; i < num; i++) {
seg_hdr = packet_seg_step(&hdr);
- buf_hdr[i] = &seg_hdr->buf_hdr;
+ event_hdr[i] = &seg_hdr->event_hdr;
}
- packet_free_multi(buf_hdr, num);
+ packet_free_multi(event_hdr, num);
/* Head segment remains, no need to copy or update majority
* of the metadata. */
@@ -637,8 +624,8 @@ static inline int packet_alloc(pool_t *pool, uint32_t len, int max_pkt,
odp_packet_hdr_t *hdr_next;
odp_packet_hdr_t *hdr;
- num_buf = _odp_buffer_alloc_multi(pool, (odp_buffer_hdr_t **)pkt_hdr,
- max_buf);
+ num_buf = _odp_event_alloc_multi(pool, (_odp_event_hdr_t **)pkt_hdr,
+ max_buf);
/* Failed to allocate all segments */
if (odp_unlikely(num_buf != max_buf)) {
@@ -648,10 +635,10 @@ static inline int packet_alloc(pool_t *pool, uint32_t len, int max_pkt,
num_free = num_buf - (num * num_seg);
if (num_free > 0) {
- odp_buffer_hdr_t **p;
+ _odp_event_hdr_t **p;
- p = (odp_buffer_hdr_t **)&pkt_hdr[num_buf - num_free];
- _odp_buffer_free_multi(p, num_free);
+ p = (_odp_event_hdr_t **)&pkt_hdr[num_buf - num_free];
+ _odp_event_free_multi(p, num_free);
}
if (num == 0)
@@ -744,12 +731,12 @@ void odp_packet_free(odp_packet_t pkt)
odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
int num_seg = pkt_hdr->seg_count;
- ODP_ASSERT(buffer_ref(&pkt_hdr->buf_hdr) > 0);
+ ODP_ASSERT(segment_ref(&pkt_hdr->event_hdr) > 0);
if (odp_likely(num_seg == 1)) {
- odp_buffer_hdr_t *buf_hdr = &pkt_hdr->buf_hdr;
+ _odp_event_hdr_t *event_hdr = &pkt_hdr->event_hdr;
- packet_free_multi(&buf_hdr, 1);
+ packet_free_multi(&event_hdr, 1);
} else {
free_all_segments(pkt_hdr, num_seg);
}
@@ -757,7 +744,7 @@ void odp_packet_free(odp_packet_t pkt)
void odp_packet_free_multi(const odp_packet_t pkt[], int num)
{
- odp_buffer_hdr_t *buf_hdr[num];
+ _odp_event_hdr_t *event_hdr[num];
int i;
int num_freed = 0;
@@ -765,7 +752,7 @@ void odp_packet_free_multi(const odp_packet_t pkt[], int num)
odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt[i]);
int num_seg = pkt_hdr->seg_count;
- ODP_ASSERT(buffer_ref(&pkt_hdr->buf_hdr) > 0);
+ ODP_ASSERT(segment_ref(&pkt_hdr->event_hdr) > 0);
if (odp_unlikely(num_seg > 1)) {
free_all_segments(pkt_hdr, num_seg);
@@ -773,11 +760,11 @@ void odp_packet_free_multi(const odp_packet_t pkt[], int num)
continue;
}
- buf_hdr[i - num_freed] = &pkt_hdr->buf_hdr;
+ event_hdr[i - num_freed] = &pkt_hdr->event_hdr;
}
if (odp_likely(num - num_freed))
- packet_free_multi(buf_hdr, num - num_freed);
+ packet_free_multi(event_hdr, num - num_freed);
}
void odp_packet_free_sp(const odp_packet_t pkt[], int num)
@@ -788,7 +775,7 @@ void odp_packet_free_sp(const odp_packet_t pkt[], int num)
int odp_packet_reset(odp_packet_t pkt, uint32_t len)
{
odp_packet_hdr_t *const pkt_hdr = packet_hdr(pkt);
- pool_t *pool = pkt_hdr->buf_hdr.pool_ptr;
+ pool_t *pool = pkt_hdr->event_hdr.pool_ptr;
int num = pkt_hdr->seg_count;
int num_req;
@@ -837,7 +824,7 @@ int odp_event_filter_packet(const odp_event_t event[],
uint32_t odp_packet_buf_len(odp_packet_t pkt)
{
odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
- pool_t *pool = pkt_hdr->buf_hdr.pool_ptr;
+ pool_t *pool = pkt_hdr->event_hdr.pool_ptr;
return pool->max_seg_len * pkt_hdr->seg_count;
}
@@ -869,7 +856,7 @@ int odp_packet_extend_head(odp_packet_t *pkt, uint32_t len,
int ret = 0;
if (len > headroom) {
- pool_t *pool = pkt_hdr->buf_hdr.pool_ptr;
+ pool_t *pool = pkt_hdr->event_hdr.pool_ptr;
int num;
void *ptr;
@@ -978,7 +965,7 @@ int odp_packet_extend_tail(odp_packet_t *pkt, uint32_t len,
ODP_ASSERT(odp_packet_has_ref(*pkt) == 0);
if (len > tailroom) {
- pool_t *pool = pkt_hdr->buf_hdr.pool_ptr;
+ pool_t *pool = pkt_hdr->event_hdr.pool_ptr;
int num;
void *ptr;
@@ -1089,7 +1076,7 @@ void odp_packet_user_ptr_set(odp_packet_t pkt, const void *ptr)
return;
}
- pkt_hdr->buf_hdr.user_ptr = ptr;
+ pkt_hdr->event_hdr.user_ptr = ptr;
pkt_hdr->p.flags.user_ptr_set = 1;
}
@@ -1222,7 +1209,7 @@ int odp_packet_add_data(odp_packet_t *pkt_ptr, uint32_t offset, uint32_t len)
odp_packet_t pkt = *pkt_ptr;
odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
uint32_t pktlen = pkt_hdr->frame_len;
- pool_t *pool = pkt_hdr->buf_hdr.pool_ptr;
+ pool_t *pool = pkt_hdr->event_hdr.pool_ptr;
odp_packet_t newpkt;
if (offset > pktlen)
@@ -1252,7 +1239,7 @@ int odp_packet_rem_data(odp_packet_t *pkt_ptr, uint32_t offset, uint32_t len)
odp_packet_t pkt = *pkt_ptr;
odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
uint32_t pktlen = pkt_hdr->frame_len;
- pool_t *pool = pkt_hdr->buf_hdr.pool_ptr;
+ pool_t *pool = pkt_hdr->event_hdr.pool_ptr;
odp_packet_t newpkt;
if (offset + len >= pktlen)
@@ -1284,7 +1271,7 @@ int odp_packet_align(odp_packet_t *pkt, uint32_t offset, uint32_t len,
uint32_t shift;
uint32_t seglen = 0; /* GCC */
odp_packet_hdr_t *pkt_hdr = packet_hdr(*pkt);
- pool_t *pool = pkt_hdr->buf_hdr.pool_ptr;
+ pool_t *pool = pkt_hdr->event_hdr.pool_ptr;
void *addr = packet_map(pkt_hdr, offset, &seglen, NULL);
uint64_t uaddr = (uint64_t)(uintptr_t)addr;
uint64_t misalign;
@@ -1326,8 +1313,8 @@ int odp_packet_concat(odp_packet_t *dst, odp_packet_t src)
{
odp_packet_hdr_t *dst_hdr = packet_hdr(*dst);
odp_packet_hdr_t *src_hdr = packet_hdr(src);
- pool_t *dst_pool = dst_hdr->buf_hdr.pool_ptr;
- pool_t *src_pool = src_hdr->buf_hdr.pool_ptr;
+ pool_t *dst_pool = dst_hdr->event_hdr.pool_ptr;
+ pool_t *src_pool = src_hdr->event_hdr.pool_ptr;
uint32_t dst_len = dst_hdr->frame_len;
uint32_t src_len = src_hdr->frame_len;
@@ -1589,8 +1576,8 @@ void odp_packet_print(odp_packet_t pkt)
odp_packet_hdr_t *hdr = packet_hdr(pkt);
len += snprintf(&str[len], n - len, "Packet\n------\n");
- len += snprintf(&str[len], n - len, " pool index %u\n", hdr->buf_hdr.index.pool);
- len += snprintf(&str[len], n - len, " buf index %u\n", hdr->buf_hdr.index.buffer);
+ len += snprintf(&str[len], n - len, " pool index %u\n", hdr->event_hdr.index.pool);
+ len += snprintf(&str[len], n - len, " buf index %u\n", hdr->event_hdr.index.buffer);
len += snprintf(&str[len], n - len, " ev subtype %i\n", hdr->subtype);
len += snprintf(&str[len], n - len, " input_flags 0x%" PRIx64 "\n",
hdr->p.input_flags.all);
@@ -1627,7 +1614,7 @@ void odp_packet_print(odp_packet_t pkt)
for (int seg_idx = 0; seg != ODP_PACKET_SEG_INVALID; seg_idx++) {
odp_packet_hdr_t *seg_hdr = packet_seg_to_hdr(seg);
- odp_buffer_hdr_t *buf_hdr = &seg_hdr->buf_hdr;
+ _odp_event_hdr_t *event_hdr = &seg_hdr->event_hdr;
char seg_str[max_len];
int str_len;
@@ -1637,7 +1624,7 @@ void odp_packet_print(odp_packet_t pkt)
seg_idx,
odp_packet_seg_data_len(pkt, seg),
odp_packet_seg_data(pkt, seg),
- buffer_ref(buf_hdr));
+ segment_ref(event_hdr));
/* Prevent print buffer overflow */
if (n - len - str_len < 10) {
@@ -1663,14 +1650,14 @@ void odp_packet_print_data(odp_packet_t pkt, uint32_t offset,
int len = 0;
int n = max_len - 1;
uint32_t data_len = odp_packet_len(pkt);
- pool_t *pool = hdr->buf_hdr.pool_ptr;
+ pool_t *pool = hdr->event_hdr.pool_ptr;
len += snprintf(&str[len], n - len, "Packet\n------\n");
len += snprintf(&str[len], n - len,
" pool index %" PRIu32 "\n", pool->pool_idx);
len += snprintf(&str[len], n - len,
" buf index %" PRIu32 "\n",
- hdr->buf_hdr.index.buffer);
+ hdr->event_hdr.index.buffer);
len += snprintf(&str[len], n - len,
" seg_count %" PRIu16 "\n", hdr->seg_count);
len += snprintf(&str[len], n - len,
@@ -1721,11 +1708,11 @@ int odp_packet_is_valid(odp_packet_t pkt)
if (pkt == ODP_PACKET_INVALID)
return 0;
- if (_odp_buffer_is_valid(packet_to_buffer(pkt)) == 0)
- return 0;
-
ev = odp_packet_to_event(pkt);
+ if (_odp_event_is_valid(ev) == 0)
+ return 0;
+
if (odp_event_type(ev) != ODP_EVENT_PACKET)
return 0;
@@ -1757,8 +1744,8 @@ int _odp_packet_copy_md_to_packet(odp_packet_t srcpkt, odp_packet_t dstpkt)
{
odp_packet_hdr_t *srchdr = packet_hdr(srcpkt);
odp_packet_hdr_t *dsthdr = packet_hdr(dstpkt);
- pool_t *src_pool = srchdr->buf_hdr.pool_ptr;
- pool_t *dst_pool = dsthdr->buf_hdr.pool_ptr;
+ pool_t *src_pool = srchdr->event_hdr.pool_ptr;
+ pool_t *dst_pool = dsthdr->event_hdr.pool_ptr;
uint32_t src_uarea_size = src_pool->param_uarea_size;
uint32_t dst_uarea_size = dst_pool->param_uarea_size;
@@ -1766,10 +1753,10 @@ int _odp_packet_copy_md_to_packet(odp_packet_t srcpkt, odp_packet_t dstpkt)
dsthdr->dst_queue = srchdr->dst_queue;
dsthdr->cos = srchdr->cos;
dsthdr->cls_mark = srchdr->cls_mark;
- dsthdr->buf_hdr.user_ptr = srchdr->buf_hdr.user_ptr;
- if (dsthdr->buf_hdr.uarea_addr != NULL &&
- srchdr->buf_hdr.uarea_addr != NULL) {
- memcpy(dsthdr->buf_hdr.uarea_addr, srchdr->buf_hdr.uarea_addr,
+ dsthdr->event_hdr.user_ptr = srchdr->event_hdr.user_ptr;
+ if (dsthdr->event_hdr.uarea_addr != NULL &&
+ srchdr->event_hdr.uarea_addr != NULL) {
+ memcpy(dsthdr->event_hdr.uarea_addr, srchdr->event_hdr.uarea_addr,
dst_uarea_size <= src_uarea_size ? dst_uarea_size :
src_uarea_size);
}
@@ -2790,7 +2777,10 @@ odp_packet_t odp_packet_ref_static(odp_packet_t pkt)
{
odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
- packet_ref_inc(pkt_hdr);
+ while (pkt_hdr != NULL) {
+ segment_ref_inc(&pkt_hdr->event_hdr);
+ pkt_hdr = pkt_hdr->seg_next;
+ }
return pkt;
}
@@ -2844,14 +2834,14 @@ odp_packet_t odp_packet_ref_pkt(odp_packet_t pkt, uint32_t offset,
int odp_packet_has_ref(odp_packet_t pkt)
{
- odp_buffer_hdr_t *buf_hdr;
+ _odp_event_hdr_t *event_hdr;
odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
uint32_t ref_cnt;
while (pkt_hdr != NULL) {
- buf_hdr = &pkt_hdr->buf_hdr;
+ event_hdr = &pkt_hdr->event_hdr;
- ref_cnt = buffer_ref(buf_hdr);
+ ref_cnt = segment_ref(event_hdr);
if (is_multi_ref(ref_cnt))
return 1;
@@ -3048,7 +3038,7 @@ static inline odp_packet_hdr_t *packet_buf_to_hdr(odp_packet_buf_t pkt_buf)
void *odp_packet_buf_head(odp_packet_buf_t pkt_buf)
{
odp_packet_hdr_t *pkt_hdr = packet_buf_to_hdr(pkt_buf);
- pool_t *pool = pkt_hdr->buf_hdr.pool_ptr;
+ pool_t *pool = pkt_hdr->event_hdr.pool_ptr;
uint32_t head_offset = sizeof(odp_packet_hdr_t) + pool->ext_param.pkt.app_header_size;
if (odp_unlikely(pool->pool_ext == 0)) {
@@ -3062,7 +3052,7 @@ void *odp_packet_buf_head(odp_packet_buf_t pkt_buf)
uint32_t odp_packet_buf_size(odp_packet_buf_t pkt_buf)
{
odp_packet_hdr_t *pkt_hdr = packet_buf_to_hdr(pkt_buf);
- pool_t *pool = pkt_hdr->buf_hdr.pool_ptr;
+ pool_t *pool = pkt_hdr->event_hdr.pool_ptr;
uint32_t head_offset = sizeof(odp_packet_hdr_t) + pool->ext_param.pkt.app_header_size;
return pool->ext_param.pkt.buf_size - head_offset;
@@ -3114,7 +3104,7 @@ uint32_t odp_packet_disassemble(odp_packet_t pkt, odp_packet_buf_t pkt_buf[], ui
uint32_t i;
odp_packet_seg_t seg;
odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
- pool_t *pool = pkt_hdr->buf_hdr.pool_ptr;
+ pool_t *pool = pkt_hdr->event_hdr.pool_ptr;
uint32_t num_segs = odp_packet_num_segs(pkt);
if (odp_unlikely(pool->type != ODP_POOL_PACKET)) {
diff --git a/platform/linux-generic/odp_packet_io.c b/platform/linux-generic/odp_packet_io.c
index bd8bb58e8..a3a3f1c19 100644
--- a/platform/linux-generic/odp_packet_io.c
+++ b/platform/linux-generic/odp_packet_io.c
@@ -61,9 +61,9 @@ static inline pktio_entry_t *pktio_entry_by_index(int index)
return _odp_pktio_entry_ptr[index];
}
-static inline odp_buffer_hdr_t *packet_vector_to_buf_hdr(odp_packet_vector_t pktv)
+static inline _odp_event_hdr_t *packet_vector_to_event_hdr(odp_packet_vector_t pktv)
{
- return &_odp_packet_vector_hdr(pktv)->buf_hdr;
+ return (_odp_event_hdr_t *)(uintptr_t)&_odp_packet_vector_hdr(pktv)->event_hdr;
}
static int read_config_file(pktio_global_t *pktio_glb)
@@ -825,13 +825,13 @@ static inline odp_packet_vector_t packet_vector_create(odp_packet_t packets[], u
}
static inline int pktin_recv_buf(pktio_entry_t *entry, int pktin_index,
- odp_buffer_hdr_t *buffer_hdrs[], int num)
+ _odp_event_hdr_t *event_hdrs[], int num)
{
odp_packet_t pkt;
odp_packet_t packets[num];
odp_packet_hdr_t *pkt_hdr;
odp_pool_t pool = ODP_POOL_INVALID;
- odp_buffer_hdr_t *buf_hdr;
+ _odp_event_hdr_t *event_hdr;
int i, pkts, num_rx, num_ev, num_dst;
odp_queue_t cur_queue;
odp_event_t ev[num];
@@ -860,7 +860,7 @@ static inline int pktin_recv_buf(pktio_entry_t *entry, int pktin_index,
for (i = 0; i < pkts; i++) {
pkt = packets[i];
pkt_hdr = packet_hdr(pkt);
- buf_hdr = packet_to_buf_hdr(pkt);
+ event_hdr = packet_to_event_hdr(pkt);
if (odp_unlikely(pkt_hdr->p.input_flags.dst_queue)) {
/* Sort events for enqueue multi operation(s) based on CoS
@@ -888,7 +888,7 @@ static inline int pktin_recv_buf(pktio_entry_t *entry, int pktin_index,
num_ev++;
continue;
}
- buffer_hdrs[num_rx++] = buf_hdr;
+ event_hdrs[num_rx++] = event_hdr;
}
/* Optimization for the common case */
@@ -897,13 +897,13 @@ static inline int pktin_recv_buf(pktio_entry_t *entry, int pktin_index,
return num_rx;
/* Create packet vector */
- odp_packet_vector_t pktv = packet_vector_create((odp_packet_t *)buffer_hdrs,
+ odp_packet_vector_t pktv = packet_vector_create((odp_packet_t *)event_hdrs,
num_rx, pool);
if (odp_unlikely(pktv == ODP_PACKET_VECTOR_INVALID))
return 0;
- buffer_hdrs[0] = packet_vector_to_buf_hdr(pktv);
+ event_hdrs[0] = packet_vector_to_event_hdr(pktv);
return 1;
}
@@ -975,15 +975,15 @@ static inline int packet_vector_send(odp_pktout_queue_t pktout_queue, odp_event_
return 0;
}
-static int pktout_enqueue(odp_queue_t queue, odp_buffer_hdr_t *buf_hdr)
+static int pktout_enqueue(odp_queue_t queue, _odp_event_hdr_t *event_hdr)
{
- odp_event_t event = odp_buffer_to_event(buf_from_buf_hdr(buf_hdr));
- odp_packet_t pkt = packet_from_buf_hdr(buf_hdr);
+ odp_event_t event = _odp_event_from_hdr(event_hdr);
+ odp_packet_t pkt = packet_from_event_hdr(event_hdr);
odp_pktout_queue_t pktout_queue;
int len = 1;
int nbr;
- if (_odp_sched_fn->ord_enq_multi(queue, (void **)buf_hdr, len, &nbr))
+ if (_odp_sched_fn->ord_enq_multi(queue, (void **)event_hdr, len, &nbr))
return (nbr == len ? 0 : -1);
pktout_queue = _odp_queue_fn->get_pktout(queue);
@@ -995,7 +995,7 @@ static int pktout_enqueue(odp_queue_t queue, odp_buffer_hdr_t *buf_hdr)
return (nbr == len ? 0 : -1);
}
-static int pktout_enq_multi(odp_queue_t queue, odp_buffer_hdr_t *buf_hdr[],
+static int pktout_enq_multi(odp_queue_t queue, _odp_event_hdr_t *event_hdr[],
int num)
{
odp_event_t event;
@@ -1005,18 +1005,18 @@ static int pktout_enq_multi(odp_queue_t queue, odp_buffer_hdr_t *buf_hdr[],
int nbr;
int i;
- if (_odp_sched_fn->ord_enq_multi(queue, (void **)buf_hdr, num, &nbr))
+ if (_odp_sched_fn->ord_enq_multi(queue, (void **)event_hdr, num, &nbr))
return nbr;
for (i = 0; i < num; ++i) {
- event = odp_buffer_to_event(buf_from_buf_hdr(buf_hdr[i]));
+ event = _odp_event_from_hdr(event_hdr[i]);
if (odp_event_type(event) == ODP_EVENT_PACKET_VECTOR) {
have_pktv = 1;
break;
}
- pkt_tbl[i] = packet_from_buf_hdr(buf_hdr[i]);
+ pkt_tbl[i] = packet_from_event_hdr(event_hdr[i]);
}
pktout_queue = _odp_queue_fn->get_pktout(queue);
@@ -1025,13 +1025,13 @@ static int pktout_enq_multi(odp_queue_t queue, odp_buffer_hdr_t *buf_hdr[],
return odp_pktout_send(pktout_queue, pkt_tbl, num);
for (i = 0; i < num; ++i) {
- event = odp_buffer_to_event(buf_from_buf_hdr(buf_hdr[i]));
+ event = _odp_event_from_hdr(event_hdr[i]);
if (odp_event_type(event) == ODP_EVENT_PACKET_VECTOR) {
if (odp_unlikely(packet_vector_send(pktout_queue, event)))
break;
} else {
- odp_packet_t pkt = packet_from_buf_hdr(buf_hdr[i]);
+ odp_packet_t pkt = packet_from_event_hdr(event_hdr[i]);
nbr = odp_pktout_send(pktout_queue, &pkt, 1);
if (odp_unlikely(nbr != 1))
@@ -1041,10 +1041,10 @@ static int pktout_enq_multi(odp_queue_t queue, odp_buffer_hdr_t *buf_hdr[],
return i;
}
-static odp_buffer_hdr_t *pktin_dequeue(odp_queue_t queue)
+static _odp_event_hdr_t *pktin_dequeue(odp_queue_t queue)
{
- odp_buffer_hdr_t *buf_hdr;
- odp_buffer_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
+ _odp_event_hdr_t *event_hdr;
+ _odp_event_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
int pkts;
odp_pktin_queue_t pktin_queue = _odp_queue_fn->get_pktin(queue);
odp_pktio_t pktio = pktin_queue.pktio;
@@ -1053,8 +1053,8 @@ static odp_buffer_hdr_t *pktin_dequeue(odp_queue_t queue)
ODP_ASSERT(entry != NULL);
- if (_odp_queue_fn->orig_deq_multi(queue, &buf_hdr, 1) == 1)
- return buf_hdr;
+ if (_odp_queue_fn->orig_deq_multi(queue, &event_hdr, 1) == 1)
+ return event_hdr;
pkts = pktin_recv_buf(entry, pktin_index, hdr_tbl, QUEUE_MULTI_MAX);
@@ -1074,19 +1074,19 @@ static odp_buffer_hdr_t *pktin_dequeue(odp_queue_t queue)
ODP_DBG("Interface %s dropped %i packets\n",
entry->s.name, num - num_enq);
- _odp_buffer_free_multi(&hdr_tbl[num_enq + 1], num - num_enq);
+ _odp_event_free_multi(&hdr_tbl[num_enq + 1], num - num_enq);
}
}
- buf_hdr = hdr_tbl[0];
- return buf_hdr;
+ event_hdr = hdr_tbl[0];
+ return event_hdr;
}
-static int pktin_deq_multi(odp_queue_t queue, odp_buffer_hdr_t *buf_hdr[],
+static int pktin_deq_multi(odp_queue_t queue, _odp_event_hdr_t *event_hdr[],
int num)
{
int nbr;
- odp_buffer_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
+ _odp_event_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
int pkts, i, j;
odp_pktin_queue_t pktin_queue = _odp_queue_fn->get_pktin(queue);
odp_pktio_t pktio = pktin_queue.pktio;
@@ -1095,7 +1095,7 @@ static int pktin_deq_multi(odp_queue_t queue, odp_buffer_hdr_t *buf_hdr[],
ODP_ASSERT(entry != NULL);
- nbr = _odp_queue_fn->orig_deq_multi(queue, buf_hdr, num);
+ nbr = _odp_queue_fn->orig_deq_multi(queue, event_hdr, num);
if (odp_unlikely(nbr > num))
ODP_ABORT("queue_deq_multi req: %d, returned %d\n", num, nbr);
@@ -1111,7 +1111,7 @@ static int pktin_deq_multi(odp_queue_t queue, odp_buffer_hdr_t *buf_hdr[],
return nbr;
for (i = 0; i < pkts && nbr < num; i++, nbr++)
- buf_hdr[nbr] = hdr_tbl[i];
+ event_hdr[nbr] = hdr_tbl[i];
/* Queue the rest for later */
for (j = 0; i < pkts; i++, j++)
@@ -1128,7 +1128,7 @@ static int pktin_deq_multi(odp_queue_t queue, odp_buffer_hdr_t *buf_hdr[],
ODP_DBG("Interface %s dropped %i packets\n",
entry->s.name, j - num_enq);
- _odp_buffer_free_multi(&buf_hdr[num_enq], j - num_enq);
+ _odp_event_free_multi(&event_hdr[num_enq], j - num_enq);
}
}
@@ -1225,7 +1225,7 @@ int _odp_sched_cb_pktin_poll_one(int pktio_index,
}
int _odp_sched_cb_pktin_poll(int pktio_index, int pktin_index,
- odp_buffer_hdr_t *hdr_tbl[], int num)
+ _odp_event_hdr_t *hdr_tbl[], int num)
{
pktio_entry_t *entry = pktio_entry_by_index(pktio_index);
int state = entry->s.state;
diff --git a/platform/linux-generic/odp_packet_vector.c b/platform/linux-generic/odp_packet_vector.c
index 6f0ee201a..66570739b 100644
--- a/platform/linux-generic/odp_packet_vector.c
+++ b/platform/linux-generic/odp_packet_vector.c
@@ -5,7 +5,6 @@
*/
#include <odp/api/align.h>
-#include <odp/api/buffer.h>
#include <odp/api/hints.h>
#include <odp/api/packet.h>
#include <odp/api/pool.h>
@@ -24,17 +23,21 @@
/* Packet vector header field offsets for inline functions */
const _odp_event_vector_inline_offset_t _odp_event_vector_inline ODP_ALIGNED_CACHE = {
.packet = offsetof(odp_event_vector_hdr_t, packet),
- .pool = offsetof(odp_event_vector_hdr_t, buf_hdr.pool_ptr),
+ .pool = offsetof(odp_event_vector_hdr_t, event_hdr.pool_ptr),
.size = offsetof(odp_event_vector_hdr_t, size)
};
#include <odp/visibility_end.h>
+static inline odp_event_vector_hdr_t *event_vector_hdr_from_event(odp_event_t event)
+{
+ return (odp_event_vector_hdr_t *)(uintptr_t)event;
+}
+
odp_packet_vector_t odp_packet_vector_alloc(odp_pool_t pool_hdl)
{
- odp_packet_vector_t pktv;
+ odp_event_t event;
pool_t *pool;
- int ret;
ODP_ASSERT(pool_hdl != ODP_POOL_INVALID);
@@ -42,12 +45,13 @@ odp_packet_vector_t odp_packet_vector_alloc(odp_pool_t pool_hdl)
ODP_ASSERT(pool->type == ODP_POOL_VECTOR);
- ret = _odp_buffer_alloc_multi(pool, (odp_buffer_hdr_t **)&pktv, 1);
+ event = _odp_event_alloc(pool);
+ if (odp_unlikely(event == ODP_EVENT_INVALID))
+ return ODP_PACKET_VECTOR_INVALID;
- if (odp_likely(ret == 1))
- return pktv;
+ ODP_ASSERT(event_vector_hdr_from_event(event)->size == 0);
- return ODP_PACKET_VECTOR_INVALID;
+ return odp_packet_vector_from_event(event);
}
void odp_packet_vector_free(odp_packet_vector_t pktv)
@@ -56,7 +60,7 @@ void odp_packet_vector_free(odp_packet_vector_t pktv)
pktv_hdr->size = 0;
- _odp_buffer_free_multi((odp_buffer_hdr_t **)&pktv_hdr, 1);
+ _odp_event_free(odp_packet_vector_to_event(pktv));
}
int odp_packet_vector_valid(odp_packet_vector_t pktv)
@@ -69,16 +73,16 @@ int odp_packet_vector_valid(odp_packet_vector_t pktv)
if (odp_unlikely(pktv == ODP_PACKET_VECTOR_INVALID))
return 0;
- if (_odp_buffer_is_valid((odp_buffer_t)pktv) == 0)
- return 0;
-
ev = odp_packet_vector_to_event(pktv);
+ if (_odp_event_is_valid(ev) == 0)
+ return 0;
+
if (odp_event_type(ev) != ODP_EVENT_PACKET_VECTOR)
return 0;
pktv_hdr = _odp_packet_vector_hdr(pktv);
- pool = pktv_hdr->buf_hdr.pool_ptr;
+ pool = pktv_hdr->event_hdr.pool_ptr;
if (odp_unlikely(pktv_hdr->size > pool->params.vector.max_size))
return 0;
diff --git a/platform/linux-generic/odp_pool.c b/platform/linux-generic/odp_pool.c
index 49d2e74f5..ca696fc96 100644
--- a/platform/linux-generic/odp_pool.c
+++ b/platform/linux-generic/odp_pool.c
@@ -24,6 +24,7 @@
#include <odp_shm_internal.h>
#include <odp_timer_internal.h>
#include <odp_event_vector_internal.h>
+#include <odp_buffer_internal.h>
#include <string.h>
#include <stdio.h>
@@ -79,9 +80,9 @@ static inline odp_pool_t pool_index_to_handle(uint32_t pool_idx)
static inline pool_t *pool_from_buf(odp_buffer_t buf)
{
- odp_buffer_hdr_t *buf_hdr = buf_hdl_to_hdr(buf);
+ odp_buffer_hdr_t *buf_hdr = _odp_buf_hdr(buf);
- return buf_hdr->pool_ptr;
+ return buf_hdr->event_hdr.pool_ptr;
}
static inline void cache_init(pool_cache_t *cache)
@@ -90,7 +91,7 @@ static inline void cache_init(pool_cache_t *cache)
}
static inline uint32_t cache_pop(pool_cache_t *cache,
- odp_buffer_hdr_t *buf_hdr[], int max_num)
+ _odp_event_hdr_t *event_hdr[], int max_num)
{
uint32_t cache_num = cache->cache_num;
uint32_t num_ch = max_num;
@@ -104,36 +105,36 @@ static inline uint32_t cache_pop(pool_cache_t *cache,
/* Get buffers from the cache */
cache_begin = cache_num - num_ch;
for (i = 0; i < num_ch; i++)
- buf_hdr[i] = cache->buf_hdr[cache_begin + i];
+ event_hdr[i] = cache->event_hdr[cache_begin + i];
cache->cache_num = cache_num - num_ch;
return num_ch;
}
-static inline void cache_push(pool_cache_t *cache, odp_buffer_hdr_t *buf_hdr[],
+static inline void cache_push(pool_cache_t *cache, _odp_event_hdr_t *event_hdr[],
uint32_t num)
{
uint32_t cache_num = cache->cache_num;
uint32_t i;
for (i = 0; i < num; i++)
- cache->buf_hdr[cache_num + i] = buf_hdr[i];
+ cache->event_hdr[cache_num + i] = event_hdr[i];
cache->cache_num = cache_num + num;
}
static void cache_flush(pool_cache_t *cache, pool_t *pool)
{
- odp_buffer_hdr_t *buf_hdr;
+ _odp_event_hdr_t *event_hdr;
ring_ptr_t *ring;
uint32_t mask;
ring = &pool->ring->hdr;
mask = pool->ring_mask;
- while (cache_pop(cache, &buf_hdr, 1))
- ring_ptr_enq(ring, mask, buf_hdr);
+ while (cache_pop(cache, &event_hdr, 1))
+ ring_ptr_enq(ring, mask, event_hdr);
}
static inline uint64_t cache_total_available(pool_t *pool)
@@ -299,6 +300,7 @@ int _odp_pool_init_global(void)
}
ODP_DBG("\nPool init global\n");
+ ODP_DBG(" event_hdr_t size %zu\n", sizeof(_odp_event_hdr_t));
ODP_DBG(" buffer_hdr_t size %zu\n", sizeof(odp_buffer_hdr_t));
ODP_DBG(" packet_hdr_t size %zu\n", sizeof(odp_packet_hdr_t));
ODP_DBG(" timeout_hdr_t size %zu\n", sizeof(odp_timeout_hdr_t));
@@ -393,7 +395,7 @@ static pool_t *reserve_pool(uint32_t shmflags, uint8_t pool_ext, uint32_t num)
/* Reserve memory for the ring, and for lookup table in case of pool ext */
mem_size = sizeof(pool_ring_t);
if (pool_ext)
- mem_size += num * sizeof(odp_buffer_hdr_t *);
+ mem_size += num * sizeof(_odp_event_hdr_t *);
shm = odp_shm_reserve(ring_name, mem_size, ODP_CACHE_LINE_SIZE, shmflags);
@@ -417,30 +419,32 @@ static pool_t *reserve_pool(uint32_t shmflags, uint8_t pool_ext, uint32_t num)
return NULL;
}
-static void init_buffer_hdr(pool_t *pool, odp_buffer_hdr_t *buf_hdr, uint32_t buf_index,
- uint32_t hdr_len, uint8_t *data_ptr, void *uarea)
+static void init_event_hdr(pool_t *pool, _odp_event_hdr_t *event_hdr, uint32_t buf_index,
+ uint32_t hdr_len, uint8_t *data_ptr, void *uarea)
{
odp_pool_type_t type = pool->type;
- memset(buf_hdr, 0, hdr_len);
+ memset(event_hdr, 0, hdr_len);
- /* Initialize buffer metadata */
- buf_hdr->index.u32 = 0;
- buf_hdr->index.pool = pool->pool_idx;
- buf_hdr->index.buffer = buf_index;
- buf_hdr->type = type;
- buf_hdr->event_type = type;
- buf_hdr->pool_ptr = pool;
- buf_hdr->uarea_addr = uarea;
- odp_atomic_init_u32(&buf_hdr->ref_cnt, 0);
+ /* Initialize common event metadata */
+ event_hdr->index.u32 = 0;
+ event_hdr->index.pool = pool->pool_idx;
+ event_hdr->index.buffer = buf_index;
+ event_hdr->type = type;
+ event_hdr->event_type = type;
+ event_hdr->pool_ptr = pool;
+ event_hdr->uarea_addr = uarea;
+ odp_atomic_init_u32(&event_hdr->ref_cnt, 0);
/* Store base values for fast init */
- buf_hdr->base_data = data_ptr;
- buf_hdr->buf_end = data_ptr + pool->seg_len + pool->tailroom;
+ if (type == ODP_POOL_BUFFER || type == ODP_POOL_PACKET) {
+ event_hdr->base_data = data_ptr;
+ event_hdr->buf_end = data_ptr + pool->seg_len + pool->tailroom;
+ }
/* Initialize segmentation metadata */
if (type == ODP_POOL_PACKET) {
- odp_packet_hdr_t *pkt_hdr = (void *)buf_hdr;
+ odp_packet_hdr_t *pkt_hdr = (void *)event_hdr;
pkt_hdr->seg_data = data_ptr;
pkt_hdr->seg_len = pool->seg_len;
@@ -450,22 +454,24 @@ static void init_buffer_hdr(pool_t *pool, odp_buffer_hdr_t *buf_hdr, uint32_t bu
/* Initialize event vector metadata */
if (type == ODP_POOL_VECTOR) {
- odp_event_vector_hdr_t *vect_hdr = (void *)buf_hdr;
+ odp_event_vector_hdr_t *vect_hdr = (void *)event_hdr;
- vect_hdr->size = 0;
- buf_hdr->event_type = ODP_EVENT_PACKET_VECTOR;
+ vect_hdr->size = 0;
+ event_hdr->event_type = ODP_EVENT_PACKET_VECTOR;
}
}
static void init_buffers(pool_t *pool)
{
uint64_t i;
+ _odp_event_hdr_t *event_hdr;
odp_buffer_hdr_t *buf_hdr;
odp_packet_hdr_t *pkt_hdr;
odp_shm_info_t shm_info;
void *addr;
void *uarea = NULL;
- uint8_t *data;
+ uint8_t *data = NULL;
+ uint8_t *data_ptr = NULL;
uint32_t offset, hdr_len;
ring_ptr_t *ring;
uint32_t mask;
@@ -486,6 +492,7 @@ static void init_buffers(pool_t *pool)
addr = &pool->base_addr[(i * pool->block_size) +
pool->block_offset];
+ event_hdr = addr;
buf_hdr = addr;
pkt_hdr = addr;
@@ -508,23 +515,34 @@ static void init_buffers(pool_t *pool)
if (pool->uarea_size)
uarea = &pool->uarea_base_addr[(i - skipped_blocks) *
pool->uarea_size];
- data = buf_hdr->data;
- if (type == ODP_POOL_PACKET)
- data = pkt_hdr->data;
+ /* Only buffers and packets have data pointer */
+ if (type == ODP_POOL_BUFFER || type == ODP_POOL_PACKET) {
+ if (type == ODP_POOL_BUFFER)
+ data = buf_hdr->data;
+ else
+ data = pkt_hdr->data;
- offset = pool->headroom;
+ offset = pool->headroom;
- /* move to correct align */
- while (((uintptr_t)&data[offset]) % pool->align != 0)
- offset++;
+ /* Move to correct align */
+ while (((uintptr_t)&data[offset]) % pool->align != 0)
+ offset++;
+
+ hdr_len = (uintptr_t)data - (uintptr_t)event_hdr;
+ data_ptr = &data[offset];
+ } else {
+ if (type == ODP_POOL_TIMEOUT)
+ hdr_len = sizeof(odp_timeout_hdr_t);
+ else
+ hdr_len = sizeof(odp_event_vector_hdr_t);
+ }
- hdr_len = (uintptr_t)data - (uintptr_t)buf_hdr;
- init_buffer_hdr(pool, buf_hdr, i, hdr_len, &data[offset], uarea);
+ init_event_hdr(pool, event_hdr, i, hdr_len, data_ptr, uarea);
/* Store buffer into the global pool */
if (!skip)
- ring_ptr_enq(ring, mask, buf_hdr);
+ ring_ptr_enq(ring, mask, event_hdr);
}
pool->skipped_blocks = skipped_blocks;
}
@@ -606,8 +624,10 @@ static int reserve_uarea(pool_t *pool, uint32_t uarea_size, uint32_t num_pkt, ui
return 0;
}
-static odp_pool_t pool_create(const char *name, const odp_pool_param_t *params,
- uint32_t shmflags)
+/* Create pool according to params. Actual type of the pool is type_2, which is recorded for pool
+ * info calls. */
+odp_pool_t _odp_pool_create(const char *name, const odp_pool_param_t *params,
+ odp_pool_type_t type_2)
{
pool_t *pool;
uint32_t uarea_size, headroom, tailroom;
@@ -616,11 +636,17 @@ static odp_pool_t pool_create(const char *name, const odp_pool_param_t *params,
uint32_t max_len, cache_size;
uint32_t ring_size;
odp_pool_type_t type = params->type;
+ uint32_t shmflags = 0;
uint32_t num_extra = 0;
const char *max_prefix = "pool_000_";
int max_prefix_len = strlen(max_prefix);
char shm_name[ODP_POOL_NAME_LEN + max_prefix_len];
+ if (type == ODP_POOL_PACKET)
+ shmflags = ODP_SHM_PROC;
+ if (odp_global_ro.shm_single_va)
+ shmflags |= ODP_SHM_SINGLE_VA;
+
align = 0;
if (type == ODP_POOL_PACKET) {
@@ -738,6 +764,7 @@ static odp_pool_t pool_create(const char *name, const odp_pool_param_t *params,
sprintf(shm_name, "pool_%03i_%s", pool->pool_idx, pool->name);
pool->type = type;
+ pool->type_2 = type_2;
pool->params = *params;
pool->block_offset = 0;
@@ -1019,17 +1046,10 @@ static int check_params(const odp_pool_param_t *params)
odp_pool_t odp_pool_create(const char *name, const odp_pool_param_t *params)
{
- uint32_t shm_flags = 0;
-
if (check_params(params))
return ODP_POOL_INVALID;
- if (params->type == ODP_POOL_PACKET)
- shm_flags = ODP_SHM_PROC;
- if (odp_global_ro.shm_single_va)
- shm_flags |= ODP_SHM_SINGLE_VA;
-
- return pool_create(name, params, shm_flags);
+ return _odp_pool_create(name, params, params->type);
}
int odp_pool_destroy(odp_pool_t pool_hdl)
@@ -1073,16 +1093,6 @@ int odp_pool_destroy(odp_pool_t pool_hdl)
return 0;
}
-odp_event_type_t _odp_buffer_event_type(odp_buffer_t buf)
-{
- return buf_hdl_to_hdr(buf)->event_type;
-}
-
-void _odp_buffer_event_type_set(odp_buffer_t buf, int ev)
-{
- buf_hdl_to_hdr(buf)->event_type = ev;
-}
-
odp_pool_t odp_pool_lookup(const char *name)
{
uint32_t i;
@@ -1112,11 +1122,17 @@ int odp_pool_info(odp_pool_t pool_hdl, odp_pool_info_t *info)
memset(info, 0, sizeof(odp_pool_info_t));
+ info->type = pool->type_2;
info->name = pool->name;
if (pool->pool_ext) {
info->pool_ext = 1;
info->pool_ext_param = pool->ext_param;
+
+ } else if (pool->type_2 == ODP_POOL_DMA_COMPL) {
+ info->dma_pool_param.num = pool->params.buf.num;
+ info->dma_pool_param.cache_size = pool->params.buf.cache_size;
+
} else {
info->params = pool->params;
}
@@ -1130,18 +1146,18 @@ int odp_pool_info(odp_pool_t pool_hdl, odp_pool_info_t *info)
return 0;
}
-int _odp_buffer_alloc_multi(pool_t *pool, odp_buffer_hdr_t *buf_hdr[], int max_num)
+int _odp_event_alloc_multi(pool_t *pool, _odp_event_hdr_t *event_hdr[], int max_num)
{
uint32_t pool_idx = pool->pool_idx;
pool_cache_t *cache = local.cache[pool_idx];
ring_ptr_t *ring;
- odp_buffer_hdr_t *hdr;
+ _odp_event_hdr_t *hdr;
uint32_t mask, num_ch, num_alloc, i;
uint32_t num_deq = 0;
uint32_t burst_size = pool->burst_size;
/* First pull packets from local cache */
- num_ch = cache_pop(cache, buf_hdr, max_num);
+ num_ch = cache_pop(cache, event_hdr, max_num);
if (CONFIG_POOL_STATISTICS && pool->params.stats.bit.cache_alloc_ops && num_ch)
odp_atomic_inc_u64(&pool->stats.cache_alloc_ops);
@@ -1155,7 +1171,7 @@ int _odp_buffer_alloc_multi(pool_t *pool, odp_buffer_hdr_t *buf_hdr[], int max_n
if (odp_unlikely(num_deq > burst_size))
burst = num_deq;
- odp_buffer_hdr_t *hdr_tmp[burst];
+ _odp_event_hdr_t *hdr_tmp[burst];
ring = &pool->ring->hdr;
mask = pool->ring_mask;
@@ -1180,7 +1196,7 @@ int _odp_buffer_alloc_multi(pool_t *pool, odp_buffer_hdr_t *buf_hdr[], int max_n
hdr = hdr_tmp[i];
odp_prefetch(hdr);
- buf_hdr[idx] = hdr;
+ event_hdr[idx] = hdr;
}
/* Cache possible extra buffers. Cache is currently empty. */
@@ -1193,8 +1209,8 @@ int _odp_buffer_alloc_multi(pool_t *pool, odp_buffer_hdr_t *buf_hdr[], int max_n
return num_alloc;
}
-static inline void buffer_free_to_pool(pool_t *pool,
- odp_buffer_hdr_t *buf_hdr[], int num)
+static inline void event_free_to_pool(pool_t *pool,
+ _odp_event_hdr_t *event_hdr[], int num)
{
uint32_t pool_idx = pool->pool_idx;
pool_cache_t *cache = local.cache[pool_idx];
@@ -1208,7 +1224,7 @@ static inline void buffer_free_to_pool(pool_t *pool,
ring = &pool->ring->hdr;
mask = pool->ring_mask;
- ring_ptr_enq_multi(ring, mask, (void **)buf_hdr, num);
+ ring_ptr_enq_multi(ring, mask, (void **)event_hdr, num);
if (CONFIG_POOL_STATISTICS && pool->params.stats.bit.free_ops)
odp_atomic_inc_u64(&pool->stats.free_ops);
@@ -1231,21 +1247,21 @@ static inline void buffer_free_to_pool(pool_t *pool,
if (odp_unlikely((uint32_t)num > cache_num))
burst = cache_num;
- odp_buffer_hdr_t *buf_hdr[burst];
+ _odp_event_hdr_t *event_hdr[burst];
- cache_pop(cache, buf_hdr, burst);
+ cache_pop(cache, event_hdr, burst);
- ring_ptr_enq_multi(ring, mask, (void **)buf_hdr, burst);
+ ring_ptr_enq_multi(ring, mask, (void **)event_hdr, burst);
if (CONFIG_POOL_STATISTICS && pool->params.stats.bit.free_ops)
odp_atomic_inc_u64(&pool->stats.free_ops);
}
- cache_push(cache, buf_hdr, num);
+ cache_push(cache, event_hdr, num);
if (CONFIG_POOL_STATISTICS && pool->params.stats.bit.cache_free_ops)
odp_atomic_inc_u64(&pool->stats.cache_free_ops);
}
-void _odp_buffer_free_multi(odp_buffer_hdr_t *buf_hdr[], int num_total)
+void _odp_event_free_multi(_odp_event_hdr_t *event_hdr[], int num_total)
{
pool_t *pool;
int num;
@@ -1255,18 +1271,18 @@ void _odp_buffer_free_multi(odp_buffer_hdr_t *buf_hdr[], int num_total)
while (1) {
num = 1;
i = 1;
- pool = buf_hdr[first]->pool_ptr;
+ pool = event_hdr[first]->pool_ptr;
/* 'num' buffers are from the same pool */
if (num_total > 1) {
for (i = first; i < num_total; i++)
- if (pool != buf_hdr[i]->pool_ptr)
+ if (pool != event_hdr[i]->pool_ptr)
break;
num = i - first;
}
- buffer_free_to_pool(pool, &buf_hdr[first], num);
+ event_free_to_pool(pool, &event_hdr[first], num);
if (i == num_total)
return;
@@ -1287,7 +1303,7 @@ odp_buffer_t odp_buffer_alloc(odp_pool_t pool_hdl)
ODP_ASSERT(pool->type == ODP_POOL_BUFFER);
- ret = _odp_buffer_alloc_multi(pool, (odp_buffer_hdr_t **)&buf, 1);
+ ret = _odp_event_alloc_multi(pool, (_odp_event_hdr_t **)&buf, 1);
if (odp_likely(ret == 1))
return buf;
@@ -1295,6 +1311,19 @@ odp_buffer_t odp_buffer_alloc(odp_pool_t pool_hdl)
return ODP_BUFFER_INVALID;
}
+odp_event_t _odp_event_alloc(pool_t *pool)
+{
+ odp_event_t event;
+ int ret;
+
+ ret = _odp_event_alloc_multi(pool, (_odp_event_hdr_t **)&event, 1);
+
+ if (odp_likely(ret == 1))
+ return event;
+
+ return ODP_EVENT_INVALID;
+}
+
int odp_buffer_alloc_multi(odp_pool_t pool_hdl, odp_buffer_t buf[], int num)
{
pool_t *pool;
@@ -1305,17 +1334,17 @@ int odp_buffer_alloc_multi(odp_pool_t pool_hdl, odp_buffer_t buf[], int num)
ODP_ASSERT(pool->type == ODP_POOL_BUFFER);
- return _odp_buffer_alloc_multi(pool, (odp_buffer_hdr_t **)buf, num);
+ return _odp_event_alloc_multi(pool, (_odp_event_hdr_t **)buf, num);
}
void odp_buffer_free(odp_buffer_t buf)
{
- _odp_buffer_free_multi((odp_buffer_hdr_t **)&buf, 1);
+ _odp_event_free_multi((_odp_event_hdr_t **)&buf, 1);
}
void odp_buffer_free_multi(const odp_buffer_t buf[], int num)
{
- _odp_buffer_free_multi((odp_buffer_hdr_t **)(uintptr_t)buf, num);
+ _odp_event_free_multi((_odp_event_hdr_t **)(uintptr_t)buf, num);
}
int odp_pool_capability(odp_pool_capability_t *capa)
@@ -1571,10 +1600,10 @@ int odp_pool_stats_reset(odp_pool_t pool_hdl)
return 0;
}
-static pool_t *find_pool(odp_buffer_hdr_t *buf_hdr)
+static pool_t *find_pool(_odp_event_hdr_t *event_hdr)
{
int i;
- uint8_t *ptr = (uint8_t *)buf_hdr;
+ uint8_t *ptr = (uint8_t *)event_hdr;
for (i = 0; i < ODP_CONFIG_POOLS; i++) {
pool_t *pool = pool_entry(i);
@@ -1589,23 +1618,23 @@ static pool_t *find_pool(odp_buffer_hdr_t *buf_hdr)
return NULL;
}
-int _odp_buffer_is_valid(odp_buffer_t buf)
+int _odp_event_is_valid(odp_event_t event)
{
pool_t *pool;
- odp_buffer_hdr_t *buf_hdr = buf_hdl_to_hdr(buf);
+ _odp_event_hdr_t *event_hdr = _odp_event_hdr(event);
- if (buf == ODP_BUFFER_INVALID)
+ if (event == ODP_EVENT_INVALID)
return 0;
/* Check that buffer header is from a known pool */
- pool = find_pool(buf_hdr);
+ pool = find_pool(event_hdr);
if (pool == NULL)
return 0;
- if (pool != buf_hdr->pool_ptr)
+ if (pool != event_hdr->pool_ptr)
return 0;
- if (buf_hdr->index.buffer >= (pool->num + pool->skipped_blocks))
+ if (event_hdr->index.buffer >= (pool->num + pool->skipped_blocks))
return 0;
return 1;
@@ -1613,7 +1642,7 @@ int _odp_buffer_is_valid(odp_buffer_t buf)
int odp_buffer_is_valid(odp_buffer_t buf)
{
- if (_odp_buffer_is_valid(buf) == 0)
+ if (_odp_event_is_valid(odp_buffer_to_event(buf)) == 0)
return 0;
if (odp_event_type(odp_buffer_to_event(buf)) != ODP_EVENT_BUFFER)
@@ -1793,7 +1822,7 @@ int odp_pool_ext_populate(odp_pool_t pool_hdl, void *buf[], uint32_t buf_size, u
uint32_t flags)
{
pool_t *pool;
- odp_buffer_hdr_t *buf_hdr;
+ _odp_event_hdr_t *event_hdr;
ring_ptr_t *ring;
uint32_t i, ring_mask, buf_index, head_offset;
uint32_t num_populated;
@@ -1841,14 +1870,14 @@ int odp_pool_ext_populate(odp_pool_t pool_hdl, void *buf[], uint32_t buf_size, u
head_offset = sizeof(odp_packet_hdr_t) + pool->ext_param.pkt.app_header_size;
for (i = 0; i < num; i++) {
- buf_hdr = buf[i];
+ event_hdr = buf[i];
- if ((uintptr_t)buf_hdr & (ODP_CACHE_LINE_SIZE - 1)) {
+ if ((uintptr_t)event_hdr & (ODP_CACHE_LINE_SIZE - 1)) {
ODP_ERR("Bad packet buffer align: buf[%u]\n", i);
return -1;
}
- if (((uintptr_t)buf_hdr + head_offset) & (MIN_HEAD_ALIGN - 1)) {
+ if (((uintptr_t)event_hdr + head_offset) & (MIN_HEAD_ALIGN - 1)) {
ODP_ERR("Bad head pointer align: buf[%u]\n", i);
return -1;
}
@@ -1856,12 +1885,12 @@ int odp_pool_ext_populate(odp_pool_t pool_hdl, void *buf[], uint32_t buf_size, u
if (pool->uarea_size)
uarea = &pool->uarea_base_addr[buf_index * pool->uarea_size];
- data_ptr = (uint8_t *)buf_hdr + head_offset + pool->headroom;
- init_buffer_hdr(pool, buf_hdr, buf_index, hdr_size, data_ptr, uarea);
- pool->ring->buf_hdr_by_index[buf_index] = buf_hdr;
+ data_ptr = (uint8_t *)event_hdr + head_offset + pool->headroom;
+ init_event_hdr(pool, event_hdr, buf_index, hdr_size, data_ptr, uarea);
+ pool->ring->event_hdr_by_index[buf_index] = event_hdr;
buf_index++;
- ring_ptr_enq(ring, ring_mask, buf_hdr);
+ ring_ptr_enq(ring, ring_mask, event_hdr);
}
pool->num_populated += num;
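The pool rework above replaces the buffer-specific odp_buffer_hdr_t with a common _odp_event_hdr_t that every event type (buffer, packet, timeout, vector) embeds as its first member, so pool and queue code can handle all events through one header type and cast to the specific header only where needed. A minimal sketch of the idiom, with the field set taken from init_event_hdr() and the exact layout and field widths as illustrative assumptions:

#include <stdint.h>

typedef struct _odp_event_hdr_t {
	union {
		uint32_t u32;
		struct {
			uint32_t pool:8;    /* owning pool index */
			uint32_t buffer:24; /* event index within pool */
		};
	} index;
	int8_t type;        /* pool type */
	int8_t event_type;  /* may differ, e.g. ODP_EVENT_PACKET_VECTOR */
	void *pool_ptr;     /* owning pool */
	void *uarea_addr;   /* user area, or NULL */
	uint8_t *base_data; /* buffers and packets only */
	uint8_t *buf_end;   /* ref_cnt (odp_atomic_u32_t) omitted here */
} _odp_event_hdr_t;

/* Type-specific headers embed the common header first, which is what
 * makes the (void *)event_hdr casts in init_event_hdr() well defined. */
typedef struct {
	_odp_event_hdr_t event_hdr;
	uint8_t *seg_data;
	uint32_t seg_len;
} packet_hdr_sketch_t;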
diff --git a/platform/linux-generic/odp_queue_basic.c b/platform/linux-generic/odp_queue_basic.c
index fe4d90930..fe523cc62 100644
--- a/platform/linux-generic/odp_queue_basic.c
+++ b/platform/linux-generic/odp_queue_basic.c
@@ -1,4 +1,5 @@
/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -9,8 +10,6 @@
#include <odp_queue_if.h>
#include <odp/api/std_types.h>
#include <odp/api/align.h>
-#include <odp/api/buffer.h>
-#include <odp_buffer_internal.h>
#include <odp_pool_internal.h>
#include <odp_init_internal.h>
#include <odp_timer_internal.h>
@@ -27,6 +26,8 @@
#include <odp_libconfig_internal.h>
#include <odp/api/plat/queue_inline_types.h>
#include <odp_global_data.h>
+#include <odp_queue_basic_internal.h>
+#include <odp_event_internal.h>
#include <odp/api/plat/ticketlock_inlines.h>
#define LOCK(queue_ptr) odp_ticketlock_lock(&((queue_ptr)->s.lock))
@@ -477,81 +478,81 @@ static odp_queue_t queue_lookup(const char *name)
return ODP_QUEUE_INVALID;
}
-static inline void buffer_index_from_buf(uint32_t buffer_index[],
- odp_buffer_hdr_t *buf_hdr[], int num)
+static inline void event_index_from_hdr(uint32_t event_index[],
+ _odp_event_hdr_t *event_hdr[], int num)
{
int i;
for (i = 0; i < num; i++)
- buffer_index[i] = buf_hdr[i]->index.u32;
+ event_index[i] = event_hdr[i]->index.u32;
}
-static inline void buffer_index_to_buf(odp_buffer_hdr_t *buf_hdr[],
- uint32_t buffer_index[], int num)
+static inline void event_index_to_hdr(_odp_event_hdr_t *event_hdr[],
+ uint32_t event_index[], int num)
{
int i;
for (i = 0; i < num; i++) {
- buf_hdr[i] = buf_hdr_from_index_u32(buffer_index[i]);
- odp_prefetch(buf_hdr[i]);
+ event_hdr[i] = _odp_event_hdr_from_index_u32(event_index[i]);
+ odp_prefetch(event_hdr[i]);
}
}
static inline int _plain_queue_enq_multi(odp_queue_t handle,
- odp_buffer_hdr_t *buf_hdr[], int num)
+ _odp_event_hdr_t *event_hdr[], int num)
{
queue_entry_t *queue;
int ret, num_enq;
ring_mpmc_t *ring_mpmc;
- uint32_t buf_idx[num];
+ uint32_t event_idx[num];
queue = qentry_from_handle(handle);
ring_mpmc = &queue->s.ring_mpmc;
- if (_odp_sched_fn->ord_enq_multi(handle, (void **)buf_hdr, num, &ret))
+ if (_odp_sched_fn->ord_enq_multi(handle, (void **)event_hdr, num, &ret))
return ret;
- buffer_index_from_buf(buf_idx, buf_hdr, num);
+ event_index_from_hdr(event_idx, event_hdr, num);
num_enq = ring_mpmc_enq_multi(ring_mpmc, queue->s.ring_data,
- queue->s.ring_mask, buf_idx, num);
+ queue->s.ring_mask, event_idx, num);
return num_enq;
}
static inline int _plain_queue_deq_multi(odp_queue_t handle,
- odp_buffer_hdr_t *buf_hdr[], int num)
+ _odp_event_hdr_t *event_hdr[], int num)
{
int num_deq;
queue_entry_t *queue;
ring_mpmc_t *ring_mpmc;
- uint32_t buf_idx[num];
+ uint32_t event_idx[num];
queue = qentry_from_handle(handle);
ring_mpmc = &queue->s.ring_mpmc;
num_deq = ring_mpmc_deq_multi(ring_mpmc, queue->s.ring_data,
- queue->s.ring_mask, buf_idx, num);
+ queue->s.ring_mask, event_idx, num);
if (num_deq == 0)
return 0;
- buffer_index_to_buf(buf_hdr, buf_idx, num_deq);
+ event_index_to_hdr(event_hdr, event_idx, num_deq);
return num_deq;
}
static int plain_queue_enq_multi(odp_queue_t handle,
- odp_buffer_hdr_t *buf_hdr[], int num)
+ _odp_event_hdr_t *event_hdr[], int num)
{
- return _plain_queue_enq_multi(handle, buf_hdr, num);
+ return _plain_queue_enq_multi(handle, event_hdr, num);
}
-static int plain_queue_enq(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr)
+static int plain_queue_enq(odp_queue_t handle, _odp_event_hdr_t *event_hdr)
{
int ret;
- ret = _plain_queue_enq_multi(handle, &buf_hdr, 1);
+ ret = _plain_queue_enq_multi(handle, &event_hdr, 1);
if (ret == 1)
return 0;
@@ -560,27 +561,27 @@ static int plain_queue_enq(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr)
}
static int plain_queue_deq_multi(odp_queue_t handle,
- odp_buffer_hdr_t *buf_hdr[], int num)
+ _odp_event_hdr_t *event_hdr[], int num)
{
- return _plain_queue_deq_multi(handle, buf_hdr, num);
+ return _plain_queue_deq_multi(handle, event_hdr, num);
}
-static odp_buffer_hdr_t *plain_queue_deq(odp_queue_t handle)
+static _odp_event_hdr_t *plain_queue_deq(odp_queue_t handle)
{
- odp_buffer_hdr_t *buf_hdr = NULL;
+ _odp_event_hdr_t *event_hdr = NULL;
int ret;
- ret = _plain_queue_deq_multi(handle, &buf_hdr, 1);
+ ret = _plain_queue_deq_multi(handle, &event_hdr, 1);
if (ret == 1)
- return buf_hdr;
+ return event_hdr;
else
return NULL;
}
-static int error_enqueue(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr)
+static int error_enqueue(odp_queue_t handle, _odp_event_hdr_t *event_hdr)
{
- (void)buf_hdr;
+ (void)event_hdr;
ODP_ERR("Enqueue not supported (0x%" PRIx64 ")\n",
odp_queue_to_u64(handle));
@@ -589,9 +590,9 @@ static int error_enqueue(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr)
}
static int error_enqueue_multi(odp_queue_t handle,
- odp_buffer_hdr_t *buf_hdr[], int num)
+ _odp_event_hdr_t *event_hdr[], int num)
{
- (void)buf_hdr;
+ (void)event_hdr;
(void)num;
ODP_ERR("Enqueue multi not supported (0x%" PRIx64 ")\n",
@@ -600,7 +601,7 @@ static int error_enqueue_multi(odp_queue_t handle,
return -1;
}
-static odp_buffer_hdr_t *error_dequeue(odp_queue_t handle)
+static _odp_event_hdr_t *error_dequeue(odp_queue_t handle)
{
ODP_ERR("Dequeue not supported (0x%" PRIx64 ")\n",
odp_queue_to_u64(handle));
@@ -609,9 +610,9 @@ static odp_buffer_hdr_t *error_dequeue(odp_queue_t handle)
}
static int error_dequeue_multi(odp_queue_t handle,
- odp_buffer_hdr_t *buf_hdr[], int num)
+ _odp_event_hdr_t *event_hdr[], int num)
{
- (void)buf_hdr;
+ (void)event_hdr;
(void)num;
ODP_ERR("Dequeue multi not supported (0x%" PRIx64 ")\n",
@@ -880,27 +881,27 @@ static void queue_print_all(void)
}
static inline int _sched_queue_enq_multi(odp_queue_t handle,
- odp_buffer_hdr_t *buf_hdr[], int num)
+ _odp_event_hdr_t *event_hdr[], int num)
{
int sched = 0;
int ret;
queue_entry_t *queue;
int num_enq;
ring_st_t *ring_st;
- uint32_t buf_idx[num];
+ uint32_t event_idx[num];
queue = qentry_from_handle(handle);
ring_st = &queue->s.ring_st;
- if (_odp_sched_fn->ord_enq_multi(handle, (void **)buf_hdr, num, &ret))
+ if (_odp_sched_fn->ord_enq_multi(handle, (void **)event_hdr, num, &ret))
return ret;
- buffer_index_from_buf(buf_idx, buf_hdr, num);
+ event_index_from_hdr(event_idx, event_hdr, num);
LOCK(queue);
num_enq = ring_st_enq_multi(ring_st, queue->s.ring_data,
- queue->s.ring_mask, buf_idx, num);
+ queue->s.ring_mask, event_idx, num);
if (odp_unlikely(num_enq == 0)) {
UNLOCK(queue);
@@ -927,7 +928,7 @@ int _odp_sched_queue_deq(uint32_t queue_index, odp_event_t ev[], int max_num,
int num_deq, status;
ring_st_t *ring_st;
queue_entry_t *queue = qentry_from_index(queue_index);
- uint32_t buf_idx[max_num];
+ uint32_t event_idx[max_num];
ring_st = &queue->s.ring_st;
@@ -948,7 +949,7 @@ int _odp_sched_queue_deq(uint32_t queue_index, odp_event_t ev[], int max_num,
}
num_deq = ring_st_deq_multi(ring_st, queue->s.ring_data,
- queue->s.ring_mask, buf_idx, max_num);
+ queue->s.ring_mask, event_idx, max_num);
if (num_deq == 0) {
/* Already empty queue */
@@ -962,22 +963,22 @@ int _odp_sched_queue_deq(uint32_t queue_index, odp_event_t ev[], int max_num,
UNLOCK(queue);
- buffer_index_to_buf((odp_buffer_hdr_t **)ev, buf_idx, num_deq);
+ event_index_to_hdr((_odp_event_hdr_t **)ev, event_idx, num_deq);
return num_deq;
}
static int sched_queue_enq_multi(odp_queue_t handle,
- odp_buffer_hdr_t *buf_hdr[], int num)
+ _odp_event_hdr_t *event_hdr[], int num)
{
- return _sched_queue_enq_multi(handle, buf_hdr, num);
+ return _sched_queue_enq_multi(handle, event_hdr, num);
}
-static int sched_queue_enq(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr)
+static int sched_queue_enq(odp_queue_t handle, _odp_event_hdr_t *event_hdr)
{
int ret;
- ret = _sched_queue_enq_multi(handle, &buf_hdr, 1);
+ ret = _sched_queue_enq_multi(handle, &event_hdr, 1);
if (ret == 1)
return 0;
@@ -1157,11 +1158,11 @@ static void queue_set_enq_deq_func(odp_queue_t handle,
}
static int queue_orig_multi(odp_queue_t handle,
- odp_buffer_hdr_t **buf_hdr, int num)
+ _odp_event_hdr_t **event_hdr, int num)
{
queue_entry_t *queue = qentry_from_handle(handle);
- return queue->s.orig_dequeue_multi(handle, buf_hdr, num);
+ return queue->s.orig_dequeue_multi(handle, event_hdr, num);
}
static int queue_api_enq_multi(odp_queue_t handle,
@@ -1176,7 +1177,7 @@ static int queue_api_enq_multi(odp_queue_t handle,
num = QUEUE_MULTI_MAX;
return queue->s.enqueue_multi(handle,
- (odp_buffer_hdr_t **)(uintptr_t)ev, num);
+ (_odp_event_hdr_t **)(uintptr_t)ev, num);
}
static void queue_timer_add(odp_queue_t handle)
@@ -1198,7 +1199,7 @@ static int queue_api_enq(odp_queue_t handle, odp_event_t ev)
queue_entry_t *queue = qentry_from_handle(handle);
return queue->s.enqueue(handle,
- (odp_buffer_hdr_t *)(uintptr_t)ev);
+ (_odp_event_hdr_t *)(uintptr_t)ev);
}
static int queue_api_deq_multi(odp_queue_t handle, odp_event_t ev[], int num)
@@ -1209,7 +1210,7 @@ static int queue_api_deq_multi(odp_queue_t handle, odp_event_t ev[], int num)
if (num > QUEUE_MULTI_MAX)
num = QUEUE_MULTI_MAX;
- ret = queue->s.dequeue_multi(handle, (odp_buffer_hdr_t **)ev, num);
+ ret = queue->s.dequeue_multi(handle, (_odp_event_hdr_t **)ev, num);
if (odp_global_rw->inline_timers &&
odp_atomic_load_u64(&queue->s.num_timers))
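As before, the basic queue rings store packed 32-bit event indices rather than 64-bit pointers, halving ring memory; only the helper names move from buffer to event terms. A self-contained sketch of the round-trip, with pool_t reduced to the two fields used in init_buffers() above and the 8/24 field split assumed from the header sketch earlier:

#include <stdint.h>

typedef struct {
	uint8_t *base_addr;  /* start of the event block array */
	uint32_t block_size; /* bytes per event block */
} pool_t;

extern pool_t *pool_entry(uint32_t pool_idx); /* real helper is internal */

static inline uint32_t event_index(const _odp_event_hdr_t *hdr)
{
	return hdr->index.u32; /* packed once in init_event_hdr() */
}

static inline _odp_event_hdr_t *event_hdr_from_index(uint32_t u32)
{
	uint32_t pool_id = u32 >> 24;     /* assumed field split */
	uint32_t buf_id = u32 & 0xffffff;
	pool_t *pool = pool_entry(pool_id);

	/* block_offset is omitted for brevity */
	return (_odp_event_hdr_t *)(uintptr_t)
		&pool->base_addr[(uint64_t)buf_id * pool->block_size];
}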
diff --git a/platform/linux-generic/odp_queue_lf.c b/platform/linux-generic/odp_queue_lf.c
index f1b265970..f5c3830e8 100644
--- a/platform/linux-generic/odp_queue_lf.c
+++ b/platform/linux-generic/odp_queue_lf.c
@@ -1,4 +1,5 @@
/* Copyright (c) 2018-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -8,12 +9,14 @@
#include <odp/api/atomic.h>
#include <odp/api/plat/atomic_inlines.h>
#include <odp/api/shared_memory.h>
+
+#include <odp_debug_internal.h>
+#include <odp_event_internal.h>
#include <odp_queue_basic_internal.h>
+
#include <string.h>
#include <stdio.h>
-#include <odp_debug_internal.h>
-
#define RING_LF_SIZE 32
#define QUEUE_LF_NUM 128
#define ENQ_RETRIES (RING_LF_SIZE / 4)
@@ -112,7 +115,7 @@ static inline int next_idx(int idx)
return next;
}
-static int queue_lf_enq(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr)
+static int queue_lf_enq(odp_queue_t handle, _odp_event_hdr_t *event_hdr)
{
queue_entry_t *queue;
queue_lf_t *queue_lf;
@@ -125,7 +128,7 @@ static int queue_lf_enq(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr)
queue = qentry_from_handle(handle);
queue_lf = queue->s.queue_lf;
- new_val.s.ptr = (uintptr_t)buf_hdr;
+ new_val.s.ptr = (uintptr_t)event_hdr;
new_val.s.counter = odp_atomic_fetch_inc_u64(&queue_lf->enq_counter);
idx = 0;
@@ -159,18 +162,18 @@ static int queue_lf_enq(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr)
return -1;
}
-static int queue_lf_enq_multi(odp_queue_t handle, odp_buffer_hdr_t **buf_hdr,
+static int queue_lf_enq_multi(odp_queue_t handle, _odp_event_hdr_t **event_hdr,
int num)
{
(void)num;
- if (queue_lf_enq(handle, buf_hdr[0]) == 0)
+ if (queue_lf_enq(handle, event_hdr[0]) == 0)
return 1;
return 0;
}
-static odp_buffer_hdr_t *queue_lf_deq(odp_queue_t handle)
+static _odp_event_hdr_t *queue_lf_deq(odp_queue_t handle)
{
queue_entry_t *queue;
queue_lf_t *queue_lf;
@@ -179,7 +182,7 @@ static odp_buffer_hdr_t *queue_lf_deq(odp_queue_t handle)
ring_lf_node_t node_val, old_val, new_val;
ring_lf_node_t *node, *old;
uint64_t lowest, counter;
- odp_buffer_hdr_t *buf_hdr;
+ _odp_event_hdr_t *event_hdr;
queue = qentry_from_handle(handle);
queue_lf = queue->s.queue_lf;
@@ -226,21 +229,21 @@ static odp_buffer_hdr_t *queue_lf_deq(odp_queue_t handle)
}
}
- buf_hdr = (void *)(uintptr_t)old_val.s.ptr;
+ event_hdr = (void *)(uintptr_t)old_val.s.ptr;
/* Try to remove data */
if (lockfree_cas_acq_rel_u128(&old->u128, old_val.u128,
new_val.u128))
- return buf_hdr;
+ return event_hdr;
}
return NULL;
}
-static int queue_lf_deq_multi(odp_queue_t handle, odp_buffer_hdr_t **buf_hdr,
+static int queue_lf_deq_multi(odp_queue_t handle, _odp_event_hdr_t **event_hdr,
int num)
{
- odp_buffer_hdr_t *buf;
+ _odp_event_hdr_t *buf;
(void)num;
@@ -249,7 +252,7 @@ static int queue_lf_deq_multi(odp_queue_t handle, odp_buffer_hdr_t **buf_hdr,
if (buf == NULL)
return 0;
- buf_hdr[0] = buf;
+ event_hdr[0] = buf;
return 1;
}
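queue_lf_enq() and queue_lf_deq() above work on 16-byte {counter, pointer} ring nodes updated with a 128-bit CAS; the counter taken from the shared enq_counter both orders dequeues (lowest counter wins) and prevents ABA reuse of a pointer slot. A standalone sketch of the node and CAS, assuming a target with 16-byte atomics (the lockfree_cas_acq_rel_u128() case) and GCC/Clang __atomic builtins:

#include <stdint.h>

typedef union {
	struct {
		uint64_t counter; /* global enqueue order; breaks ABA */
		uint64_t ptr;     /* _odp_event_hdr_t address */
	} s;
	unsigned __int128 u128; /* forces 16-byte size and alignment */
} lf_node_t;

/* Strong acquire-release CAS as used when removing a node */
static inline int lf_cas_acq_rel(lf_node_t *node, lf_node_t *old_val,
				 lf_node_t new_val)
{
	return __atomic_compare_exchange_n(&node->u128, &old_val->u128,
					   new_val.u128, 0 /* strong */,
					   __ATOMIC_ACQ_REL,
					   __ATOMIC_ACQUIRE);
}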
diff --git a/platform/linux-generic/odp_queue_scalable.c b/platform/linux-generic/odp_queue_scalable.c
index 916cb6739..bf5489516 100644
--- a/platform/linux-generic/odp_queue_scalable.c
+++ b/platform/linux-generic/odp_queue_scalable.c
@@ -20,6 +20,7 @@
#include <odp_config_internal.h>
#include <odp_debug_internal.h>
+#include <odp_event_internal.h>
#include <odp_packet_io_internal.h>
#include <odp_pool_internal.h>
#include <odp_queue_scalable_internal.h>
@@ -48,11 +49,11 @@ typedef struct queue_table_t {
static queue_table_t *queue_tbl;
static _odp_ishm_pool_t *queue_shm_pool;
-static int _queue_enq(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr);
-static odp_buffer_hdr_t *_queue_deq(odp_queue_t handle);
-static int _queue_enq_multi(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr[],
+static int _queue_enq(odp_queue_t handle, _odp_event_hdr_t *event_hdr);
+static _odp_event_hdr_t *_queue_deq(odp_queue_t handle);
+static int _queue_enq_multi(odp_queue_t handle, _odp_event_hdr_t *event_hdr[],
int num);
-static int _queue_deq_multi(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr[],
+static int _queue_deq_multi(odp_queue_t handle, _odp_event_hdr_t *event_hdr[],
int num);
static queue_entry_t *get_qentry(uint32_t queue_id)
@@ -99,7 +100,7 @@ static int queue_init(queue_entry_t *queue, const char *name,
ringidx_t ring_idx;
sched_elem_t *sched_elem;
uint32_t ring_size;
- odp_buffer_hdr_t **ring;
+ _odp_event_hdr_t **ring;
uint32_t size;
sched_elem = &queue->s.sched_elem;
@@ -109,8 +110,8 @@ static int queue_init(queue_entry_t *queue, const char *name,
queue->s.name[ODP_QUEUE_NAME_LEN - 1] = 0;
memcpy(&queue->s.param, param, sizeof(odp_queue_param_t));
- size = ring_size * sizeof(odp_buffer_hdr_t *);
- ring = (odp_buffer_hdr_t **)shm_pool_alloc_align(queue_shm_pool, size);
+ size = ring_size * sizeof(_odp_event_hdr_t *);
+ ring = (_odp_event_hdr_t **)shm_pool_alloc_align(queue_shm_pool, size);
if (NULL == ring)
return -1;
@@ -207,13 +208,13 @@ static int queue_init_global(void)
pool_size = sizeof(queue_table_t);
/* Add storage required for queues */
pool_size += (CONFIG_SCAL_QUEUE_SIZE *
- sizeof(odp_buffer_hdr_t *)) * CONFIG_MAX_QUEUES;
+ sizeof(_odp_event_hdr_t *)) * CONFIG_MAX_QUEUES;
/* Add the reorder window size */
pool_size += sizeof(reorder_window_t) * CONFIG_MAX_QUEUES;
/* Choose min_alloc and max_alloc such that buddy allocator is selected. */
min_alloc = 0;
- max_alloc = CONFIG_SCAL_QUEUE_SIZE * sizeof(odp_buffer_hdr_t *);
+ max_alloc = CONFIG_SCAL_QUEUE_SIZE * sizeof(_odp_event_hdr_t *);
queue_shm_pool = _odp_ishm_pool_create("queue_shm_pool",
pool_size,
min_alloc, max_alloc, 0);
@@ -514,7 +515,7 @@ static odp_queue_t queue_lookup(const char *name)
#ifndef CONFIG_QSCHST_LOCK
static inline int _odp_queue_enq(sched_elem_t *q,
- odp_buffer_hdr_t *buf_hdr[],
+ _odp_event_hdr_t *event_hdr[],
int num)
{
ringidx_t old_read;
@@ -522,7 +523,7 @@ static inline int _odp_queue_enq(sched_elem_t *q,
ringidx_t new_write;
int actual;
uint32_t mask;
- odp_buffer_hdr_t **ring;
+ _odp_event_hdr_t **ring;
mask = q->prod_mask;
ring = q->prod_ring;
@@ -552,7 +553,7 @@ static inline int _odp_queue_enq(sched_elem_t *q,
#endif
/* Store our event(s) in the ring */
do {
- ring[old_write & mask] = *buf_hdr++;
+ ring[old_write & mask] = *event_hdr++;
} while (++old_write != new_write);
old_write -= actual;
@@ -580,7 +581,7 @@ static inline int _odp_queue_enq(sched_elem_t *q,
#endif
int _odp_queue_enq_sp(sched_elem_t *q,
- odp_buffer_hdr_t *buf_hdr[],
+ _odp_event_hdr_t *event_hdr[],
int num)
{
ringidx_t old_read;
@@ -588,7 +589,7 @@ int _odp_queue_enq_sp(sched_elem_t *q,
ringidx_t new_write;
int actual;
uint32_t mask;
- odp_buffer_hdr_t **ring;
+ _odp_event_hdr_t **ring;
mask = q->prod_mask;
ring = q->prod_ring;
@@ -606,7 +607,7 @@ int _odp_queue_enq_sp(sched_elem_t *q,
/* Store our event(s) in the ring */
do {
- ring[old_write & mask] = *buf_hdr++;
+ ring[old_write & mask] = *event_hdr++;
} while (++old_write != new_write);
old_write -= actual;
@@ -626,7 +627,7 @@ int _odp_queue_enq_sp(sched_elem_t *q,
return actual;
}
-static int _queue_enq_multi(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr[],
+static int _queue_enq_multi(odp_queue_t handle, _odp_event_hdr_t *event_hdr[],
int num)
{
int actual;
@@ -637,15 +638,15 @@ static int _queue_enq_multi(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr[],
ts = _odp_sched_ts;
if (ts && odp_unlikely(ts->out_of_order) &&
(queue->s.param.order == ODP_QUEUE_ORDER_KEEP)) {
- actual = _odp_rctx_save(queue, buf_hdr, num);
+ actual = _odp_rctx_save(queue, event_hdr, num);
return actual;
}
#ifdef CONFIG_QSCHST_LOCK
LOCK(&queue->s.sched_elem.qschlock);
- actual = _odp_queue_enq_sp(&queue->s.sched_elem, buf_hdr, num);
+ actual = _odp_queue_enq_sp(&queue->s.sched_elem, event_hdr, num);
#else
- actual = _odp_queue_enq(&queue->s.sched_elem, buf_hdr, num);
+ actual = _odp_queue_enq(&queue->s.sched_elem, event_hdr, num);
#endif
if (odp_likely(queue->s.sched_elem.schedq != NULL && actual != 0)) {
@@ -663,14 +664,14 @@ static int _queue_enq_multi(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr[],
return actual;
}
-static int _queue_enq(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr)
+static int _queue_enq(odp_queue_t handle, _odp_event_hdr_t *event_hdr)
{
- return odp_likely(_queue_enq_multi(handle, &buf_hdr, 1) == 1) ? 0 : -1;
+ return odp_likely(_queue_enq_multi(handle, &event_hdr, 1) == 1) ? 0 : -1;
}
static int queue_enq_multi(odp_queue_t handle, const odp_event_t ev[], int num)
{
- odp_buffer_hdr_t *buf_hdr[QUEUE_MULTI_MAX];
+ _odp_event_hdr_t *event_hdr[QUEUE_MULTI_MAX];
queue_entry_t *queue;
int i;
@@ -680,20 +681,20 @@ static int queue_enq_multi(odp_queue_t handle, const odp_event_t ev[], int num)
queue = _odp_qentry_from_ext(handle);
for (i = 0; i < num; i++)
- buf_hdr[i] = buf_hdl_to_hdr(odp_buffer_from_event(ev[i]));
+ event_hdr[i] = _odp_event_hdr(ev[i]);
- return queue->s.enqueue_multi(handle, buf_hdr, num);
+ return queue->s.enqueue_multi(handle, event_hdr, num);
}
static int queue_enq(odp_queue_t handle, odp_event_t ev)
{
- odp_buffer_hdr_t *buf_hdr;
+ _odp_event_hdr_t *event_hdr;
queue_entry_t *queue;
queue = _odp_qentry_from_ext(handle);
- buf_hdr = buf_hdl_to_hdr(odp_buffer_from_event(ev));
+ event_hdr = _odp_event_hdr(ev);
- return queue->s.enqueue(handle, buf_hdr);
+ return queue->s.enqueue(handle, event_hdr);
}
/* Single-consumer dequeue. */
@@ -704,7 +705,7 @@ int _odp_queue_deq_sc(sched_elem_t *q, odp_event_t *evp, int num)
ringidx_t old_write;
ringidx_t new_read;
uint32_t mask;
- odp_buffer_hdr_t **ring;
+ _odp_event_hdr_t **ring;
/* Load consumer ring state (read & write index). */
old_read = q->cons_read;
@@ -724,7 +725,7 @@ int _odp_queue_deq_sc(sched_elem_t *q, odp_event_t *evp, int num)
mask = q->cons_mask;
ring = q->cons_ring;
do {
- *evp++ = odp_buffer_to_event(buf_from_buf_hdr(ring[old_read & mask]));
+ *evp++ = _odp_event_from_hdr(ring[old_read & mask]);
} while (++old_read != new_read);
/* Signal producers that empty slots are available
@@ -739,15 +740,15 @@ int _odp_queue_deq_sc(sched_elem_t *q, odp_event_t *evp, int num)
return actual;
}
-int _odp_queue_deq(sched_elem_t *q, odp_buffer_hdr_t *buf_hdr[], int num)
+int _odp_queue_deq(sched_elem_t *q, _odp_event_hdr_t *event_hdr[], int num)
{
int actual;
ringidx_t old_read;
ringidx_t old_write;
ringidx_t new_read;
uint32_t mask;
- odp_buffer_hdr_t **ring;
- odp_buffer_hdr_t **p_buf_hdr;
+ _odp_event_hdr_t **ring;
+ _odp_event_hdr_t **p_event_hdr;
mask = q->cons_mask;
ring = q->cons_ring;
@@ -778,9 +779,9 @@ int _odp_queue_deq(sched_elem_t *q, odp_buffer_hdr_t *buf_hdr[], int num)
#ifdef CONFIG_SPLIT_PRODCONS
__builtin_prefetch(&q->prod_read, 0, 0);
#endif
- p_buf_hdr = buf_hdr;
+ p_event_hdr = event_hdr;
do {
- *p_buf_hdr++ = ring[old_read & mask];
+ *p_event_hdr++ = ring[old_read & mask];
} while (++old_read != new_read);
old_read -= actual;
@@ -809,7 +810,7 @@ int _odp_queue_deq(sched_elem_t *q, odp_buffer_hdr_t *buf_hdr[], int num)
int _odp_queue_deq_mc(sched_elem_t *q, odp_event_t *evp, int num)
{
int ret, evt_idx;
- odp_buffer_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
+ _odp_event_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
if (num > QUEUE_MULTI_MAX)
num = QUEUE_MULTI_MAX;
@@ -817,13 +818,13 @@ int _odp_queue_deq_mc(sched_elem_t *q, odp_event_t *evp, int num)
ret = _odp_queue_deq(q, hdr_tbl, num);
if (odp_likely(ret != 0)) {
for (evt_idx = 0; evt_idx < num; evt_idx++)
- evp[evt_idx] = odp_buffer_to_event(buf_from_buf_hdr(hdr_tbl[evt_idx]));
+ evp[evt_idx] = _odp_event_from_hdr(hdr_tbl[evt_idx]);
}
return ret;
}
-static int _queue_deq_multi(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr[],
+static int _queue_deq_multi(odp_queue_t handle, _odp_event_hdr_t *event_hdr[],
int num)
{
sched_elem_t *q;
@@ -831,19 +832,19 @@ static int _queue_deq_multi(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr[],
queue = qentry_from_int(handle);
q = &queue->s.sched_elem;
- return _odp_queue_deq(q, buf_hdr, num);
+ return _odp_queue_deq(q, event_hdr, num);
}
-static odp_buffer_hdr_t *_queue_deq(odp_queue_t handle)
+static _odp_event_hdr_t *_queue_deq(odp_queue_t handle)
{
sched_elem_t *q;
- odp_buffer_hdr_t *buf_hdr;
+ _odp_event_hdr_t *event_hdr;
queue_entry_t *queue;
queue = qentry_from_int(handle);
q = &queue->s.sched_elem;
- if (_odp_queue_deq(q, &buf_hdr, 1) == 1)
- return buf_hdr;
+ if (_odp_queue_deq(q, &event_hdr, 1) == 1)
+ return event_hdr;
else
return NULL;
}
@@ -858,7 +859,7 @@ static int queue_deq_multi(odp_queue_t handle, odp_event_t ev[], int num)
queue = _odp_qentry_from_ext(handle);
- ret = queue->s.dequeue_multi(handle, (odp_buffer_hdr_t **)ev, num);
+ ret = queue->s.dequeue_multi(handle, (_odp_event_hdr_t **)ev, num);
if (odp_global_rw->inline_timers &&
odp_atomic_load_u64(&queue->s.num_timers))
@@ -1121,10 +1122,10 @@ static void queue_set_enq_deq_func(odp_queue_t handle,
}
static int queue_orig_multi(odp_queue_t handle,
- odp_buffer_hdr_t **buf_hdr, int num)
+ _odp_event_hdr_t **event_hdr, int num)
{
return qentry_from_int(handle)->s.orig_dequeue_multi(handle,
- buf_hdr, num);
+ event_hdr, num);
}
static void queue_timer_add(odp_queue_t handle)
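Both scalable enqueue paths above use the same ring idiom: header pointers stored at free-running producer/consumer indices, with power-of-two ring sizes so that index & mask wraps without a modulo. Reduced to its core (single-threaded illustration; the real code adds acquire/release ordering plus full/empty handling):

#include <stdint.h>

#define RING_SIZE 8u /* must be a power of two */

static void *ring[RING_SIZE];
static uint32_t write_idx, read_idx; /* free running, wrap is harmless */

static inline void ring_store(void *ev)
{
	ring[write_idx++ & (RING_SIZE - 1u)] = ev;
}

static inline void *ring_load(void)
{
	return ring[read_idx++ & (RING_SIZE - 1u)];
}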
diff --git a/platform/linux-generic/odp_queue_spsc.c b/platform/linux-generic/odp_queue_spsc.c
index 92f16e657..c1b02d457 100644
--- a/platform/linux-generic/odp_queue_spsc.c
+++ b/platform/linux-generic/odp_queue_spsc.c
@@ -1,38 +1,40 @@
/* Copyright (c) 2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <string.h>
-#include <stdio.h>
-#include <odp_queue_basic_internal.h>
+#include <odp_debug_internal.h>
+#include <odp_event_internal.h>
#include <odp_pool_internal.h>
+#include <odp_queue_basic_internal.h>
-#include <odp_debug_internal.h>
+#include <string.h>
+#include <stdio.h>
-static inline void buffer_index_from_buf(uint32_t buffer_index[],
- odp_buffer_hdr_t *buf_hdr[], int num)
+static inline void event_index_from_hdr(uint32_t event_index[],
+ _odp_event_hdr_t *event_hdr[], int num)
{
int i;
for (i = 0; i < num; i++)
- buffer_index[i] = buf_hdr[i]->index.u32;
+ event_index[i] = event_hdr[i]->index.u32;
}
-static inline void buffer_index_to_buf(odp_buffer_hdr_t *buf_hdr[],
- uint32_t buffer_index[], int num)
+static inline void event_index_to_hdr(_odp_event_hdr_t *event_hdr[],
+ uint32_t event_index[], int num)
{
int i;
for (i = 0; i < num; i++) {
- buf_hdr[i] = buf_hdr_from_index_u32(buffer_index[i]);
- odp_prefetch(buf_hdr[i]);
+ event_hdr[i] = _odp_event_hdr_from_index_u32(event_index[i]);
+ odp_prefetch(event_hdr[i]);
}
}
static inline int spsc_enq_multi(odp_queue_t handle,
- odp_buffer_hdr_t *buf_hdr[], int num)
+ _odp_event_hdr_t *event_hdr[], int num)
{
queue_entry_t *queue;
ring_spsc_t *ring_spsc;
@@ -41,7 +43,7 @@ static inline int spsc_enq_multi(odp_queue_t handle,
queue = qentry_from_handle(handle);
ring_spsc = &queue->s.ring_spsc;
- buffer_index_from_buf(buf_idx, buf_hdr, num);
+ event_index_from_hdr(buf_idx, event_hdr, num);
if (odp_unlikely(queue->s.status < QUEUE_STATUS_READY)) {
ODP_ERR("Bad queue status\n");
@@ -53,7 +55,7 @@ static inline int spsc_enq_multi(odp_queue_t handle,
}
static inline int spsc_deq_multi(odp_queue_t handle,
- odp_buffer_hdr_t *buf_hdr[], int num)
+ _odp_event_hdr_t *event_hdr[], int num)
{
queue_entry_t *queue;
int num_deq;
@@ -74,22 +76,22 @@ static inline int spsc_deq_multi(odp_queue_t handle,
if (num_deq == 0)
return 0;
- buffer_index_to_buf(buf_hdr, buf_idx, num_deq);
+ event_index_to_hdr(event_hdr, buf_idx, num_deq);
return num_deq;
}
-static int queue_spsc_enq_multi(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr[],
+static int queue_spsc_enq_multi(odp_queue_t handle, _odp_event_hdr_t *event_hdr[],
int num)
{
- return spsc_enq_multi(handle, buf_hdr, num);
+ return spsc_enq_multi(handle, event_hdr, num);
}
-static int queue_spsc_enq(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr)
+static int queue_spsc_enq(odp_queue_t handle, _odp_event_hdr_t *event_hdr)
{
int ret;
- ret = spsc_enq_multi(handle, &buf_hdr, 1);
+ ret = spsc_enq_multi(handle, &event_hdr, 1);
if (ret == 1)
return 0;
@@ -97,21 +99,21 @@ static int queue_spsc_enq(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr)
return -1;
}
-static int queue_spsc_deq_multi(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr[],
+static int queue_spsc_deq_multi(odp_queue_t handle, _odp_event_hdr_t *event_hdr[],
int num)
{
- return spsc_deq_multi(handle, buf_hdr, num);
+ return spsc_deq_multi(handle, event_hdr, num);
}
-static odp_buffer_hdr_t *queue_spsc_deq(odp_queue_t handle)
+static _odp_event_hdr_t *queue_spsc_deq(odp_queue_t handle)
{
- odp_buffer_hdr_t *buf_hdr = NULL;
+ _odp_event_hdr_t *event_hdr = NULL;
int ret;
- ret = spsc_deq_multi(handle, &buf_hdr, 1);
+ ret = spsc_deq_multi(handle, &event_hdr, 1);
if (ret == 1)
- return buf_hdr;
+ return event_hdr;
else
return NULL;
}
diff --git a/platform/linux-generic/odp_random.c b/platform/linux-generic/odp_random.c
index acae9663d..3060e8ed9 100644
--- a/platform/linux-generic/odp_random.c
+++ b/platform/linux-generic/odp_random.c
@@ -12,19 +12,38 @@
#include <odp_init_internal.h>
#include <odp_random_std_internal.h>
#include <odp_random_openssl_internal.h>
+#include <odp_random.h>
odp_random_kind_t odp_random_max_kind(void)
{
- if (_ODP_OPENSSL)
- return _odp_random_openssl_max_kind();
- return _odp_random_std_max_kind();
+ odp_random_kind_t kind, max_kind = ODP_RANDOM_BASIC;
+
+ if (_ODP_OPENSSL_RAND)
+ max_kind = ODP_RANDOM_CRYPTO;
+
+ kind = _odp_random_max_kind();
+ if (kind > max_kind)
+ max_kind = kind;
+
+ return max_kind;
}
int32_t odp_random_data(uint8_t *buf, uint32_t len, odp_random_kind_t kind)
{
- if (_ODP_OPENSSL)
- return _odp_random_openssl_data(buf, len, kind);
- return _odp_random_std_data(buf, len, kind);
+ switch (kind) {
+ case ODP_RANDOM_BASIC:
+ if (_ODP_OPENSSL_RAND)
+ return _odp_random_openssl_data(buf, len);
+ return _odp_random_std_data(buf, len);
+ case ODP_RANDOM_CRYPTO:
+ if (_ODP_OPENSSL_RAND)
+ return _odp_random_openssl_data(buf, len);
+ return _odp_random_crypto_data(buf, len);
+ case ODP_RANDOM_TRUE:
+ return _odp_random_true_data(buf, len);
+ }
+
+ return -1;
}
int32_t odp_random_test_data(uint8_t *buf, uint32_t len, uint64_t *seed)
@@ -34,14 +53,14 @@ int32_t odp_random_test_data(uint8_t *buf, uint32_t len, uint64_t *seed)
int _odp_random_init_local(void)
{
- if (_ODP_OPENSSL)
+ if (_ODP_OPENSSL_RAND)
return _odp_random_openssl_init_local();
return _odp_random_std_init_local();
}
int _odp_random_term_local(void)
{
- if (_ODP_OPENSSL)
+ if (_ODP_OPENSSL_RAND)
return _odp_random_openssl_term_local();
return _odp_random_std_term_local();
}
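With the dispatch centralized in odp_random_data(), the per-backend kind checks disappear and ODP_RANDOM_TRUE gets a dedicated _odp_random_true_data() path. The application contract is unchanged; a typical caller probes the strongest kind and loops, since odp_random_data() may legally return fewer bytes than requested:

#include <odp_api.h>

/* Fill 'len' bytes with the strongest randomness the platform offers;
 * returns 0 on success, -1 if the kind cannot be served. */
static int fill_random(uint8_t *buf, uint32_t len)
{
	odp_random_kind_t kind = odp_random_max_kind();
	uint32_t done = 0;

	while (done < len) {
		int32_t ret = odp_random_data(buf + done, len - done, kind);

		if (ret < 0)
			return -1;
		done += (uint32_t)ret;
	}
	return 0;
}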
diff --git a/platform/linux-generic/odp_random_openssl.c b/platform/linux-generic/odp_random_openssl.c
index a74f99bd4..fdc40871b 100644
--- a/platform/linux-generic/odp_random_openssl.c
+++ b/platform/linux-generic/odp_random_openssl.c
@@ -7,49 +7,28 @@
#include <odp_posix_extensions.h>
#include <stdint.h>
-#include <odp/api/random.h>
#include <odp/autoheader_internal.h>
#include <odp_init_internal.h>
#include <odp_random_openssl_internal.h>
-#if _ODP_OPENSSL
+#if _ODP_OPENSSL_RAND
#include <openssl/rand.h>
-odp_random_kind_t _odp_random_openssl_max_kind(void)
-{
- return ODP_RANDOM_CRYPTO;
-}
-
-int32_t _odp_random_openssl_data(uint8_t *buf, uint32_t len,
- odp_random_kind_t kind)
+int32_t _odp_random_openssl_data(uint8_t *buf, uint32_t len)
{
int rc;
- switch (kind) {
- case ODP_RANDOM_BASIC:
- case ODP_RANDOM_CRYPTO:
- rc = RAND_bytes(buf, len);
- return (1 == rc) ? (int)len /*success*/: -1 /*failure*/;
-
- case ODP_RANDOM_TRUE:
- default:
- return -1;
- }
+ rc = RAND_bytes(buf, len);
+ return (1 == rc) ? (int)len /*success*/: -1 /*failure*/;
}
#else
/* Dummy functions for building without OpenSSL support */
-odp_random_kind_t _odp_random_openssl_max_kind(void)
-{
- return ODP_RANDOM_BASIC;
-}
-
int32_t _odp_random_openssl_data(uint8_t *buf ODP_UNUSED,
- uint32_t len ODP_UNUSED,
- odp_random_kind_t kind ODP_UNUSED)
+ uint32_t len ODP_UNUSED)
{
return -1;
}
-#endif /* _ODP_OPENSSL */
+#endif /* _ODP_OPENSSL_RAND */
int _odp_random_openssl_init_local(void)
{
diff --git a/platform/linux-generic/odp_random_std.c b/platform/linux-generic/odp_random_std.c
index 3b7187af1..3afd049f4 100644
--- a/platform/linux-generic/odp_random_std.c
+++ b/platform/linux-generic/odp_random_std.c
@@ -7,7 +7,6 @@
#include <odp_posix_extensions.h>
#include <stdint.h>
#include <stdlib.h>
-#include <odp/api/random.h>
#include <odp/api/byteorder.h>
#include <odp/api/cpu.h>
#include <odp/api/debug.h>
@@ -20,11 +19,6 @@
ODP_STATIC_ASSERT(RAND_MAX >= UINT16_MAX, "RAND_MAX too small");
ODP_STATIC_ASSERT((RAND_MAX & (RAND_MAX + 1ULL)) == 0, "RAND_MAX not power of two - 1");
-odp_random_kind_t _odp_random_std_max_kind(void)
-{
- return ODP_RANDOM_BASIC;
-}
-
static int32_t _random_data(uint8_t *buf, uint32_t len, uint32_t *seed)
{
union {
@@ -57,11 +51,8 @@ int32_t _odp_random_std_test_data(uint8_t *buf, uint32_t len, uint64_t *seed)
static __thread uint32_t this_seed;
-int32_t _odp_random_std_data(uint8_t *buf, uint32_t len, odp_random_kind_t kind)
+int32_t _odp_random_std_data(uint8_t *buf, uint32_t len)
{
- if (kind != ODP_RANDOM_BASIC)
- return -1;
-
return _random_data(buf, len, &this_seed);
}
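With the kind policy hoisted out, _odp_random_std_data() reduces to a plain seeded PRNG fill. The static asserts above guarantee rand_r() yields at least 16 usable bits per call; the _random_data() body is outside this hunk, but a fill loop along these lines is implied (hypothetical reconstruction, not the actual implementation):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static int32_t random_fill(uint8_t *buf, uint32_t len, unsigned int *seed)
{
	uint32_t i = 0;

	while (i < len) {
		uint16_t word = (uint16_t)rand_r(seed); /* low 16 bits */
		uint32_t n = (len - i < 2u) ? len - i : 2u;

		memcpy(&buf[i], &word, n);
		i += n;
	}
	return (int32_t)len;
}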
diff --git a/platform/linux-generic/odp_schedule_basic.c b/platform/linux-generic/odp_schedule_basic.c
index f86914722..0761def57 100644
--- a/platform/linux-generic/odp_schedule_basic.c
+++ b/platform/linux-generic/odp_schedule_basic.c
@@ -36,6 +36,7 @@
#include <odp_libconfig_internal.h>
#include <odp/api/plat/queue_inlines.h>
#include <odp_global_data.h>
+#include <odp_event_internal.h>
#include <string.h>
@@ -122,7 +123,7 @@ ODP_STATIC_ASSERT((8 * sizeof(prio_q_mask_t)) >= MAX_SPREAD,
/* Storage for stashed enqueue operation arguments */
typedef struct {
- odp_buffer_hdr_t *buf_hdr[QUEUE_MULTI_MAX];
+ _odp_event_hdr_t *event_hdr[QUEUE_MULTI_MAX];
odp_queue_t queue;
int num;
} ordered_stash_t;
@@ -902,15 +903,15 @@ static inline void ordered_stash_release(void)
for (i = 0; i < sched_local.ordered.stash_num; i++) {
odp_queue_t queue;
- odp_buffer_hdr_t **buf_hdr;
+ _odp_event_hdr_t **event_hdr;
int num, num_enq;
queue = sched_local.ordered.stash[i].queue;
- buf_hdr = sched_local.ordered.stash[i].buf_hdr;
+ event_hdr = sched_local.ordered.stash[i].event_hdr;
num = sched_local.ordered.stash[i].num;
num_enq = odp_queue_enq_multi(queue,
- (odp_event_t *)buf_hdr, num);
+ (odp_event_t *)event_hdr, num);
/* Drop packets that were not enqueued */
if (odp_unlikely(num_enq < num)) {
@@ -918,7 +919,7 @@ static inline void ordered_stash_release(void)
if (odp_unlikely(num_enq < 0))
num_enq = 0;
ODP_DBG("Dropped %i packets\n", num - num_enq);
- _odp_buffer_free_multi(&buf_hdr[num_enq], num - num_enq);
+ _odp_event_free_multi(&event_hdr[num_enq], num - num_enq);
}
}
sched_local.ordered.stash_num = 0;
@@ -1076,7 +1077,7 @@ static inline int copy_from_stash(odp_event_t out_ev[], unsigned int max)
return i;
}
-static int schedule_ord_enq_multi(odp_queue_t dst_queue, void *buf_hdr[],
+static int schedule_ord_enq_multi(odp_queue_t dst_queue, void *event_hdr[],
int num, int *ret)
{
int i;
@@ -1123,7 +1124,7 @@ static int schedule_ord_enq_multi(odp_queue_t dst_queue, void *buf_hdr[],
sched_local.ordered.stash[stash_num].queue = dst_queue;
sched_local.ordered.stash[stash_num].num = num;
for (i = 0; i < num; i++)
- sched_local.ordered.stash[stash_num].buf_hdr[i] = buf_hdr[i];
+ sched_local.ordered.stash[stash_num].event_hdr[i] = event_hdr[i];
sched_local.ordered.stash_num++;
@@ -1140,12 +1141,12 @@ static inline int poll_pktin(uint32_t qi, int direct_recv,
odp_event_t ev_tbl[], int max_num)
{
int pktio_index, pktin_index, num, num_pktin;
- odp_buffer_hdr_t **hdr_tbl;
+ _odp_event_hdr_t **hdr_tbl;
int ret;
void *q_int;
- odp_buffer_hdr_t *b_hdr[CONFIG_BURST_SIZE];
+ _odp_event_hdr_t *b_hdr[CONFIG_BURST_SIZE];
- hdr_tbl = (odp_buffer_hdr_t **)ev_tbl;
+ hdr_tbl = (_odp_event_hdr_t **)ev_tbl;
if (!direct_recv) {
hdr_tbl = b_hdr;
@@ -1194,7 +1195,7 @@ static inline int poll_pktin(uint32_t qi, int direct_recv,
if (odp_unlikely(num_enq < 0))
num_enq = 0;
ODP_DBG("Dropped %i packets\n", num - num_enq);
- _odp_buffer_free_multi(&b_hdr[num_enq], num - num_enq);
+ _odp_event_free_multi(&b_hdr[num_enq], num - num_enq);
}
return ret;
@@ -1988,6 +1989,7 @@ static void schedule_print(void)
ring_u32_t *ring;
odp_schedule_capability_t capa;
int num_spread = sched->config.num_spread;
+ const int col_width = 24;
(void)schedule_capability(&capa);
@@ -2039,14 +2041,14 @@ static void schedule_print(void)
}
}
- ODP_PRINT("\n Number of threads:\n");
- ODP_PRINT(" spread\n");
+ ODP_PRINT("\n Number of threads per schedule group:\n");
+ ODP_PRINT(" name spread\n");
for (grp = 0; grp < NUM_SCHED_GRPS; grp++) {
if (sched->sched_grp[grp].allocated == 0)
continue;
- ODP_PRINT(" group %i:", grp);
+ ODP_PRINT(" group %i: %-*s", grp, col_width, sched->sched_grp[grp].name);
for (spr = 0; spr < num_spread; spr++)
ODP_PRINT(" %u", sched->sched_grp[grp].spread_thrs[spr]);
diff --git a/platform/linux-generic/odp_schedule_scalable.c b/platform/linux-generic/odp_schedule_scalable.c
index 10f456eed..5343c2834 100644
--- a/platform/linux-generic/odp_schedule_scalable.c
+++ b/platform/linux-generic/odp_schedule_scalable.c
@@ -30,6 +30,7 @@
#include <odp_queue_scalable_internal.h>
#include <odp_schedule_if.h>
#include <odp_bitset.h>
+#include <odp_event_internal.h>
#include <odp_packet_io_internal.h>
#include <odp_timer_internal.h>
@@ -828,12 +829,11 @@ events_dequeued:
}
if (num_rx > num) {
/* Events remain, enqueue them */
- odp_buffer_hdr_t *bufs[QUEUE_MULTI_MAX];
+ _odp_event_hdr_t *events[QUEUE_MULTI_MAX];
for (i = num; i < num_rx; i++)
- bufs[i] =
- (odp_buffer_hdr_t *)(void *)rx_evts[i];
- i = _odp_queue_enq_sp(elem, &bufs[num], num_rx - num);
+ events[i] = _odp_event_hdr(rx_evts[i]);
+ i = _odp_queue_enq_sp(elem, &events[num], num_rx - num);
/* Enqueue must succeed as the queue was empty */
ODP_ASSERT(i == num_rx - num);
}
@@ -2095,7 +2095,7 @@ static int sched_queue(uint32_t queue_index)
return 0;
}
-static int ord_enq_multi(odp_queue_t handle, void *buf_hdr[], int num,
+static int ord_enq_multi(odp_queue_t handle, void *event_hdr[], int num,
int *ret)
{
@@ -2107,7 +2107,7 @@ static int ord_enq_multi(odp_queue_t handle, void *buf_hdr[], int num,
queue = qentry_from_int(handle);
if (ts && odp_unlikely(ts->out_of_order) &&
(queue->s.param.order == ODP_QUEUE_ORDER_KEEP)) {
- actual = _odp_rctx_save(queue, (odp_buffer_hdr_t **)buf_hdr, num);
+ actual = _odp_rctx_save(queue, (_odp_event_hdr_t **)event_hdr, num);
*ret = actual;
return 1;
}
diff --git a/platform/linux-generic/odp_schedule_scalable_ordered.c b/platform/linux-generic/odp_schedule_scalable_ordered.c
index 991be658e..5f1f8a405 100644
--- a/platform/linux-generic/odp_schedule_scalable_ordered.c
+++ b/platform/linux-generic/odp_schedule_scalable_ordered.c
@@ -9,9 +9,11 @@
#include <odp/api/shared_memory.h>
#include <odp/api/cpu.h>
#include <odp/api/plat/cpu_inlines.h>
+
+#include <odp_bitset.h>
+#include <odp_event_internal.h>
#include <odp_queue_scalable_internal.h>
#include <odp_schedule_if.h>
-#include <odp_bitset.h>
#include <string.h>
@@ -255,7 +257,7 @@ static void olock_release(const reorder_context_t *rctx)
olock_unlock(rctx, rwin, i);
}
-static void blocking_enqueue(queue_entry_t *q, odp_buffer_hdr_t **evts, int num)
+static void blocking_enqueue(queue_entry_t *q, _odp_event_hdr_t **evts, int num)
{
int actual;
@@ -315,7 +317,7 @@ void _odp_rctx_release(reorder_context_t *rctx)
/* Save destination queue and events in the reorder context for deferred
* enqueue.
*/
-int _odp_rctx_save(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num)
+int _odp_rctx_save(queue_entry_t *queue, _odp_event_hdr_t *event_hdr[], int num)
{
int i;
sched_scalable_thread_state_t *ts;
@@ -361,7 +363,7 @@ int _odp_rctx_save(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num)
/* The last rctx (so far) */
cur->next_idx = first->idx;
}
- cur->events[cur->numevts] = buf_hdr[i];
+ cur->events[cur->numevts] = event_hdr[i];
cur->destq[cur->numevts] = queue;
cur->numevts++;
}
diff --git a/platform/linux-generic/odp_schedule_sp.c b/platform/linux-generic/odp_schedule_sp.c
index 470075cea..f5aba1a88 100644
--- a/platform/linux-generic/odp_schedule_sp.c
+++ b/platform/linux-generic/odp_schedule_sp.c
@@ -20,10 +20,12 @@
#include <odp/api/plat/time_inlines.h>
#include <odp/api/schedule.h>
#include <odp/api/shared_memory.h>
+
#include <odp_schedule_if.h>
#include <odp_debug_internal.h>
#include <odp_align_internal.h>
#include <odp_config_internal.h>
+#include <odp_event_internal.h>
#include <odp_ring_u32_internal.h>
#include <odp_timer_internal.h>
#include <odp_queue_basic_internal.h>
@@ -600,7 +602,7 @@ static uint64_t schedule_wait_time(uint64_t ns)
}
static inline void enqueue_packets(odp_queue_t queue,
- odp_buffer_hdr_t *hdr_tbl[], int num_pkt)
+ _odp_event_hdr_t *hdr_tbl[], int num_pkt)
{
int num_enq, num_drop;
@@ -644,7 +646,7 @@ static int schedule_multi(odp_queue_t *from, uint64_t wait,
cmd = sched_cmd();
if (cmd && cmd->s.type == CMD_PKTIO) {
- odp_buffer_hdr_t *hdr_tbl[CONFIG_BURST_SIZE];
+ _odp_event_hdr_t *hdr_tbl[CONFIG_BURST_SIZE];
int i;
int num_pkt = 0;
int max_num = CONFIG_BURST_SIZE;
diff --git a/platform/linux-generic/odp_shared_memory.c b/platform/linux-generic/odp_shared_memory.c
index 966850a07..424d32ccf 100644
--- a/platform/linux-generic/odp_shared_memory.c
+++ b/platform/linux-generic/odp_shared_memory.c
@@ -1,11 +1,12 @@
-/* Copyright (c) 2019, Nokia
- * Copyright (c) 2013-2018, Linaro Limited
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2019-2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <odp_config_internal.h>
+#include <odp_debug_internal.h>
#include <odp/api/debug.h>
#include <odp/api/std_types.h>
#include <odp/api/shared_memory.h>
@@ -15,6 +16,10 @@
#include <odp_global_data.h>
#include <string.h>
+/* Supported ODP_SHM_* flags */
+#define SUPPORTED_SHM_FLAGS (ODP_SHM_SW_ONLY | ODP_SHM_PROC | ODP_SHM_SINGLE_VA | ODP_SHM_EXPORT | \
+ ODP_SHM_HP | ODP_SHM_NO_HP)
+
static inline uint32_t from_handle(odp_shm_t shm)
{
return _odp_typeval(shm) - 1;
@@ -47,6 +52,7 @@ int odp_shm_capability(odp_shm_capability_t *capa)
capa->max_blocks = CONFIG_SHM_BLOCKS;
capa->max_size = odp_global_ro.shm_max_size;
capa->max_align = 0;
+ capa->flags = SUPPORTED_SHM_FLAGS;
return 0;
}
@@ -56,6 +62,12 @@ odp_shm_t odp_shm_reserve(const char *name, uint64_t size, uint64_t align,
{
int block_index;
uint32_t flgs = 0; /* internal ishm flags */
+ uint32_t supported_flgs = SUPPORTED_SHM_FLAGS;
+
+ if (flags & ~supported_flgs) {
+ ODP_ERR("Unsupported SHM flag\n");
+ return ODP_SHM_INVALID;
+ }
flgs = get_ishm_flags(flags);
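odp_shm_reserve() now rejects unknown flags up front, and the supported set is exported through the new capability flags field, so applications can probe support instead of depending on a failing reserve. For example:

#include <odp_api.h>

/* Request ODP_SHM_SINGLE_VA only when the platform advertises it */
static odp_shm_t reserve_shared(const char *name, uint64_t size)
{
	odp_shm_capability_t capa;
	uint32_t flags = 0;

	if (odp_shm_capability(&capa) == 0 &&
	    (capa.flags & ODP_SHM_SINGLE_VA))
		flags |= ODP_SHM_SINGLE_VA;

	return odp_shm_reserve(name, size, ODP_CACHE_LINE_SIZE, flags);
}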
diff --git a/platform/linux-generic/odp_stash.c b/platform/linux-generic/odp_stash.c
index 57cf747f1..9dbc8cc26 100644
--- a/platform/linux-generic/odp_stash.c
+++ b/platform/linux-generic/odp_stash.c
@@ -20,6 +20,8 @@
#include <odp_ring_u32_internal.h>
#include <odp_ring_u64_internal.h>
+ODP_STATIC_ASSERT(CONFIG_INTERNAL_STASHES < CONFIG_MAX_STASHES, "TOO_MANY_INTERNAL_STASHES");
+
#define MAX_RING_SIZE (1024 * 1024)
#define MIN_RING_SIZE 64
@@ -110,8 +112,8 @@ int odp_stash_capability(odp_stash_capability_t *capa, odp_stash_type_t type)
(void)type;
memset(capa, 0, sizeof(odp_stash_capability_t));
- capa->max_stashes_any_type = CONFIG_MAX_STASHES;
- capa->max_stashes = CONFIG_MAX_STASHES;
+ capa->max_stashes_any_type = CONFIG_MAX_STASHES - CONFIG_INTERNAL_STASHES;
+ capa->max_stashes = CONFIG_MAX_STASHES - CONFIG_INTERNAL_STASHES;
capa->max_num_obj = MAX_RING_SIZE;
capa->max_obj_size = sizeof(uint64_t);
diff --git a/platform/linux-generic/odp_system_info.c b/platform/linux-generic/odp_system_info.c
index 778ea08cb..1e4f775e1 100644
--- a/platform/linux-generic/odp_system_info.c
+++ b/platform/linux-generic/odp_system_info.c
@@ -24,9 +24,9 @@
#include <odp_config_internal.h>
#include <odp/api/align.h>
#include <odp/api/cpu.h>
+
#include <errno.h>
#include <pthread.h>
-#include <sched.h>
#include <string.h>
#include <stdio.h>
#include <inttypes.h>
@@ -276,6 +276,21 @@ static uint64_t read_cpufreq(const char *filename, int id)
return ret;
}
+static inline uint64_t cpu_hz_current(int id)
+{
+ uint64_t cur_hz = read_cpufreq("cpuinfo_cur_freq", id);
+
+ if (!cur_hz)
+ cur_hz = odp_cpu_arch_hz_current(id);
+
+ return cur_hz;
+}
+
+static inline uint64_t cpu_hz_static(int id)
+{
+ return odp_global_ro.system_info.cpu_hz[id];
+}
+
/*
* Analysis of /sys/devices/system/cpu/ files
*/
@@ -337,6 +352,13 @@ static int read_config_file(void)
}
odp_global_ro.system_info.default_cpu_hz_max = (uint64_t)val * 1000000;
+ str = "system.cpu_hz_static";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+ odp_global_ro.system_info.cpu_hz_static = !!val;
+
return 0;
}
@@ -364,6 +386,10 @@ int _odp_system_info_init(void)
"CPU IDs. Increase CONFIG_NUM_CPU_IDS value.\n",
num_cpus);
+ /* Read and save all CPU frequencies for static mode */
+ for (i = 0; i < CONFIG_NUM_CPU_IDS; i++)
+ odp_global_ro.system_info.cpu_hz[i] = cpu_hz_current(i);
+
/* By default, read max frequency from a cpufreq file */
for (i = 0; i < CONFIG_NUM_CPU_IDS; i++) {
uint64_t cpu_hz_max = read_cpufreq("cpuinfo_max_freq", i);
@@ -407,26 +433,22 @@ int _odp_system_info_term(void)
* Public access functions
*************************
*/
-uint64_t odp_cpu_hz_current(int id)
-{
- uint64_t cur_hz = read_cpufreq("cpuinfo_cur_freq", id);
-
- if (!cur_hz)
- cur_hz = odp_cpu_arch_hz_current(id);
-
- return cur_hz;
-}
-
uint64_t odp_cpu_hz(void)
{
- int id = sched_getcpu();
+ int id = odp_cpu_id();
- return odp_cpu_hz_current(id);
+ if (odp_global_ro.system_info.cpu_hz_static)
+ return cpu_hz_static(id);
+ return cpu_hz_current(id);
}
uint64_t odp_cpu_hz_id(int id)
{
- return odp_cpu_hz_current(id);
+ ODP_ASSERT(id >= 0 && id < CONFIG_NUM_CPU_IDS);
+
+ if (odp_global_ro.system_info.cpu_hz_static)
+ return cpu_hz_static(id);
+ return cpu_hz_current(id);
}
uint64_t odp_cpu_hz_max(void)
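The new system.cpu_hz_static option trades accuracy for speed: per-CPU frequencies are sampled once in _odp_system_info_init() and odp_cpu_hz()/odp_cpu_hz_id() then return the cached values, so frequent callers avoid a sysfs read per call at the cost of staleness under frequency scaling. The hot path reduces to an array load (condensed sketch; NUM_CPU_IDS stands in for CONFIG_NUM_CPU_IDS):

#include <stdint.h>

#define NUM_CPU_IDS 256

extern uint64_t cpu_hz_current(int id); /* sysfs read, arch fallback */

static uint64_t hz_cache[NUM_CPU_IDS];

static void hz_cache_init(void)
{
	int i;

	for (i = 0; i < NUM_CPU_IDS; i++)
		hz_cache[i] = cpu_hz_current(i); /* sampled once at init */
}

static inline uint64_t hz_lookup(int id)
{
	return hz_cache[id]; /* no per-call sysfs access */
}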
diff --git a/platform/linux-generic/odp_timer.c b/platform/linux-generic/odp_timer.c
index a28d31245..cd98fcc7d 100644
--- a/platform/linux-generic/odp_timer.c
+++ b/platform/linux-generic/odp_timer.c
@@ -51,6 +51,7 @@
#include <odp_timer_internal.h>
#include <odp/api/plat/queue_inlines.h>
#include <odp_global_data.h>
+#include <odp_event_internal.h>
/* Inlined API functions */
#include <odp/api/plat/event_inlines.h>
@@ -92,8 +93,8 @@ tick_buf_s {
#endif
union {
- /* ODP_BUFFER_INVALID if timer not active */
- odp_buffer_t tmo_buf;
+ /* ODP_EVENT_INVALID if timer not active */
+ odp_event_t tmo_event;
/* Ensures that tick_buf_t is 128 bits */
uint64_t tmo_u64;
@@ -192,7 +193,7 @@ static void timer_init(_odp_timer_t *tim, tick_buf_t *tb, odp_queue_t _q, const
tim->queue = _q;
tim->user_ptr = _up;
tb->tmo_u64 = 0;
- tb->tmo_buf = ODP_BUFFER_INVALID;
+ tb->tmo_event = ODP_EVENT_INVALID;
/* Release the timer by setting timer state to inactive */
#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
@@ -206,7 +207,7 @@ static void timer_init(_odp_timer_t *tim, tick_buf_t *tb, odp_queue_t _q, const
static void timer_fini(_odp_timer_t *tim, tick_buf_t *tb)
{
ODP_ASSERT(tb->exp_tck.v == TMO_UNUSED);
- ODP_ASSERT(tb->tmo_buf == ODP_BUFFER_INVALID);
+ ODP_ASSERT(tb->tmo_event == ODP_EVENT_INVALID);
tim->queue = ODP_QUEUE_INVALID;
tim->user_ptr = NULL;
}
@@ -266,16 +267,14 @@ static inline odp_timer_t tp_idx_to_handle(timer_pool_t *tp,
(idx + 1));
}
-static odp_timeout_hdr_t *timeout_hdr_from_buf(odp_buffer_t buf)
+static inline odp_timeout_hdr_t *timeout_hdr_from_event(odp_event_t event)
{
- return (odp_timeout_hdr_t *)(void *)buf_hdl_to_hdr(buf);
+ return (odp_timeout_hdr_t *)(uintptr_t)event;
}
-static odp_timeout_hdr_t *timeout_hdr(odp_timeout_t tmo)
+static inline odp_timeout_hdr_t *timeout_hdr(odp_timeout_t tmo)
{
- odp_buffer_t buf = odp_buffer_from_event(odp_timeout_to_event(tmo));
-
- return timeout_hdr_from_buf(buf);
+ return (odp_timeout_hdr_t *)(uintptr_t)tmo;
}
static odp_timer_pool_t timer_pool_new(const char *name,
@@ -286,7 +285,7 @@ static odp_timer_pool_t timer_pool_new(const char *name,
size_t sz0, sz1, sz2;
uint64_t tp_size;
uint64_t res_ns, nsec_per_scan;
- uint32_t flags = ODP_SHM_SW_ONLY;
+ uint32_t flags = 0;
if (odp_global_ro.shm_single_va)
flags |= ODP_SHM_SINGLE_VA;
@@ -376,7 +375,7 @@ static odp_timer_pool_t timer_pool_new(const char *name,
#else
odp_atomic_init_u64(&tp->tick_buf[i].exp_tck, TMO_UNUSED);
#endif
- tp->tick_buf[i].tmo_buf = ODP_BUFFER_INVALID;
+ tp->tick_buf[i].tmo_event = ODP_EVENT_INVALID;
}
tp->tp_idx = tp_idx;
odp_spinlock_init(&tp->lock);
@@ -517,16 +516,15 @@ static inline odp_timer_t timer_alloc(timer_pool_t *tp, odp_queue_t queue, const
return hdl;
}
-static odp_buffer_t timer_set_unused(timer_pool_t *tp,
- uint32_t idx);
+static odp_event_t timer_set_unused(timer_pool_t *tp, uint32_t idx);
-static inline odp_buffer_t timer_free(timer_pool_t *tp, uint32_t idx)
+static inline odp_event_t timer_free(timer_pool_t *tp, uint32_t idx)
{
_odp_timer_t *tim = &tp->timers[idx];
/* Free the timer by setting timer state to unused and
- * grab any timeout buffer */
- odp_buffer_t old_buf = timer_set_unused(tp, idx);
+ * grab any timeout event */
+ odp_event_t old_event = timer_set_unused(tp, idx);
/* Remove timer from queue */
_odp_queue_fn->timer_rem(tim->queue);
@@ -542,7 +540,7 @@ static inline odp_buffer_t timer_free(timer_pool_t *tp, uint32_t idx)
tp->num_alloc--;
odp_spinlock_unlock(&tp->lock);
- return old_buf;
+ return old_event;
}
/******************************************************************************
@@ -550,36 +548,36 @@ static inline odp_buffer_t timer_free(timer_pool_t *tp, uint32_t idx)
* expire/reset/cancel timer
*****************************************************************************/
-static bool timer_reset(uint32_t idx, uint64_t abs_tck, odp_buffer_t *tmo_buf,
+static bool timer_reset(uint32_t idx, uint64_t abs_tck, odp_event_t *tmo_event,
timer_pool_t *tp)
{
bool success = true;
tick_buf_t *tb = &tp->tick_buf[idx];
- if (tmo_buf == NULL || *tmo_buf == ODP_BUFFER_INVALID) {
+ if (tmo_event == NULL || *tmo_event == ODP_EVENT_INVALID) {
#ifdef ODP_ATOMIC_U128 /* Target supports 128-bit atomic operations */
tick_buf_t new, old;
- /* Init all bits, also when tmo_buf is less than 64 bits */
+ /* Init all bits, also when tmo_event is less than 64 bits */
new.tmo_u64 = 0;
old.tmo_u64 = 0;
do {
/* Relaxed and non-atomic read of current values */
old.exp_tck.v = tb->exp_tck.v;
- old.tmo_buf = tb->tmo_buf;
+ old.tmo_event = tb->tmo_event;
- /* Check if there actually is a timeout buffer
+ /* Check if there actually is a timeout event
* present */
- if (old.tmo_buf == ODP_BUFFER_INVALID) {
+ if (old.tmo_event == ODP_EVENT_INVALID) {
/* Cannot reset a timer with neither old nor
- * new timeout buffer */
+ * new timeout event */
success = false;
break;
}
/* Set up new values */
new.exp_tck.v = abs_tck;
- new.tmo_buf = old.tmo_buf;
+ new.tmo_event = old.tmo_event;
/* Atomic CAS will fail if we experienced torn reads,
* retry update sequence until CAS succeeds */
@@ -598,7 +596,7 @@ static bool timer_reset(uint32_t idx, uint64_t abs_tck, odp_buffer_t *tmo_buf,
if ((old & TMO_INACTIVE) != 0) {
/* Timer was inactive (cancelled or expired),
- * we can't reset a timer without a timeout buffer.
+ * we can't reset a timer without a timeout event.
* Attempt to restore inactive state, we don't
* want this timer to continue as active without
* timeout as this will trigger unnecessary and
@@ -617,13 +615,13 @@ static bool timer_reset(uint32_t idx, uint64_t abs_tck, odp_buffer_t *tmo_buf,
while (_odp_atomic_flag_load(IDX2LOCK(idx)))
odp_cpu_pause();
- /* Only if there is a timeout buffer can be reset the timer */
- if (odp_likely(tb->tmo_buf != ODP_BUFFER_INVALID)) {
+ /* Only if there is a timeout event can the timer be reset */
+ if (odp_likely(tb->tmo_event != ODP_EVENT_INVALID)) {
/* Write the new expiration tick */
tb->exp_tck.v = abs_tck;
} else {
/* Cannot reset a timer with neither old nor new
- * timeout buffer */
+ * timeout event */
success = false;
}
@@ -631,35 +629,34 @@ static bool timer_reset(uint32_t idx, uint64_t abs_tck, odp_buffer_t *tmo_buf,
_odp_atomic_flag_clear(IDX2LOCK(idx));
#endif
} else {
- /* We have a new timeout buffer which replaces any old one */
+ /* We have a new timeout event which replaces any old one */
/* Fill in some (constant) header fields for timeout events */
- if (odp_event_type(odp_buffer_to_event(*tmo_buf)) ==
- ODP_EVENT_TIMEOUT) {
- /* Convert from buffer to timeout hdr */
+ if (odp_event_type(*tmo_event) == ODP_EVENT_TIMEOUT) {
+ /* Convert from event to timeout hdr */
odp_timeout_hdr_t *tmo_hdr =
- timeout_hdr_from_buf(*tmo_buf);
+ timeout_hdr_from_event(*tmo_event);
tmo_hdr->timer = tp_idx_to_handle(tp, idx);
tmo_hdr->user_ptr = tp->timers[idx].user_ptr;
/* expiration field filled in when timer expires */
}
- /* Else ignore buffers of other types */
- odp_buffer_t old_buf = ODP_BUFFER_INVALID;
+ /* Else ignore events of other types */
+ odp_event_t old_event = ODP_EVENT_INVALID;
#ifdef ODP_ATOMIC_U128
tick_buf_t new, old;
- /* Init all bits, also when tmo_buf is less than 64 bits */
+ /* Init all bits, also when tmo_event is less than 64 bits */
new.tmo_u64 = 0;
new.exp_tck.v = abs_tck;
- new.tmo_buf = *tmo_buf;
+ new.tmo_event = *tmo_event;
- /* We are releasing the new timeout buffer to some other
+ /* We are releasing the new timeout event to some other
* thread */
_odp_atomic_u128_xchg_mm((_odp_atomic_u128_t *)tb,
(_uint128_t *)&new,
(_uint128_t *)&old,
_ODP_MEMMODEL_ACQ_RLS);
- old_buf = old.tmo_buf;
+ old_event = old.tmo_event;
#else
/* Take a related lock */
while (_odp_atomic_flag_tas(IDX2LOCK(idx)))
@@ -667,9 +664,9 @@ static bool timer_reset(uint32_t idx, uint64_t abs_tck, odp_buffer_t *tmo_buf,
while (_odp_atomic_flag_load(IDX2LOCK(idx)))
odp_cpu_pause();
- /* Swap in new buffer, save any old buffer */
- old_buf = tb->tmo_buf;
- tb->tmo_buf = *tmo_buf;
+ /* Swap in new event, save any old event */
+ old_event = tb->tmo_event;
+ tb->tmo_event = *tmo_event;
/* Write the new expiration tick */
tb->exp_tck.v = abs_tck;
@@ -677,33 +674,32 @@ static bool timer_reset(uint32_t idx, uint64_t abs_tck, odp_buffer_t *tmo_buf,
/* Release the lock */
_odp_atomic_flag_clear(IDX2LOCK(idx));
#endif
- /* Return old timeout buffer */
- *tmo_buf = old_buf;
+ /* Return old timeout event */
+ *tmo_event = old_event;
}
return success;
}
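The lock-free path above follows a read/modify/CAS retry loop; stripped of the error handling, the pattern is (names as in the hunk):

    do {
            /* Relaxed, possibly torn read of the current 128-bit pair */
            old.exp_tck.v = tb->exp_tck.v;
            old.tmo_event = tb->tmo_event;
            /* Build the replacement pair */
            new.exp_tck.v = abs_tck;
            new.tmo_event = old.tmo_event;
            /* CAS fails on a torn read or concurrent update; then retry */
    } while (!_odp_atomic_u128_cmp_xchg_mm((_odp_atomic_u128_t *)tb,
                                           (_uint128_t *)&old,
                                           (_uint128_t *)&new,
                                           _ODP_MEMMODEL_RLS,
                                           _ODP_MEMMODEL_RLX));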
-static odp_buffer_t timer_set_unused(timer_pool_t *tp,
- uint32_t idx)
+static odp_event_t timer_set_unused(timer_pool_t *tp, uint32_t idx)
{
tick_buf_t *tb = &tp->tick_buf[idx];
- odp_buffer_t old_buf;
+ odp_event_t old_event;
#ifdef ODP_ATOMIC_U128
tick_buf_t new, old;
- /* Init all bits, also when tmo_buf is less than 64 bits */
+ /* Init all bits, also when tmo_event is less than 64 bits */
new.tmo_u64 = 0;
/* Update the timer state (e.g. cancel the current timeout) */
new.exp_tck.v = TMO_UNUSED;
- /* Swap out the old buffer */
- new.tmo_buf = ODP_BUFFER_INVALID;
+ /* Swap out the old event */
+ new.tmo_event = ODP_EVENT_INVALID;
_odp_atomic_u128_xchg_mm((_odp_atomic_u128_t *)tb,
(_uint128_t *)&new, (_uint128_t *)&old,
_ODP_MEMMODEL_RLX);
- old_buf = old.tmo_buf;
+ old_event = old.tmo_event;
#else
/* Take a related lock */
while (_odp_atomic_flag_tas(IDX2LOCK(idx)))
@@ -714,44 +710,43 @@ static odp_buffer_t timer_set_unused(timer_pool_t *tp,
/* Update the timer state (e.g. cancel the current timeout) */
tb->exp_tck.v = TMO_UNUSED;
- /* Swap out the old buffer */
- old_buf = tb->tmo_buf;
- tb->tmo_buf = ODP_BUFFER_INVALID;
+ /* Swap out the old event */
+ old_event = tb->tmo_event;
+ tb->tmo_event = ODP_EVENT_INVALID;
/* Release the lock */
_odp_atomic_flag_clear(IDX2LOCK(idx));
#endif
- /* Return the old buffer */
- return old_buf;
+ /* Return the old event */
+ return old_event;
}
-static odp_buffer_t timer_cancel(timer_pool_t *tp,
- uint32_t idx)
+static odp_event_t timer_cancel(timer_pool_t *tp, uint32_t idx)
{
tick_buf_t *tb = &tp->tick_buf[idx];
- odp_buffer_t old_buf;
+ odp_event_t old_event;
#ifdef ODP_ATOMIC_U128
tick_buf_t new, old;
- /* Init all bits, also when tmo_buf is less than 64 bits */
+ /* Init all bits, also when tmo_event is less than 64 bits */
new.tmo_u64 = 0;
old.tmo_u64 = 0;
do {
/* Relaxed and non-atomic read of current values */
old.exp_tck.v = tb->exp_tck.v;
- old.tmo_buf = tb->tmo_buf;
+ old.tmo_event = tb->tmo_event;
/* Check if it is not expired already */
if (old.exp_tck.v & TMO_INACTIVE) {
- old.tmo_buf = ODP_BUFFER_INVALID;
+ old.tmo_event = ODP_EVENT_INVALID;
break;
}
/* Set up new values */
new.exp_tck.v = TMO_INACTIVE;
- new.tmo_buf = ODP_BUFFER_INVALID;
+ new.tmo_event = ODP_EVENT_INVALID;
/* Atomic CAS will fail if we experienced torn reads,
* retry update sequence until CAS succeeds */
@@ -760,7 +755,7 @@ static odp_buffer_t timer_cancel(timer_pool_t *tp,
(_uint128_t *)&new,
_ODP_MEMMODEL_RLS,
_ODP_MEMMODEL_RLX));
- old_buf = old.tmo_buf;
+ old_event = old.tmo_event;
#else
/* Take a related lock */
while (_odp_atomic_flag_tas(IDX2LOCK(idx)))
@@ -768,56 +763,56 @@ static odp_buffer_t timer_cancel(timer_pool_t *tp,
while (_odp_atomic_flag_load(IDX2LOCK(idx)))
odp_cpu_pause();
- /* Swap in new buffer, save any old buffer */
- old_buf = tb->tmo_buf;
- tb->tmo_buf = ODP_BUFFER_INVALID;
+ /* Swap out the old event, replace with invalid */
+ old_event = tb->tmo_event;
+ tb->tmo_event = ODP_EVENT_INVALID;
/* Write the new expiration tick if it is not cancelled */
if (tb->exp_tck.v & TMO_INACTIVE)
- old_buf = ODP_BUFFER_INVALID;
+ old_event = ODP_EVENT_INVALID;
else
tb->exp_tck.v = TMO_INACTIVE;
/* Release the lock */
_odp_atomic_flag_clear(IDX2LOCK(idx));
#endif
- /* Return the old buffer */
- return old_buf;
+ /* Return the old event */
+ return old_event;
}
static inline void timer_expire(timer_pool_t *tp, uint32_t idx, uint64_t tick)
{
_odp_timer_t *tim = &tp->timers[idx];
tick_buf_t *tb = &tp->tick_buf[idx];
- odp_buffer_t tmo_buf = ODP_BUFFER_INVALID;
+ odp_event_t tmo_event = ODP_EVENT_INVALID;
uint64_t exp_tck;
#ifdef ODP_ATOMIC_U128
/* Atomic re-read for correctness */
exp_tck = odp_atomic_load_u64(&tb->exp_tck);
/* Re-check exp_tck */
if (odp_likely(exp_tck <= tick)) {
- /* Attempt to grab timeout buffer, replace with inactive timer
- * and invalid buffer */
+ /* Attempt to grab timeout event, replace with inactive timer
+ * and invalid event. */
tick_buf_t new, old;
- /* Init all bits, also when tmo_buf is less than 64 bits */
+ /* Init all bits, also when tmo_event is less than 64 bits. */
new.tmo_u64 = 0;
old.tmo_u64 = 0;
old.exp_tck.v = exp_tck;
- old.tmo_buf = tb->tmo_buf;
+ old.tmo_event = tb->tmo_event;
/* Set the inactive/expired bit keeping the expiration tick so
* that we can check against the expiration tick of the timeout
* when it is received */
new.exp_tck.v = exp_tck | TMO_INACTIVE;
- new.tmo_buf = ODP_BUFFER_INVALID;
+ new.tmo_event = ODP_EVENT_INVALID;
int succ = _odp_atomic_u128_cmp_xchg_mm((_odp_atomic_u128_t *)tb,
(_uint128_t *)&old, (_uint128_t *)&new,
_ODP_MEMMODEL_RLS, _ODP_MEMMODEL_RLX);
if (succ)
- tmo_buf = old.tmo_buf;
+ tmo_event = old.tmo_event;
/* Else CAS failed, something changed => skip timer
* this tick, it will be checked again next tick */
}
@@ -831,41 +826,40 @@ static inline void timer_expire(timer_pool_t *tp, uint32_t idx, uint64_t tick)
/* Proper check for timer expired */
exp_tck = tb->exp_tck.v;
if (odp_likely(exp_tck <= tick)) {
- /* Verify that there is a timeout buffer */
- if (odp_likely(tb->tmo_buf != ODP_BUFFER_INVALID)) {
- /* Grab timeout buffer, replace with inactive timer
- * and invalid buffer */
- tmo_buf = tb->tmo_buf;
- tb->tmo_buf = ODP_BUFFER_INVALID;
+ /* Verify that there is a timeout event */
+ if (odp_likely(tb->tmo_event != ODP_EVENT_INVALID)) {
+ /* Grab timeout event, replace with inactive timer
+ * and invalid event. */
+ tmo_event = tb->tmo_event;
+ tb->tmo_event = ODP_EVENT_INVALID;
/* Set the inactive/expired bit keeping the expiration
* tick so that we can check against the expiration
* tick of the timeout when it is received */
tb->exp_tck.v |= TMO_INACTIVE;
}
- /* Else somehow active timer without user buffer */
+ /* Else somehow active timer without user event */
}
/* Else false positive, ignore */
/* Release the lock */
_odp_atomic_flag_clear(IDX2LOCK(idx));
#endif
- if (odp_likely(tmo_buf != ODP_BUFFER_INVALID)) {
+ if (odp_likely(tmo_event != ODP_EVENT_INVALID)) {
/* Fill in expiration tick for timeout events */
- if (odp_event_type(odp_buffer_to_event(tmo_buf)) ==
- ODP_EVENT_TIMEOUT) {
- /* Convert from buffer to timeout hdr */
+ if (odp_event_type(tmo_event) == ODP_EVENT_TIMEOUT) {
+ /* Convert from event to timeout hdr */
odp_timeout_hdr_t *tmo_hdr =
- timeout_hdr_from_buf(tmo_buf);
+ timeout_hdr_from_event(tmo_event);
tmo_hdr->expiration = exp_tck;
/* timer and user_ptr fields filled in when timer
* was set */
}
/* Else ignore events of other types */
/* Post the timeout to the destination queue */
- int rc = odp_queue_enq(tim->queue,
- odp_buffer_to_event(tmo_buf));
+ int rc = odp_queue_enq(tim->queue, tmo_event);
+
if (odp_unlikely(rc != 0)) {
- odp_buffer_free(tmo_buf);
- ODP_ABORT("Failed to enqueue timeout buffer (%d)\n",
+ _odp_event_free(tmo_event);
+ ODP_ABORT("Failed to enqueue timeout event (%d)\n",
rc);
}
}
@@ -1270,6 +1264,12 @@ int odp_timer_res_capability(odp_timer_clk_src_t clk_src,
return 0;
}
+void odp_timer_pool_param_init(odp_timer_pool_param_t *param)
+{
+ memset(param, 0, sizeof(odp_timer_pool_param_t));
+ param->clk_src = ODP_CLOCK_DEFAULT;
+}
+
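With the initializer in place, applications no longer need to memset the parameter struct themselves; a minimal usage sketch (field values are example assumptions):

    odp_timer_pool_param_t tp_param;

    odp_timer_pool_param_init(&tp_param); /* clk_src = ODP_CLOCK_DEFAULT */
    tp_param.res_ns     = ODP_TIME_MSEC_IN_NS; /* 1 ms resolution */
    tp_param.min_tmo    = ODP_TIME_MSEC_IN_NS;
    tp_param.max_tmo    = ODP_TIME_SEC_IN_NS;
    tp_param.num_timers = 1000;

    odp_timer_pool_t tp = odp_timer_pool_create("example_tp", &tp_param);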
odp_timer_pool_t odp_timer_pool_create(const char *name,
const odp_timer_pool_param_t *param)
{
@@ -1384,9 +1384,8 @@ odp_event_t odp_timer_free(odp_timer_t hdl)
{
timer_pool_t *tp = handle_to_tp(hdl);
uint32_t idx = handle_to_idx(hdl, tp);
- odp_buffer_t old_buf = timer_free(tp, idx);
- return odp_buffer_to_event(old_buf);
+ return timer_free(tp, idx);
}
int odp_timer_set_abs(odp_timer_t hdl,
@@ -1401,7 +1400,7 @@ int odp_timer_set_abs(odp_timer_t hdl,
return ODP_TIMER_TOO_NEAR;
if (odp_unlikely(abs_tck > cur_tick + tp->max_rel_tck))
return ODP_TIMER_TOO_FAR;
- if (timer_reset(idx, abs_tck, (odp_buffer_t *)tmo_ev, tp))
+ if (timer_reset(idx, abs_tck, tmo_ev, tp))
return ODP_TIMER_SUCCESS;
else
return ODP_TIMER_FAIL;
@@ -1420,7 +1419,7 @@ int odp_timer_set_rel(odp_timer_t hdl,
return ODP_TIMER_TOO_NEAR;
if (odp_unlikely(rel_tck > tp->max_rel_tck))
return ODP_TIMER_TOO_FAR;
- if (timer_reset(idx, abs_tck, (odp_buffer_t *)tmo_ev, tp))
+ if (timer_reset(idx, abs_tck, tmo_ev, tp))
return ODP_TIMER_SUCCESS;
else
return ODP_TIMER_FAIL;
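With the casts gone, tmo_ev passes straight through as an event pointer; a typical caller looks like this sketch (on success the implementation keeps the event until expiration or cancel):

    odp_event_t ev = odp_timeout_to_event(tmo);
    int rc = odp_timer_set_rel(timer, rel_tck, &ev);

    if (rc != ODP_TIMER_SUCCESS) {
            /* ODP_TIMER_TOO_NEAR/TOO_FAR/FAIL: the event remains owned
             * by the caller and must be freed or the set retried */
    }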
@@ -1431,10 +1430,10 @@ int odp_timer_cancel(odp_timer_t hdl, odp_event_t *tmo_ev)
timer_pool_t *tp = handle_to_tp(hdl);
uint32_t idx = handle_to_idx(hdl, tp);
/* Set the expiration tick of the timer to TMO_INACTIVE */
- odp_buffer_t old_buf = timer_cancel(tp, idx);
+ odp_event_t old_event = timer_cancel(tp, idx);
- if (old_buf != ODP_BUFFER_INVALID) {
- *tmo_ev = odp_buffer_to_event(old_buf);
+ if (old_event != ODP_EVENT_INVALID) {
+ *tmo_ev = old_event;
return 0; /* Active timer cancelled, timeout returned */
} else {
return -1; /* Timer already expired, no timeout returned */
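The cancel contract is unchanged: return value 0 hands the timeout event back to the caller, -1 means the timer already expired and the event arrives through the destination queue. Sketch:

    odp_event_t ev;

    if (odp_timer_cancel(timer, &ev) == 0)
            odp_event_free(ev); /* Cancelled in time, caller owns event */
    /* else too late: consume the timeout from the queue as usual */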
@@ -1498,9 +1497,8 @@ void *odp_timeout_user_ptr(odp_timeout_t tmo)
odp_timeout_t odp_timeout_alloc(odp_pool_t pool_hdl)
{
- odp_timeout_t tmo;
+ odp_event_t event;
pool_t *pool;
- int ret;
ODP_ASSERT(pool_hdl != ODP_POOL_INVALID);
@@ -1508,17 +1506,16 @@ odp_timeout_t odp_timeout_alloc(odp_pool_t pool_hdl)
ODP_ASSERT(pool->type == ODP_POOL_TIMEOUT);
- ret = _odp_buffer_alloc_multi(pool, (odp_buffer_hdr_t **)&tmo, 1);
-
- if (odp_likely(ret == 1))
- return tmo;
+ event = _odp_event_alloc(pool);
+ if (odp_unlikely(event == ODP_EVENT_INVALID))
+ return ODP_TIMEOUT_INVALID;
- return ODP_TIMEOUT_INVALID;
+ return odp_timeout_from_event(event);
}
void odp_timeout_free(odp_timeout_t tmo)
{
- _odp_buffer_free_multi((odp_buffer_hdr_t **)&tmo, 1);
+ _odp_event_free(odp_timeout_to_event(tmo));
}
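Allocation and free now route through the generic event layer, while application-visible behavior is unchanged; a brief sketch (tmo_pool assumed to be of type ODP_POOL_TIMEOUT):

    odp_timeout_t tmo = odp_timeout_alloc(tmo_pool);

    if (tmo != ODP_TIMEOUT_INVALID) {
            /* ... arm a timer with odp_timeout_to_event(tmo) ... */
            odp_timeout_free(tmo);
    }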
void odp_timer_pool_print(odp_timer_pool_t timer_pool)
diff --git a/platform/linux-generic/odp_traffic_mngr.c b/platform/linux-generic/odp_traffic_mngr.c
index e741fd80c..e9e6822a1 100644
--- a/platform/linux-generic/odp_traffic_mngr.c
+++ b/platform/linux-generic/odp_traffic_mngr.c
@@ -33,6 +33,7 @@
#include <odp_errno_define.h>
#include <odp_global_data.h>
#include <odp_schedule_if.h>
+#include <odp_event_internal.h>
/* Local vars */
static const
@@ -166,19 +167,19 @@ static inline tm_node_obj_t *tm_nobj_from_index(uint32_t node_id)
return &tm_glb->node_obj.obj[node_id];
}
-static int queue_tm_reenq(odp_queue_t queue, odp_buffer_hdr_t *buf_hdr)
+static int queue_tm_reenq(odp_queue_t queue, _odp_event_hdr_t *event_hdr)
{
odp_tm_queue_t tm_queue = MAKE_ODP_TM_QUEUE(odp_queue_context(queue));
- odp_packet_t pkt = packet_from_buf_hdr(buf_hdr);
+ odp_packet_t pkt = packet_from_event_hdr(event_hdr);
return odp_tm_enq(tm_queue, pkt);
}
-static int queue_tm_reenq_multi(odp_queue_t queue, odp_buffer_hdr_t *buf[],
+static int queue_tm_reenq_multi(odp_queue_t queue, _odp_event_hdr_t *event[],
int num)
{
(void)queue;
- (void)buf;
+ (void)event;
(void)num;
ODP_ABORT("Invalid call to queue_tm_reenq_multi()\n");
return 0;
diff --git a/platform/linux-generic/pktio/dpdk.c b/platform/linux-generic/pktio/dpdk.c
index 36c1ec06e..a0407ca62 100644
--- a/platform/linux-generic/pktio/dpdk.c
+++ b/platform/linux-generic/pktio/dpdk.c
@@ -305,7 +305,7 @@ static void pktmbuf_init(struct rte_mempool *mp, void *opaque_arg ODP_UNUSED,
void *buf_addr;
pkt_hdr = pkt_hdr_from_mbuf(m);
- buf_addr = pkt_hdr->buf_hdr.base_data - RTE_PKTMBUF_HEADROOM;
+ buf_addr = pkt_hdr->event_hdr.base_data - RTE_PKTMBUF_HEADROOM;
priv_size = rte_pktmbuf_priv_size(mp);
mbuf_size = sizeof(struct rte_mbuf);
diff --git a/platform/linux-generic/pktio/ipc.c b/platform/linux-generic/pktio/ipc.c
index b89252303..6a78415c5 100644
--- a/platform/linux-generic/pktio/ipc.c
+++ b/platform/linux-generic/pktio/ipc.c
@@ -787,7 +787,7 @@ static int ipc_pktio_send_lockless(pktio_entry_t *pktio_entry,
pool_t *pool;
pkt_hdr = packet_hdr(pkt);
- pool = pkt_hdr->buf_hdr.pool_ptr;
+ pool = pkt_hdr->event_hdr.pool_ptr;
if (pool->pool_idx != ipc_pool->pool_idx ||
odp_packet_has_ref(pkt)) {
diff --git a/platform/linux-generic/pktio/loop.c b/platform/linux-generic/pktio/loop.c
index 666e7ce90..ae6403fe6 100644
--- a/platform/linux-generic/pktio/loop.c
+++ b/platform/linux-generic/pktio/loop.c
@@ -18,6 +18,7 @@
#include <odp_queue_if.h>
#include <odp/api/plat/queue_inlines.h>
#include <odp_global_data.h>
+#include <odp_event_internal.h>
#include <protocols/eth.h>
#include <protocols/ip.h>
@@ -97,7 +98,7 @@ static int loopback_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
odp_packet_t pkts[], int num)
{
int nbr, i;
- odp_buffer_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
+ _odp_event_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
odp_queue_t queue;
odp_packet_hdr_t *pkt_hdr;
odp_packet_t pkt;
@@ -123,7 +124,7 @@ static int loopback_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
for (i = 0; i < nbr; i++) {
uint32_t pkt_len;
- pkt = packet_from_buf_hdr(hdr_tbl[i]);
+ pkt = packet_from_event_hdr(hdr_tbl[i]);
pkt_len = odp_packet_len(pkt);
pkt_hdr = packet_hdr(pkt);
@@ -297,7 +298,7 @@ static int loopback_send(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
const odp_packet_t pkt_tbl[], int num)
{
pkt_loop_t *pkt_loop = pkt_priv(pktio_entry);
- odp_buffer_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
+ _odp_event_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
odp_queue_t queue;
int i;
int ret;
@@ -323,7 +324,7 @@ static int loopback_send(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
}
break;
}
- hdr_tbl[i] = packet_to_buf_hdr(pkt_tbl[i]);
+ hdr_tbl[i] = packet_to_event_hdr(pkt_tbl[i]);
bytes += pkt_len;
/* Store cumulative byte counts to update 'stats.out_octets'
* correctly in case enq_multi() fails to enqueue all packets.
diff --git a/platform/linux-generic/test/inline-timer.conf b/platform/linux-generic/test/inline-timer.conf
index 90709d86d..38da87617 100644
--- a/platform/linux-generic/test/inline-timer.conf
+++ b/platform/linux-generic/test/inline-timer.conf
@@ -1,6 +1,6 @@
# Mandatory fields
odp_implementation = "linux-generic"
-config_file_version = "0.1.18"
+config_file_version = "0.1.19"
timer: {
# Enable inline timer implementation
diff --git a/platform/linux-generic/test/packet_align.conf b/platform/linux-generic/test/packet_align.conf
index f9b39abf6..e3e4251cd 100644
--- a/platform/linux-generic/test/packet_align.conf
+++ b/platform/linux-generic/test/packet_align.conf
@@ -1,6 +1,6 @@
# Mandatory fields
odp_implementation = "linux-generic"
-config_file_version = "0.1.18"
+config_file_version = "0.1.19"
pool: {
pkt: {
diff --git a/platform/linux-generic/test/process-mode.conf b/platform/linux-generic/test/process-mode.conf
index a4b5d3f39..7453ffa36 100644
--- a/platform/linux-generic/test/process-mode.conf
+++ b/platform/linux-generic/test/process-mode.conf
@@ -1,6 +1,6 @@
# Mandatory fields
odp_implementation = "linux-generic"
-config_file_version = "0.1.18"
+config_file_version = "0.1.19"
# Shared memory options
shm: {
diff --git a/platform/linux-generic/test/sched-basic.conf b/platform/linux-generic/test/sched-basic.conf
index 4ef0ab044..32abdb5b3 100644
--- a/platform/linux-generic/test/sched-basic.conf
+++ b/platform/linux-generic/test/sched-basic.conf
@@ -1,6 +1,6 @@
# Mandatory fields
odp_implementation = "linux-generic"
-config_file_version = "0.1.18"
+config_file_version = "0.1.19"
# Test scheduler with an odd spread value and without dynamic load balance
sched_basic: {
diff --git a/test/m4/configure.m4 b/test/m4/configure.m4
index 96a48832c..4335476ba 100644
--- a/test/m4/configure.m4
+++ b/test/m4/configure.m4
@@ -25,6 +25,7 @@ AC_CONFIG_FILES([test/common/Makefile
test/validation/api/comp/Makefile
test/validation/api/cpumask/Makefile
test/validation/api/crypto/Makefile
+ test/validation/api/dma/Makefile
test/validation/api/errno/Makefile
test/validation/api/event/Makefile
test/validation/api/hash/Makefile
diff --git a/test/performance/.gitignore b/test/performance/.gitignore
index 0d70a025b..7952530a3 100644
--- a/test/performance/.gitignore
+++ b/test/performance/.gitignore
@@ -8,12 +8,14 @@ odp_crc
odp_crypto
odp_ipsec
odp_l2fwd
+odp_lock_perf
odp_mem_perf
odp_packet_gen
odp_pktio_ordered
odp_pktio_perf
odp_pool_perf
odp_queue_perf
+odp_random
odp_sched_latency
odp_sched_perf
odp_sched_pktio
diff --git a/test/performance/Makefile.am b/test/performance/Makefile.am
index 15c6f46e0..b6dbbd82e 100644
--- a/test/performance/Makefile.am
+++ b/test/performance/Makefile.am
@@ -8,16 +8,18 @@ EXECUTABLES = odp_atomic_perf \
odp_crc \
odp_crypto \
odp_ipsec \
+ odp_lock_perf \
odp_mem_perf \
odp_pktio_perf \
odp_pool_perf \
odp_queue_perf \
- odp_sched_perf
+ odp_random
COMPILE_ONLY = odp_l2fwd \
odp_packet_gen \
odp_pktio_ordered \
odp_sched_latency \
+ odp_sched_perf \
odp_sched_pktio \
odp_scheduling \
odp_timer_perf
@@ -25,6 +27,7 @@ COMPILE_ONLY = odp_l2fwd \
TESTSCRIPTS = odp_l2fwd_run.sh \
odp_packet_gen_run.sh \
odp_sched_latency_run.sh \
+ odp_sched_perf_run.sh \
odp_sched_pktio_run.sh \
odp_scheduling_run.sh \
odp_timer_perf_run.sh
@@ -47,6 +50,7 @@ odp_cpu_bench_SOURCES = odp_cpu_bench.c
odp_crc_SOURCES = odp_crc.c
odp_crypto_SOURCES = odp_crypto.c
odp_ipsec_SOURCES = odp_ipsec.c
+odp_lock_perf_SOURCES = odp_lock_perf.c
odp_mem_perf_SOURCES = odp_mem_perf.c
odp_packet_gen_SOURCES = odp_packet_gen.c
odp_pktio_ordered_SOURCES = odp_pktio_ordered.c dummy_crc.h
@@ -56,6 +60,7 @@ odp_scheduling_SOURCES = odp_scheduling.c
odp_pktio_perf_SOURCES = odp_pktio_perf.c
odp_pool_perf_SOURCES = odp_pool_perf.c
odp_queue_perf_SOURCES = odp_queue_perf.c
+odp_random_SOURCES = odp_random.c
odp_sched_perf_SOURCES = odp_sched_perf.c
odp_timer_perf_SOURCES = odp_timer_perf.c
diff --git a/test/performance/odp_l2fwd.c b/test/performance/odp_l2fwd.c
index 6aa98ce96..85964d3b1 100644
--- a/test/performance/odp_l2fwd.c
+++ b/test/performance/odp_l2fwd.c
@@ -325,6 +325,70 @@ static inline int copy_packets(odp_packet_t *pkt_tbl, int pkts)
}
/*
+ * Return number of packets remaining in the pkt_tbl
+ */
+static int process_extra_features(odp_packet_t *pkt_tbl, int pkts, stats_t *stats)
+{
+ if (odp_unlikely(gbl_args->appl.extra_feat)) {
+ if (gbl_args->appl.packet_copy) {
+ int fails;
+
+ fails = copy_packets(pkt_tbl, pkts);
+ stats->s.copy_fails += fails;
+ }
+
+ if (gbl_args->appl.chksum)
+ chksum_insert(pkt_tbl, pkts);
+
+ if (gbl_args->appl.error_check) {
+ int rx_drops;
+
+ /* Drop packets with errors */
+ rx_drops = drop_err_pkts(pkt_tbl, pkts);
+
+ if (odp_unlikely(rx_drops)) {
+ stats->s.rx_drops += rx_drops;
+ if (pkts == rx_drops)
+ return 0;
+
+ pkts -= rx_drops;
+ }
+ }
+ }
+ return pkts;
+}
+
+static void send_packets(odp_packet_t *pkt_tbl,
+ int pkts,
+ int use_event_queue,
+ odp_queue_t tx_queue,
+ odp_pktout_queue_t pktout_queue,
+ stats_t *stats)
+{
+ int sent;
+ unsigned int tx_drops;
+ int i;
+
+ if (odp_unlikely(use_event_queue))
+ sent = event_queue_send(tx_queue, pkt_tbl, pkts);
+ else
+ sent = odp_pktout_send(pktout_queue, pkt_tbl, pkts);
+
+ sent = odp_unlikely(sent < 0) ? 0 : sent;
+ tx_drops = pkts - sent;
+
+ if (odp_unlikely(tx_drops)) {
+ stats->s.tx_drops += tx_drops;
+
+ /* Drop rejected packets */
+ for (i = sent; i < pkts; i++)
+ odp_packet_free(pkt_tbl[i]);
+ }
+
+ stats->s.packets += pkts;
+}
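Each of the four worker loop variants below now reduces to the same sequence, roughly:

    pkts = process_extra_features(pkt_tbl, pkts, stats);
    if (odp_unlikely(pkts == 0))
            continue;

    fill_eth_addrs(pkt_tbl, pkts, dst_idx);
    send_packets(pkt_tbl, pkts, use_event_queue,
                 tx_queue[dst_idx], pktout[dst_idx], stats);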
+
+/*
* Packet IO worker thread using scheduled queues and vector mode.
*
* arg thread arguments of type 'thread_args_t *'
@@ -383,52 +447,35 @@ static int run_worker_sched_mode_vector(void *arg)
/* Loop packets */
while (!odp_atomic_load_u32(&gbl_args->exit_threads)) {
odp_event_t ev_tbl[MAX_PKT_BURST];
- int pktvs;
+ int events;
- pktvs = odp_schedule_multi_no_wait(NULL, ev_tbl, max_burst);
+ events = odp_schedule_multi_no_wait(NULL, ev_tbl, max_burst);
- if (pktvs <= 0)
+ if (events <= 0)
continue;
- for (i = 0; i < pktvs; i++) {
- odp_packet_vector_t pkt_vec;
+ for (i = 0; i < events; i++) {
+ odp_packet_vector_t pkt_vec = ODP_PACKET_VECTOR_INVALID;
odp_packet_t *pkt_tbl;
- unsigned int tx_drops;
+ odp_packet_t pkt;
int src_idx, dst_idx;
- int pkts, sent;
-
- ODPH_ASSERT(odp_event_type(ev_tbl[i]) == ODP_EVENT_PACKET_VECTOR);
- pkt_vec = odp_packet_vector_from_event(ev_tbl[i]);
- pkts = odp_packet_vector_tbl(pkt_vec, &pkt_tbl);
-
- if (odp_unlikely(gbl_args->appl.extra_feat)) {
- if (gbl_args->appl.packet_copy) {
- int fails;
-
- fails = copy_packets(pkt_tbl, pkts);
- stats->s.copy_fails += fails;
- }
-
- if (gbl_args->appl.chksum)
- chksum_insert(pkt_tbl, pkts);
+ int pkts;
- if (gbl_args->appl.error_check) {
- int rx_drops;
-
- /* Drop packets with errors */
- rx_drops = drop_err_pkts(pkt_tbl, pkts);
-
- if (odp_unlikely(rx_drops)) {
- stats->s.rx_drops += rx_drops;
- if (pkts == rx_drops) {
- odp_packet_vector_free(pkt_vec);
- continue;
- }
+ if (odp_event_type(ev_tbl[i]) == ODP_EVENT_PACKET) {
+ pkt = odp_packet_from_event(ev_tbl[i]);
+ pkt_tbl = &pkt;
+ pkts = 1;
+ } else {
+ ODPH_ASSERT(odp_event_type(ev_tbl[i]) == ODP_EVENT_PACKET_VECTOR);
+ pkt_vec = odp_packet_vector_from_event(ev_tbl[i]);
+ pkts = odp_packet_vector_tbl(pkt_vec, &pkt_tbl);
+ }
- pkts -= rx_drops;
- odp_packet_vector_size_set(pkt_vec, pkts);
- }
- }
+ pkts = process_extra_features(pkt_tbl, pkts, stats);
+ if (odp_unlikely(pkts == 0)) {
+ if (pkt_vec != ODP_PACKET_VECTOR_INVALID)
+ odp_packet_vector_free(pkt_vec);
+ continue;
}
/* packets from the same queue are from the same interface */
@@ -437,31 +484,14 @@ static int run_worker_sched_mode_vector(void *arg)
dst_idx = gbl_args->dst_port_from_idx[src_idx];
fill_eth_addrs(pkt_tbl, pkts, dst_idx);
- if (odp_unlikely(use_event_queue)) {
- odp_event_t event = odp_packet_vector_to_event(pkt_vec);
-
- sent = odp_queue_enq(tx_queue[dst_idx], event);
- sent = odp_likely(sent == 0) ? pkts : 0;
- } else {
- sent = odp_pktout_send(pktout[dst_idx], pkt_tbl, pkts);
- sent = odp_unlikely(sent < 0) ? 0 : sent;
- }
-
- tx_drops = pkts - sent;
- if (odp_unlikely(tx_drops)) {
- int j;
+ send_packets(pkt_tbl, pkts,
+ use_event_queue,
+ tx_queue[dst_idx],
+ pktout[dst_idx],
+ stats);
- stats->s.tx_drops += tx_drops;
- /* Drop rejected packets */
- for (j = sent; j < pkts; j++)
- odp_packet_free(pkt_tbl[j]);
- }
-
- /* Free packet vector if sending failed or in direct mode. */
- if (tx_drops || !use_event_queue)
+ if (pkt_vec != ODP_PACKET_VECTOR_INVALID)
odp_packet_vector_free(pkt_vec);
-
- stats->s.packets += pkts;
}
}
@@ -567,8 +597,6 @@ static int run_worker_sched_mode(void *arg)
while (!odp_atomic_load_u32(&gbl_args->exit_threads)) {
odp_event_t ev_tbl[MAX_PKT_BURST];
odp_packet_t pkt_tbl[MAX_PKT_BURST];
- int sent;
- unsigned tx_drops;
int src_idx;
pkts = odp_schedule_multi_no_wait(NULL, ev_tbl, max_burst);
@@ -578,32 +606,9 @@ static int run_worker_sched_mode(void *arg)
odp_packet_from_event_multi(pkt_tbl, ev_tbl, pkts);
- if (odp_unlikely(gbl_args->appl.extra_feat)) {
- if (gbl_args->appl.packet_copy) {
- int fails;
-
- fails = copy_packets(pkt_tbl, pkts);
- stats->s.copy_fails += fails;
- }
-
- if (gbl_args->appl.chksum)
- chksum_insert(pkt_tbl, pkts);
-
- if (gbl_args->appl.error_check) {
- int rx_drops;
-
- /* Drop packets with errors */
- rx_drops = drop_err_pkts(pkt_tbl, pkts);
-
- if (odp_unlikely(rx_drops)) {
- stats->s.rx_drops += rx_drops;
- if (pkts == rx_drops)
- continue;
-
- pkts -= rx_drops;
- }
- }
- }
+ pkts = process_extra_features(pkt_tbl, pkts, stats);
+ if (odp_unlikely(pkts == 0))
+ continue;
/* packets from the same queue are from the same interface */
src_idx = odp_packet_input_index(pkt_tbl[0]);
@@ -611,24 +616,11 @@ static int run_worker_sched_mode(void *arg)
dst_idx = gbl_args->dst_port_from_idx[src_idx];
fill_eth_addrs(pkt_tbl, pkts, dst_idx);
- if (odp_unlikely(use_event_queue))
- sent = event_queue_send(tx_queue[dst_idx], pkt_tbl,
- pkts);
- else
- sent = odp_pktout_send(pktout[dst_idx], pkt_tbl, pkts);
-
- sent = odp_unlikely(sent < 0) ? 0 : sent;
- tx_drops = pkts - sent;
-
- if (odp_unlikely(tx_drops)) {
- stats->s.tx_drops += tx_drops;
-
- /* Drop rejected packets */
- for (i = sent; i < pkts; i++)
- odp_packet_free(pkt_tbl[i]);
- }
-
- stats->s.packets += pkts;
+ send_packets(pkt_tbl, pkts,
+ use_event_queue,
+ tx_queue[dst_idx],
+ pktout[dst_idx],
+ stats);
}
/*
@@ -707,8 +699,6 @@ static int run_worker_plain_queue_mode(void *arg)
/* Loop packets */
while (!odp_atomic_load_u32(&gbl_args->exit_threads)) {
- int sent;
- unsigned tx_drops;
odp_event_t event[MAX_PKT_BURST];
if (num_pktio > 1) {
@@ -729,54 +719,17 @@ static int run_worker_plain_queue_mode(void *arg)
odp_packet_from_event_multi(pkt_tbl, event, pkts);
- if (odp_unlikely(gbl_args->appl.extra_feat)) {
- if (gbl_args->appl.packet_copy) {
- int fails;
-
- fails = copy_packets(pkt_tbl, pkts);
- stats->s.copy_fails += fails;
- }
-
- if (gbl_args->appl.chksum)
- chksum_insert(pkt_tbl, pkts);
-
- if (gbl_args->appl.error_check) {
- int rx_drops;
-
- /* Drop packets with errors */
- rx_drops = drop_err_pkts(pkt_tbl, pkts);
-
- if (odp_unlikely(rx_drops)) {
- stats->s.rx_drops += rx_drops;
- if (pkts == rx_drops)
- continue;
-
- pkts -= rx_drops;
- }
- }
- }
+ pkts = process_extra_features(pkt_tbl, pkts, stats);
+ if (odp_unlikely(pkts == 0))
+ continue;
fill_eth_addrs(pkt_tbl, pkts, dst_idx);
- if (odp_unlikely(use_event_queue))
- sent = event_queue_send(tx_queue, pkt_tbl, pkts);
- else
- sent = odp_pktout_send(pktout, pkt_tbl, pkts);
-
- sent = odp_unlikely(sent < 0) ? 0 : sent;
- tx_drops = pkts - sent;
-
- if (odp_unlikely(tx_drops)) {
- int i;
-
- stats->s.tx_drops += tx_drops;
-
- /* Drop rejected packets */
- for (i = sent; i < pkts; i++)
- odp_packet_free(pkt_tbl[i]);
- }
-
- stats->s.packets += pkts;
+ send_packets(pkt_tbl, pkts,
+ use_event_queue,
+ tx_queue,
+ pktout,
+ stats);
}
/* Make sure that latest stat writes are visible to other threads */
@@ -842,9 +795,6 @@ static int run_worker_direct_mode(void *arg)
/* Loop packets */
while (!odp_atomic_load_u32(&gbl_args->exit_threads)) {
- int sent;
- unsigned tx_drops;
-
if (num_pktio > 1) {
dst_idx = thr_args->pktio[pktio].tx_idx;
pktin = thr_args->pktio[pktio].pktin;
@@ -861,54 +811,17 @@ static int run_worker_direct_mode(void *arg)
if (odp_unlikely(pkts <= 0))
continue;
- if (odp_unlikely(gbl_args->appl.extra_feat)) {
- if (gbl_args->appl.packet_copy) {
- int fails;
-
- fails = copy_packets(pkt_tbl, pkts);
- stats->s.copy_fails += fails;
- }
-
- if (gbl_args->appl.chksum)
- chksum_insert(pkt_tbl, pkts);
-
- if (gbl_args->appl.error_check) {
- int rx_drops;
-
- /* Drop packets with errors */
- rx_drops = drop_err_pkts(pkt_tbl, pkts);
-
- if (odp_unlikely(rx_drops)) {
- stats->s.rx_drops += rx_drops;
- if (pkts == rx_drops)
- continue;
-
- pkts -= rx_drops;
- }
- }
- }
+ pkts = process_extra_features(pkt_tbl, pkts, stats);
+ if (odp_unlikely(pkts == 0))
+ continue;
fill_eth_addrs(pkt_tbl, pkts, dst_idx);
- if (odp_unlikely(use_event_queue))
- sent = event_queue_send(tx_queue, pkt_tbl, pkts);
- else
- sent = odp_pktout_send(pktout, pkt_tbl, pkts);
-
- sent = odp_unlikely(sent < 0) ? 0 : sent;
- tx_drops = pkts - sent;
-
- if (odp_unlikely(tx_drops)) {
- int i;
-
- stats->s.tx_drops += tx_drops;
-
- /* Drop rejected packets */
- for (i = sent; i < pkts; i++)
- odp_packet_free(pkt_tbl[i]);
- }
-
- stats->s.packets += pkts;
+ send_packets(pkt_tbl, pkts,
+ use_event_queue,
+ tx_queue,
+ pktout,
+ stats);
}
/* Make sure that latest stat writes are visible to other threads */
diff --git a/test/performance/odp_lock_perf.c b/test/performance/odp_lock_perf.c
new file mode 100644
index 000000000..dfc814657
--- /dev/null
+++ b/test/performance/odp_lock_perf.c
@@ -0,0 +1,663 @@
+/* Copyright (c) 2021, Nokia
+ *
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <getopt.h>
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+/* Max number of workers if num_cpu=0 */
+#define DEFAULT_MAX_WORKERS 10
+
+/* Max number of counters */
+#define MAX_COUNTERS 8
+
+#define TEST_INFO(name, test, validate) { name, test, validate }
+
+typedef enum place_t {
+ PLACE_PACK,
+ PLACE_SEPARATE,
+ PLACE_ALL_SEPARATE,
+} place_t;
+
+/* Command line options */
+typedef struct test_options_t {
+ uint32_t num_cpu;
+ uint32_t type;
+ uint64_t num_round;
+ uint32_t num_counter;
+ place_t place;
+} test_options_t;
+
+/* command line options default values */
+static test_options_t test_options_def = {
+ .num_cpu = 0,
+ .type = 0,
+ .num_round = 100000,
+ .num_counter = 2,
+ .place = 2,
+};
+
+typedef struct test_global_t test_global_t;
+
+/* Test function template */
+typedef void (*test_fn_t)(test_global_t *g, uint64_t **counter,
+ uint32_t num_counter);
+/* Test result validation function template */
+typedef int (*validate_fn_t)(test_global_t *g, uint64_t **counter,
+ uint32_t num_counter);
+
+/* Worker thread context */
+typedef struct test_thread_ctx_t {
+ test_global_t *global;
+ test_fn_t func;
+ uint64_t nsec;
+ uint32_t idx;
+} test_thread_ctx_t;
+
+/* Global data */
+struct test_global_t {
+ test_options_t test_options;
+ uint32_t cur_type;
+ odp_barrier_t barrier;
+ odp_cpumask_t cpumask;
+ odph_thread_t thread_tbl[ODP_THREAD_COUNT_MAX];
+ test_thread_ctx_t thread_ctx[ODP_THREAD_COUNT_MAX];
+ struct {
+ struct ODP_ALIGNED_CACHE {
+ odp_spinlock_t lock;
+ uint64_t counter[MAX_COUNTERS];
+ } spinlock;
+ struct ODP_ALIGNED_CACHE {
+ odp_spinlock_recursive_t lock;
+ uint64_t counter[MAX_COUNTERS];
+ } spinlock_recursive;
+ struct ODP_ALIGNED_CACHE {
+ odp_rwlock_t lock;
+ uint64_t counter[MAX_COUNTERS];
+ } rwlock;
+ struct ODP_ALIGNED_CACHE {
+ odp_rwlock_recursive_t lock;
+ uint64_t counter[MAX_COUNTERS];
+ } rwlock_recursive;
+ struct ODP_ALIGNED_CACHE {
+ odp_ticketlock_t lock;
+ uint64_t counter[MAX_COUNTERS];
+ } ticketlock;
+ struct ODP_ALIGNED_CACHE {
+ uint64_t counter[MAX_COUNTERS];
+ } separate;
+ struct {
+ uint64_t ODP_ALIGNED_CACHE counter;
+ } all_separate[MAX_COUNTERS];
+ } item;
+};
+
+typedef struct {
+ const char *name;
+ test_fn_t test_fn;
+ validate_fn_t validate_fn;
+} test_case_t;
+
+static test_global_t *test_global;
+
+static inline void test_spinlock(test_global_t *g, uint64_t **counter,
+ uint32_t num_counter)
+{
+ odp_spinlock_t *lock = &g->item.spinlock.lock;
+
+ for (uint64_t i = 0; i < g->test_options.num_round; i++) {
+ odp_spinlock_lock(lock);
+ for (uint32_t j = 0; j < num_counter; j++)
+ (*counter[j])++;
+ odp_spinlock_unlock(lock);
+ }
+}
+
+static inline void test_spinlock_recursive(test_global_t *g, uint64_t **counter,
+ uint32_t num_counter)
+{
+ odp_spinlock_recursive_t *lock = &g->item.spinlock_recursive.lock;
+
+ for (uint64_t i = 0; i < g->test_options.num_round; i++) {
+ odp_spinlock_recursive_lock(lock);
+ odp_spinlock_recursive_lock(lock);
+ for (uint32_t j = 0; j < num_counter; j++)
+ (*counter[j])++;
+ odp_spinlock_recursive_unlock(lock);
+ odp_spinlock_recursive_unlock(lock);
+ }
+}
+
+static inline void test_rwlock(test_global_t *g, uint64_t **counter,
+ uint32_t num_counter)
+{
+ odp_rwlock_t *lock = &g->item.rwlock.lock;
+
+ for (uint64_t i = 0; i < g->test_options.num_round; i++) {
+ odp_rwlock_write_lock(lock);
+ for (uint32_t j = 0; j < num_counter; j++)
+ (*counter[j])++;
+ odp_rwlock_write_unlock(lock);
+ odp_rwlock_read_lock(lock);
+ for (uint32_t j = 1; j < num_counter; j++)
+ if (*counter[0] != *counter[j]) {
+ odp_rwlock_read_unlock(lock);
+ ODPH_ERR("Error: Counter mismatch\n");
+ return;
+ }
+ odp_rwlock_read_unlock(lock);
+ }
+}
+
+static inline void test_rwlock_recursive(test_global_t *g, uint64_t **counter,
+ uint32_t num_counter)
+{
+ odp_rwlock_recursive_t *lock = &g->item.rwlock_recursive.lock;
+
+ for (uint64_t i = 0; i < g->test_options.num_round; i++) {
+ odp_rwlock_recursive_write_lock(lock);
+ odp_rwlock_recursive_write_lock(lock);
+ for (uint32_t j = 0; j < num_counter; j++)
+ (*counter[j])++;
+ odp_rwlock_recursive_write_unlock(lock);
+ odp_rwlock_recursive_write_unlock(lock);
+ odp_rwlock_recursive_read_lock(lock);
+ odp_rwlock_recursive_read_lock(lock);
+ for (uint32_t j = 1; j < num_counter; j++)
+ if (*counter[0] != *counter[j]) {
+ odp_rwlock_recursive_read_unlock(lock);
+ odp_rwlock_recursive_read_unlock(lock);
+ ODPH_ERR("Error: Counter mismatch\n");
+ return;
+ }
+ odp_rwlock_recursive_read_unlock(lock);
+ odp_rwlock_recursive_read_unlock(lock);
+ }
+}
+
+static inline void test_ticketlock(test_global_t *g, uint64_t **counter,
+ uint32_t num_counter)
+{
+ odp_ticketlock_t *lock = &g->item.ticketlock.lock;
+
+ for (uint64_t i = 0; i < g->test_options.num_round; i++) {
+ odp_ticketlock_lock(lock);
+ for (uint32_t j = 0; j < num_counter; j++)
+ (*counter[j])++;
+ odp_ticketlock_unlock(lock);
+ }
+}
+
+static inline int validate_generic(test_global_t *g, uint64_t **counter,
+ uint32_t num_counter)
+{
+ uint64_t total = (uint64_t)g->test_options.num_cpu * g->test_options.num_round;
+
+ for (uint32_t i = 0; i < num_counter; i++)
+ if (*counter[i] != total)
+ return 1;
+
+ return 0;
+}
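As a worked example of the check (option values assumed):

    /* -c 4 -r 100000: each counter must equal 4 * 100000 = 400000
     * after all workers join; any other final value means lost
     * updates, i.e. a broken lock implementation */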
+
+static void print_usage(void)
+{
+ printf("\n"
+ "Lock performance test\n"
+ "\n"
+ "Usage: odp_lock_perf [options]\n"
+ "\n"
+ " -c, --num_cpu Number of CPUs (worker threads). 0: all available CPUs (or max %d) (default)\n"
+ " -t, --type Lock type to test. 0: all (default %u)\n"
+ " 1: odp_spinlock_t\n"
+ " 2: odp_spinlock_recursive_t\n"
+ " 3: odp_rwlock_t\n"
+ " 4: odp_rwlock_recursive_t\n"
+ " 5: odp_ticketlock_t\n"
+ " -r, --num_round Number of rounds (default %" PRIu64 ")\n"
+ " -o, --num_counter Number of counters (default %u)\n"
+ " -p, --place Counter placement (default %d)\n"
+ " 0: pack to same cache line with lock\n"
+ " 1: pack to separate cache line\n"
+ " 2: place each counter to separate cache line\n"
+ " -h, --help This help\n"
+ "\n",
+ DEFAULT_MAX_WORKERS, test_options_def.type,
+ test_options_def.num_round, test_options_def.num_counter,
+ test_options_def.place);
+}
+
+static void print_info(test_options_t *test_options)
+{
+ printf("\nLock performance test configuration:\n");
+ printf(" num cpu %u\n", test_options->num_cpu);
+ printf(" type %u\n", test_options->type);
+ printf(" num rounds %" PRIu64 "\n", test_options->num_round);
+ printf(" num counters %u\n", test_options->num_counter);
+ printf(" place %u\n", test_options->place);
+ printf("\n\n");
+}
+
+static int parse_options(int argc, char *argv[], test_options_t *test_options)
+{
+ int opt;
+ int long_index;
+ int ret = 0;
+
+ static const struct option longopts[] = {
+ { "num_cpu", required_argument, NULL, 'c' },
+ { "type", required_argument, NULL, 't' },
+ { "num_round", required_argument, NULL, 'r' },
+ { "num_counter", required_argument, NULL, 'o' },
+ { "place", required_argument, NULL, 'p' },
+ { "help", no_argument, NULL, 'h' },
+ { NULL, 0, NULL, 0 }
+ };
+
+ static const char *shortopts = "+c:t:r:o:p:h";
+
+ *test_options = test_options_def;
+
+ while (1) {
+ opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
+
+ if (opt == -1)
+ break;
+
+ switch (opt) {
+ case 'c':
+ test_options->num_cpu = atoi(optarg);
+ break;
+ case 't':
+ test_options->type = atoi(optarg);
+ break;
+ case 'r':
+ test_options->num_round = atoll(optarg);
+ break;
+ case 'o':
+ test_options->num_counter = atoi(optarg);
+ break;
+ case 'p':
+ test_options->place = atoi(optarg);
+ break;
+ case 'h':
+ /* fall through */
+ default:
+ print_usage();
+ ret = -1;
+ break;
+ }
+ }
+
+ if (test_options->num_round < 1) {
+ ODPH_ERR("Invalid number of test rounds: %" PRIu64 "\n",
+ test_options->num_round);
+ return -1;
+ }
+
+ if (test_options->num_counter < 1 ||
+ test_options->num_counter > MAX_COUNTERS) {
+ ODPH_ERR("Invalid number of counters: %" PRIu32 "\n",
+ test_options->num_counter);
+ return -1;
+ }
+
+ return ret;
+}
+
+static int set_num_cpu(test_global_t *global)
+{
+ int ret, max_num;
+ test_options_t *test_options = &global->test_options;
+ int num_cpu = test_options->num_cpu;
+
+ /* One thread used for the main thread */
+ if (num_cpu > ODP_THREAD_COUNT_MAX - 1) {
+ ODPH_ERR("Too many workers. Maximum is %i.\n", ODP_THREAD_COUNT_MAX - 1);
+ return -1;
+ }
+
+ max_num = num_cpu;
+ if (num_cpu == 0) {
+ max_num = ODP_THREAD_COUNT_MAX - 1;
+ if (max_num > DEFAULT_MAX_WORKERS)
+ max_num = DEFAULT_MAX_WORKERS;
+ }
+
+ ret = odp_cpumask_default_worker(&global->cpumask, max_num);
+
+ if (num_cpu && ret != num_cpu) {
+ ODPH_ERR("Too many workers. Max supported %i.\n", ret);
+ return -1;
+ }
+
+ /* Zero: all available workers */
+ if (num_cpu == 0) {
+ if (ret > max_num) {
+ ODPH_ERR("Too many cpus from odp_cpumask_default_worker(): %i\n", ret);
+ return -1;
+ }
+
+ num_cpu = ret;
+ test_options->num_cpu = num_cpu;
+ }
+
+ odp_barrier_init(&global->barrier, num_cpu);
+
+ return 0;
+}
+
+static int init_test(test_global_t *g, const char *name)
+{
+ printf("TEST: %s\n", name);
+
+ memset(&g->item, 0, sizeof(g->item));
+ odp_spinlock_init(&g->item.spinlock.lock);
+ odp_spinlock_recursive_init(&g->item.spinlock_recursive.lock);
+ odp_rwlock_init(&g->item.rwlock.lock);
+ odp_rwlock_recursive_init(&g->item.rwlock_recursive.lock);
+ odp_ticketlock_init(&g->item.ticketlock.lock);
+
+ return 0;
+}
+
+static void fill_counter_ptrs(test_global_t *g, uint64_t **counter_out)
+{
+ test_options_t *test_options = &g->test_options;
+
+ memset(counter_out, 0, sizeof(uint64_t *) * MAX_COUNTERS);
+
+ switch (test_options->place) {
+ case PLACE_PACK:
+ for (uint32_t i = 0; i < test_options->num_counter; i++) {
+ switch (g->cur_type) {
+ case 0:
+ counter_out[i] = &g->item.spinlock.counter[i];
+ break;
+ case 1:
+ counter_out[i] = &g->item.spinlock_recursive.counter[i];
+ break;
+ case 2:
+ counter_out[i] = &g->item.rwlock.counter[i];
+ break;
+ case 3:
+ counter_out[i] = &g->item.rwlock_recursive.counter[i];
+ break;
+ case 4:
+ counter_out[i] = &g->item.ticketlock.counter[i];
+ break;
+ }
+ }
+ break;
+ case PLACE_SEPARATE:
+ for (uint32_t i = 0; i < test_options->num_counter; i++)
+ counter_out[i] = &g->item.separate.counter[i];
+ break;
+ case PLACE_ALL_SEPARATE:
+ for (uint32_t i = 0; i < test_options->num_counter; i++)
+ counter_out[i] = &g->item.all_separate[i].counter;
+ break;
+ }
+}
+
+static int run_test(void *arg)
+{
+ uint64_t nsec;
+ odp_time_t t1, t2;
+ test_thread_ctx_t *thread_ctx = arg;
+ test_global_t *global = thread_ctx->global;
+ test_options_t *test_options = &global->test_options;
+ test_fn_t test_func = thread_ctx->func;
+ uint64_t *counter[MAX_COUNTERS];
+
+ fill_counter_ptrs(global, counter);
+
+ /* Start all workers at the same time */
+ odp_barrier_wait(&global->barrier);
+
+ t1 = odp_time_local();
+ test_func(global, counter, test_options->num_counter);
+ t2 = odp_time_local();
+ nsec = odp_time_diff_ns(t2, t1);
+
+ /* Update stats */
+ thread_ctx->nsec = nsec;
+
+ return 0;
+}
+
+static int start_workers(test_global_t *global, odp_instance_t instance,
+ test_fn_t func)
+{
+ odph_thread_common_param_t param;
+ int i, ret;
+ test_options_t *test_options = &global->test_options;
+ int num_cpu = test_options->num_cpu;
+ odph_thread_param_t thr_param[num_cpu];
+
+ odph_thread_common_param_init(&param);
+ param.instance = instance;
+ param.cpumask = &global->cpumask;
+
+ for (i = 0; i < num_cpu; i++) {
+ test_thread_ctx_t *thread_ctx = &global->thread_ctx[i];
+
+ thread_ctx->global = global;
+ thread_ctx->idx = i;
+ thread_ctx->func = func;
+
+ odph_thread_param_init(&thr_param[i]);
+ thr_param[i].thr_type = ODP_THREAD_WORKER;
+ thr_param[i].start = run_test;
+ thr_param[i].arg = thread_ctx;
+ }
+
+ ret = odph_thread_create(global->thread_tbl, &param, thr_param,
+ num_cpu);
+ if (ret != num_cpu) {
+ ODPH_ERR("Failed to create all threads %i\n", ret);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int validate_results(test_global_t *global, validate_fn_t validate)
+{
+ test_options_t *test_options = &global->test_options;
+ uint64_t *counter[MAX_COUNTERS];
+
+ fill_counter_ptrs(global, counter);
+
+ if (validate(global, counter, test_options->num_counter))
+ return -1;
+
+ return 0;
+}
+
+static void print_stat(test_global_t *global)
+{
+ int i, num;
+ double nsec_ave;
+ test_options_t *test_options = &global->test_options;
+ int num_cpu = test_options->num_cpu;
+ uint64_t num_round = test_options->num_round;
+ uint64_t nsec_sum = 0;
+
+ for (i = 0; i < ODP_THREAD_COUNT_MAX; i++)
+ nsec_sum += global->thread_ctx[i].nsec;
+
+ if (nsec_sum == 0) {
+ printf("No results.\n");
+ return;
+ }
+
+ nsec_ave = nsec_sum / num_cpu;
+ num = 0;
+
+ printf("------------------------------------------------\n");
+ printf("Per thread results (Millions of rounds per sec):\n");
+ printf("------------------------------------------------\n");
+ printf(" 1 2 3 4 5 6 7 8 9 10");
+
+ for (i = 0; i < ODP_THREAD_COUNT_MAX; i++) {
+ if (global->thread_ctx[i].nsec) {
+ if ((num % 10) == 0)
+ printf("\n ");
+
+ printf("%8.3f ", num_round / (global->thread_ctx[i].nsec / 1000.0));
+ num++;
+ }
+ }
+ printf("\n\n");
+
+ printf("Average results over %i threads:\n", num_cpu);
+ printf("------------------------------------------\n");
+ printf(" duration: %8.3f sec\n",
+ nsec_ave / ODP_TIME_SEC_IN_NS);
+ printf(" rounds per cpu: %8.3fM rounds/sec\n",
+ num_round / (nsec_ave / 1000.0));
+ printf(" total rounds: %8.3fM rounds/sec\n",
+ ((uint64_t)num_cpu * num_round) / (nsec_ave / 1000.0));
+ printf("\n\n");
+}
+
+/**
+ * Test functions
+ */
+static test_case_t test_suite[] = {
+ TEST_INFO("odp_spinlock", test_spinlock, validate_generic),
+ TEST_INFO("odp_spinlock_recursive", test_spinlock_recursive, validate_generic),
+ TEST_INFO("odp_rwlock", test_rwlock, validate_generic),
+ TEST_INFO("odp_rwlock_recursive", test_rwlock_recursive, validate_generic),
+ TEST_INFO("odp_ticketlock", test_ticketlock, validate_generic),
+};
+
+int main(int argc, char **argv)
+{
+ odph_helper_options_t helper_options;
+ odp_instance_t instance;
+ odp_init_t init;
+ odp_shm_t shm;
+ test_options_t test_options;
+ int num_tests, i;
+
+ /* Let helper collect its own arguments (e.g. --odph_proc) */
+ argc = odph_parse_options(argc, argv);
+ if (odph_options(&helper_options)) {
+ ODPH_ERR("Error: reading ODP helper options failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (parse_options(argc, argv, &test_options))
+ exit(EXIT_FAILURE);
+
+ /* List features not to be used */
+ odp_init_param_init(&init);
+ init.not_used.feat.cls = 1;
+ init.not_used.feat.compress = 1;
+ init.not_used.feat.crypto = 1;
+ init.not_used.feat.ipsec = 1;
+ init.not_used.feat.schedule = 1;
+ init.not_used.feat.stash = 1;
+ init.not_used.feat.timer = 1;
+ init.not_used.feat.tm = 1;
+
+ init.mem_model = helper_options.mem_model;
+
+ /* Init ODP before calling anything else */
+ if (odp_init_global(&instance, &init, NULL)) {
+ ODPH_ERR("Global init failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Init this thread */
+ if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
+ ODPH_ERR("Local init failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Reserve memory for global data from shared mem */
+ shm = odp_shm_reserve("test_global", sizeof(test_global_t),
+ ODP_CACHE_LINE_SIZE, 0);
+
+ if (shm == ODP_SHM_INVALID) {
+ ODPH_ERR("Shared memory reserve failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ test_global = odp_shm_addr(shm);
+ if (test_global == NULL) {
+ ODPH_ERR("Shared memory alloc failed.\n");
+ exit(EXIT_FAILURE);
+ }
+ memset(test_global, 0, sizeof(test_global_t));
+ test_global->test_options = test_options;
+
+ odp_sys_info_print();
+
+ if (set_num_cpu(test_global))
+ exit(EXIT_FAILURE);
+
+ print_info(&test_global->test_options);
+
+ /* Loop all test cases */
+ num_tests = sizeof(test_suite) / sizeof(test_suite[0]);
+
+ for (i = 0; i < num_tests; i++) {
+ if (test_options.type && test_options.type != (uint32_t)i + 1)
+ continue;
+
+ test_global->cur_type = i;
+
+ /* Initialize test variables */
+ if (init_test(test_global, test_suite[i].name)) {
+ ODPH_ERR("Failed to initialize test.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Start workers */
+ if (start_workers(test_global, instance, test_suite[i].test_fn))
+ exit(EXIT_FAILURE);
+
+ /* Wait for workers to exit */
+ odph_thread_join(test_global->thread_tbl,
+ test_global->test_options.num_cpu);
+
+ print_stat(test_global);
+
+ /* Validate test results */
+ if (validate_results(test_global, test_suite[i].validate_fn)) {
+ ODPH_ERR("Test %s result validation failed.\n",
+ test_suite[i].name);
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ if (odp_shm_free(shm)) {
+ ODPH_ERR("Shm free failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (odp_term_local()) {
+ ODPH_ERR("Local terminate failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (odp_term_global(instance)) {
+ ODPH_ERR("Global terminate failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ return 0;
+}
diff --git a/test/performance/odp_random.c b/test/performance/odp_random.c
new file mode 100644
index 000000000..7083349ff
--- /dev/null
+++ b/test/performance/odp_random.c
@@ -0,0 +1,377 @@
+/* Copyright (c) 2021, Nokia
+ *
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <getopt.h>
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+#define MB (1024ull * 1024ull)
+
+/* Command line options */
+typedef struct {
+ int num_threads;
+ uint32_t size;
+ uint32_t rounds;
+} options_t;
+
+static options_t options;
+static const options_t options_def = {
+ .num_threads = 1,
+ .size = 256,
+ .rounds = 100000,
+};
+
+static void print_usage(void)
+{
+ printf("\n"
+ "random data performance test\n"
+ "\n"
+ "Usage: odp_random [options]\n"
+ "\n"
+ " -t, --threads Number of worker threads (default %u)\n"
+ " -s, --size Size of buffer in bytes (default %u)\n"
+ " -r, --rounds Number of test rounds (default %u)\n"
+ " Rounded down to nearest multiple of 8\n"
+ " -h, --help This help\n"
+ "\n",
+ options_def.num_threads, options_def.size, options_def.rounds);
+}
+
+static int parse_options(int argc, char *argv[])
+{
+ int opt;
+ int long_index;
+ int ret = 0;
+
+ static const struct option longopts[] = {
+ { "threads", required_argument, NULL, 't' },
+ { "size", required_argument, NULL, 's' },
+ { "rounds", required_argument, NULL, 'r' },
+ { "help", no_argument, NULL, 'h' },
+ { NULL, 0, NULL, 0 }
+ };
+
+ static const char *shortopts = "+t:s:r:h";
+
+ options = options_def;
+
+ while (1) {
+ opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
+
+ if (opt == -1)
+ break;
+
+ switch (opt) {
+ case 't':
+ options.num_threads = atol(optarg);
+ break;
+ case 's':
+ options.size = atol(optarg);
+ break;
+ case 'r':
+ options.rounds = atol(optarg);
+ break;
+ case 'h':
+ /* fall through */
+ default:
+ print_usage();
+ ret = -1;
+ break;
+ }
+ }
+
+ if (options.size < 1) {
+ ODPH_ERR("Invalid size: %" PRIu32 "\n", options.size);
+ return -1;
+ }
+
+ return ret;
+}
+
+const char *shm_name = "odp_random_test";
+
+typedef struct test_shm_t {
+ odp_barrier_t barrier;
+ odp_random_kind_t type;
+ uint64_t nsec[ODP_THREAD_COUNT_MAX];
+} test_shm_t;
+
+static test_shm_t *shm_lookup(void)
+{
+ test_shm_t *shm = NULL;
+ odp_shm_t shm_hdl = odp_shm_lookup(shm_name);
+
+ if (shm_hdl != ODP_SHM_INVALID)
+ shm = (test_shm_t *)odp_shm_addr(shm_hdl);
+
+ return shm;
+}
+
+static int test_random(void *p)
+{
+ (void)p;
+
+ uint8_t *buf, *data;
+ const unsigned long page = ODP_PAGE_SIZE;
+ odp_time_t start;
+ uint64_t nsec;
+ test_shm_t *shm = shm_lookup();
+
+ if (!shm) {
+ ODPH_ERR("Failed to look up shm %s\n", shm_name);
+ exit(EXIT_FAILURE);
+ }
+
+ /* One extra page for alignment. */
+ buf = (uint8_t *)malloc(options.size + page);
+
+ if (!buf) {
+ ODPH_ERR("Memory allocation failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Align to start of page. */
+ data = (uint8_t *)(((uintptr_t)buf + (page - 1)) & ~(page - 1));
+
+ odp_barrier_wait(&shm->barrier);
+ start = odp_time_local();
+
+ for (uint32_t i = 0; i < options.rounds; i++) {
+ uint32_t pos = 0;
+
+ while (pos < options.size) {
+ int32_t n = odp_random_data(data + pos,
+ options.size - pos,
+ shm->type);
+
+ if (n < 0) {
+ ODPH_ERR("odp_random_data() failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ pos += n;
+ }
+ }
+
+ nsec = odp_time_diff_ns(odp_time_local(), start);
+ shm->nsec[odp_thread_id()] = nsec;
+ free(buf);
+
+ return 0;
+}
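The alignment idiom above rounds a pointer up to the next power-of-two boundary; a concrete example with page = 0x1000:

    /* buf = 0x12345 -> data = 0x13000; buf = 0x13000 -> data = 0x13000 */
    data = (uint8_t *)(((uintptr_t)buf + (page - 1)) & ~(page - 1));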
+
+static int test_random_test(void *p)
+{
+ (void)p;
+
+ uint8_t *buf, *data;
+ const unsigned long page = ODP_PAGE_SIZE;
+ odp_time_t start;
+ uint64_t nsec;
+ uint64_t seed = 0;
+ test_shm_t *shm = shm_lookup();
+
+ if (!shm) {
+ ODPH_ERR("Failed to look up shm %s\n", shm_name);
+ exit(EXIT_FAILURE);
+ }
+
+ /* One extra page for alignment. */
+ buf = (uint8_t *)malloc(options.size + page);
+
+ if (!buf) {
+ ODPH_ERR("Memory allocation failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Align to start of page. */
+ data = (uint8_t *)(((uintptr_t)buf + (page - 1)) & ~(page - 1));
+
+ odp_barrier_wait(&shm->barrier);
+ start = odp_time_local();
+
+ for (uint32_t i = 0; i < options.rounds; i++) {
+ uint32_t pos = 0;
+
+ while (pos < options.size) {
+ int32_t n = odp_random_test_data(data + pos,
+ options.size - pos,
+ &seed);
+
+ if (n < 0) {
+ ODPH_ERR("odp_random_data() failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ pos += n;
+ }
+ }
+
+ nsec = odp_time_diff_ns(odp_time_local(), start);
+ shm->nsec[odp_thread_id()] = nsec;
+ free(buf);
+
+ return 0;
+}
+
+static void test_type(odp_instance_t instance, test_shm_t *shm,
+ odp_random_kind_t type)
+{
+ memset(shm, 0, sizeof(test_shm_t));
+ shm->type = type;
+ odp_barrier_init(&shm->barrier, options.num_threads);
+
+ odp_cpumask_t cpumask;
+ odph_thread_common_param_t thr_common;
+ odph_thread_param_t thr_param;
+ odph_thread_t thr_worker[options.num_threads];
+
+ if (odp_cpumask_default_worker(&cpumask, options.num_threads) !=
+ options.num_threads) {
+ ODPH_ERR("Failed to get default CPU mask.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = instance;
+ thr_common.cpumask = &cpumask;
+ thr_common.share_param = 1;
+
+ odph_thread_param_init(&thr_param);
+ thr_param.thr_type = ODP_THREAD_WORKER;
+ thr_param.start = test_random;
+
+ if (type == (odp_random_kind_t)-1)
+ thr_param.start = test_random_test;
+
+ memset(&thr_worker, 0, sizeof(thr_worker));
+
+ if (odph_thread_create(thr_worker, &thr_common, &thr_param,
+ options.num_threads) != options.num_threads) {
+ ODPH_ERR("Failed to create worker threads.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (odph_thread_join(thr_worker, options.num_threads) !=
+ options.num_threads) {
+ ODPH_ERR("Failed to join worker threads.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ double mb, seconds, nsec = 0;
+
+ for (int i = 0; i < ODP_THREAD_COUNT_MAX; i++)
+ nsec += shm->nsec[i];
+
+ nsec /= options.num_threads;
+
+ switch (type) {
+ case ODP_RANDOM_BASIC:
+ printf("ODP_RANDOM_BASIC\n");
+ break;
+ case ODP_RANDOM_CRYPTO:
+ printf("ODP_RANDOM_CRYPTO\n");
+ break;
+ case ODP_RANDOM_TRUE:
+ printf("ODP_RANDOM_TRUE\n");
+ break;
+ default:
+ printf("odp_random_test_data\n");
+ }
+
+ printf("--------------------\n");
+ printf("threads: %d size: %u B rounds: %u ", options.num_threads,
+ options.size, options.rounds);
+ mb = (uint64_t)options.num_threads * (uint64_t)options.size *
+ (uint64_t)options.rounds;
+ mb /= MB;
+ seconds = (double)nsec / (double)ODP_TIME_SEC_IN_NS;
+ printf("MB: %.3f seconds: %.3f ", mb, seconds);
+ printf("MB/s: %.3f ", mb / seconds);
+ printf("MB/s/thread: %.3f", mb / seconds / (double)options.num_threads);
+ printf("\n\n");
+}
+
+int main(int argc, char **argv)
+{
+ odp_instance_t instance;
+ odp_init_t init;
+
+ if (parse_options(argc, argv))
+ exit(EXIT_FAILURE);
+
+ /* List features not to be used */
+ odp_init_param_init(&init);
+ init.not_used.feat.cls = 1;
+ init.not_used.feat.compress = 1;
+ init.not_used.feat.crypto = 1;
+ init.not_used.feat.ipsec = 1;
+ init.not_used.feat.schedule = 1;
+ init.not_used.feat.stash = 1;
+ init.not_used.feat.timer = 1;
+ init.not_used.feat.tm = 1;
+
+ /* Init ODP before calling anything else */
+ if (odp_init_global(&instance, &init, NULL)) {
+ ODPH_ERR("Global init failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Init this thread */
+ if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
+ ODPH_ERR("Local init failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ odp_sys_info_print();
+
+ test_shm_t *shm = NULL;
+ odp_shm_t shm_hdl = odp_shm_reserve(shm_name, sizeof(test_shm_t), 64,
+ ODP_SHM_SW_ONLY);
+
+ if (shm_hdl != ODP_SHM_INVALID)
+ shm = (test_shm_t *)odp_shm_addr(shm_hdl);
+
+ if (!shm) {
+ ODPH_ERR("Failed to reserve shm %s\n", shm_name);
+ exit(EXIT_FAILURE);
+ }
+
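+ /* Each kind implies support for the weaker kinds, so the cases
+ * below intentionally fall through. */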
+ switch (odp_random_max_kind()) {
+ case ODP_RANDOM_TRUE:
+ test_type(instance, shm, ODP_RANDOM_TRUE);
+ /* fall through */
+ case ODP_RANDOM_CRYPTO:
+ test_type(instance, shm, ODP_RANDOM_CRYPTO);
+ /* fall through */
+ default:
+ test_type(instance, shm, ODP_RANDOM_BASIC);
+ test_type(instance, shm, (odp_random_kind_t)-1);
+ }
+
+ if (odp_shm_free(shm_hdl)) {
+ ODPH_ERR("odp_shm_free() failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (odp_term_local()) {
+ ODPH_ERR("Local terminate failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (odp_term_global(instance)) {
+ ODPH_ERR("Global terminate failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ return 0;
+}
diff --git a/test/performance/odp_sched_perf.c b/test/performance/odp_sched_perf.c
index af3a11c97..40a24155e 100644
--- a/test/performance/odp_sched_perf.c
+++ b/test/performance/odp_sched_perf.c
@@ -38,9 +38,11 @@ typedef struct test_options_t {
int num_group;
uint32_t num_join;
uint32_t max_burst;
+ odp_pool_type_t pool_type;
int queue_type;
int forward;
int fairness;
+ uint32_t event_size;
uint32_t queue_size;
uint32_t tot_queue;
uint32_t tot_event;
@@ -125,6 +127,7 @@ static void print_usage(void)
" -l, --ctx_rw_words Number of queue context words (uint64_t) to modify on every event. Default: 0.\n"
" -n, --rd_words Number of event data words (uint64_t) to read before enqueueing it. Default: 0.\n"
" -m, --rw_words Number of event data words (uint64_t) to modify before enqueueing it. Default: 0.\n"
+ " -p, --pool_type Pool type. 0: buffer, 1: packet. Default: 0.\n"
" -v, --verbose Verbose output.\n"
" -h, --help This help\n"
"\n");
@@ -135,6 +138,7 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
int opt, long_index, num_group, num_join;
int ret = 0;
uint32_t ctx_size = 0;
+ int pool_type = 0;
static const struct option longopts[] = {
{"num_cpu", required_argument, NULL, 'c'},
@@ -155,12 +159,13 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
{"ctx_rw_words", required_argument, NULL, 'l'},
{"rd_words", required_argument, NULL, 'n'},
{"rw_words", required_argument, NULL, 'm'},
+ {"pool_type", required_argument, NULL, 'p'},
{"verbose", no_argument, NULL, 'v'},
{"help", no_argument, NULL, 'h'},
{NULL, 0, NULL, 0}
};
- static const char *shortopts = "+c:q:L:H:d:e:s:g:j:b:t:f:a:w:k:l:n:m:vh";
+ static const char *shortopts = "+c:q:L:H:d:e:s:g:j:b:t:f:a:w:k:l:n:m:p:vh";
test_options->num_cpu = 1;
test_options->num_queue = 1;
@@ -240,6 +245,9 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
case 'm':
test_options->rw_words = atoi(optarg);
break;
+ case 'p':
+ pool_type = atoi(optarg);
+ break;
case 'w':
test_options->wait_ns = atoll(optarg);
break;
@@ -254,6 +262,14 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
break;
}
}
+ if (pool_type == 0) {
+ test_options->pool_type = ODP_POOL_BUFFER;
+ } else if (pool_type == 1) {
+ test_options->pool_type = ODP_POOL_PACKET;
+ } else {
+ ODPH_ERR("Invalid pool type: %d.\n", pool_type);
+ ret = -1;
+ }
test_options->touch_data = test_options->rd_words ||
test_options->rw_words;
@@ -384,6 +400,7 @@ static int create_pool(test_global_t *global)
event_size = test_options->rd_words + test_options->rw_words;
event_size = 8 * event_size;
}
+ test_options->event_size = event_size;
printf("\nScheduler performance test\n");
printf(" num sched %u\n", num_sched);
@@ -427,33 +444,48 @@ static int create_pool(test_global_t *global)
}
if (odp_pool_capability(&pool_capa)) {
- printf("Error: Pool capa failed.\n");
+ ODPH_ERR("Error: pool capa failed\n");
return -1;
}
- max_num = pool_capa.buf.max_num;
- max_size = pool_capa.buf.max_size;
+ if (test_options->pool_type == ODP_POOL_BUFFER) {
+ printf(" pool type buffer\n");
+ max_num = pool_capa.buf.max_num;
+ max_size = pool_capa.buf.max_size;
+
+ } else {
+ printf(" pool type packet\n");
+ max_num = pool_capa.pkt.max_num;
+ max_size = pool_capa.pkt.max_seg_len;
+ }
if (max_num && tot_event > max_num) {
- printf("Error: max events supported %u\n", max_num);
+ ODPH_ERR("Error: max events supported %u\n", max_num);
return -1;
}
if (max_size && event_size > max_size) {
- printf("Error: max supported event size %u\n", max_size);
+ ODPH_ERR("Error: max supported event size %u\n", max_size);
return -1;
}
odp_pool_param_init(&pool_param);
- pool_param.type = ODP_POOL_BUFFER;
- pool_param.buf.num = tot_event;
- pool_param.buf.size = event_size;
- pool_param.buf.align = 8;
+ if (test_options->pool_type == ODP_POOL_BUFFER) {
+ pool_param.type = ODP_POOL_BUFFER;
+ pool_param.buf.num = tot_event;
+ pool_param.buf.size = event_size;
+ pool_param.buf.align = 8;
+ } else {
+ pool_param.type = ODP_POOL_PACKET;
+ pool_param.pkt.num = tot_event;
+ pool_param.pkt.len = event_size;
+ pool_param.pkt.seg_len = event_size;
+ pool_param.pkt.align = 8;
+ }
pool = odp_pool_create("sched perf", &pool_param);
-
if (pool == ODP_POOL_INVALID) {
- printf("Error: Pool create failed.\n");
+ ODPH_ERR("Error: pool create failed\n");
return -1;
}
@@ -506,12 +538,12 @@ static int create_queues(test_global_t *global)
{
odp_queue_param_t queue_param;
odp_queue_t queue;
- odp_buffer_t buf;
odp_schedule_sync_t sync;
odp_schedule_prio_t prio;
const char *type_str;
uint32_t i, j, first;
test_options_t *test_options = &global->test_options;
+ uint32_t event_size = test_options->event_size;
uint32_t num_event = test_options->num_event;
uint32_t queue_size = test_options->queue_size;
uint32_t tot_queue = test_options->tot_queue;
@@ -665,15 +697,27 @@ static int create_queues(test_global_t *global)
}
for (j = 0; j < num_event; j++) {
- buf = odp_buffer_alloc(pool);
+ odp_event_t ev;
- if (buf == ODP_BUFFER_INVALID) {
- printf("Error: Alloc failed %u/%u\n", i, j);
- return -1;
- }
+ if (test_options->pool_type == ODP_POOL_BUFFER) {
+ odp_buffer_t buf = odp_buffer_alloc(pool);
- if (odp_queue_enq(queue, odp_buffer_to_event(buf))) {
- printf("Error: Enqueue failed %u/%u\n", i, j);
+ if (buf == ODP_BUFFER_INVALID) {
+ ODPH_ERR("Error: alloc failed %u/%u\n", i, j);
+ return -1;
+ }
+ ev = odp_buffer_to_event(buf);
+ } else {
+ odp_packet_t pkt = odp_packet_alloc(pool, event_size);
+
+ if (pkt == ODP_PACKET_INVALID) {
+ ODPH_ERR("Error: alloc failed %u/%u\n", i, j);
+ return -1;
+ }
+ ev = odp_packet_to_event(pkt);
+ }
+ if (odp_queue_enq(queue, ev)) {
+ ODPH_ERR("Error: enqueue failed %u/%u\n", i, j);
return -1;
}
}
@@ -830,17 +874,18 @@ static inline uint64_t rw_ctx_data(void *ctx, uint32_t offset,
}
static uint64_t rw_data(odp_event_t ev[], int num,
- uint32_t rd_words, uint32_t rw_words)
+ uint32_t rd_words, uint32_t rw_words, odp_pool_type_t pool_type)
{
- odp_buffer_t buf;
uint64_t *data;
int i;
uint32_t j;
uint64_t sum = 0;
for (i = 0; i < num; i++) {
- buf = odp_buffer_from_event(ev[i]);
- data = odp_buffer_addr(buf);
+ if (pool_type == ODP_POOL_BUFFER)
+ data = odp_buffer_addr(odp_buffer_from_event(ev[i]));
+ else
+ data = odp_packet_data(odp_packet_from_event(ev[i]));
for (j = 0; j < rd_words; j++)
sum += data[j];
@@ -876,6 +921,7 @@ static int test_sched(void *arg)
uint32_t ctx_size = test_options->ctx_size;
uint32_t ctx_rd_words = test_options->ctx_rd_words;
uint32_t ctx_rw_words = test_options->ctx_rw_words;
+ odp_pool_type_t pool_type = test_options->pool_type;
int touch_ctx = ctx_rd_words || ctx_rw_words;
uint32_t ctx_offset = 0;
uint32_t sched_retries = 0;
@@ -961,7 +1007,7 @@ static int test_sched(void *arg)
if (odp_unlikely(touch_data))
data_sum += rw_data(ev, num, rd_words,
- rw_words);
+ rw_words, pool_type);
if (odp_unlikely(wait_ns)) {
waits++;
diff --git a/test/performance/odp_sched_perf_run.sh b/test/performance/odp_sched_perf_run.sh
new file mode 100755
index 000000000..8e7911290
--- /dev/null
+++ b/test/performance/odp_sched_perf_run.sh
@@ -0,0 +1,33 @@
+#!/bin/sh
+#
+# Copyright (c) 2021, Nokia
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+TEST_DIR="${TEST_DIR:-$(dirname "$0")}"
+
+echo odp_sched_perf: buffer pool
+echo ===============================================
+
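+# EXEEXT is the executable file name suffix set by the build system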
+$TEST_DIR/odp_sched_perf${EXEEXT} -p 0
+
+RET_VAL=$?
+if [ $RET_VAL -ne 0 ]; then
+ echo odp_sched_perf -p 0: FAILED
+ exit $RET_VAL
+fi
+
+echo odp_sched_perf: packet pool
+echo ===============================================
+
+$TEST_DIR/odp_sched_perf${EXEEXT} -p 1
+
+RET_VAL=$?
+if [ $RET_VAL -ne 0 ]; then
+ echo odp_sched_perf -p 1: FAILED
+ exit $RET_VAL
+fi
+
+exit 0
diff --git a/test/performance/odp_sched_pktio.c b/test/performance/odp_sched_pktio.c
index 589b58d97..9a31c2647 100644
--- a/test/performance/odp_sched_pktio.c
+++ b/test/performance/odp_sched_pktio.c
@@ -1266,8 +1266,7 @@ static int create_timers(test_global_t *test_global)
return -1;
}
- memset(&timer_param, 0, sizeof(odp_timer_pool_param_t));
-
+ odp_timer_pool_param_init(&timer_param);
timer_param.res_ns = res_ns;
timer_param.min_tmo = timeout_ns;
timer_param.max_tmo = timeout_ns;
diff --git a/test/performance/odp_timer_perf.c b/test/performance/odp_timer_perf.c
index a45081643..f1c4943bf 100644
--- a/test/performance/odp_timer_perf.c
+++ b/test/performance/odp_timer_perf.c
@@ -345,8 +345,7 @@ static int create_timer_pools(test_global_t *global)
return -1;
}
- memset(&timer_pool_param, 0, sizeof(odp_timer_pool_param_t));
-
+ odp_timer_pool_param_init(&timer_pool_param);
timer_pool_param.res_ns = res_ns;
timer_pool_param.min_tmo = min_tmo_ns;
timer_pool_param.max_tmo = max_tmo_ns;
diff --git a/test/validation/api/Makefile.am b/test/validation/api/Makefile.am
index 591fe8a82..c1a2539bc 100644
--- a/test/validation/api/Makefile.am
+++ b/test/validation/api/Makefile.am
@@ -6,6 +6,7 @@ ODP_MODULES = atomic \
comp \
cpumask \
crypto \
+ dma \
errno \
event \
hash \
@@ -41,6 +42,7 @@ TESTS = \
comp/comp_main$(EXEEXT) \
cpumask/cpumask_main$(EXEEXT) \
crypto/crypto_main$(EXEEXT) \
+ dma/dma_main$(EXEEXT) \
errno/errno_main$(EXEEXT) \
event/event_main$(EXEEXT) \
hash/hash_main$(EXEEXT) \
diff --git a/test/validation/api/atomic/atomic.c b/test/validation/api/atomic/atomic.c
index 907624bb0..54bd5ee3e 100644
--- a/test/validation/api/atomic/atomic.c
+++ b/test/validation/api/atomic/atomic.c
@@ -752,8 +752,7 @@ static int atomic_init(odp_instance_t *inst)
}
global_shm = odp_shm_reserve(GLOBAL_SHM_NAME,
- sizeof(global_shared_mem_t), 64,
- ODP_SHM_SW_ONLY);
+ sizeof(global_shared_mem_t), 64, 0);
if (ODP_SHM_INVALID == global_shm) {
fprintf(stderr, "Unable reserve memory for global_shm\n");
return -1;
diff --git a/test/validation/api/barrier/barrier.c b/test/validation/api/barrier/barrier.c
index c03151765..aa0ca90b3 100644
--- a/test/validation/api/barrier/barrier.c
+++ b/test/validation/api/barrier/barrier.c
@@ -349,8 +349,7 @@ static int barrier_init(odp_instance_t *inst)
}
global_shm = odp_shm_reserve(GLOBAL_SHM_NAME,
- sizeof(global_shared_mem_t), 64,
- ODP_SHM_SW_ONLY);
+ sizeof(global_shared_mem_t), 64, 0);
if (ODP_SHM_INVALID == global_shm) {
fprintf(stderr, "Unable reserve memory for global_shm\n");
return -1;
diff --git a/test/validation/api/classification/odp_classification_basic.c b/test/validation/api/classification/odp_classification_basic.c
index d23093619..3aeb89462 100644
--- a/test/validation/api/classification/odp_classification_basic.c
+++ b/test/validation/api/classification/odp_classification_basic.c
@@ -10,6 +10,23 @@
#define PMR_SET_NUM 5
+static void classification_test_default_values(void)
+{
+ odp_cls_cos_param_t cos_param;
+ odp_pmr_param_t pmr_param;
+
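+ /* Poison the structs with a non-zero pattern first, so the asserts
+ * below verify that init actually writes the default values. */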
+ memset(&cos_param, 0x55, sizeof(cos_param));
+ odp_cls_cos_param_init(&cos_param);
+ CU_ASSERT_EQUAL(cos_param.num_queue, 1);
+ CU_ASSERT_EQUAL(cos_param.red.enable, false);
+ CU_ASSERT_EQUAL(cos_param.bp.enable, false);
+ CU_ASSERT_EQUAL(cos_param.vector.enable, false);
+
+ memset(&pmr_param, 0x55, sizeof(pmr_param));
+ odp_cls_pmr_param_init(&pmr_param);
+ CU_ASSERT_EQUAL(pmr_param.range_term, false);
+}
+
static void classification_test_create_cos(void)
{
odp_cos_t cos;
@@ -326,6 +343,7 @@ static void classification_test_pmr_composite_create(void)
}
odp_testinfo_t classification_suite_basic[] = {
+ ODP_TEST_INFO(classification_test_default_values),
ODP_TEST_INFO(classification_test_create_cos),
ODP_TEST_INFO(classification_test_destroy_cos),
ODP_TEST_INFO(classification_test_create_pmr_match),
diff --git a/test/validation/api/classification/odp_classification_common.c b/test/validation/api/classification/odp_classification_common.c
index dfbafc687..dd8373b04 100644
--- a/test/validation/api/classification/odp_classification_common.c
+++ b/test/validation/api/classification/odp_classification_common.c
@@ -207,7 +207,7 @@ odp_packet_t receive_packet(odp_queue_t *queue, uint64_t ns, odp_bool_t enable_p
if (ev == ODP_EVENT_INVALID)
return ODP_PACKET_INVALID;
- if (!enable_pktv && odp_event_type(ev) == ODP_EVENT_PACKET) {
+ if (odp_event_type(ev) == ODP_EVENT_PACKET) {
return odp_packet_from_event(ev);
} else if (enable_pktv && odp_event_type(ev) == ODP_EVENT_PACKET_VECTOR) {
odp_packet_vector_t pktv;
diff --git a/test/validation/api/cpumask/cpumask.c b/test/validation/api/cpumask/cpumask.c
index eb131a142..0983bb4b5 100644
--- a/test/validation/api/cpumask/cpumask.c
+++ b/test/validation/api/cpumask/cpumask.c
@@ -1,4 +1,5 @@
/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -14,9 +15,9 @@
static void cpumask_test_odp_cpumask_def_control(void)
{
- unsigned num;
- unsigned mask_count;
- unsigned max_cpus = mask_capacity();
+ unsigned int num, max_num;
+ unsigned int mask_count;
+ unsigned int max_cpus = mask_capacity();
odp_cpumask_t mask;
num = odp_cpumask_default_control(&mask, ALL_AVAILABLE);
@@ -25,13 +26,21 @@ static void cpumask_test_odp_cpumask_def_control(void)
CU_ASSERT(mask_count == num);
CU_ASSERT(num > 0);
CU_ASSERT(num <= max_cpus);
+
+ max_num = odp_cpumask_default_control(&mask, max_cpus);
+ mask_count = odp_cpumask_count(&mask);
+
+ CU_ASSERT(max_num > 0);
+ CU_ASSERT(max_num == mask_count);
+ CU_ASSERT(max_num <= max_cpus);
+ CU_ASSERT(max_num <= num);
}
static void cpumask_test_odp_cpumask_def_worker(void)
{
- unsigned num;
- unsigned mask_count;
- unsigned max_cpus = mask_capacity();
+ unsigned int num, max_num;
+ unsigned int mask_count;
+ unsigned int max_cpus = mask_capacity();
odp_cpumask_t mask;
num = odp_cpumask_default_worker(&mask, ALL_AVAILABLE);
@@ -40,6 +49,14 @@ static void cpumask_test_odp_cpumask_def_worker(void)
CU_ASSERT(mask_count == num);
CU_ASSERT(num > 0);
CU_ASSERT(num <= max_cpus);
+
+ max_num = odp_cpumask_default_worker(&mask, max_cpus);
+ mask_count = odp_cpumask_count(&mask);
+
+ CU_ASSERT(max_num > 0);
+ CU_ASSERT(max_num == mask_count);
+ CU_ASSERT(max_num <= max_cpus);
+ CU_ASSERT(max_num <= num);
}
static void cpumask_test_odp_cpumask_def(void)
diff --git a/test/validation/api/crypto/odp_crypto_test_inp.c b/test/validation/api/crypto/odp_crypto_test_inp.c
index a5987d2e7..f620d44a8 100644
--- a/test/validation/api/crypto/odp_crypto_test_inp.c
+++ b/test/validation/api/crypto/odp_crypto_test_inp.c
@@ -605,24 +605,35 @@ static void alg_test(odp_crypto_op_t op,
uint32_t max_shift;
odp_crypto_ses_create_err_t status;
odp_crypto_session_param_t ses_params;
+ uint8_t cipher_key_data[ref->cipher_key_length];
+ uint8_t auth_key_data[ref->auth_key_length];
+ uint8_t cipher_iv_data[ref->cipher_iv_length];
+ uint8_t auth_iv_data[ref->auth_iv_length];
odp_crypto_key_t cipher_key = {
- .data = ref->cipher_key,
+ .data = cipher_key_data,
.length = ref->cipher_key_length
};
odp_crypto_key_t auth_key = {
- .data = ref->auth_key,
+ .data = auth_key_data,
.length = ref->auth_key_length
};
odp_crypto_iv_t cipher_iv = {
- .data = ovr_iv ? NULL : ref->cipher_iv,
+ .data = ovr_iv ? NULL : cipher_iv_data,
.length = ref->cipher_iv_length
};
odp_crypto_iv_t auth_iv = {
- .data = ovr_iv ? NULL : ref->auth_iv,
+ .data = ovr_iv ? NULL : auth_iv_data,
.length = ref->auth_iv_length
};
alg_test_param_t test_param;
+ memcpy(cipher_key_data, ref->cipher_key, ref->cipher_key_length);
+ memcpy(auth_key_data, ref->auth_key, ref->auth_key_length);
+ if (!ovr_iv) {
+ memcpy(cipher_iv_data, ref->cipher_iv, ref->cipher_iv_length);
+ memcpy(auth_iv_data, ref->auth_iv, ref->auth_iv_length);
+ }
+
/* Create a crypto session */
odp_crypto_session_param_init(&ses_params);
ses_params.op = op;
@@ -646,6 +657,16 @@ static void alg_test(odp_crypto_op_t op,
CU_ASSERT(odp_crypto_session_to_u64(session) !=
odp_crypto_session_to_u64(ODP_CRYPTO_SESSION_INVALID));
+ /*
+ * Clear session creation parameters so that we might notice if
+ * the implementation still tried to use them.
+ */
+ memset(cipher_key_data, 0, sizeof(cipher_key_data));
+ memset(auth_key_data, 0, sizeof(auth_key_data));
+ memset(cipher_iv_data, 0, sizeof(cipher_iv_data));
+ memset(auth_iv_data, 0, sizeof(auth_iv_data));
+ memset(&ses_params, 0, sizeof(ses_params));
+
memset(&test_param, 0, sizeof(test_param));
test_param.session = session;
test_param.op = op;
diff --git a/test/validation/api/dma/.gitignore b/test/validation/api/dma/.gitignore
new file mode 100644
index 000000000..cc14794d5
--- /dev/null
+++ b/test/validation/api/dma/.gitignore
@@ -0,0 +1 @@
+dma_main
diff --git a/test/validation/api/dma/Makefile.am b/test/validation/api/dma/Makefile.am
new file mode 100644
index 000000000..795825c6b
--- /dev/null
+++ b/test/validation/api/dma/Makefile.am
@@ -0,0 +1,4 @@
+include ../Makefile.inc
+
+test_PROGRAMS = dma_main
+dma_main_SOURCES = dma.c
diff --git a/test/validation/api/dma/dma.c b/test/validation/api/dma/dma.c
new file mode 100644
index 000000000..4df81ca7a
--- /dev/null
+++ b/test/validation/api/dma/dma.c
@@ -0,0 +1,1202 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+#include "odp_cunit_common.h"
+
+#define COMPL_POOL_NAME "DMA compl pool"
+
+#define SHM_SIZE (1024 * 1024)
+#define SHM_ALIGN ODP_CACHE_LINE_SIZE
+#define NUM_COMPL 10
+#define RETRIES 5
+#define TIMEOUT 5
+#define OFFSET 10
+#define TRAILER 10
+#define MULTI 1
+#define RESULT 1
+
+typedef struct global_t {
+ odp_dma_capability_t dma_capa;
+ odp_shm_t shm;
+ int disabled;
+ uint8_t *src_addr;
+ uint8_t *dst_addr;
+ uint32_t data_size;
+ uint32_t len;
+ odp_pool_t pkt_pool;
+ uint32_t pkt_len;
+ odp_queue_t queue;
+ odp_pool_t compl_pool;
+ uint32_t event_count;
+ uint32_t cache_size;
+
+} global_t;
+
+static global_t global;
+
+static int dma_suite_init(void)
+{
+ odp_shm_t shm;
+ odp_pool_param_t pool_param;
+ odp_dma_pool_param_t dma_pool_param;
+ odp_pool_capability_t pool_capa;
+ odp_queue_param_t queue_param;
+ uint32_t pkt_len;
+ void *addr;
+
+ memset(&global, 0, sizeof(global_t));
+ global.shm = ODP_SHM_INVALID;
+ global.pkt_pool = ODP_POOL_INVALID;
+ global.queue = ODP_QUEUE_INVALID;
+ global.compl_pool = ODP_POOL_INVALID;
+
+ if (odp_dma_capability(&global.dma_capa)) {
+ ODPH_ERR("DMA capability failed\n");
+ return -1;
+ }
+
+ if (global.dma_capa.max_sessions == 0) {
+ global.disabled = 1;
+ ODPH_DBG("DMA test disabled\n");
+ return 0;
+ }
+
+ shm = odp_shm_reserve("DMA test", SHM_SIZE, SHM_ALIGN, 0);
+
+ if (shm == ODP_SHM_INVALID) {
+ ODPH_ERR("SHM reserve failed\n");
+ return -1;
+ }
+
+ addr = odp_shm_addr(shm);
+
+ if (addr == NULL) {
+ ODPH_ERR("SHM addr failed\n");
+ return -1;
+ }
+
+ global.shm = shm;
+ global.data_size = SHM_SIZE / 2;
+ global.src_addr = addr;
+ global.dst_addr = (uint8_t *)global.src_addr + global.data_size;
+ global.len = global.data_size - OFFSET - TRAILER;
+
+ if (odp_pool_capability(&pool_capa)) {
+ ODPH_ERR("Pool capa failed\n");
+ return -1;
+ }
+
+ pkt_len = pool_capa.pkt.max_len;
+ if (pkt_len == 0)
+ pkt_len = 4000;
+
+ /* Cap to the source buffer size, since the pkt_segs_to_addr test
+ * copies pkt_len bytes from the source SHM area. */
+ if (pkt_len > global.data_size)
+ pkt_len = global.data_size;
+
+ odp_pool_param_init(&pool_param);
+ pool_param.type = ODP_POOL_PACKET;
+ pool_param.pkt.num = 4;
+ pool_param.pkt.len = pkt_len;
+ pool_param.pkt.max_len = pkt_len;
+
+ global.pkt_len = pkt_len;
+ global.pkt_pool = odp_pool_create("DMA test pkt pool", &pool_param);
+
+ if (global.pkt_pool == ODP_POOL_INVALID) {
+ ODPH_ERR("Packet pool create failed\n");
+ return -1;
+ }
+
+ odp_queue_param_init(&queue_param);
+ queue_param.type = ODP_QUEUE_TYPE_SCHED;
+ queue_param.sched.sync = ODP_SCHED_SYNC_PARALLEL;
+ queue_param.sched.prio = odp_schedule_default_prio();
+ queue_param.sched.group = ODP_SCHED_GROUP_ALL;
+
+ global.queue = odp_queue_create("DMA test queue", &queue_param);
+
+ if (global.queue == ODP_QUEUE_INVALID) {
+ ODPH_ERR("Queue create failed\n");
+ return -1;
+ }
+
+ if (global.dma_capa.compl_mode_mask & ODP_DMA_COMPL_EVENT) {
+ if (global.dma_capa.pool.max_num < NUM_COMPL) {
+ ODPH_ERR("DMA completion pool too small: %u\n", global.dma_capa.pool.max_num);
+ return -1;
+ }
+
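+ /* Save the default cache size, so that pool info output can be
+ * verified against it in test_dma_compl_pool() */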
+ odp_dma_pool_param_init(&dma_pool_param);
+ dma_pool_param.num = NUM_COMPL;
+ global.cache_size = dma_pool_param.cache_size;
+
+ global.compl_pool = odp_dma_pool_create(COMPL_POOL_NAME, &dma_pool_param);
+
+ if (global.compl_pool == ODP_POOL_INVALID) {
+ ODPH_ERR("Completion pool create failed\n");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int dma_suite_term(void)
+{
+ if (global.compl_pool != ODP_POOL_INVALID &&
+ odp_pool_destroy(global.compl_pool)) {
+ ODPH_ERR("Completion pool destroy failed\n");
+ return -1;
+ }
+
+ if (global.queue != ODP_QUEUE_INVALID &&
+ odp_queue_destroy(global.queue)) {
+ ODPH_ERR("Queue destroy failed\n");
+ return -1;
+ }
+
+ if (global.pkt_pool != ODP_POOL_INVALID &&
+ odp_pool_destroy(global.pkt_pool)) {
+ ODPH_ERR("Packet pool destroy failed\n");
+ return -1;
+ }
+
+ if (global.shm != ODP_SHM_INVALID &&
+ odp_shm_free(global.shm)) {
+ ODPH_ERR("SHM free failed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static void test_dma_capability(void)
+{
+ odp_dma_capability_t capa;
+
+ memset(&capa, 0, sizeof(odp_dma_capability_t));
+ CU_ASSERT(odp_dma_capability(&capa) == 0);
+
+ if (capa.max_sessions == 0)
+ return;
+
+ CU_ASSERT(capa.max_transfers > 0);
+ CU_ASSERT(capa.max_src_segs > 0);
+ CU_ASSERT(capa.max_dst_segs > 0);
+ CU_ASSERT(capa.max_segs > 1);
+ CU_ASSERT(capa.max_segs > capa.max_src_segs);
+ CU_ASSERT(capa.max_segs > capa.max_dst_segs);
+ CU_ASSERT(capa.max_seg_len > 0);
+ CU_ASSERT(capa.compl_mode_mask & ODP_DMA_COMPL_SYNC);
+
+ if (capa.compl_mode_mask & ODP_DMA_COMPL_EVENT) {
+ CU_ASSERT(capa.queue_type_sched || capa.queue_type_plain);
+ CU_ASSERT(capa.pool.max_pools > 0);
+ CU_ASSERT(capa.pool.max_num > 0);
+ CU_ASSERT(capa.pool.min_cache_size <= capa.pool.max_cache_size);
+ }
+}
+
+static void test_dma_param(void)
+{
+ odp_dma_param_t dma_param;
+ odp_dma_transfer_param_t trs_param;
+ odp_dma_compl_param_t compl_param;
+ odp_dma_pool_param_t dma_pool_param;
+
+ odp_dma_param_init(&dma_param);
+ CU_ASSERT(dma_param.direction == ODP_DMA_MAIN_TO_MAIN);
+ CU_ASSERT(dma_param.type == ODP_DMA_TYPE_COPY);
+ CU_ASSERT(dma_param.mt_mode == ODP_DMA_MT_SAFE);
+ CU_ASSERT(dma_param.order == ODP_DMA_ORDER_NONE);
+
+ odp_dma_transfer_param_init(&trs_param);
+ CU_ASSERT(trs_param.src_format == ODP_DMA_FORMAT_ADDR);
+ CU_ASSERT(trs_param.dst_format == ODP_DMA_FORMAT_ADDR);
+ CU_ASSERT(trs_param.num_src == 1);
+ CU_ASSERT(trs_param.num_dst == 1);
+
+ odp_dma_compl_param_init(&compl_param);
+ CU_ASSERT(compl_param.user_ptr == NULL);
+
+ odp_dma_pool_param_init(&dma_pool_param);
+ CU_ASSERT(dma_pool_param.cache_size <= global.dma_capa.pool.max_cache_size);
+ CU_ASSERT(dma_pool_param.cache_size >= global.dma_capa.pool.min_cache_size);
+}
+
+static void test_dma_debug(void)
+{
+ odp_dma_param_t dma_param;
+ odp_dma_t dma, dma2;
+ uint64_t u64;
+ const char *name = "dma_debug";
+
+ odp_dma_param_init(&dma_param);
+ dma_param.compl_mode_mask = ODP_DMA_COMPL_SYNC;
+ dma = odp_dma_create(name, &dma_param);
+ CU_ASSERT_FATAL(dma != ODP_DMA_INVALID);
+
+ dma2 = odp_dma_lookup(name);
+ CU_ASSERT(dma2 != ODP_DMA_INVALID);
+ CU_ASSERT(dma2 == dma);
+
+ u64 = odp_dma_to_u64(dma);
+ CU_ASSERT(u64 != odp_dma_to_u64(ODP_DMA_INVALID));
+ printf("\n DMA handle: 0x%" PRIx64 "\n", u64);
+
+ odp_dma_print(dma);
+
+ CU_ASSERT(odp_dma_destroy(dma) == 0);
+}
+
+static void test_dma_compl_pool(void)
+{
+ odp_pool_t pool;
+ odp_pool_info_t pool_info;
+ odp_dma_compl_t compl;
+ uint64_t u64;
+ int ret;
+ const char *name = COMPL_POOL_NAME;
+
+ CU_ASSERT_FATAL(global.compl_pool != ODP_POOL_INVALID);
+
+ pool = odp_pool_lookup(name);
+ CU_ASSERT(pool == global.compl_pool);
+
+ memset(&pool_info, 0x55, sizeof(odp_pool_info_t));
+ ret = odp_pool_info(global.compl_pool, &pool_info);
+ CU_ASSERT(ret == 0);
+ CU_ASSERT(strcmp(pool_info.name, name) == 0);
+ CU_ASSERT(pool_info.pool_ext == 0);
+ CU_ASSERT(pool_info.type == ODP_POOL_DMA_COMPL);
+ CU_ASSERT(pool_info.dma_pool_param.num == NUM_COMPL);
+ CU_ASSERT(pool_info.dma_pool_param.cache_size == global.cache_size);
+
+ compl = odp_dma_compl_alloc(global.compl_pool);
+ CU_ASSERT_FATAL(compl != ODP_DMA_COMPL_INVALID);
+
+ u64 = odp_dma_compl_to_u64(compl);
+ CU_ASSERT(u64 != odp_dma_compl_to_u64(ODP_DMA_COMPL_INVALID));
+ printf("\n DMA compl handle: 0x%" PRIx64 "\n", u64);
+ odp_dma_compl_print(compl);
+
+ odp_dma_compl_free(compl);
+}
+
+static void init_source(uint8_t *src, uint32_t len)
+{
+ uint32_t i;
+
+ for (i = 0; i < len; i++)
+ src[i] = i;
+}
+
+static int check_equal(uint8_t *src, uint8_t *dst, uint32_t len)
+{
+ uint32_t i;
+
+ for (i = 0; i < len; i++)
+ if (src[i] != dst[i])
+ return -1;
+
+ return 0;
+}
+
+static int check_zero(uint8_t *ptr, uint32_t len)
+{
+ uint32_t i;
+
+ for (i = 0; i < len; i++)
+ if (ptr[i])
+ return -1;
+
+ return 0;
+}
+
+static int do_transfer(odp_dma_t dma, const odp_dma_transfer_param_t *trs_param, int multi, int res)
+{
+ int i, ret;
+ odp_dma_result_t result;
+ const odp_dma_transfer_param_t *trs_ptr[1] = {trs_param};
+ odp_dma_result_t *result_ptr[1] = {&result};
+
+ memset(&result, 0, sizeof(odp_dma_result_t));
+
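+ /* A zero return value means that the transfer was not accepted
+ * (e.g. resources busy), so retry a few times before giving up. */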
+ for (i = 0; i < RETRIES; i++) {
+ if (!multi && !res)
+ ret = odp_dma_transfer(dma, trs_param, NULL);
+ else if (!multi && res)
+ ret = odp_dma_transfer(dma, trs_param, &result);
+ else if (multi && !res)
+ ret = odp_dma_transfer_multi(dma, trs_ptr, NULL, 1);
+ else
+ ret = odp_dma_transfer_multi(dma, trs_ptr, result_ptr, 1);
+
+ if (ret)
+ break;
+ }
+
+ CU_ASSERT(ret == 1);
+
+ if (res)
+ CU_ASSERT(result.success);
+
+ return ret;
+}
+
+static int do_transfer_async(odp_dma_t dma, const odp_dma_transfer_param_t *trs_param,
+ odp_dma_compl_mode_t compl_mode, int multi)
+{
+ odp_dma_compl_param_t compl_param;
+ odp_event_t ev;
+ odp_dma_compl_t compl;
+ int i, ret, done, dummy;
+ odp_dma_result_t result;
+ odp_dma_transfer_id_t transfer_id = ODP_DMA_TRANSFER_ID_INVALID;
+ uint64_t wait_ns = 500 * ODP_TIME_MSEC_IN_NS;
+ uint64_t sched_wait = odp_schedule_wait_time(wait_ns);
+ void *user_ptr = &dummy;
+
+ odp_dma_compl_param_init(&compl_param);
+ compl_param.compl_mode = compl_mode;
+
+ if (compl_mode == ODP_DMA_COMPL_EVENT) {
+ compl = odp_dma_compl_alloc(global.compl_pool);
+
+ CU_ASSERT(compl != ODP_DMA_COMPL_INVALID);
+ if (compl == ODP_DMA_COMPL_INVALID)
+ return -1;
+
+ compl_param.event = odp_dma_compl_to_event(compl);
+ compl_param.queue = global.queue;
+ } else if (compl_mode == ODP_DMA_COMPL_POLL) {
+ transfer_id = odp_dma_transfer_id_alloc(dma);
+
+ CU_ASSERT(transfer_id != ODP_DMA_TRANSFER_ID_INVALID);
+ if (transfer_id == ODP_DMA_TRANSFER_ID_INVALID)
+ return -1;
+
+ compl_param.transfer_id = transfer_id;
+ } else if (compl_mode != ODP_DMA_COMPL_NONE) {
+ ODPH_ERR("Wrong compl mode: %u\n", compl_mode);
+ return -1;
+ }
+
+ for (i = 0; i < RETRIES; i++) {
+ compl_param.user_ptr = user_ptr;
+
+ if (multi) {
+ const odp_dma_compl_param_t *compl_ptr[1] = {&compl_param};
+ const odp_dma_transfer_param_t *trs_ptr[1] = {trs_param};
+
+ ret = odp_dma_transfer_start_multi(dma, trs_ptr, compl_ptr, 1);
+ } else {
+ ret = odp_dma_transfer_start(dma, trs_param, &compl_param);
+ }
+
+ if (ret)
+ break;
+ }
+
+ CU_ASSERT(ret == 1);
+
+ if (ret < 1)
+ return ret;
+
+ memset(&result, 0, sizeof(odp_dma_result_t));
+
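+ /* In poll mode, busy-wait for completion with
+ * odp_dma_transfer_done() for up to TIMEOUT rounds of wait_ns. */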
+ if (compl_mode == ODP_DMA_COMPL_POLL) {
+ for (i = 0; i < TIMEOUT; i++) {
+ done = odp_dma_transfer_done(dma, transfer_id, &result);
+ if (done)
+ break;
+
+ odp_time_wait_ns(wait_ns);
+ }
+
+ CU_ASSERT(done == 1);
+ CU_ASSERT(result.success);
+ CU_ASSERT(result.user_ptr == user_ptr);
+
+ odp_dma_transfer_id_free(dma, transfer_id);
+
+ return done;
+
+ } else if (compl_mode == ODP_DMA_COMPL_EVENT) {
+ odp_queue_t from = ODP_QUEUE_INVALID;
+
+ for (i = 0; i < TIMEOUT; i++) {
+ ev = odp_schedule(&from, sched_wait);
+ if (ev != ODP_EVENT_INVALID)
+ break;
+ }
+
+ CU_ASSERT(ev != ODP_EVENT_INVALID);
+ if (ev == ODP_EVENT_INVALID)
+ return -1;
+
+ CU_ASSERT(from == global.queue);
+ CU_ASSERT(odp_event_type(ev) == ODP_EVENT_DMA_COMPL);
+
+ compl = odp_dma_compl_from_event(ev);
+ CU_ASSERT(compl != ODP_DMA_COMPL_INVALID);
+
+ CU_ASSERT(odp_dma_compl_result(compl, &result) == 0);
+ CU_ASSERT(result.success);
+ CU_ASSERT(result.user_ptr == user_ptr);
+
+ /* Test also without result struct output */
+ CU_ASSERT(odp_dma_compl_result(compl, NULL) == 0);
+
+ /* Test compl event print on the first event */
+ if (global.event_count == 0) {
+ printf("\n\n");
+ odp_dma_compl_print(compl);
+ }
+
+ /* Test both ways to free the event */
+ if (global.event_count % 2)
+ odp_event_free(ev);
+ else
+ odp_dma_compl_free(compl);
+
+ global.event_count++;
+ }
+
+ return 1;
+}
+
+static void test_dma_addr_to_addr(odp_dma_compl_mode_t compl_mode_mask, uint32_t num,
+ int multi, int res)
+{
+ odp_dma_param_t dma_param;
+ odp_dma_transfer_param_t trs_param;
+ odp_dma_t dma;
+ odp_dma_seg_t src_seg[num];
+ odp_dma_seg_t dst_seg[num];
+ int ret;
+ uint32_t i, cur_len;
+ uint8_t *src = global.src_addr + OFFSET;
+ uint8_t *dst = global.dst_addr + OFFSET;
+ uint32_t len = global.len;
+ uint32_t seg_len = len / num;
+ uint32_t offset = 0;
+
+ init_source(global.src_addr, global.data_size);
+ memset(global.dst_addr, 0, global.data_size);
+
+ odp_dma_param_init(&dma_param);
+ dma_param.compl_mode_mask = compl_mode_mask;
+ dma = odp_dma_create("addr_to_addr", &dma_param);
+ CU_ASSERT_FATAL(dma != ODP_DMA_INVALID);
+
+ memset(src_seg, 0, sizeof(src_seg));
+ memset(dst_seg, 0, sizeof(dst_seg));
+
+ for (i = 0; i < num; i++) {
+ cur_len = seg_len;
+ if (i == num - 1)
+ cur_len = len - seg_len * i;
+
+ src_seg[i].addr = src + offset;
+ src_seg[i].len = cur_len;
+ dst_seg[i].addr = dst + offset;
+ dst_seg[i].len = cur_len;
+ offset += cur_len;
+ }
+
+ odp_dma_transfer_param_init(&trs_param);
+ trs_param.num_src = num;
+ trs_param.num_dst = num;
+ trs_param.src_seg = src_seg;
+ trs_param.dst_seg = dst_seg;
+
+ if (compl_mode_mask == ODP_DMA_COMPL_SYNC)
+ ret = do_transfer(dma, &trs_param, multi, res);
+ else
+ ret = do_transfer_async(dma, &trs_param, compl_mode_mask, multi);
+
+ if (ret > 0) {
+ CU_ASSERT(check_equal(src, dst, len) == 0);
+ CU_ASSERT(check_zero(global.dst_addr, OFFSET) == 0);
+ CU_ASSERT(check_zero(dst + len, TRAILER) == 0);
+ }
+
+ CU_ASSERT(odp_dma_destroy(dma) == 0);
+}
+
+static void test_dma_addr_to_addr_trs(odp_dma_compl_mode_t compl_mode_mask, uint32_t num_trs,
+ int multi, int res)
+{
+ odp_dma_param_t dma_param;
+ odp_dma_transfer_param_t trs_param;
+ odp_dma_t dma;
+ odp_dma_seg_t src_seg;
+ odp_dma_seg_t dst_seg;
+ int compl_none;
+ uint32_t i, cur_len;
+ odp_dma_compl_mode_t compl_mode;
+ uint8_t *src = global.src_addr + OFFSET;
+ uint8_t *dst = global.dst_addr + OFFSET;
+ uint32_t len = global.len;
+ uint32_t trs_len = len / num_trs;
+ uint32_t offset = 0;
+ int ret = -1;
+
+ compl_none = 0;
+ if (compl_mode_mask & ODP_DMA_COMPL_NONE)
+ compl_none = 1;
+
+ init_source(global.src_addr, global.data_size);
+ memset(global.dst_addr, 0, global.data_size);
+
+ odp_dma_param_init(&dma_param);
+ dma_param.compl_mode_mask = compl_mode_mask;
+ dma = odp_dma_create("addr_to_addr", &dma_param);
+ CU_ASSERT_FATAL(dma != ODP_DMA_INVALID);
+
+ odp_dma_transfer_param_init(&trs_param);
+ trs_param.src_seg = &src_seg;
+ trs_param.dst_seg = &dst_seg;
+
+ memset(&src_seg, 0, sizeof(src_seg));
+ memset(&dst_seg, 0, sizeof(dst_seg));
+
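+ /* When ODP_DMA_COMPL_NONE is in the mask, intermediate transfers
+ * are started without completion tracking and only the last
+ * transfer uses the real completion mode. */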
+ for (i = 0; i < num_trs; i++) {
+ compl_mode = compl_mode_mask;
+ if (compl_none)
+ compl_mode = ODP_DMA_COMPL_NONE;
+
+ cur_len = trs_len;
+ if (i == num_trs - 1) {
+ cur_len = len - trs_len * i;
+ compl_mode = compl_mode_mask & ~ODP_DMA_COMPL_NONE;
+ }
+
+ src_seg.addr = src + offset;
+ src_seg.len = cur_len;
+ dst_seg.addr = dst + offset;
+ dst_seg.len = cur_len;
+ offset += cur_len;
+
+ if (compl_mode_mask == ODP_DMA_COMPL_SYNC)
+ ret = do_transfer(dma, &trs_param, multi, res);
+ else
+ ret = do_transfer_async(dma, &trs_param, compl_mode, multi);
+
+ if (ret < 1)
+ break;
+ }
+
+ if (ret > 0) {
+ CU_ASSERT(check_equal(src, dst, len) == 0);
+ CU_ASSERT(check_zero(global.dst_addr, OFFSET) == 0);
+ CU_ASSERT(check_zero(dst + len, TRAILER) == 0);
+ }
+
+ CU_ASSERT(odp_dma_destroy(dma) == 0);
+}
+
+static void test_dma_addr_to_pkt(odp_dma_compl_mode_t compl_mode_mask, int multi)
+{
+ odp_dma_param_t dma_param;
+ odp_dma_transfer_param_t trs_param;
+ odp_dma_t dma;
+ odp_dma_seg_t src_seg;
+ odp_dma_seg_t dst_seg;
+ int ret;
+ uint8_t *src, *pkt_data;
+ odp_packet_t pkt;
+ uint32_t len, seg_len;
+
+ init_source(global.src_addr, global.data_size);
+
+ odp_dma_param_init(&dma_param);
+ dma_param.compl_mode_mask = compl_mode_mask;
+ dma = odp_dma_create("addr_to_pkt", &dma_param);
+ CU_ASSERT_FATAL(dma != ODP_DMA_INVALID);
+
+ pkt = odp_packet_alloc(global.pkt_pool, global.pkt_len);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ seg_len = odp_packet_seg_len(pkt);
+ pkt_data = odp_packet_data(pkt);
+ memset(pkt_data, 0, seg_len);
+ CU_ASSERT_FATAL(seg_len > OFFSET + TRAILER);
+
+ len = seg_len - OFFSET - TRAILER;
+ if (len > global.len)
+ len = global.len;
+
+ src = global.src_addr + OFFSET;
+
+ memset(&src_seg, 0, sizeof(odp_dma_seg_t));
+ memset(&dst_seg, 0, sizeof(odp_dma_seg_t));
+ src_seg.addr = src;
+ src_seg.len = len;
+ dst_seg.packet = pkt;
+ dst_seg.offset = OFFSET;
+ dst_seg.len = len;
+
+ odp_dma_transfer_param_init(&trs_param);
+ trs_param.src_format = ODP_DMA_FORMAT_ADDR;
+ trs_param.dst_format = ODP_DMA_FORMAT_PACKET;
+ trs_param.src_seg = &src_seg;
+ trs_param.dst_seg = &dst_seg;
+
+ if (compl_mode_mask == ODP_DMA_COMPL_SYNC)
+ ret = do_transfer(dma, &trs_param, multi, 0);
+ else
+ ret = do_transfer_async(dma, &trs_param, compl_mode_mask, multi);
+
+ if (ret > 0) {
+ uint8_t *dst = pkt_data + OFFSET;
+
+ CU_ASSERT(check_equal(src, dst, len) == 0);
+ CU_ASSERT(check_zero(pkt_data, OFFSET) == 0);
+ CU_ASSERT(check_zero(dst + len, TRAILER) == 0);
+ }
+
+ odp_packet_free(pkt);
+ CU_ASSERT(odp_dma_destroy(dma) == 0);
+}
+
+static void test_dma_pkt_to_addr(odp_dma_compl_mode_t compl_mode_mask, int multi)
+{
+ odp_dma_param_t dma_param;
+ odp_dma_transfer_param_t trs_param;
+ odp_dma_t dma;
+ odp_dma_seg_t src_seg;
+ odp_dma_seg_t dst_seg;
+ int ret;
+ uint8_t *dst, *pkt_data;
+ odp_packet_t pkt;
+ uint32_t len, seg_len;
+
+ memset(global.dst_addr, 0, global.data_size);
+
+ odp_dma_param_init(&dma_param);
+ dma_param.compl_mode_mask = compl_mode_mask;
+ dma = odp_dma_create("pkt_to_addr", &dma_param);
+ CU_ASSERT_FATAL(dma != ODP_DMA_INVALID);
+
+ pkt = odp_packet_alloc(global.pkt_pool, global.pkt_len);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ seg_len = odp_packet_seg_len(pkt);
+ pkt_data = odp_packet_data(pkt);
+ init_source(pkt_data, seg_len);
+
+ CU_ASSERT_FATAL(seg_len > OFFSET + TRAILER);
+
+ len = seg_len - OFFSET - TRAILER;
+ if (len > global.len)
+ len = global.len;
+
+ dst = global.dst_addr + OFFSET;
+
+ memset(&src_seg, 0, sizeof(odp_dma_seg_t));
+ memset(&dst_seg, 0, sizeof(odp_dma_seg_t));
+ src_seg.packet = pkt;
+ src_seg.offset = OFFSET;
+ src_seg.len = len;
+ dst_seg.addr = dst;
+ dst_seg.len = len;
+
+ odp_dma_transfer_param_init(&trs_param);
+ trs_param.src_format = ODP_DMA_FORMAT_PACKET;
+ trs_param.dst_format = ODP_DMA_FORMAT_ADDR;
+ trs_param.src_seg = &src_seg;
+ trs_param.dst_seg = &dst_seg;
+
+ if (compl_mode_mask == ODP_DMA_COMPL_SYNC)
+ ret = do_transfer(dma, &trs_param, multi, 0);
+ else
+ ret = do_transfer_async(dma, &trs_param, compl_mode_mask, multi);
+
+ if (ret > 0) {
+ uint8_t *src = pkt_data + OFFSET;
+
+ CU_ASSERT(check_equal(src, dst, len) == 0);
+ CU_ASSERT(check_zero(global.dst_addr, OFFSET) == 0);
+ CU_ASSERT(check_zero(dst + len, TRAILER) == 0);
+ }
+
+ odp_packet_free(pkt);
+ CU_ASSERT(odp_dma_destroy(dma) == 0);
+}
+
+static void test_dma_pkt_to_pkt(odp_dma_compl_mode_t compl_mode_mask, int multi)
+{
+ odp_dma_param_t dma_param;
+ odp_dma_transfer_param_t trs_param;
+ odp_dma_t dma;
+ odp_dma_seg_t src_seg;
+ odp_dma_seg_t dst_seg;
+ int ret;
+ uint8_t *pkt_data, *pkt_data_2;
+ odp_packet_t pkt, pkt_2;
+ uint32_t len, seg_len, seg_len_2;
+
+ odp_dma_param_init(&dma_param);
+ dma_param.compl_mode_mask = compl_mode_mask;
+ dma = odp_dma_create("pkt_to_pkt", &dma_param);
+ CU_ASSERT_FATAL(dma != ODP_DMA_INVALID);
+
+ pkt = odp_packet_alloc(global.pkt_pool, global.pkt_len);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ pkt_2 = odp_packet_alloc(global.pkt_pool, global.pkt_len);
+ CU_ASSERT_FATAL(pkt_2 != ODP_PACKET_INVALID);
+
+ seg_len = odp_packet_seg_len(pkt);
+ pkt_data = odp_packet_data(pkt);
+ init_source(pkt_data, seg_len);
+
+ seg_len_2 = odp_packet_seg_len(pkt_2);
+ pkt_data_2 = odp_packet_data(pkt_2);
+ memset(pkt_data_2, 0, seg_len_2);
+
+ CU_ASSERT_FATAL(seg_len > OFFSET + TRAILER);
+
+ if (seg_len > seg_len_2)
+ seg_len = seg_len_2;
+
+ len = seg_len - OFFSET - TRAILER;
+
+ memset(&src_seg, 0, sizeof(odp_dma_seg_t));
+ memset(&dst_seg, 0, sizeof(odp_dma_seg_t));
+ src_seg.packet = pkt;
+ src_seg.offset = OFFSET;
+ src_seg.len = len;
+ dst_seg.packet = pkt_2;
+ dst_seg.offset = OFFSET;
+ dst_seg.len = len;
+
+ odp_dma_transfer_param_init(&trs_param);
+ trs_param.src_format = ODP_DMA_FORMAT_PACKET;
+ trs_param.dst_format = ODP_DMA_FORMAT_PACKET;
+ trs_param.src_seg = &src_seg;
+ trs_param.dst_seg = &dst_seg;
+
+ if (compl_mode_mask == ODP_DMA_COMPL_SYNC)
+ ret = do_transfer(dma, &trs_param, multi, 0);
+ else
+ ret = do_transfer_async(dma, &trs_param, compl_mode_mask, multi);
+
+ if (ret > 0) {
+ uint8_t *src = pkt_data + OFFSET;
+ uint8_t *dst = pkt_data_2 + OFFSET;
+
+ CU_ASSERT(check_equal(src, dst, len) == 0);
+ CU_ASSERT(check_zero(pkt_data_2, OFFSET) == 0);
+ CU_ASSERT(check_zero(dst + len, TRAILER) == 0);
+ }
+
+ odp_packet_free(pkt);
+ odp_packet_free(pkt_2);
+ CU_ASSERT(odp_dma_destroy(dma) == 0);
+}
+
+static void test_dma_pkt_segs_to_addr_sync(void)
+{
+ odp_dma_param_t dma_param;
+ odp_dma_transfer_param_t trs_param;
+ odp_dma_t dma;
+ odp_dma_seg_t src_seg;
+ odp_dma_seg_t dst_seg;
+ int ret;
+ uint8_t *dst;
+ odp_packet_t pkt;
+ uint32_t i, len, num_segs;
+ uint32_t pkt_len = global.pkt_len;
+
+ memset(global.dst_addr, 0, global.data_size);
+
+ odp_dma_param_init(&dma_param);
+ dma_param.compl_mode_mask = ODP_DMA_COMPL_SYNC;
+ dma = odp_dma_create("pkt_segs_to_addr", &dma_param);
+ CU_ASSERT_FATAL(dma != ODP_DMA_INVALID);
+
+ pkt = odp_packet_alloc(global.pkt_pool, pkt_len);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ num_segs = odp_packet_num_segs(pkt);
+ if (num_segs > global.dma_capa.max_src_segs)
+ num_segs = global.dma_capa.max_src_segs;
+
+ init_source(global.src_addr, global.data_size);
+ CU_ASSERT_FATAL(odp_packet_copy_from_mem(pkt, 0, pkt_len, global.src_addr) == 0);
+
+ len = pkt_len - OFFSET - TRAILER;
+ if (len > global.len)
+ len = global.len;
+
+ dst = global.dst_addr + OFFSET;
+
+ memset(&src_seg, 0, sizeof(odp_dma_seg_t));
+ memset(&dst_seg, 0, sizeof(odp_dma_seg_t));
+ src_seg.packet = pkt;
+ src_seg.offset = OFFSET;
+ src_seg.len = len;
+ dst_seg.addr = dst;
+ dst_seg.len = len;
+
+ odp_dma_transfer_param_init(&trs_param);
+ trs_param.src_format = ODP_DMA_FORMAT_PACKET;
+ trs_param.dst_format = ODP_DMA_FORMAT_ADDR;
+ trs_param.src_seg = &src_seg;
+ trs_param.dst_seg = &dst_seg;
+
+ for (i = 0; i < RETRIES; i++) {
+ ret = odp_dma_transfer(dma, &trs_param, NULL);
+
+ if (ret)
+ break;
+ }
+
+ CU_ASSERT(ret > 0);
+
+ if (ret > 0) {
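+ /* Walk the packet segments and compare each one against the
+ * corresponding slice of the destination buffer. */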
+ odp_packet_seg_t pkt_seg = odp_packet_first_seg(pkt);
+ uint8_t *src = odp_packet_data(pkt);
+ uint32_t seg_len = odp_packet_seg_len(pkt);
+
+ src += OFFSET;
+ seg_len -= OFFSET;
+
+ for (i = 0; i < num_segs; i++) {
+ if (i == (num_segs - 1))
+ seg_len -= TRAILER;
+
+ CU_ASSERT(check_equal(src, dst, seg_len) == 0);
+
+ dst += seg_len;
+ pkt_seg = odp_packet_next_seg(pkt, pkt_seg);
+ if (pkt_seg != ODP_PACKET_SEG_INVALID) {
+ src = odp_packet_seg_data(pkt, pkt_seg);
+ seg_len = odp_packet_seg_data_len(pkt, pkt_seg);
+ }
+ }
+
+ CU_ASSERT(check_zero(global.dst_addr, OFFSET) == 0);
+ CU_ASSERT(check_zero(global.dst_addr + OFFSET + len, TRAILER) == 0);
+ }
+
+ odp_packet_free(pkt);
+ CU_ASSERT(odp_dma_destroy(dma) == 0);
+}
+
+static int check_sync(void)
+{
+ if (global.disabled)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+static int check_event(void)
+{
+ if (global.disabled)
+ return ODP_TEST_INACTIVE;
+
+ if (global.dma_capa.compl_mode_mask & ODP_DMA_COMPL_EVENT)
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
+}
+
+static int check_scheduled(void)
+{
+ if (global.disabled)
+ return ODP_TEST_INACTIVE;
+
+ if (global.dma_capa.queue_type_sched &&
+ (global.dma_capa.compl_mode_mask & ODP_DMA_COMPL_EVENT))
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
+}
+
+static int check_poll(void)
+{
+ if (global.disabled)
+ return ODP_TEST_INACTIVE;
+
+ if (global.dma_capa.compl_mode_mask & ODP_DMA_COMPL_POLL)
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
+}
+
+static int check_sched_none(void)
+{
+ if (global.disabled)
+ return ODP_TEST_INACTIVE;
+
+ if (global.dma_capa.queue_type_sched &&
+ (global.dma_capa.compl_mode_mask & ODP_DMA_COMPL_EVENT) &&
+ (global.dma_capa.compl_mode_mask & ODP_DMA_COMPL_NONE))
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
+}
+
+static int check_poll_none(void)
+{
+ if (global.disabled)
+ return ODP_TEST_INACTIVE;
+
+ if (global.dma_capa.compl_mode_mask & ODP_DMA_COMPL_POLL &&
+ global.dma_capa.compl_mode_mask & ODP_DMA_COMPL_NONE)
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
+}
+
+static void test_dma_addr_to_addr_sync(void)
+{
+ test_dma_addr_to_addr(ODP_DMA_COMPL_SYNC, 1, 0, 0);
+}
+
+static void test_dma_addr_to_addr_sync_mtrs(void)
+{
+ test_dma_addr_to_addr_trs(ODP_DMA_COMPL_SYNC, 2, 0, 0);
+}
+
+static void test_dma_addr_to_addr_sync_mseg(void)
+{
+ if (global.dma_capa.max_src_segs > 1 && global.dma_capa.max_dst_segs > 1)
+ test_dma_addr_to_addr(ODP_DMA_COMPL_SYNC, 2, 0, 0);
+
+ if (global.dma_capa.max_src_segs > 2 && global.dma_capa.max_dst_segs > 2)
+ test_dma_addr_to_addr(ODP_DMA_COMPL_SYNC, 3, 0, 0);
+}
+
+static void test_dma_addr_to_addr_sync_res(void)
+{
+ test_dma_addr_to_addr(ODP_DMA_COMPL_SYNC, 1, 0, RESULT);
+}
+
+static void test_dma_addr_to_pkt_sync(void)
+{
+ test_dma_addr_to_pkt(ODP_DMA_COMPL_SYNC, 0);
+}
+
+static void test_dma_pkt_to_addr_sync(void)
+{
+ test_dma_pkt_to_addr(ODP_DMA_COMPL_SYNC, 0);
+}
+
+static void test_dma_pkt_to_pkt_sync(void)
+{
+ test_dma_pkt_to_pkt(ODP_DMA_COMPL_SYNC, 0);
+}
+
+static void test_dma_addr_to_addr_poll(void)
+{
+ test_dma_addr_to_addr(ODP_DMA_COMPL_POLL, 1, 0, 0);
+}
+
+static void test_dma_addr_to_addr_poll_mtrs(void)
+{
+ test_dma_addr_to_addr_trs(ODP_DMA_COMPL_POLL, 2, 0, 0);
+}
+
+static void test_dma_addr_to_addr_poll_mseg(void)
+{
+ if (global.dma_capa.max_src_segs > 1 && global.dma_capa.max_dst_segs > 1)
+ test_dma_addr_to_addr(ODP_DMA_COMPL_POLL, 2, 0, 0);
+
+ if (global.dma_capa.max_src_segs > 2 && global.dma_capa.max_dst_segs > 2)
+ test_dma_addr_to_addr(ODP_DMA_COMPL_POLL, 3, 0, 0);
+}
+
+static void test_dma_addr_to_pkt_poll(void)
+{
+ test_dma_addr_to_pkt(ODP_DMA_COMPL_POLL, 0);
+}
+
+static void test_dma_pkt_to_addr_poll(void)
+{
+ test_dma_pkt_to_addr(ODP_DMA_COMPL_POLL, 0);
+}
+
+static void test_dma_pkt_to_pkt_poll(void)
+{
+ test_dma_pkt_to_pkt(ODP_DMA_COMPL_POLL, 0);
+}
+
+static void test_dma_addr_to_addr_event(void)
+{
+ test_dma_addr_to_addr(ODP_DMA_COMPL_EVENT, 1, 0, 0);
+}
+
+static void test_dma_addr_to_addr_event_mtrs(void)
+{
+ test_dma_addr_to_addr_trs(ODP_DMA_COMPL_EVENT, 2, 0, 0);
+}
+
+static void test_dma_addr_to_addr_event_mseg(void)
+{
+ if (global.dma_capa.max_src_segs > 1 && global.dma_capa.max_dst_segs > 1)
+ test_dma_addr_to_addr(ODP_DMA_COMPL_EVENT, 2, 0, 0);
+
+ if (global.dma_capa.max_src_segs > 2 && global.dma_capa.max_dst_segs > 2)
+ test_dma_addr_to_addr(ODP_DMA_COMPL_EVENT, 3, 0, 0);
+}
+
+static void test_dma_addr_to_pkt_event(void)
+{
+ test_dma_addr_to_pkt(ODP_DMA_COMPL_EVENT, 0);
+}
+
+static void test_dma_pkt_to_addr_event(void)
+{
+ test_dma_pkt_to_addr(ODP_DMA_COMPL_EVENT, 0);
+}
+
+static void test_dma_pkt_to_pkt_event(void)
+{
+ test_dma_pkt_to_pkt(ODP_DMA_COMPL_EVENT, 0);
+}
+
+static void test_dma_addr_to_addr_poll_none(void)
+{
+ test_dma_addr_to_addr_trs(ODP_DMA_COMPL_POLL | ODP_DMA_COMPL_NONE, 2, 0, 0);
+}
+
+static void test_dma_addr_to_addr_event_none(void)
+{
+ test_dma_addr_to_addr_trs(ODP_DMA_COMPL_EVENT | ODP_DMA_COMPL_NONE, 2, 0, 0);
+}
+
+static void test_dma_multi_addr_to_addr_sync(void)
+{
+ test_dma_addr_to_addr(ODP_DMA_COMPL_SYNC, 1, MULTI, 0);
+}
+
+static void test_dma_multi_addr_to_addr_sync_res(void)
+{
+ test_dma_addr_to_addr(ODP_DMA_COMPL_SYNC, 1, MULTI, RESULT);
+}
+
+static void test_dma_multi_addr_to_pkt_sync(void)
+{
+ test_dma_addr_to_pkt(ODP_DMA_COMPL_SYNC, MULTI);
+}
+
+static void test_dma_multi_pkt_to_addr_sync(void)
+{
+ test_dma_pkt_to_addr(ODP_DMA_COMPL_SYNC, MULTI);
+}
+
+static void test_dma_multi_pkt_to_pkt_sync(void)
+{
+ test_dma_pkt_to_pkt(ODP_DMA_COMPL_SYNC, MULTI);
+}
+
+static void test_dma_multi_addr_to_addr_poll(void)
+{
+ test_dma_addr_to_addr(ODP_DMA_COMPL_POLL, 1, MULTI, 0);
+}
+
+static void test_dma_multi_addr_to_pkt_poll(void)
+{
+ test_dma_addr_to_pkt(ODP_DMA_COMPL_POLL, MULTI);
+}
+
+static void test_dma_multi_pkt_to_addr_poll(void)
+{
+ test_dma_pkt_to_addr(ODP_DMA_COMPL_POLL, MULTI);
+}
+
+static void test_dma_multi_pkt_to_pkt_poll(void)
+{
+ test_dma_pkt_to_pkt(ODP_DMA_COMPL_POLL, MULTI);
+}
+
+static void test_dma_multi_addr_to_addr_event(void)
+{
+ test_dma_addr_to_addr(ODP_DMA_COMPL_EVENT, 1, MULTI, 0);
+}
+
+static void test_dma_multi_addr_to_pkt_event(void)
+{
+ test_dma_addr_to_pkt(ODP_DMA_COMPL_EVENT, MULTI);
+}
+
+static void test_dma_multi_pkt_to_addr_event(void)
+{
+ test_dma_pkt_to_addr(ODP_DMA_COMPL_EVENT, MULTI);
+}
+
+static void test_dma_multi_pkt_to_pkt_event(void)
+{
+ test_dma_pkt_to_pkt(ODP_DMA_COMPL_EVENT, MULTI);
+}
+
+odp_testinfo_t dma_suite[] = {
+ ODP_TEST_INFO(test_dma_capability),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_param, check_sync),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_debug, check_sync),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_compl_pool, check_event),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_addr_to_addr_sync, check_sync),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_addr_to_addr_sync_mtrs, check_sync),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_addr_to_addr_sync_mseg, check_sync),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_addr_to_addr_sync_res, check_sync),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_addr_to_pkt_sync, check_sync),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_pkt_to_addr_sync, check_sync),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_pkt_to_pkt_sync, check_sync),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_addr_to_addr_poll, check_poll),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_addr_to_addr_poll_mtrs, check_poll),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_addr_to_addr_poll_mseg, check_poll),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_addr_to_pkt_poll, check_poll),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_pkt_to_addr_poll, check_poll),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_pkt_to_pkt_poll, check_poll),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_addr_to_addr_event, check_scheduled),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_addr_to_addr_event_mtrs, check_scheduled),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_addr_to_addr_event_mseg, check_scheduled),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_addr_to_pkt_event, check_scheduled),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_pkt_to_addr_event, check_scheduled),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_pkt_to_pkt_event, check_scheduled),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_addr_to_addr_poll_none, check_poll_none),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_addr_to_addr_event_none, check_sched_none),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_multi_addr_to_addr_sync, check_sync),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_multi_addr_to_addr_sync_res, check_sync),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_multi_addr_to_pkt_sync, check_sync),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_multi_pkt_to_addr_sync, check_sync),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_multi_pkt_to_pkt_sync, check_sync),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_multi_addr_to_addr_poll, check_poll),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_multi_addr_to_pkt_poll, check_poll),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_multi_pkt_to_addr_poll, check_poll),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_multi_pkt_to_pkt_poll, check_poll),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_multi_addr_to_addr_event, check_scheduled),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_multi_addr_to_pkt_event, check_scheduled),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_multi_pkt_to_addr_event, check_scheduled),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_multi_pkt_to_pkt_event, check_scheduled),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_pkt_segs_to_addr_sync, check_sync),
+ ODP_TEST_INFO_NULL
+};
+
+odp_suiteinfo_t dma_suites[] = {
+ {"DMA", dma_suite_init, dma_suite_term, dma_suite},
+ ODP_SUITE_INFO_NULL
+};
+
+int main(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(argc, argv))
+ return -1;
+
+ ret = odp_cunit_register(dma_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/validation/api/ipsec/ipsec_test_out.c b/test/validation/api/ipsec/ipsec_test_out.c
index aab480bdd..799988c38 100644
--- a/test/validation/api/ipsec/ipsec_test_out.c
+++ b/test/validation/api/ipsec/ipsec_test_out.c
@@ -1623,6 +1623,7 @@ static void ipsec_test_default_values(void)
CU_ASSERT(!config.inbound.reass_inline);
CU_ASSERT(config.outbound.all_chksum == 0);
CU_ASSERT(!config.stats_en);
+ CU_ASSERT(!config.vector.enable);
odp_ipsec_sa_param_init(&sa_param);
CU_ASSERT(sa_param.proto == ODP_IPSEC_ESP);
diff --git a/test/validation/api/lock/lock.c b/test/validation/api/lock/lock.c
index 33698d6ef..394cd820a 100644
--- a/test/validation/api/lock/lock.c
+++ b/test/validation/api/lock/lock.c
@@ -1183,8 +1183,7 @@ static int lock_init(odp_instance_t *inst)
}
global_shm = odp_shm_reserve(GLOBAL_SHM_NAME,
- sizeof(global_shared_mem_t), 64,
- ODP_SHM_SW_ONLY);
+ sizeof(global_shared_mem_t), 64, 0);
if (ODP_SHM_INVALID == global_shm) {
fprintf(stderr, "Unable reserve memory for global_shm\n");
return -1;
diff --git a/test/validation/api/pktio/pktio.c b/test/validation/api/pktio/pktio.c
index 3b6cf52dd..10a2297ef 100644
--- a/test/validation/api/pktio/pktio.c
+++ b/test/validation/api/pktio/pktio.c
@@ -685,7 +685,7 @@ static int get_packets(pktio_info_t *pktio_rx, odp_packet_t pkt_tbl[],
/* convert events to packets, discarding any non-packet events */
for (i = 0; i < num_evts; ++i) {
- if (!vector_mode && odp_event_type(evt_tbl[i]) == ODP_EVENT_PACKET) {
+ if (odp_event_type(evt_tbl[i]) == ODP_EVENT_PACKET) {
pkt_tbl[num_pkts++] = odp_packet_from_event(evt_tbl[i]);
} else if (vector_mode && odp_event_type(evt_tbl[i]) == ODP_EVENT_PACKET_VECTOR &&
num_pkts < num) {
@@ -1579,6 +1579,57 @@ static void pktio_test_mac(void)
CU_ASSERT(0 == ret);
}
+static void pktio_test_default_values(void)
+{
+ odp_pktio_param_t pktio_p;
+ odp_pktin_queue_param_t qp_in;
+ odp_pktout_queue_param_t qp_out;
+ odp_pktio_config_t pktio_conf;
+
+ memset(&pktio_p, 0x55, sizeof(pktio_p));
+ odp_pktio_param_init(&pktio_p);
+ CU_ASSERT_EQUAL(pktio_p.in_mode, ODP_PKTIN_MODE_DIRECT);
+ CU_ASSERT_EQUAL(pktio_p.out_mode, ODP_PKTOUT_MODE_DIRECT);
+
+ memset(&qp_in, 0x55, sizeof(qp_in));
+ odp_pktin_queue_param_init(&qp_in);
+ CU_ASSERT_EQUAL(qp_in.op_mode, ODP_PKTIO_OP_MT);
+ CU_ASSERT_EQUAL(qp_in.classifier_enable, 0);
+ CU_ASSERT_EQUAL(qp_in.hash_enable, 0);
+ CU_ASSERT_EQUAL(qp_in.hash_proto.all_bits, 0);
+ CU_ASSERT_EQUAL(qp_in.num_queues, 1);
+ CU_ASSERT_EQUAL(qp_in.queue_param.enq_mode, ODP_QUEUE_OP_MT);
+ CU_ASSERT_EQUAL(qp_in.queue_param.sched.prio, odp_schedule_default_prio());
+ CU_ASSERT_EQUAL(qp_in.queue_param.sched.sync, ODP_SCHED_SYNC_PARALLEL);
+ CU_ASSERT_EQUAL(qp_in.queue_param.sched.group, ODP_SCHED_GROUP_ALL);
+ CU_ASSERT_EQUAL(qp_in.queue_param.sched.lock_count, 0);
+ CU_ASSERT_EQUAL(qp_in.queue_param.order, ODP_QUEUE_ORDER_KEEP);
+ CU_ASSERT_EQUAL(qp_in.queue_param.nonblocking, ODP_BLOCKING);
+ CU_ASSERT_EQUAL(qp_in.queue_param.context, NULL);
+ CU_ASSERT_EQUAL(qp_in.queue_param.context_len, 0);
+ CU_ASSERT_EQUAL(qp_in.queue_param_ovr, NULL);
+ CU_ASSERT_EQUAL(qp_in.vector.enable, false);
+
+ memset(&qp_out, 0x55, sizeof(qp_out));
+ odp_pktout_queue_param_init(&qp_out);
+ CU_ASSERT_EQUAL(qp_out.op_mode, ODP_PKTIO_OP_MT);
+ CU_ASSERT_EQUAL(qp_out.num_queues, 1);
+
+ memset(&pktio_conf, 0x55, sizeof(pktio_conf));
+ odp_pktio_config_init(&pktio_conf);
+ CU_ASSERT_EQUAL(pktio_conf.pktin.all_bits, 0);
+ CU_ASSERT_EQUAL(pktio_conf.pktout.all_bits, 0);
+ CU_ASSERT_EQUAL(pktio_conf.parser.layer, ODP_PROTO_LAYER_ALL);
+ CU_ASSERT_EQUAL(pktio_conf.enable_loop, false);
+ CU_ASSERT_EQUAL(pktio_conf.inbound_ipsec, false);
+ CU_ASSERT_EQUAL(pktio_conf.outbound_ipsec, false);
+ CU_ASSERT_EQUAL(pktio_conf.enable_lso, false);
+ CU_ASSERT_EQUAL(pktio_conf.reassembly.en_ipv4, false);
+ CU_ASSERT_EQUAL(pktio_conf.reassembly.en_ipv6, false);
+ CU_ASSERT_EQUAL(pktio_conf.reassembly.max_wait_time, 0);
+ CU_ASSERT_EQUAL(pktio_conf.reassembly.max_num_frags, 2);
+}
+
static void pktio_test_open(void)
{
odp_pktio_t pktio;
@@ -4752,6 +4803,7 @@ static int pktv_suite_term(void)
}
odp_testinfo_t pktio_suite_unsegmented[] = {
+ ODP_TEST_INFO(pktio_test_default_values),
ODP_TEST_INFO(pktio_test_open),
ODP_TEST_INFO(pktio_test_lookup),
ODP_TEST_INFO(pktio_test_index),
diff --git a/test/validation/api/shmem/shmem.c b/test/validation/api/shmem/shmem.c
index 5d7900eb6..3bd164350 100644
--- a/test/validation/api/shmem/shmem.c
+++ b/test/validation/api/shmem/shmem.c
@@ -1,5 +1,5 @@
-/* Copyright (c) 2019, Nokia
- * Copyright (c) 2014-2018, Linaro Limited
+/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2019-2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -7,6 +7,7 @@
#include <odp_api.h>
#include <odp_cunit_common.h>
+#include <odp/helper/odph_api.h>
#include <stdlib.h>
#define ALIGN_SIZE (128)
@@ -71,6 +72,9 @@ typedef struct {
int data[BIG_MEM];
} shared_test_data_big_t;
+/* SHM capability saved at suite init phase */
+static odp_shm_capability_t _global_shm_capa;
+
/*
* thread part for the shmem_test_basic test
*/
@@ -212,6 +216,35 @@ static void shmem_test_multi_thread(void)
CU_ASSERT(0 == odp_shm_free(shm));
}
+static void shmem_test_capability(void)
+{
+ odp_shm_capability_t capa;
+
+ CU_ASSERT_FATAL(odp_shm_capability(&capa) == 0);
+
+ CU_ASSERT(capa.max_blocks);
+
+ printf("\nSHM capability\n--------------\n");
+
+ printf(" max_blocks: %u\n", capa.max_blocks);
+ printf(" max_size: %" PRIu64 "\n", capa.max_size);
+ printf(" max_align: %" PRIu64 "\n", capa.max_align);
+ printf(" flags: ");
+ if (capa.flags & ODP_SHM_PROC)
+ printf("ODP_SHM_PROC ");
+ if (capa.flags & ODP_SHM_SINGLE_VA)
+ printf("ODP_SHM_SINGLE_VA ");
+ if (capa.flags & ODP_SHM_EXPORT)
+ printf("ODP_SHM_EXPORT ");
+ if (capa.flags & ODP_SHM_HP)
+ printf("ODP_SHM_HP ");
+ if (capa.flags & ODP_SHM_HW_ACCESS)
+ printf("ODP_SHM_HW_ACCESS ");
+ if (capa.flags & ODP_SHM_NO_HP)
+ printf("ODP_SHM_NO_HP ");
+ printf("\n\n");
+}
+
static void shmem_test_reserve(void)
{
odp_shm_t shm;
@@ -229,6 +262,13 @@ static void shmem_test_reserve(void)
CU_ASSERT(odp_shm_free(shm) == 0);
}
+static int shmem_check_flag_hp(void)
+{
+ if (_global_shm_capa.flags & ODP_SHM_HP)
+ return ODP_TEST_ACTIVE;
+ return ODP_TEST_INACTIVE;
+}
+
/*
* test reserving memory from huge pages
*/
@@ -268,17 +308,47 @@ static void shmem_test_flag_hp(void)
CU_ASSERT(odp_shm_free(shm) == 0);
}
+static int shmem_check_flag_no_hp(void)
+{
+ if (_global_shm_capa.flags & ODP_SHM_NO_HP)
+ return ODP_TEST_ACTIVE;
+ return ODP_TEST_INACTIVE;
+}
+
+/*
+ * Test reserving memory from normal pages
+ */
+static void shmem_test_flag_no_hp(void)
+{
+ odp_shm_t shm;
+ odp_shm_info_t info;
+
+ shm = odp_shm_reserve(MEM_NAME, sizeof(shared_test_data_t), 0,
+ ODP_SHM_NO_HP);
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+
+ /* Make sure that the memory is reserved from normal pages */
+ CU_ASSERT_FATAL(odp_shm_info(shm, &info) == 0);
+
+ CU_ASSERT(info.page_size == odp_sys_page_size());
+
+ CU_ASSERT(odp_shm_free(shm) == 0);
+}
+
+static int shmem_check_flag_proc(void)
+{
+ if (_global_shm_capa.flags & ODP_SHM_PROC)
+ return ODP_TEST_ACTIVE;
+ return ODP_TEST_INACTIVE;
+}
+
static void shmem_test_flag_proc(void)
{
odp_shm_t shm;
void *addr;
shm = odp_shm_reserve(MEM_NAME, MEDIUM_MEM, ALIGN_SIZE, ODP_SHM_PROC);
-
- if (shm == ODP_SHM_INVALID) {
- printf(" ODP_SHM_PROC flag not supported\n");
- return;
- }
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
addr = odp_shm_addr(shm);
@@ -290,17 +360,20 @@ static void shmem_test_flag_proc(void)
CU_ASSERT(odp_shm_free(shm) == 0);
}
+static int shmem_check_flag_export(void)
+{
+ if (_global_shm_capa.flags & ODP_SHM_EXPORT)
+ return ODP_TEST_ACTIVE;
+ return ODP_TEST_INACTIVE;
+}
+
static void shmem_test_flag_export(void)
{
odp_shm_t shm;
void *addr;
shm = odp_shm_reserve(MEM_NAME, MEDIUM_MEM, ALIGN_SIZE, ODP_SHM_EXPORT);
-
- if (shm == ODP_SHM_INVALID) {
- printf(" ODP_SHM_EXPORT flag not supported\n");
- return;
- }
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
addr = odp_shm_addr(shm);
@@ -312,6 +385,13 @@ static void shmem_test_flag_export(void)
CU_ASSERT(odp_shm_free(shm) == 0);
}
+static int shmem_check_flag_hw_access(void)
+{
+ if (_global_shm_capa.flags & ODP_SHM_HW_ACCESS)
+ return ODP_TEST_ACTIVE;
+ return ODP_TEST_INACTIVE;
+}
+
static void shmem_test_flag_hw_access(void)
{
odp_shm_t shm;
@@ -319,11 +399,7 @@ static void shmem_test_flag_hw_access(void)
shm = odp_shm_reserve(MEM_NAME, MEDIUM_MEM, ALIGN_SIZE,
ODP_SHM_HW_ACCESS);
-
- if (shm == ODP_SHM_INVALID) {
- printf(" ODP_SHM_HW_ACCESS flag not supported\n");
- return;
- }
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
addr = odp_shm_addr(shm);
@@ -645,6 +721,13 @@ static int run_test_singleva_after_fork(void *arg ODP_UNUSED)
return CU_get_number_of_failures();
}
+static int shmem_check_flag_single_va(void)
+{
+ if (_global_shm_capa.flags & ODP_SHM_SINGLE_VA)
+ return ODP_TEST_ACTIVE;
+ return ODP_TEST_INACTIVE;
+}
+
/*
 * test sharing memory reserved after odp_thread creation (e.g. fork()),
 * with the single VA flag.
@@ -793,7 +876,9 @@ static int run_test_stress(void *arg ODP_UNUSED)
/* we just play with the VA flag. randomly setting
* the mlock flag may exceed user ulimit -l
*/
- flags = random_bytes[2] & ODP_SHM_SINGLE_VA;
+ flags = (_global_shm_capa.flags & ODP_SHM_SINGLE_VA) ?
+ (random_bytes[2] & ODP_SHM_SINGLE_VA) : 0;
+
 align = (random_bytes[3] + 1) << 6; /* up to 16 KB */
data = random_bytes[4];
@@ -938,22 +1023,33 @@ static void shmem_test_stress(void)
/* check that no memory is left over: */
}
+static int shm_suite_init(void)
+{
+ if (odp_shm_capability(&_global_shm_capa)) {
+ ODPH_ERR("Failed to read SHM capability\n");
+ return -1;
+ }
+ return 0;
+}
+
odp_testinfo_t shmem_suite[] = {
+ ODP_TEST_INFO(shmem_test_capability),
ODP_TEST_INFO(shmem_test_reserve),
- ODP_TEST_INFO(shmem_test_flag_hp),
- ODP_TEST_INFO(shmem_test_flag_proc),
- ODP_TEST_INFO(shmem_test_flag_export),
- ODP_TEST_INFO(shmem_test_flag_hw_access),
+ ODP_TEST_INFO_CONDITIONAL(shmem_test_flag_hp, shmem_check_flag_hp),
+ ODP_TEST_INFO_CONDITIONAL(shmem_test_flag_no_hp, shmem_check_flag_no_hp),
+ ODP_TEST_INFO_CONDITIONAL(shmem_test_flag_proc, shmem_check_flag_proc),
+ ODP_TEST_INFO_CONDITIONAL(shmem_test_flag_export, shmem_check_flag_export),
+ ODP_TEST_INFO_CONDITIONAL(shmem_test_flag_hw_access, shmem_check_flag_hw_access),
ODP_TEST_INFO(shmem_test_max_reserve),
ODP_TEST_INFO(shmem_test_multi_thread),
ODP_TEST_INFO(shmem_test_reserve_after_fork),
- ODP_TEST_INFO(shmem_test_singleva_after_fork),
+ ODP_TEST_INFO_CONDITIONAL(shmem_test_singleva_after_fork, shmem_check_flag_single_va),
ODP_TEST_INFO(shmem_test_stress),
ODP_TEST_INFO_NULL,
};
odp_suiteinfo_t shmem_suites[] = {
- {"Shared Memory", NULL, NULL, shmem_suite},
+ {"Shared Memory", shm_suite_init, NULL, shmem_suite},
ODP_SUITE_INFO_NULL,
};
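
The shmem rework above replaces in-test "flag not supported" early exits
with capability gating: shm_suite_init() caches odp_shm_capability() output
once, and each ODP_TEST_INFO_CONDITIONAL entry pairs a test with a check
function returning ODP_TEST_ACTIVE or ODP_TEST_INACTIVE. A condensed sketch
of the pattern, assuming the odp_cunit_common.h helpers used throughout this
suite (all names illustrative):

#include <odp_api.h>
#include <odp_cunit_common.h>

static odp_shm_capability_t capa;

static int suite_init(void)
{
	/* Cache capabilities once; the check functions consult this copy */
	return odp_shm_capability(&capa) ? -1 : 0;
}

static int check_hp(void)
{
	/* Run the test only when ODP_SHM_HP reserves are supported */
	return (capa.flags & ODP_SHM_HP) ? ODP_TEST_ACTIVE : ODP_TEST_INACTIVE;
}

static void test_hp(void)
{
	odp_shm_t shm = odp_shm_reserve("hp_block", 4096, 0, ODP_SHM_HP);

	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
	CU_ASSERT(odp_shm_free(shm) == 0);
}

odp_testinfo_t example_suite[] = {
	ODP_TEST_INFO_CONDITIONAL(test_hp, check_hp),
	ODP_TEST_INFO_NULL,
};
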
diff --git a/test/validation/api/thread/thread.c b/test/validation/api/thread/thread.c
index 59b92e45f..6499140a3 100644
--- a/test/validation/api/thread/thread.c
+++ b/test/validation/api/thread/thread.c
@@ -44,7 +44,7 @@ static int thread_global_init(odp_instance_t *inst)
global_shm = odp_shm_reserve(GLOBAL_SHM_NAME,
sizeof(global_shared_mem_t),
- ODP_CACHE_LINE_SIZE, ODP_SHM_SW_ONLY);
+ ODP_CACHE_LINE_SIZE, 0);
if (global_shm == ODP_SHM_INVALID) {
 fprintf(stderr, "Unable to reserve memory for global_shm\n");
return -1;
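
Here (and in timer.c below) the global SHM block is now reserved with
default flags, 0 replacing ODP_SHM_SW_ONLY, in line with that legacy flag
being dropped from the SHM API elsewhere in this merge. A minimal sketch of
the replacement call; name and size are illustrative:

#include <odp_api.h>

static odp_shm_t reserve_global(void)
{
	/* 0 = default flags; no ODP_SHM_SW_ONLY hint anymore */
	return odp_shm_reserve("global_shm", 1024, ODP_CACHE_LINE_SIZE, 0);
}
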
diff --git a/test/validation/api/timer/timer.c b/test/validation/api/timer/timer.c
index 4cdbf05c2..09dcd8c95 100644
--- a/test/validation/api/timer/timer.c
+++ b/test/validation/api/timer/timer.c
@@ -112,7 +112,7 @@ static int timer_global_init(odp_instance_t *inst)
global_shm = odp_shm_reserve(GLOBAL_SHM_NAME,
sizeof(global_shared_mem_t),
- ODP_CACHE_LINE_SIZE, ODP_SHM_SW_ONLY);
+ ODP_CACHE_LINE_SIZE, 0);
if (global_shm == ODP_SHM_INVALID) {
 fprintf(stderr, "Unable to reserve memory for global_shm\n");
return -1;
@@ -298,6 +298,20 @@ static void timer_test_capa(void)
}
}
+static void timer_test_param_init(void)
+{
+ odp_timer_pool_param_t tp_param;
+
+ memset(&tp_param, 0x55, sizeof(odp_timer_pool_param_t));
+
+ odp_timer_pool_param_init(&tp_param);
+ CU_ASSERT(tp_param.res_ns == 0);
+ CU_ASSERT(tp_param.res_hz == 0);
+ CU_ASSERT(tp_param.min_tmo == 0);
+ CU_ASSERT(tp_param.priv == 0);
+ CU_ASSERT(tp_param.clk_src == ODP_CLOCK_DEFAULT);
+}
+
static void timer_test_timeout_pool_alloc(void)
{
odp_pool_t pool;
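
The hunks that follow swap memset(&param, 0, ...) for
odp_timer_pool_param_init() throughout the suite, the supported way to
obtain defaults even if they stop being all-zero; the timer_test_param_init
case just added pins the defaults that the init call must produce (clk_src
== ODP_CLOCK_DEFAULT, zeroed timing fields). A minimal creation sketch
following the new convention, with resolution, timeout and timer-count
values purely illustrative:

#include <odp_api.h>

static odp_timer_pool_t create_pool(void)
{
	odp_timer_pool_param_t tp_param;

	/* Pick up the documented defaults, then set only the fields
	 * this pool actually needs. */
	odp_timer_pool_param_init(&tp_param);
	tp_param.res_ns     = 10 * ODP_TIME_MSEC_IN_NS;
	tp_param.min_tmo    = 10 * ODP_TIME_MSEC_IN_NS;
	tp_param.max_tmo    = 10 * ODP_TIME_SEC_IN_NS;
	tp_param.num_timers = 32;

	/* Caller still invokes odp_timer_pool_start() before arming timers */
	return odp_timer_pool_create("example_tp", &tp_param);
}
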
@@ -408,7 +422,7 @@ static void timer_pool_create_destroy(void)
queue = odp_queue_create("timer_queue", &queue_param);
CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
- memset(&tparam, 0, sizeof(odp_timer_pool_param_t));
+ odp_timer_pool_param_init(&tparam);
tparam.res_ns = global_mem->param.res_ns;
tparam.min_tmo = global_mem->param.min_tmo;
tparam.max_tmo = global_mem->param.max_tmo;
@@ -501,7 +515,7 @@ static void timer_pool_max_res(void)
/* Highest resolution: first in nsec, then in hz */
for (i = 0; i < 2; i++) {
- memset(&tp_param, 0, sizeof(odp_timer_pool_param_t));
+ odp_timer_pool_param_init(&tp_param);
if (i == 0) {
printf("\n Highest resolution %" PRIu64 " nsec\n",
@@ -567,7 +581,7 @@ static void timer_pool_tick_info_run(odp_timer_clk_src_t clk_src)
CU_ASSERT_FATAL(odp_timer_capability(clk_src, &capa) == 0);
/* Highest resolution */
- memset(&tp_param, 0, sizeof(odp_timer_pool_param_t));
+ odp_timer_pool_param_init(&tp_param);
tp_param.res_hz = capa.max_res.res_hz;
tp_param.min_tmo = capa.max_res.min_tmo;
tp_param.max_tmo = capa.max_res.max_tmo;
@@ -663,7 +677,7 @@ static void timer_test_event_type(odp_queue_type_t queue_type,
int num = 5;
odp_timer_t timer[num];
- memset(&timer_param, 0, sizeof(timer_param));
+ odp_timer_pool_param_init(&timer_param);
timer_param.res_ns = global_mem->param.res_ns;
timer_param.min_tmo = global_mem->param.min_tmo;
period_ns = 2 * global_mem->param.min_tmo;
@@ -858,7 +872,7 @@ static void timer_test_queue_type(odp_queue_type_t queue_type, int priv)
res_ns = global_mem->param.res_ns;
- memset(&tparam, 0, sizeof(odp_timer_pool_param_t));
+ odp_timer_pool_param_init(&tparam);
tparam.res_ns = global_mem->param.res_ns;
tparam.min_tmo = global_mem->param.min_tmo;
tparam.max_tmo = global_mem->param.max_tmo;
@@ -1038,7 +1052,7 @@ static void timer_test_cancel(void)
if (pool == ODP_POOL_INVALID)
CU_FAIL_FATAL("Timeout pool create failed");
- memset(&tparam, 0, sizeof(odp_timer_pool_param_t));
+ odp_timer_pool_param_init(&tparam);
tparam.res_ns = global_mem->param.res_ns;
tparam.min_tmo = global_mem->param.min_tmo;
tparam.max_tmo = global_mem->param.max_tmo;
@@ -1147,7 +1161,7 @@ static void timer_test_tmo_limit(odp_queue_type_t queue_type,
max_tmo = timer_capa.max_tmo.max_tmo;
}
- memset(&timer_param, 0, sizeof(timer_param));
+ odp_timer_pool_param_init(&timer_param);
timer_param.res_ns = res_ns;
timer_param.min_tmo = min_tmo;
timer_param.max_tmo = max_tmo;
@@ -1712,7 +1726,7 @@ static void timer_test_all(odp_queue_type_t queue_type)
max_tmo = global_mem->param.max_tmo;
min_tmo = global_mem->param.min_tmo;
- memset(&tparam, 0, sizeof(odp_timer_pool_param_t));
+ odp_timer_pool_param_init(&tparam);
tparam.res_ns = res_ns;
tparam.min_tmo = min_tmo;
tparam.max_tmo = max_tmo;
@@ -1818,6 +1832,7 @@ static void timer_test_sched_all(void)
odp_testinfo_t timer_suite[] = {
ODP_TEST_INFO(timer_test_capa),
+ ODP_TEST_INFO(timer_test_param_init),
ODP_TEST_INFO(timer_test_timeout_pool_alloc),
ODP_TEST_INFO(timer_test_timeout_pool_free),
ODP_TEST_INFO(timer_pool_create_destroy),