aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMatias Elo <matias.elo@nokia.com>2021-08-19 16:50:53 +0300
committerGitHub <noreply@github.com>2021-08-19 16:50:53 +0300
commit9aec81d8e15798d23c4a090c4cd6067976074866 (patch)
treed6be901f263355f3c2b624dd36b813b42add16bc
parent6b6de8d7044d7a484b671a80768cf068f5b56814 (diff)
parentcf4d3329d4a9b53663ee27a904ee88f2cd3f50d8 (diff)
Merge ODP v1.31.0.0v1.31.0.0_DPDK_19.11
Merge ODP linux-generic v1.31.0.0 into ODP-DPDK.
-rw-r--r--.github/workflows/ci-pipeline-arm64.yml22
-rw-r--r--.github/workflows/ci-pipeline.yml18
-rw-r--r--CHANGELOG62
-rw-r--r--config/odp-linux-dpdk.conf10
-rw-r--r--config/odp-linux-generic.conf13
-rw-r--r--configure.ac6
-rw-r--r--doc/implementers-guide/implementers-guide.adoc2
-rw-r--r--helper/include/odp/helper/ipsec.h19
-rw-r--r--helper/ipsec.c61
-rw-r--r--include/Makefile.am60
-rw-r--r--include/odp/api/abi-default/packet.h143
-rw-r--r--include/odp/api/abi-default/packet_types.h169
-rw-r--r--include/odp/api/abi-default/proto_stats.h20
-rw-r--r--include/odp/api/abi-default/proto_stats_types.h37
-rw-r--r--include/odp/api/abi-default/queue.h17
-rw-r--r--include/odp/api/abi-default/queue_types.h36
-rw-r--r--include/odp/api/abi-default/std.h (renamed from include/odp/api/abi-default/std_clib.h)12
-rw-r--r--include/odp/api/abi-default/std_types.h8
-rw-r--r--include/odp/api/abi-default/traffic_mngr.h4
-rw-r--r--include/odp/api/classification.h4
-rw-r--r--include/odp/api/comp.h2
-rw-r--r--include/odp/api/crypto.h4
-rw-r--r--include/odp/api/packet.h2
-rw-r--r--include/odp/api/packet_io.h4
-rw-r--r--include/odp/api/packet_types.h28
-rw-r--r--include/odp/api/proto_stats.h31
-rw-r--r--include/odp/api/proto_stats_types.h28
-rw-r--r--include/odp/api/queue.h1
-rw-r--r--include/odp/api/queue_types.h28
-rw-r--r--include/odp/api/spec/classification.h173
-rw-r--r--include/odp/api/spec/ipsec.h40
-rw-r--r--include/odp/api/spec/packet.h559
-rw-r--r--include/odp/api/spec/packet_io.h5
-rw-r--r--include/odp/api/spec/packet_types.h466
-rw-r--r--include/odp/api/spec/pool.h620
-rw-r--r--include/odp/api/spec/pool_types.h772
-rw-r--r--include/odp/api/spec/proto_stats.h132
-rw-r--r--include/odp/api/spec/proto_stats_types.h126
-rw-r--r--include/odp/api/spec/queue.h17
-rw-r--r--include/odp/api/spec/queue_types.h15
-rw-r--r--include/odp/api/spec/schedule.h2
-rw-r--r--include/odp/api/spec/std.h (renamed from include/odp/api/spec/std_clib.h)28
-rw-r--r--include/odp/api/spec/std_types.h14
-rw-r--r--include/odp/api/spec/traffic_mngr.h380
-rw-r--r--include/odp/api/std.h (renamed from include/odp/api/std_clib.h)8
-rw-r--r--include/odp/api/timer.h2
-rw-r--r--include/odp/arch/arm32-linux/odp/api/abi/packet_types.h7
-rw-r--r--include/odp/arch/arm32-linux/odp/api/abi/proto_stats.h5
-rw-r--r--include/odp/arch/arm32-linux/odp/api/abi/proto_stats_types.h7
-rw-r--r--include/odp/arch/arm32-linux/odp/api/abi/queue_types.h7
-rw-r--r--include/odp/arch/arm32-linux/odp/api/abi/std.h (renamed from include/odp/arch/mips64-linux/odp/api/abi/std_clib.h)2
-rw-r--r--include/odp/arch/arm64-linux/odp/api/abi/packet_types.h7
-rw-r--r--include/odp/arch/arm64-linux/odp/api/abi/proto_stats.h5
-rw-r--r--include/odp/arch/arm64-linux/odp/api/abi/proto_stats_types.h7
-rw-r--r--include/odp/arch/arm64-linux/odp/api/abi/queue_types.h7
-rw-r--r--include/odp/arch/arm64-linux/odp/api/abi/std.h (renamed from include/odp/arch/arm64-linux/odp/api/abi/std_clib.h)2
-rw-r--r--include/odp/arch/default-linux/odp/api/abi/packet_types.h7
-rw-r--r--include/odp/arch/default-linux/odp/api/abi/proto_stats.h5
-rw-r--r--include/odp/arch/default-linux/odp/api/abi/proto_stats_types.h7
-rw-r--r--include/odp/arch/default-linux/odp/api/abi/queue_types.h7
-rw-r--r--include/odp/arch/default-linux/odp/api/abi/std.h (renamed from include/odp/arch/default-linux/odp/api/abi/std_clib.h)2
-rw-r--r--include/odp/arch/mips64-linux/odp/api/abi/packet_types.h7
-rw-r--r--include/odp/arch/mips64-linux/odp/api/abi/proto_stats.h5
-rw-r--r--include/odp/arch/mips64-linux/odp/api/abi/proto_stats_types.h7
-rw-r--r--include/odp/arch/mips64-linux/odp/api/abi/queue_types.h7
-rw-r--r--include/odp/arch/mips64-linux/odp/api/abi/std.h (renamed from include/odp/arch/power64-linux/odp/api/abi/std_clib.h)2
-rw-r--r--include/odp/arch/power64-linux/odp/api/abi/packet_types.h7
-rw-r--r--include/odp/arch/power64-linux/odp/api/abi/proto_stats.h5
-rw-r--r--include/odp/arch/power64-linux/odp/api/abi/proto_stats_types.h7
-rw-r--r--include/odp/arch/power64-linux/odp/api/abi/queue_types.h7
-rw-r--r--include/odp/arch/power64-linux/odp/api/abi/std.h (renamed from include/odp/arch/arm32-linux/odp/api/abi/std_clib.h)2
-rw-r--r--include/odp/arch/x86_32-linux/odp/api/abi/packet_types.h7
-rw-r--r--include/odp/arch/x86_32-linux/odp/api/abi/proto_stats.h5
-rw-r--r--include/odp/arch/x86_32-linux/odp/api/abi/proto_stats_types.h7
-rw-r--r--include/odp/arch/x86_32-linux/odp/api/abi/queue_types.h7
-rw-r--r--include/odp/arch/x86_32-linux/odp/api/abi/std.h7
-rw-r--r--include/odp/arch/x86_32-linux/odp/api/abi/std_clib.h7
-rw-r--r--include/odp/arch/x86_64-linux/odp/api/abi/packet_types.h7
-rw-r--r--include/odp/arch/x86_64-linux/odp/api/abi/proto_stats.h5
-rw-r--r--include/odp/arch/x86_64-linux/odp/api/abi/proto_stats_types.h7
-rw-r--r--include/odp/arch/x86_64-linux/odp/api/abi/queue_types.h7
-rw-r--r--include/odp/arch/x86_64-linux/odp/api/abi/std.h7
-rw-r--r--include/odp/arch/x86_64-linux/odp/api/abi/std_clib.h7
-rw-r--r--include/odp_api.h3
-rw-r--r--platform/linux-dpdk/Makefile.am12
-rw-r--r--platform/linux-dpdk/include-abi/odp/api/abi/packet.h110
-rw-r--r--platform/linux-dpdk/include-abi/odp/api/abi/packet_types.h139
l---------platform/linux-dpdk/include-abi/odp/api/abi/proto_stats.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/proto_stats_types.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/queue_types.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/std.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/std_clib.h1
-rw-r--r--platform/linux-dpdk/include/odp/api/plat/packet_flag_inlines.h2
-rw-r--r--platform/linux-dpdk/include/odp/api/plat/packet_inlines.h1
-rw-r--r--platform/linux-dpdk/include/odp/api/plat/std_inlines.h (renamed from platform/linux-dpdk/include/odp/api/plat/std_clib_inlines.h)4
-rw-r--r--platform/linux-dpdk/include/odp_pool_internal.h10
-rw-r--r--platform/linux-dpdk/m4/odp_libconfig.m42
-rw-r--r--platform/linux-dpdk/odp_crypto.c9
-rw-r--r--platform/linux-dpdk/odp_packet.c64
-rw-r--r--platform/linux-dpdk/odp_pool.c61
-rw-r--r--platform/linux-dpdk/odp_std_api.c (renamed from platform/linux-dpdk/odp_std_clib_api.c)4
-rw-r--r--platform/linux-dpdk/test/crypto.conf2
-rw-r--r--platform/linux-dpdk/test/sched-basic.conf5
-rw-r--r--platform/linux-generic/Makefile.am12
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/packet.h112
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/packet_types.h141
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/proto_stats.h27
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/proto_stats_types.h40
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/queue.h17
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/queue_types.h42
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/std.h (renamed from platform/linux-generic/include-abi/odp/api/abi/std_clib.h)6
-rw-r--r--platform/linux-generic/include/odp/api/plat/packet_flag_inlines.h2
-rw-r--r--platform/linux-generic/include/odp/api/plat/packet_inlines.h1
-rw-r--r--platform/linux-generic/include/odp/api/plat/queue_inline_types.h2
-rw-r--r--platform/linux-generic/include/odp/api/plat/std_inlines.h (renamed from platform/linux-generic/include/odp/api/plat/std_clib_inlines.h)4
-rw-r--r--platform/linux-generic/include/odp_classification_datamodel.h15
-rw-r--r--platform/linux-generic/include/odp_classification_internal.h47
-rw-r--r--platform/linux-generic/include/odp_packet_internal.h4
-rw-r--r--platform/linux-generic/include/odp_pool_internal.h20
-rw-r--r--platform/linux-generic/include/odp_traffic_mngr_internal.h7
-rw-r--r--platform/linux-generic/m4/odp_libconfig.m42
-rw-r--r--platform/linux-generic/odp_classification.c72
-rw-r--r--platform/linux-generic/odp_ipsec.c4
-rw-r--r--platform/linux-generic/odp_ipsec_sad.c6
-rw-r--r--platform/linux-generic/odp_packet.c181
-rw-r--r--platform/linux-generic/odp_packet_io.c95
-rw-r--r--platform/linux-generic/odp_packet_vector.c2
-rw-r--r--platform/linux-generic/odp_pool.c517
-rw-r--r--platform/linux-generic/odp_schedule_basic.c297
-rw-r--r--platform/linux-generic/odp_std.c (renamed from platform/linux-generic/odp_fractional.c)2
-rw-r--r--platform/linux-generic/odp_std_api.c (renamed from platform/linux-generic/odp_std_clib_api.c)4
-rw-r--r--platform/linux-generic/odp_traffic_mngr.c153
-rw-r--r--platform/linux-generic/pktio/dpdk.c41
-rw-r--r--platform/linux-generic/test/inline-timer.conf2
-rw-r--r--platform/linux-generic/test/packet_align.conf2
-rw-r--r--platform/linux-generic/test/process-mode.conf2
-rw-r--r--platform/linux-generic/test/sched-basic.conf5
-rw-r--r--test/common/Makefile.am3
-rw-r--r--test/common/test_common_macros.h17
-rw-r--r--test/m4/configure.m42
-rw-r--r--test/performance/odp_sched_perf.c125
-rw-r--r--test/validation/api/Makefile.am4
-rw-r--r--test/validation/api/classification/odp_classification_tests.c80
-rw-r--r--test/validation/api/ipsec/ipsec.c4
-rw-r--r--test/validation/api/ipsec/ipsec_test_in.c58
-rw-r--r--test/validation/api/ipsec/ipsec_test_out.c2
-rw-r--r--test/validation/api/pktio/pktio.c182
-rw-r--r--test/validation/api/pool/pool.c564
-rw-r--r--test/validation/api/std/.gitignore1
-rw-r--r--test/validation/api/std/Makefile.am4
-rw-r--r--test/validation/api/std/std.c (renamed from test/validation/api/std_clib/std_clib.c)20
-rw-r--r--test/validation/api/std_clib/.gitignore1
-rw-r--r--test/validation/api/std_clib/Makefile.am4
-rw-r--r--test/validation/api/system/system.c8
-rw-r--r--test/validation/api/traffic_mngr/traffic_mngr.c449
155 files changed, 6231 insertions, 2046 deletions
diff --git a/.github/workflows/ci-pipeline-arm64.yml b/.github/workflows/ci-pipeline-arm64.yml
index 936a4ac2d..ec7268463 100644
--- a/.github/workflows/ci-pipeline-arm64.yml
+++ b/.github/workflows/ci-pipeline-arm64.yml
@@ -71,16 +71,6 @@ jobs:
-e CONF="${CONF}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH}-native /odp/scripts/ci/out_of_tree.sh
- Build_sched_config:
- if: ${{ github.repository == 'OpenDataPlane/odp' }}
- runs-on: [self-hosted, ARM64]
- steps:
- - uses: AutoModality/action-clean@v1.1.0
- - uses: actions/checkout@v2
- - run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${CC}"
- -e CONF="--enable-debug=full" -e ODP_CONFIG_FILE=/odp/platform/linux-generic/test/sched-basic.conf
- $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH}-native /odp/scripts/ci/build_${ARCH}.sh
-
Run_distcheck:
if: ${{ github.repository == 'OpenDataPlane/odp' }}
runs-on: [self-hosted, ARM64]
@@ -135,6 +125,18 @@ jobs:
if: ${{ failure() }}
run: find . -name "*.trs" | xargs grep -l '^.test-result. FAIL' | while read trs ; do echo FAILURE detected at $trs; cat ${trs%%.trs}.log ; done
+ Run_sched_config:
+ if: ${{ github.repository == 'OpenDataPlane/odp' }}
+ runs-on: [self-hosted, ARM64]
+ steps:
+ - uses: AutoModality/action-clean@v1.1.0
+ - uses: actions/checkout@v2
+ - run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${CC}" -e ARCH="${ARCH}"
+ -e CONF="${CONF}" -e ODP_CONFIG_FILE=/odp/platform/linux-generic/test/sched-basic.conf $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH}-native /odp/scripts/ci/check.sh
+ - name: Failure log
+ if: ${{ failure() }}
+ run: find . -name "*.trs" | xargs grep -l '^.test-result. FAIL' | while read trs ; do echo FAILURE detected at $trs; cat ${trs%%.trs}.log ; done
+
Run_scheduler_sp:
if: ${{ github.repository == 'OpenDataPlane/odp' }}
runs-on: [self-hosted, ARM64]
diff --git a/.github/workflows/ci-pipeline.yml b/.github/workflows/ci-pipeline.yml
index 47cfb3922..84d056d3e 100644
--- a/.github/workflows/ci-pipeline.yml
+++ b/.github/workflows/ci-pipeline.yml
@@ -178,14 +178,6 @@ jobs:
- run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${CC}" -e ODP_LIB_NAME="libodp-linux"
-e CONF="${CONF}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH} /odp/scripts/ci/build_${ARCH}.sh
- Build_sched_config:
- runs-on: ubuntu-18.04
- steps:
- - uses: actions/checkout@v2
- - run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${CC}"
- -e CONF="--enable-debug=full" -e ODP_CONFIG_FILE=/odp/platform/linux-dpdk/test/sched-basic.conf
- $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH} /odp/scripts/ci/build_${ARCH}.sh
-
Run_coverage:
runs-on: ubuntu-18.04
steps:
@@ -246,6 +238,16 @@ jobs:
if: ${{ failure() }}
run: find . -name "*.trs" | xargs grep -l '^.test-result. FAIL' | while read trs ; do echo FAILURE detected at $trs; cat ${trs%%.trs}.log ; done
+ Run_sched_config:
+ runs-on: ubuntu-18.04
+ steps:
+ - uses: actions/checkout@v2
+ - run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${CC}" -e ARCH="${ARCH}"
+ -e CONF="${CONF}" -e ODP_CONFIG_FILE=/odp/platform/linux-dpdk/test/sched-basic.conf $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH} /odp/scripts/ci/check.sh
+ - name: Failure log
+ if: ${{ failure() }}
+ run: find . -name "*.trs" | xargs grep -l '^.test-result. FAIL' | while read trs ; do echo FAILURE detected at $trs; cat ${trs%%.trs}.log ; done
+
Run_scheduler_sp:
runs-on: ubuntu-18.04
steps:
diff --git a/CHANGELOG b/CHANGELOG
index 21c13be48..6eaac8ef7 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,3 +1,65 @@
+== OpenDataPlane (1.31.0.0)
+=== Backward incompatible API changes
+==== Traffic Manager
+* Added new TM feature capabilities and an egress specific capability function
+`odp_tm_egress_capabilities()`
+* Deprecated `odp_tm_capabilities()` function which is replaced by
+`odp_tm_egress_capabilities()`
+* Added `odp_tm_capabilities_t.max_schedulers_per_node` capability to express
+the maximum number of schedulers per TM node
+* Added support for non-global packet priority mode
+
+=== Backward compatible API changes
+==== Classifier
+* Added queue specific statistics counters (`odp_cls_queue_stats()`)
+
+==== IPsec
+* Added ICV length into SA configuration parameters
+(`odp_ipsec_crypto_param_t.icv_len`)
+
+==== Packet
+* Moved packet type definitions into a separate `packet_types.h` header to
+enable easier function inlining
+* Added `odp_packet_disassemble()`, `odp_packet_reassemble()`, and other new
+functions for packets allocated from external memory pools
+* Added packet protocol statistics functions `odp_packet_proto_stats_request()`
+and `odp_packet_proto_stats()`
+
+==== Packet IO
+* Added `odp_pktout_config_opt_t.bit.proto_stats_ena` option for enabling packet
+protocol statistics updates
+
+==== Pool
+* Added new concept of external memory pools, which are populated with
+application provided memory
+* Added new `odp_pool_type_t` enumeration
+* Moved pool type definitions into a separate `pool_types.h` header to enable
+easier function inlining
+
+==== Protocol Stats
+* Added new generic protocol statistics framework
+
+==== Queue
+* Moved queue type definitions into a separate `queue_types.h` header to enable
+easier function inlining
+
+==== Std
+* Renamed std_clib module std and moved all generic ODP data types and functions
+there
+
+==== Traffic Manager
+* Increased scheduling weight parameter size (`uint8_t` to `uint32_t`)
+* Added queue specific statistics counters (`odp_tm_queue_stats()`)
+* Added `odp_tm_enq_multi()` function for enqueueing multiple packets at a time
+* Added `odp_tm_queue_params_t.ordered_enqueue` option which can be used to
+control if ordering is enabled
+
+=== Helper (1.1.1)
+* Added `odph_ipsec_auth_icv_len_default()` function for returning the default
+ICV length of an algorithm
+* Added support for `AES-CMAC` into `odph_ipsec_alg_check()`
+* Added default ICV length check into `odph_ipsec_alg_check()`
+
== OpenDataPlane (1.30.1.0)
=== API
==== Packet IO
diff --git a/config/odp-linux-dpdk.conf b/config/odp-linux-dpdk.conf
index cc63c1576..c266a5489 100644
--- a/config/odp-linux-dpdk.conf
+++ b/config/odp-linux-dpdk.conf
@@ -16,7 +16,7 @@
# Mandatory fields
odp_implementation = "linux-dpdk"
-config_file_version = "0.1.12"
+config_file_version = "0.1.13"
# System options
system: {
@@ -111,6 +111,14 @@ sched_basic: {
# counts as non-preferred queues are served less often
prio_spread_weight = 63
+ # Dynamic load balance of scheduler internal queues
+ #
+ # When enabled (1), scheduler checks periodically internal queue load levels and
+ # moves event queues from one spread to another in order to even out the loads.
+ # Load level of an internal queue (group/prio/spread) is measures as number of
+ # event queues allocated to it, divided by number of threads serving it.
+ load_balance = 1
+
# Burst size configuration per priority. The first array element
# represents the highest queue priority. The scheduler tries to get
# burst_size_default[prio] events from a queue and stashes those that
diff --git a/config/odp-linux-generic.conf b/config/odp-linux-generic.conf
index 3ac3e3e58..01c622fe8 100644
--- a/config/odp-linux-generic.conf
+++ b/config/odp-linux-generic.conf
@@ -16,7 +16,7 @@
# Mandatory fields
odp_implementation = "linux-generic"
-config_file_version = "0.1.16"
+config_file_version = "0.1.18"
# System options
system: {
@@ -119,6 +119,9 @@ pktio_dpdk: {
# Store RX RSS hash result as ODP flow hash
set_flow_hash = 0
+ # Enable reception of Ethernet frames sent to any multicast group
+ multicast_en = 1
+
# Driver specific options (use PMD names from DPDK)
net_ixgbe: {
rx_drop_en = 1
@@ -164,6 +167,14 @@ sched_basic: {
# counts as non-preferred queues are served less often
prio_spread_weight = 63
+ # Dynamic load balance of scheduler internal queues
+ #
+ # When enabled (1), scheduler checks periodically internal queue load levels and
+ # moves event queues from one spread to another in order to even out the loads.
+ # Load level of an internal queue (group/prio/spread) is measures as number of
+ # event queues allocated to it, divided by number of threads serving it.
+ load_balance = 1
+
# Burst size configuration per priority. The first array element
# represents the highest queue priority. The scheduler tries to get
# burst_size_default[prio] events from a queue and stashes those that
diff --git a/configure.ac b/configure.ac
index 2f6f4af89..8c17ba6c1 100644
--- a/configure.ac
+++ b/configure.ac
@@ -3,8 +3,8 @@ AC_PREREQ([2.5])
# ODP API version
##########################################################################
m4_define([odpapi_generation_version], [1])
-m4_define([odpapi_major_version], [30])
-m4_define([odpapi_minor_version], [1])
+m4_define([odpapi_major_version], [31])
+m4_define([odpapi_minor_version], [0])
m4_define([odpapi_point_version], [0])
m4_define([odpapi_version],
[odpapi_generation_version.odpapi_major_version.odpapi_minor_version.odpapi_point_version])
@@ -22,7 +22,7 @@ AC_SUBST(ODP_VERSION_API_MINOR)
##########################################################################
m4_define([odph_version_generation], [1])
m4_define([odph_version_major], [1])
-m4_define([odph_version_minor], [0])
+m4_define([odph_version_minor], [1])
m4_define([odph_version],
[odph_version_generation.odph_version_major.odph_version_minor])
diff --git a/doc/implementers-guide/implementers-guide.adoc b/doc/implementers-guide/implementers-guide.adoc
index 9bc8d8b13..922188770 100644
--- a/doc/implementers-guide/implementers-guide.adoc
+++ b/doc/implementers-guide/implementers-guide.adoc
@@ -327,7 +327,7 @@ TESTS = validation/api/pktio/pktio_run.sh \
$(ALL_API_VALIDATION)/queue/queue_main$(EXEEXT) \
$(ALL_API_VALIDATION)/random/random_main$(EXEEXT) \
$(ALL_API_VALIDATION)/scheduler/scheduler_main$(EXEEXT) \
- $(ALL_API_VALIDATION)/std_clib/std_clib_main$(EXEEXT) \
+ $(ALL_API_VALIDATION)/std/std_main$(EXEEXT) \
$(ALL_API_VALIDATION)/thread/thread_main$(EXEEXT) \
$(ALL_API_VALIDATION)/time/time_main$(EXEEXT) \
$(ALL_API_VALIDATION)/timer/timer_main$(EXEEXT) \
diff --git a/helper/include/odp/helper/ipsec.h b/helper/include/odp/helper/ipsec.h
index 66bed5399..1b2dbb77b 100644
--- a/helper/include/odp/helper/ipsec.h
+++ b/helper/include/odp/helper/ipsec.h
@@ -1,4 +1,5 @@
/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -74,9 +75,9 @@ ODP_STATIC_ASSERT(sizeof(odph_ahhdr_t) == ODPH_AHHDR_LEN,
* Check IPSEC algorithm support
*
* Based on the capabilities exposed by the ODP implementation, check whether
- * the specified IPSEC algorithm configuration is supported by the
- * implementation. The caller provides the IPSEC capability structure as an
- * argument to the helper function.
+ * the specified IPSEC algorithm configuration with the default ICV length
+ * is supported by the implementation. The caller provides the IPSEC
+ * capability structure as an argument to the helper function.
*
* @param capa IPSEC capability structure
* @param cipher_alg Cipher algorithm
@@ -94,6 +95,18 @@ int odph_ipsec_alg_check(odp_ipsec_capability_t capa,
uint32_t auth_key_len);
/**
+ * Return the default ICV length of an algorithm
+ *
+ * IPsec API specifies default ICV length for each authentication and
+ * combined mode algorithm. This function returns the default ICV length.
+ *
+ * @param auth_alg Authentication algorithm
+ *
+ * @return The default ICV length in bytes
+ */
+uint32_t odph_ipsec_auth_icv_len_default(odp_auth_alg_t auth_alg);
+
+/**
* @}
*/
diff --git a/helper/ipsec.c b/helper/ipsec.c
index 41daefbfb..3b54bb07f 100644
--- a/helper/ipsec.c
+++ b/helper/ipsec.c
@@ -1,5 +1,6 @@
/* Copyright (c) 2017-2018, Linaro Limited
* Copyright (c) 2020 Marvell
+ * Copyright (c) 2021 Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -8,6 +9,55 @@
#include <odp/helper/ipsec.h>
#include <odp/helper/odph_debug.h>
+uint32_t odph_ipsec_auth_icv_len_default(odp_auth_alg_t auth_alg)
+{
+ uint32_t icv_len;
+
+ switch (auth_alg) {
+ case ODP_AUTH_ALG_NULL:
+ icv_len = 0;
+ break;
+ case ODP_AUTH_ALG_MD5_HMAC:
+ icv_len = 12;
+ break;
+ case ODP_AUTH_ALG_SHA1_HMAC:
+ icv_len = 12;
+ break;
+ case ODP_AUTH_ALG_SHA256_HMAC:
+ icv_len = 16;
+ break;
+ case ODP_AUTH_ALG_SHA384_HMAC:
+ icv_len = 24;
+ break;
+ case ODP_AUTH_ALG_SHA512_HMAC:
+ icv_len = 32;
+ break;
+ case ODP_AUTH_ALG_AES_GCM:
+ icv_len = 16;
+ break;
+ case ODP_AUTH_ALG_AES_GMAC:
+ icv_len = 16;
+ break;
+ case ODP_AUTH_ALG_AES_CCM:
+ icv_len = 16;
+ break;
+ case ODP_AUTH_ALG_AES_CMAC:
+ icv_len = 12;
+ break;
+ case ODP_AUTH_ALG_AES_XCBC_MAC:
+ icv_len = 12;
+ break;
+ case ODP_AUTH_ALG_CHACHA20_POLY1305:
+ icv_len = 16;
+ break;
+ default:
+ ODPH_DBG("Unsupported authentication algorithm\n");
+ icv_len = 0;
+ break;
+ }
+ return icv_len;
+}
+
int odph_ipsec_alg_check(odp_ipsec_capability_t capa,
odp_cipher_alg_t cipher_alg,
uint32_t cipher_key_len,
@@ -15,6 +65,7 @@ int odph_ipsec_alg_check(odp_ipsec_capability_t capa,
uint32_t auth_key_len)
{
int i, num, max_capa;
+ uint32_t default_icv_len;
odp_bool_t found;
/* Check whether requested cipher algorithm is supported */
@@ -98,6 +149,10 @@ int odph_ipsec_alg_check(odp_ipsec_capability_t capa,
if (!capa.auths.bit.aes_ccm)
return -1;
break;
+ case ODP_AUTH_ALG_AES_CMAC:
+ if (!capa.auths.bit.aes_cmac)
+ return -1;
+ break;
case ODP_AUTH_ALG_CHACHA20_POLY1305:
if (!capa.auths.bit.chacha20_poly1305)
return -1;
@@ -146,16 +201,18 @@ int odph_ipsec_alg_check(odp_ipsec_capability_t capa,
return -1;
}
+ default_icv_len = odph_ipsec_auth_icv_len_default(auth_alg);
found = false;
for (i = 0; i < num; i++) {
- if (auth_capa[i].key_len == auth_key_len) {
+ if (auth_capa[i].key_len == auth_key_len &&
+ auth_capa[i].icv_len == default_icv_len) {
found = 1;
break;
}
}
if (!found) {
- ODPH_DBG("Unsupported auth key length\n");
+ ODPH_DBG("Unsupported auth key length & ICV length pair\n");
return -1;
}
diff --git a/include/Makefile.am b/include/Makefile.am
index 98cb9117f..55a21a539 100644
--- a/include/Makefile.am
+++ b/include/Makefile.am
@@ -31,12 +31,16 @@ odpapiinclude_HEADERS = \
odp/api/init.h \
odp/api/ipsec.h \
odp/api/packet.h \
+ odp/api/packet_types.h \
odp/api/packet_flags.h \
odp/api/packet_io.h \
odp/api/packet_io_stats.h \
odp/api/protocols.h \
odp/api/pool.h \
+ odp/api/proto_stats.h \
+ odp/api/proto_stats_types.h \
odp/api/queue.h \
+ odp/api/queue_types.h \
odp/api/random.h \
odp/api/reassembly.h \
odp/api/rwlock.h \
@@ -47,7 +51,7 @@ odpapiinclude_HEADERS = \
odp/api/spinlock.h \
odp/api/spinlock_recursive.h \
odp/api/stash.h \
- odp/api/std_clib.h \
+ odp/api/std.h \
odp/api/std_types.h \
odp/api/support.h \
odp/api/sync.h \
@@ -83,11 +87,15 @@ odpapispecinclude_HEADERS = \
odp/api/spec/init.h \
odp/api/spec/ipsec.h \
odp/api/spec/packet.h \
+ odp/api/spec/packet_types.h \
odp/api/spec/packet_flags.h \
odp/api/spec/packet_io.h \
odp/api/spec/packet_io_stats.h \
odp/api/spec/protocols.h \
odp/api/spec/pool.h \
+ odp/api/spec/pool_types.h \
+ odp/api/spec/proto_stats.h \
+ odp/api/spec/proto_stats_types.h \
odp/api/spec/queue.h \
odp/api/spec/queue_types.h \
odp/api/spec/random.h \
@@ -100,7 +108,7 @@ odpapispecinclude_HEADERS = \
odp/api/spec/spinlock.h \
odp/api/spec/spinlock_recursive.h \
odp/api/spec/stash.h \
- odp/api/spec/std_clib.h \
+ odp/api/spec/std.h \
odp/api/spec/std_types.h \
odp/api/spec/support.h \
odp/api/spec/sync.h \
@@ -137,10 +145,14 @@ odpapiabidefaultinclude_HEADERS = \
odp/api/abi-default/init.h \
odp/api/abi-default/ipsec.h \
odp/api/abi-default/packet.h \
+ odp/api/abi-default/packet_types.h \
odp/api/abi-default/packet_flags.h \
odp/api/abi-default/packet_io.h \
+ odp/api/abi-default/proto_stats.h \
+ odp/api/abi-default/proto_stats_types.h \
odp/api/abi-default/pool.h \
odp/api/abi-default/queue.h \
+ odp/api/abi-default/queue_types.h \
odp/api/abi-default/rwlock.h \
odp/api/abi-default/rwlock_recursive.h \
odp/api/abi-default/schedule.h \
@@ -149,7 +161,7 @@ odpapiabidefaultinclude_HEADERS = \
odp/api/abi-default/spinlock.h \
odp/api/abi-default/spinlock_recursive.h \
odp/api/abi-default/stash.h \
- odp/api/abi-default/std_clib.h \
+ odp/api/abi-default/std.h \
odp/api/abi-default/std_types.h \
odp/api/abi-default/sync.h \
odp/api/abi-default/thread.h \
@@ -183,10 +195,14 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/arm32-linux/odp/api/abi/init.h \
odp/arch/arm32-linux/odp/api/abi/ipsec.h \
odp/arch/arm32-linux/odp/api/abi/packet.h \
+ odp/arch/arm32-linux/odp/api/abi/packet_types.h \
odp/arch/arm32-linux/odp/api/abi/packet_flags.h \
odp/arch/arm32-linux/odp/api/abi/packet_io.h \
odp/arch/arm32-linux/odp/api/abi/pool.h \
+ odp/arch/arm32-linux/odp/api/abi/proto_stats.h \
+ odp/arch/arm32-linux/odp/api/abi/proto_stats_types.h \
odp/arch/arm32-linux/odp/api/abi/queue.h \
+ odp/arch/arm32-linux/odp/api/abi/queue_types.h \
odp/arch/arm32-linux/odp/api/abi/rwlock.h \
odp/arch/arm32-linux/odp/api/abi/rwlock_recursive.h \
odp/arch/arm32-linux/odp/api/abi/schedule.h \
@@ -195,7 +211,7 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/arm32-linux/odp/api/abi/spinlock.h \
odp/arch/arm32-linux/odp/api/abi/spinlock_recursive.h \
odp/arch/arm32-linux/odp/api/abi/stash.h \
- odp/arch/arm32-linux/odp/api/abi/std_clib.h \
+ odp/arch/arm32-linux/odp/api/abi/std.h \
odp/arch/arm32-linux/odp/api/abi/std_types.h \
odp/arch/arm32-linux/odp/api/abi/sync.h \
odp/arch/arm32-linux/odp/api/abi/thread.h \
@@ -225,10 +241,14 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/arm64-linux/odp/api/abi/init.h \
odp/arch/arm64-linux/odp/api/abi/ipsec.h \
odp/arch/arm64-linux/odp/api/abi/packet.h \
+ odp/arch/arm64-linux/odp/api/abi/packet_types.h \
odp/arch/arm64-linux/odp/api/abi/packet_flags.h \
odp/arch/arm64-linux/odp/api/abi/packet_io.h \
odp/arch/arm64-linux/odp/api/abi/pool.h \
+ odp/arch/arm64-linux/odp/api/abi/proto_stats.h \
+ odp/arch/arm64-linux/odp/api/abi/proto_stats_types.h \
odp/arch/arm64-linux/odp/api/abi/queue.h \
+ odp/arch/arm64-linux/odp/api/abi/queue_types.h \
odp/arch/arm64-linux/odp/api/abi/rwlock.h \
odp/arch/arm64-linux/odp/api/abi/rwlock_recursive.h \
odp/arch/arm64-linux/odp/api/abi/schedule.h \
@@ -237,7 +257,7 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/arm64-linux/odp/api/abi/spinlock.h \
odp/arch/arm64-linux/odp/api/abi/spinlock_recursive.h \
odp/arch/arm64-linux/odp/api/abi/stash.h \
- odp/arch/arm64-linux/odp/api/abi/std_clib.h \
+ odp/arch/arm64-linux/odp/api/abi/std.h \
odp/arch/arm64-linux/odp/api/abi/std_types.h \
odp/arch/arm64-linux/odp/api/abi/sync.h \
odp/arch/arm64-linux/odp/api/abi/thread.h \
@@ -267,10 +287,14 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/default-linux/odp/api/abi/init.h \
odp/arch/default-linux/odp/api/abi/ipsec.h \
odp/arch/default-linux/odp/api/abi/packet.h \
+ odp/arch/default-linux/odp/api/abi/packet_types.h \
odp/arch/default-linux/odp/api/abi/packet_flags.h \
odp/arch/default-linux/odp/api/abi/packet_io.h \
odp/arch/default-linux/odp/api/abi/pool.h \
+ odp/arch/default-linux/odp/api/abi/proto_stats.h \
+ odp/arch/default-linux/odp/api/abi/proto_stats_types.h \
odp/arch/default-linux/odp/api/abi/queue.h \
+ odp/arch/default-linux/odp/api/abi/queue_types.h \
odp/arch/default-linux/odp/api/abi/rwlock.h \
odp/arch/default-linux/odp/api/abi/rwlock_recursive.h \
odp/arch/default-linux/odp/api/abi/schedule.h \
@@ -279,7 +303,7 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/default-linux/odp/api/abi/spinlock.h \
odp/arch/default-linux/odp/api/abi/spinlock_recursive.h \
odp/arch/default-linux/odp/api/abi/stash.h \
- odp/arch/default-linux/odp/api/abi/std_clib.h \
+ odp/arch/default-linux/odp/api/abi/std.h \
odp/arch/default-linux/odp/api/abi/std_types.h \
odp/arch/default-linux/odp/api/abi/sync.h \
odp/arch/default-linux/odp/api/abi/thread.h \
@@ -309,10 +333,14 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/mips64-linux/odp/api/abi/init.h \
odp/arch/mips64-linux/odp/api/abi/ipsec.h \
odp/arch/mips64-linux/odp/api/abi/packet.h \
+ odp/arch/mips64-linux/odp/api/abi/packet_types.h \
odp/arch/mips64-linux/odp/api/abi/packet_flags.h \
odp/arch/mips64-linux/odp/api/abi/packet_io.h \
odp/arch/mips64-linux/odp/api/abi/pool.h \
+ odp/arch/mips64-linux/odp/api/abi/proto_stats.h \
+ odp/arch/mips64-linux/odp/api/abi/proto_stats_types.h \
odp/arch/mips64-linux/odp/api/abi/queue.h \
+ odp/arch/mips64-linux/odp/api/abi/queue_types.h \
odp/arch/mips64-linux/odp/api/abi/rwlock.h \
odp/arch/mips64-linux/odp/api/abi/rwlock_recursive.h \
odp/arch/mips64-linux/odp/api/abi/schedule.h \
@@ -321,7 +349,7 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/mips64-linux/odp/api/abi/spinlock.h \
odp/arch/mips64-linux/odp/api/abi/spinlock_recursive.h \
odp/arch/mips64-linux/odp/api/abi/stash.h \
- odp/arch/mips64-linux/odp/api/abi/std_clib.h \
+ odp/arch/mips64-linux/odp/api/abi/std.h \
odp/arch/mips64-linux/odp/api/abi/std_types.h \
odp/arch/mips64-linux/odp/api/abi/sync.h \
odp/arch/mips64-linux/odp/api/abi/thread.h \
@@ -351,10 +379,14 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/power64-linux/odp/api/abi/init.h \
odp/arch/power64-linux/odp/api/abi/ipsec.h \
odp/arch/power64-linux/odp/api/abi/packet.h \
+ odp/arch/power64-linux/odp/api/abi/packet_types.h \
odp/arch/power64-linux/odp/api/abi/packet_flags.h \
odp/arch/power64-linux/odp/api/abi/packet_io.h \
odp/arch/power64-linux/odp/api/abi/pool.h \
+ odp/arch/power64-linux/odp/api/abi/proto_stats.h \
+ odp/arch/power64-linux/odp/api/abi/proto_stats_types.h \
odp/arch/power64-linux/odp/api/abi/queue.h \
+ odp/arch/power64-linux/odp/api/abi/queue_types.h \
odp/arch/power64-linux/odp/api/abi/rwlock.h \
odp/arch/power64-linux/odp/api/abi/rwlock_recursive.h \
odp/arch/power64-linux/odp/api/abi/schedule.h \
@@ -363,7 +395,7 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/power64-linux/odp/api/abi/spinlock.h \
odp/arch/power64-linux/odp/api/abi/spinlock_recursive.h \
odp/arch/power64-linux/odp/api/abi/stash.h \
- odp/arch/power64-linux/odp/api/abi/std_clib.h \
+ odp/arch/power64-linux/odp/api/abi/std.h \
odp/arch/power64-linux/odp/api/abi/std_types.h \
odp/arch/power64-linux/odp/api/abi/sync.h \
odp/arch/power64-linux/odp/api/abi/thread.h \
@@ -393,10 +425,14 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/x86_32-linux/odp/api/abi/init.h \
odp/arch/x86_32-linux/odp/api/abi/ipsec.h \
odp/arch/x86_32-linux/odp/api/abi/packet.h \
+ odp/arch/x86_32-linux/odp/api/abi/packet_types.h \
odp/arch/x86_32-linux/odp/api/abi/packet_flags.h \
odp/arch/x86_32-linux/odp/api/abi/packet_io.h \
odp/arch/x86_32-linux/odp/api/abi/pool.h \
+ odp/arch/x86_32-linux/odp/api/abi/proto_stats.h \
+ odp/arch/x86_32-linux/odp/api/abi/proto_stats_types.h \
odp/arch/x86_32-linux/odp/api/abi/queue.h \
+ odp/arch/x86_32-linux/odp/api/abi/queue_types.h \
odp/arch/x86_32-linux/odp/api/abi/rwlock.h \
odp/arch/x86_32-linux/odp/api/abi/rwlock_recursive.h \
odp/arch/x86_32-linux/odp/api/abi/schedule.h \
@@ -405,7 +441,7 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/x86_32-linux/odp/api/abi/spinlock.h \
odp/arch/x86_32-linux/odp/api/abi/spinlock_recursive.h \
odp/arch/x86_32-linux/odp/api/abi/stash.h \
- odp/arch/x86_32-linux/odp/api/abi/std_clib.h \
+ odp/arch/x86_32-linux/odp/api/abi/std.h \
odp/arch/x86_32-linux/odp/api/abi/std_types.h \
odp/arch/x86_32-linux/odp/api/abi/sync.h \
odp/arch/x86_32-linux/odp/api/abi/thread.h \
@@ -435,10 +471,14 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/x86_64-linux/odp/api/abi/init.h \
odp/arch/x86_64-linux/odp/api/abi/ipsec.h \
odp/arch/x86_64-linux/odp/api/abi/packet.h \
+ odp/arch/x86_64-linux/odp/api/abi/packet_types.h \
odp/arch/x86_64-linux/odp/api/abi/packet_flags.h \
odp/arch/x86_64-linux/odp/api/abi/packet_io.h \
odp/arch/x86_64-linux/odp/api/abi/pool.h \
+ odp/arch/x86_64-linux/odp/api/abi/proto_stats.h \
+ odp/arch/x86_64-linux/odp/api/abi/proto_stats_types.h \
odp/arch/x86_64-linux/odp/api/abi/queue.h \
+ odp/arch/x86_64-linux/odp/api/abi/queue_types.h \
odp/arch/x86_64-linux/odp/api/abi/rwlock.h \
odp/arch/x86_64-linux/odp/api/abi/rwlock_recursive.h \
odp/arch/x86_64-linux/odp/api/abi/schedule.h \
@@ -447,7 +487,7 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/x86_64-linux/odp/api/abi/spinlock.h \
odp/arch/x86_64-linux/odp/api/abi/spinlock_recursive.h \
odp/arch/x86_64-linux/odp/api/abi/stash.h \
- odp/arch/x86_64-linux/odp/api/abi/std_clib.h \
+ odp/arch/x86_64-linux/odp/api/abi/std.h \
odp/arch/x86_64-linux/odp/api/abi/std_types.h \
odp/arch/x86_64-linux/odp/api/abi/sync.h \
odp/arch/x86_64-linux/odp/api/abi/thread.h \
diff --git a/include/odp/api/abi-default/packet.h b/include/odp/api/abi-default/packet.h
index 712f83ef6..3f6e82c5c 100644
--- a/include/odp/api/abi-default/packet.h
+++ b/include/odp/api/abi-default/packet.h
@@ -11,148 +11,7 @@
extern "C" {
#endif
-#include <stdint.h>
-
-/** @internal Dummy type for strong typing */
-typedef struct { char dummy; /**< @internal Dummy */ } _odp_abi_packet_t;
-
-/** @internal Dummy type for strong typing */
-typedef struct { char dummy; /**< *internal Dummy */ } _odp_abi_packet_seg_t;
-
-/** @internal Dummy type for strong typing */
-typedef struct { char dummy; /**< *internal Dummy */ } _odp_abi_packet_vector_t;
-
-/** @internal Dummy type for strong typing */
-typedef struct { char dummy; /**< *internal Dummy */ } _odp_abi_packet_tx_compl_t;
-
-/** @ingroup odp_packet
- * @{
- */
-
-typedef _odp_abi_packet_t *odp_packet_t;
-typedef _odp_abi_packet_seg_t *odp_packet_seg_t;
-typedef _odp_abi_packet_vector_t *odp_packet_vector_t;
-typedef _odp_abi_packet_tx_compl_t *odp_packet_tx_compl_t;
-
-#define ODP_PACKET_INVALID ((odp_packet_t)0)
-#define ODP_PACKET_SEG_INVALID ((odp_packet_seg_t)0)
-#define ODP_PACKET_OFFSET_INVALID 0xffff
-#define ODP_PACKET_VECTOR_INVALID ((odp_packet_vector_t)0)
-#define ODP_PACKET_TX_COMPL_INVALID ((odp_packet_tx_compl_t)0)
-
-typedef uint8_t odp_proto_l2_type_t;
-
-#define ODP_PROTO_L2_TYPE_NONE 0
-#define ODP_PROTO_L2_TYPE_ETH 1
-
-typedef uint8_t odp_proto_l3_type_t;
-
-#define ODP_PROTO_L3_TYPE_NONE 0
-#define ODP_PROTO_L3_TYPE_ARP 1
-#define ODP_PROTO_L3_TYPE_RARP 2
-#define ODP_PROTO_L3_TYPE_MPLS 3
-#define ODP_PROTO_L3_TYPE_IPV4 4
-#define ODP_PROTO_L3_TYPE_IPV6 6
-
-typedef uint8_t odp_proto_l4_type_t;
-
-/* Numbers from IANA Assigned Internet Protocol Numbers list */
-#define ODP_PROTO_L4_TYPE_NONE 0
-#define ODP_PROTO_L4_TYPE_ICMPV4 1
-#define ODP_PROTO_L4_TYPE_IGMP 2
-#define ODP_PROTO_L4_TYPE_IPV4 4
-#define ODP_PROTO_L4_TYPE_TCP 6
-#define ODP_PROTO_L4_TYPE_UDP 17
-#define ODP_PROTO_L4_TYPE_IPV6 41
-#define ODP_PROTO_L4_TYPE_GRE 47
-#define ODP_PROTO_L4_TYPE_ESP 50
-#define ODP_PROTO_L4_TYPE_AH 51
-#define ODP_PROTO_L4_TYPE_ICMPV6 58
-#define ODP_PROTO_L4_TYPE_NO_NEXT 59
-#define ODP_PROTO_L4_TYPE_IPCOMP 108
-#define ODP_PROTO_L4_TYPE_SCTP 132
-#define ODP_PROTO_L4_TYPE_ROHC 142
-
-typedef enum {
- ODP_PACKET_GREEN = 0,
- ODP_PACKET_YELLOW = 1,
- ODP_PACKET_RED = 2,
- ODP_PACKET_ALL_COLORS = 3,
-} odp_packet_color_t;
-
-typedef enum {
- ODP_PACKET_CHKSUM_UNKNOWN = 0,
- ODP_PACKET_CHKSUM_BAD,
- ODP_PACKET_CHKSUM_OK
-} odp_packet_chksum_status_t;
-
-/** Parse result flags */
-typedef struct odp_packet_parse_result_flag_t {
- /** Flags union */
- union {
- /** All flags as a 64 bit word */
- uint64_t all;
-
- /** Flags as a bitfield struct */
- struct {
- /** @see odp_packet_has_error() */
- uint64_t has_error : 1;
- /** @see odp_packet_has_l2_error() */
- uint64_t has_l2_error : 1;
- /** @see odp_packet_has_l3_error() */
- uint64_t has_l3_error : 1;
- /** @see odp_packet_has_l4_error() */
- uint64_t has_l4_error : 1;
- /** @see odp_packet_has_l2() */
- uint64_t has_l2 : 1;
- /** @see odp_packet_has_l3() */
- uint64_t has_l3 : 1;
- /** @see odp_packet_has_l4() */
- uint64_t has_l4 : 1;
- /** @see odp_packet_has_eth() */
- uint64_t has_eth : 1;
- /** @see odp_packet_has_eth_bcast() */
- uint64_t has_eth_bcast : 1;
- /** @see odp_packet_has_eth_mcast() */
- uint64_t has_eth_mcast : 1;
- /** @see odp_packet_has_jumbo() */
- uint64_t has_jumbo : 1;
- /** @see odp_packet_has_vlan() */
- uint64_t has_vlan : 1;
- /** @see odp_packet_has_vlan_qinq() */
- uint64_t has_vlan_qinq : 1;
- /** @see odp_packet_has_arp() */
- uint64_t has_arp : 1;
- /** @see odp_packet_has_ipv4() */
- uint64_t has_ipv4 : 1;
- /** @see odp_packet_has_ipv6() */
- uint64_t has_ipv6 : 1;
- /** @see odp_packet_has_ip_bcast() */
- uint64_t has_ip_bcast : 1;
- /** @see odp_packet_has_ip_mcast() */
- uint64_t has_ip_mcast : 1;
- /** @see odp_packet_has_ipfrag() */
- uint64_t has_ipfrag : 1;
- /** @see odp_packet_has_ipopt() */
- uint64_t has_ipopt : 1;
- /** @see odp_packet_has_ipsec() */
- uint64_t has_ipsec : 1;
- /** @see odp_packet_has_udp() */
- uint64_t has_udp : 1;
- /** @see odp_packet_has_tcp() */
- uint64_t has_tcp : 1;
- /** @see odp_packet_has_sctp() */
- uint64_t has_sctp : 1;
- /** @see odp_packet_has_icmp() */
- uint64_t has_icmp : 1;
- };
- };
-
-} odp_packet_parse_result_flag_t;
-
-/**
- * @}
- */
+/* Empty header required due to the packet inline functions */
#ifdef __cplusplus
}
diff --git a/include/odp/api/abi-default/packet_types.h b/include/odp/api/abi-default/packet_types.h
new file mode 100644
index 000000000..9b886aa10
--- /dev/null
+++ b/include/odp/api/abi-default/packet_types.h
@@ -0,0 +1,169 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_ABI_PACKET_TYPES_H_
+#define ODP_ABI_PACKET_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+/** @internal Dummy type for strong typing */
+typedef struct { char dummy; /**< @internal Dummy */ } _odp_abi_packet_t;
+
+/** @internal Dummy type for strong typing */
+typedef struct { char dummy; /**< *internal Dummy */ } _odp_abi_packet_seg_t;
+
+/** @internal Dummy type for strong typing */
+typedef struct { char dummy; /**< *internal Dummy */ } _odp_abi_packet_buf_t;
+
+/** @internal Dummy type for strong typing */
+typedef struct { char dummy; /**< *internal Dummy */ } _odp_abi_packet_vector_t;
+
+/** @internal Dummy type for strong typing */
+typedef struct { char dummy; /**< *internal Dummy */ } _odp_abi_packet_tx_compl_t;
+
+/** @ingroup odp_packet
+ * @{
+ */
+
+typedef _odp_abi_packet_t *odp_packet_t;
+typedef _odp_abi_packet_seg_t *odp_packet_seg_t;
+typedef _odp_abi_packet_buf_t *odp_packet_buf_t;
+typedef _odp_abi_packet_vector_t *odp_packet_vector_t;
+typedef _odp_abi_packet_tx_compl_t *odp_packet_tx_compl_t;
+
+#define ODP_PACKET_INVALID ((odp_packet_t)0)
+#define ODP_PACKET_SEG_INVALID ((odp_packet_seg_t)0)
+#define ODP_PACKET_BUF_INVALID ((odp_packet_buf_t)0)
+#define ODP_PACKET_OFFSET_INVALID 0xffff
+#define ODP_PACKET_VECTOR_INVALID ((odp_packet_vector_t)0)
+#define ODP_PACKET_TX_COMPL_INVALID ((odp_packet_tx_compl_t)0)
+
+typedef uint8_t odp_proto_l2_type_t;
+
+#define ODP_PROTO_L2_TYPE_NONE 0
+#define ODP_PROTO_L2_TYPE_ETH 1
+
+typedef uint8_t odp_proto_l3_type_t;
+
+#define ODP_PROTO_L3_TYPE_NONE 0
+#define ODP_PROTO_L3_TYPE_ARP 1
+#define ODP_PROTO_L3_TYPE_RARP 2
+#define ODP_PROTO_L3_TYPE_MPLS 3
+#define ODP_PROTO_L3_TYPE_IPV4 4
+#define ODP_PROTO_L3_TYPE_IPV6 6
+
+typedef uint8_t odp_proto_l4_type_t;
+
+/* Numbers from IANA Assigned Internet Protocol Numbers list */
+#define ODP_PROTO_L4_TYPE_NONE 0
+#define ODP_PROTO_L4_TYPE_ICMPV4 1
+#define ODP_PROTO_L4_TYPE_IGMP 2
+#define ODP_PROTO_L4_TYPE_IPV4 4
+#define ODP_PROTO_L4_TYPE_TCP 6
+#define ODP_PROTO_L4_TYPE_UDP 17
+#define ODP_PROTO_L4_TYPE_IPV6 41
+#define ODP_PROTO_L4_TYPE_GRE 47
+#define ODP_PROTO_L4_TYPE_ESP 50
+#define ODP_PROTO_L4_TYPE_AH 51
+#define ODP_PROTO_L4_TYPE_ICMPV6 58
+#define ODP_PROTO_L4_TYPE_NO_NEXT 59
+#define ODP_PROTO_L4_TYPE_IPCOMP 108
+#define ODP_PROTO_L4_TYPE_SCTP 132
+#define ODP_PROTO_L4_TYPE_ROHC 142
+
+/** Packet Color */
+typedef enum {
+ ODP_PACKET_GREEN = 0,
+ ODP_PACKET_YELLOW = 1,
+ ODP_PACKET_RED = 2,
+ ODP_PACKET_ALL_COLORS = 3,
+} odp_packet_color_t;
+
+/** Packet Checksum Status */
+typedef enum {
+ ODP_PACKET_CHKSUM_UNKNOWN = 0,
+ ODP_PACKET_CHKSUM_BAD,
+ ODP_PACKET_CHKSUM_OK
+} odp_packet_chksum_status_t;
+
+/** Parse result flags */
+typedef struct odp_packet_parse_result_flag_t {
+ /** Flags union */
+ union {
+ /** All flags as a 64 bit word */
+ uint64_t all;
+
+ /** Flags as a bitfield struct */
+ struct {
+ /** @see odp_packet_has_error() */
+ uint64_t has_error : 1;
+ /** @see odp_packet_has_l2_error() */
+ uint64_t has_l2_error : 1;
+ /** @see odp_packet_has_l3_error() */
+ uint64_t has_l3_error : 1;
+ /** @see odp_packet_has_l4_error() */
+ uint64_t has_l4_error : 1;
+ /** @see odp_packet_has_l2() */
+ uint64_t has_l2 : 1;
+ /** @see odp_packet_has_l3() */
+ uint64_t has_l3 : 1;
+ /** @see odp_packet_has_l4() */
+ uint64_t has_l4 : 1;
+ /** @see odp_packet_has_eth() */
+ uint64_t has_eth : 1;
+ /** @see odp_packet_has_eth_bcast() */
+ uint64_t has_eth_bcast : 1;
+ /** @see odp_packet_has_eth_mcast() */
+ uint64_t has_eth_mcast : 1;
+ /** @see odp_packet_has_jumbo() */
+ uint64_t has_jumbo : 1;
+ /** @see odp_packet_has_vlan() */
+ uint64_t has_vlan : 1;
+ /** @see odp_packet_has_vlan_qinq() */
+ uint64_t has_vlan_qinq : 1;
+ /** @see odp_packet_has_arp() */
+ uint64_t has_arp : 1;
+ /** @see odp_packet_has_ipv4() */
+ uint64_t has_ipv4 : 1;
+ /** @see odp_packet_has_ipv6() */
+ uint64_t has_ipv6 : 1;
+ /** @see odp_packet_has_ip_bcast() */
+ uint64_t has_ip_bcast : 1;
+ /** @see odp_packet_has_ip_mcast() */
+ uint64_t has_ip_mcast : 1;
+ /** @see odp_packet_has_ipfrag() */
+ uint64_t has_ipfrag : 1;
+ /** @see odp_packet_has_ipopt() */
+ uint64_t has_ipopt : 1;
+ /** @see odp_packet_has_ipsec() */
+ uint64_t has_ipsec : 1;
+ /** @see odp_packet_has_udp() */
+ uint64_t has_udp : 1;
+ /** @see odp_packet_has_tcp() */
+ uint64_t has_tcp : 1;
+ /** @see odp_packet_has_sctp() */
+ uint64_t has_sctp : 1;
+ /** @see odp_packet_has_icmp() */
+ uint64_t has_icmp : 1;
+ };
+ };
+
+} odp_packet_parse_result_flag_t;
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/include/odp/api/abi-default/proto_stats.h b/include/odp/api/abi-default/proto_stats.h
new file mode 100644
index 000000000..dd7ff09b7
--- /dev/null
+++ b/include/odp/api/abi-default/proto_stats.h
@@ -0,0 +1,20 @@
+/* Copyright (c) 2021, Marvell
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_ABI_PROTO_STATS_H_
+#define ODP_ABI_PROTO_STATS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Empty header required to enable API function inlining */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/include/odp/api/abi-default/proto_stats_types.h b/include/odp/api/abi-default/proto_stats_types.h
new file mode 100644
index 000000000..2e8e4aeef
--- /dev/null
+++ b/include/odp/api/abi-default/proto_stats_types.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2021, Marvell
+ * Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_ABI_PROTO_STATS_TYPES_H_
+#define ODP_ABI_PROTO_STATS_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+/** @internal Dummy type for strong typing */
+typedef struct { char dummy; /**< @internal Dummy */ } _odp_abi_proto_stats_t;
+
+/** @ingroup odp_proto_stats
+ * Operations on a proto stats object.
+ * @{
+ */
+
+typedef _odp_abi_proto_stats_t *odp_proto_stats_t;
+
+#define ODP_PROTO_STATS_INVALID ((odp_proto_stats_t)0)
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/include/odp/api/abi-default/queue.h b/include/odp/api/abi-default/queue.h
index 007ded719..f74080dbb 100644
--- a/include/odp/api/abi-default/queue.h
+++ b/include/odp/api/abi-default/queue.h
@@ -11,22 +11,7 @@
extern "C" {
#endif
-/** @internal Dummy type for strong typing */
-typedef struct { char dummy; /**< @internal Dummy */ } _odp_abi_queue_t;
-
-/** @ingroup odp_queue
- * @{
- */
-
-typedef _odp_abi_queue_t *odp_queue_t;
-
-#define ODP_QUEUE_INVALID ((odp_queue_t)0)
-
-#define ODP_QUEUE_NAME_LEN 32
-
-/**
- * @}
- */
+/* Empty header required due to the queue inline functions */
#ifdef __cplusplus
}
diff --git a/include/odp/api/abi-default/queue_types.h b/include/odp/api/abi-default/queue_types.h
new file mode 100644
index 000000000..bf9862149
--- /dev/null
+++ b/include/odp/api/abi-default/queue_types.h
@@ -0,0 +1,36 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_ABI_QUEUE_TYPES_H_
+#define ODP_ABI_QUEUE_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @internal Dummy type for strong typing */
+typedef struct { char dummy; /**< @internal Dummy */ } _odp_abi_queue_t;
+
+/** @ingroup odp_queue
+ * @{
+ */
+
+typedef _odp_abi_queue_t *odp_queue_t;
+
+#define ODP_QUEUE_INVALID ((odp_queue_t)0)
+
+#define ODP_QUEUE_NAME_LEN 32
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/include/odp/api/abi-default/std_clib.h b/include/odp/api/abi-default/std.h
index 15bb79536..2500af800 100644
--- a/include/odp/api/abi-default/std_clib.h
+++ b/include/odp/api/abi-default/std.h
@@ -4,19 +4,15 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-/**
- * @file
- *
- * ODP barrier
- */
-
-#ifndef ODP_ABI_STD_CLIB_H_
-#define ODP_ABI_STD_CLIB_H_
+#ifndef ODP_ABI_STD_H_
+#define ODP_ABI_STD_H_
#ifdef __cplusplus
extern "C" {
#endif
+/* Empty header required due to the inline functions */
+
#ifdef __cplusplus
}
#endif
diff --git a/include/odp/api/abi-default/std_types.h b/include/odp/api/abi-default/std_types.h
index 27c6a75cc..df7bd8db5 100644
--- a/include/odp/api/abi-default/std_types.h
+++ b/include/odp/api/abi-default/std_types.h
@@ -4,12 +4,6 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-/**
- * @file
- *
- * Standard C language types and definitions for ODP.
- */
-
#ifndef ODP_ABI_STD_TYPES_H_
#define ODP_ABI_STD_TYPES_H_
@@ -26,7 +20,7 @@ extern "C" {
/* true and false for odp_bool_t */
#include <stdbool.h>
-/** @addtogroup odp_system ODP SYSTEM
+/** @addtogroup odp_std
* @{
*/
diff --git a/include/odp/api/abi-default/traffic_mngr.h b/include/odp/api/abi-default/traffic_mngr.h
index 9c01ef98f..155123304 100644
--- a/include/odp/api/abi-default/traffic_mngr.h
+++ b/include/odp/api/abi-default/traffic_mngr.h
@@ -46,13 +46,13 @@ extern "C" {
/**
* The smallest SCHED weight is 1 (i.e. 0 is not a legal WFQ/WRR value).
*/
-#define ODP_TM_MIN_SCHED_WEIGHT 1
+#define ODP_TM_MIN_SCHED_WEIGHT 1U
/** The ODP_TM_MAX_SCHED_WEIGHT constant is the largest weight any TM system
* can support (at least from a configuration standpoint). A given TM system
* could have a smaller value.
*/
-#define ODP_TM_MAX_SCHED_WEIGHT 255
+#define ODP_TM_MAX_SCHED_WEIGHT 255U
/** The ODP_TM_MAX_TM_QUEUES constant is the largest number of tm_queues
* that can be handled by any one TM system.
diff --git a/include/odp/api/classification.h b/include/odp/api/classification.h
index 643033228..e96e9c906 100644
--- a/include/odp/api/classification.h
+++ b/include/odp/api/classification.h
@@ -20,9 +20,9 @@ extern "C" {
#include <odp/api/std_types.h>
#include <odp/api/abi/pool.h>
#include <odp/api/abi/classification.h>
-#include <odp/api/abi/packet.h>
+#include <odp/api/abi/packet_types.h>
#include <odp/api/abi/packet_io.h>
-#include <odp/api/abi/queue.h>
+#include <odp/api/abi/queue_types.h>
#include <odp/api/spec/classification.h>
diff --git a/include/odp/api/comp.h b/include/odp/api/comp.h
index 59d4f52a3..c3294e79e 100644
--- a/include/odp/api/comp.h
+++ b/include/odp/api/comp.h
@@ -19,7 +19,7 @@ extern "C" {
#include <odp/api/abi/comp.h>
#include <odp/api/abi/event.h>
-#include <odp/api/abi/queue.h>
+#include <odp/api/abi/queue_types.h>
#include <odp/api/spec/comp.h>
diff --git a/include/odp/api/crypto.h b/include/odp/api/crypto.h
index 5f68e6701..1986d1b2e 100644
--- a/include/odp/api/crypto.h
+++ b/include/odp/api/crypto.h
@@ -18,11 +18,11 @@ extern "C" {
#endif
#include <odp/api/std_types.h>
-#include <odp/api/abi/packet.h>
+#include <odp/api/abi/packet_types.h>
#include <odp/api/abi/crypto.h>
#include <odp/api/abi/buffer.h>
#include <odp/api/abi/pool.h>
-#include <odp/api/queue.h>
+#include <odp/api/abi/queue_types.h>
/** @ingroup odp_crypto
* @{
diff --git a/include/odp/api/packet.h b/include/odp/api/packet.h
index e4b2427a0..231a5aecd 100644
--- a/include/odp/api/packet.h
+++ b/include/odp/api/packet.h
@@ -20,8 +20,8 @@ extern "C" {
#include <odp/api/std_types.h>
#include <odp/api/abi/event.h>
#include <odp/api/abi/packet_io.h>
+#include <odp/api/abi/packet_types.h>
#include <odp/api/abi/packet.h>
-#include <odp/api/abi/queue.h>
#include <odp/api/abi/buffer.h>
#include <odp/api/abi/pool.h>
diff --git a/include/odp/api/packet_io.h b/include/odp/api/packet_io.h
index a7edf17cb..9de59f51e 100644
--- a/include/odp/api/packet_io.h
+++ b/include/odp/api/packet_io.h
@@ -20,9 +20,9 @@ extern "C" {
#include <odp/api/std_types.h>
#include <odp/api/abi/pool.h>
#include <odp/api/abi/classification.h>
-#include <odp/api/abi/packet.h>
+#include <odp/api/abi/packet_types.h>
#include <odp/api/abi/packet_io.h>
-#include <odp/api/abi/queue.h>
+#include <odp/api/abi/queue_types.h>
/** @ingroup odp_packet_io
* @{
diff --git a/include/odp/api/packet_types.h b/include/odp/api/packet_types.h
new file mode 100644
index 000000000..02ffe956b
--- /dev/null
+++ b/include/odp/api/packet_types.h
@@ -0,0 +1,28 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP packet
+ */
+
+#ifndef ODP_API_PACKET_TYPES_H_
+#define ODP_API_PACKET_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/abi/packet_types.h>
+
+#include <odp/api/spec/packet_types.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/include/odp/api/proto_stats.h b/include/odp/api/proto_stats.h
new file mode 100644
index 000000000..bceaeaac9
--- /dev/null
+++ b/include/odp/api/proto_stats.h
@@ -0,0 +1,31 @@
+/* Copyright (c) 2021, Marvell
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP proto stats
+ */
+
+#ifndef ODP_API_PROTO_STATS_H_
+#define ODP_API_PROTO_STATS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/std_types.h>
+#include <odp/api/abi/queue.h>
+#include <odp/api/abi/proto_stats_types.h>
+#include <odp/api/abi/proto_stats.h>
+
+#include <odp/api/spec/proto_stats.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/include/odp/api/proto_stats_types.h b/include/odp/api/proto_stats_types.h
new file mode 100644
index 000000000..b12a0f34f
--- /dev/null
+++ b/include/odp/api/proto_stats_types.h
@@ -0,0 +1,28 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP proto stats types
+ */
+
+#ifndef ODP_API_PROTO_STATS_TYPES_H_
+#define ODP_API_PROTO_STATS_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/abi/proto_stats_types.h>
+
+#include <odp/api/spec/proto_stats_types.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/include/odp/api/queue.h b/include/odp/api/queue.h
index b8b5e48cc..4046d4e1b 100644
--- a/include/odp/api/queue.h
+++ b/include/odp/api/queue.h
@@ -19,6 +19,7 @@ extern "C" {
#include <odp/api/std_types.h>
#include <odp/api/abi/event.h>
+#include <odp/api/abi/queue_types.h>
#include <odp/api/abi/queue.h>
#include <odp/api/abi/buffer.h>
#include <odp/api/abi/pool.h>
diff --git a/include/odp/api/queue_types.h b/include/odp/api/queue_types.h
new file mode 100644
index 000000000..7c9a83bfe
--- /dev/null
+++ b/include/odp/api/queue_types.h
@@ -0,0 +1,28 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP queue
+ */
+
+#ifndef ODP_API_QUEUE_TYPES_H_
+#define ODP_API_QUEUE_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/abi/queue_types.h>
+
+#include <odp/api/spec/queue_types.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/include/odp/api/spec/classification.h b/include/odp/api/spec/classification.h
index 31ce3e794..f3cfea78b 100644
--- a/include/odp/api/spec/classification.h
+++ b/include/odp/api/spec/classification.h
@@ -1,4 +1,5 @@
/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -128,8 +129,9 @@ typedef union odp_cls_pmr_terms_t {
* threshold values. RED is enabled when 'red_enable' boolean is true and
* the resource usage is equal to or greater than the minimum threshold value.
* Resource usage could be defined either as the percentage of pool being full
- * or the number of packets/bytes occupied in the queue depening on the platform
- * capabilities.
+ * or the number of packets/bytes occupied in the queue depending on the
+ * platform capabilities.
+ *
* When RED is enabled for a particular flow then further incoming packets are
* assigned a drop probability based on the size of the pool/queue.
*
@@ -178,6 +180,64 @@ typedef struct odp_bp_param_t {
} odp_bp_param_t;
/**
+ * Classifier queue specific statistics counters
+ *
+ * Counters are incremented per packet destined to the queue per originating
+ * CoS. Note that a single queue can be a destination for multiple CoS's.
+ */
+typedef struct odp_cls_queue_stats_t {
+ /** Number of octets in successfully delivered packets. In case of
+ * Ethernet, packet size includes MAC header. */
+ uint64_t octets;
+
+ /** Number of successfully delivered packets. */
+ uint64_t packets;
+
+ /** Number of discarded packets due to other reasons (e.g. RED) than
+ * errors. */
+ uint64_t discards;
+
+ /** Number of packets with errors. Depending on packet input
+ * configuration, packets with errors may be dropped or not. */
+ uint64_t errors;
+
+} odp_cls_queue_stats_t;
+
+/**
+ * Classifier statistics capabilities
+ */
+typedef struct odp_cls_stats_capability_t {
+ /** Queue level capabilities */
+ struct {
+ /** Supported counters */
+ union {
+ /** Statistics counters in a bit field structure */
+ struct {
+ /** @see odp_cls_queue_stats_t::octets */
+ uint64_t octets : 1;
+
+ /** @see odp_cls_queue_stats_t::packets */
+ uint64_t packets : 1;
+
+ /** @see odp_cls_queue_stats_t::discards */
+ uint64_t discards : 1;
+
+ /** @see odp_cls_queue_stats_t::errors */
+ uint64_t errors : 1;
+
+ } counter;
+
+ /** All bits of the bit field structure
+ *
+ * This field can be used to set/clear all flags, or
+ * for bitwise operations over the entire structure. */
+ uint64_t all_counters;
+ };
+ } queue;
+
+} odp_cls_stats_capability_t;
+
+/**
* Classification capabilities
* This capability structure defines system level classification capability
*/
@@ -196,7 +256,7 @@ typedef struct odp_cls_capability_t {
/** Maximum number of CoS supported */
unsigned int max_cos;
- /** Maximun number of queue supported per CoS
+ /** Maximun number of queues supported per CoS
* if the value is 1, then hashing is not supported*/
unsigned int max_hash_queues;
@@ -221,14 +281,17 @@ typedef struct odp_cls_capability_t {
/** Maximum value of odp_pmr_create_opt_t::mark */
uint64_t max_mark;
+ /** Statistics counters capabilities */
+ odp_cls_stats_capability_t stats;
+
} odp_cls_capability_t;
/**
* class of service packet drop policies
*/
typedef enum {
- ODP_COS_DROP_POOL, /**< Follow buffer pool drop policy */
- ODP_COS_DROP_NEVER, /**< Never drop, ignoring buffer pool policy */
+ ODP_COS_DROP_POOL, /**< Follow buffer pool drop policy */
+ ODP_COS_DROP_NEVER, /**< Never drop, ignoring buffer pool policy */
} odp_cls_drop_t;
/**
@@ -264,12 +327,12 @@ typedef struct odp_cls_cos_param {
* and application need not configure any queue to the class of service.
* When hashing is disabled application has to configure the queue to
* the class of service.
- * Depening on the implementation this number might be rounded-off to
+ * Depending on the implementation this number might be rounded-off to
* nearest supported value (e.g power of 2)
*/
uint32_t num_queue;
- /** Variant mapping for queue hash configurataion */
+ /** Variant mapping for queue hash configuration */
union {
/** Mapping used when num_queue = 1, hashing is disabled in
* this case and application has to configure this queue and
@@ -339,8 +402,8 @@ int odp_cls_capability(odp_cls_capability_t *capability);
* @retval ODP_COS_INVALID on failure.
*
* @note ODP_QUEUE_INVALID and ODP_POOL_INVALID are valid values for queue
- * and pool associated with a class of service and when any one of these values
- * are configured as INVALID then the packets assigned to the CoS gets dropped.
+ * and pool associated with a class of service. When either of these values
+ * is configured as INVALID packets assigned to the CoS get dropped.
*/
odp_cos_t odp_cls_cos_create(const char *name,
const odp_cls_cos_param_t *param);
@@ -351,7 +414,7 @@ odp_cos_t odp_cls_cos_create(const char *name,
* based on the packet parameters and hash protocol field configured with the
* class of service.
*
- * @param cos class of service
+ * @param cos CoS handle
* @param packet Packet handle
*
* @retval Returns the queue handle on which this packet will be enqueued.
@@ -364,62 +427,61 @@ odp_queue_t odp_cls_hash_result(odp_cos_t cos, odp_packet_t packet);
/**
* Discard a class-of-service along with all its associated resources
*
- * @param cos_id class-of-service instance.
+ * @param cos CoS handle
*
* @retval 0 on success
* @retval <0 on failure
*/
-int odp_cos_destroy(odp_cos_t cos_id);
+int odp_cos_destroy(odp_cos_t cos);
/**
* Assign a queue for a class-of-service
*
- * @param cos_id class-of-service instance.
- * @param queue_id Identifier of a queue where all packets of this specific
+ * @param cos CoS handle
+ * @param queue Handle of the queue where all packets of this specific
* class of service will be enqueued.
*
* @retval 0 on success
* @retval <0 on failure
*/
-int odp_cos_queue_set(odp_cos_t cos_id, odp_queue_t queue_id);
+int odp_cos_queue_set(odp_cos_t cos, odp_queue_t queue);
/**
* Get the queue associated with the specific class-of-service
*
-* @param cos_id class-of-service instance.
+* @param cos CoS handle
*
* @retval Queue handle associated with the given class-of-service
* @retval ODP_QUEUE_INVALID on failure
*/
-odp_queue_t odp_cos_queue(odp_cos_t cos_id);
+odp_queue_t odp_cos_queue(odp_cos_t cos);
/**
* Get the number of queues linked with the specific class-of-service
*
- * @param cos_id class-of-service instance.
+ * @param cos CoS handle
*
* @return Number of queues linked with the class-of-service.
*/
-uint32_t odp_cls_cos_num_queue(odp_cos_t cos_id);
+uint32_t odp_cls_cos_num_queue(odp_cos_t cos);
/**
* Get the list of queue associated with the specific class-of-service
*
- * @param cos_id class-of-service instance.
+ * @param cos CoS handle
* @param[out] queue Array of queue handles associated with
* the class-of-service.
* @param num Maximum number of queue handles to output.
*
* @return Number of queues linked with CoS
- * @retval on 0 failure
+ * @retval 0 on failure
*/
-uint32_t odp_cls_cos_queues(odp_cos_t cos_id, odp_queue_t queue[],
- uint32_t num);
+uint32_t odp_cls_cos_queues(odp_cos_t cos, odp_queue_t queue[], uint32_t num);
/**
* Assign packet drop policy for specific class-of-service
*
- * @param cos_id class-of-service instance.
+ * @param cos CoS handle
* @param drop_policy Desired packet drop policy for this class.
*
* @retval 0 on success
@@ -427,16 +489,16 @@ uint32_t odp_cls_cos_queues(odp_cos_t cos_id, odp_queue_t queue[],
*
* @note Optional.
*/
-int odp_cos_drop_set(odp_cos_t cos_id, odp_cls_drop_t drop_policy);
+int odp_cos_drop_set(odp_cos_t cos, odp_cls_drop_t drop_policy);
/**
* Get the drop policy configured for a specific class-of-service instance.
*
-* @param cos_id class-of-service instance.
+* @param cos CoS handle
*
* @retval Drop policy configured with the given class-of-service
*/
-odp_cls_drop_t odp_cos_drop(odp_cos_t cos_id);
+odp_cls_drop_t odp_cos_drop(odp_cos_t cos);
/**
* Request to override per-port class of service
@@ -479,6 +541,28 @@ int odp_cos_with_l3_qos(odp_pktio_t pktio_in,
odp_bool_t l3_preference);
/**
+ * Get statistics for a queue assigned to a CoS
+ *
+ * The statistics counters are incremented only for packets originating from the
+ * given CoS. Queue handles can be requested with odp_cos_queue() and
+ * odp_cls_cos_queues().
+ *
+ * Counters not supported by the queue are set to zero.
+ *
+ * It's implementation defined if odp_pktio_stats_reset() call affects these
+ * counters.
+ *
+ * @param cos CoS handle
+ * @param queue Queue handle
+ * @param[out] stats Statistics structure for output
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
+ */
+int odp_cls_queue_stats(odp_cos_t cos, odp_queue_t queue,
+ odp_cls_queue_stats_t *stats);
+
+/**
* @typedef odp_pmr_t
* PMR - Packet Matching Rule
* Up to 32 bit of ternary matching of one of the available header fields
@@ -608,7 +692,8 @@ typedef enum {
* an exception to this (uint32_t in CPU endian).
*/
typedef struct odp_pmr_param_t {
- odp_cls_pmr_term_t term; /**< Packet Matching Rule term */
+ /** Packet Matching Rule term */
+ odp_cls_pmr_term_t term;
/** True if the value is range and false if match */
odp_bool_t range_term;
@@ -700,6 +785,7 @@ void odp_cls_pmr_create_opt_init(odp_pmr_create_opt_t *opt);
* This packet matching rule is applied on all packets arriving at the source
* class of service and packets satisfying this PMR are sent to the destination
* class of service.
+ *
* A composite PMR rule is created when the number of terms in the match rule
* is more than one. The composite rule is considered as matching only if
* the packet satisfies all the terms in Packet Match Rule.
@@ -751,50 +837,53 @@ odp_pmr_t odp_cls_pmr_create_opt(const odp_pmr_create_opt_t *opt,
odp_cos_t src_cos, odp_cos_t dst_cos);
/**
* Function to destroy a packet match rule
+ *
* Destroying a PMR removes the link between the source and destination
* class of service and this PMR will no longer be applied for packets arriving
- * at the source class of service. All the resource associated with the PMR
- * be release but the class of service will remain intact.
+ * at the source class of service. All the resources associated with the PMR
+ * will be released but the class of service will remain intact.
+ *
* Depending on the implementation details, destroying a composite rule
* may not guarantee the availability of hardware resources to create the
* same or essentially similar rule.
*
- * @param pmr_id Identifier of the PMR to be destroyed
+ * @param pmr Handle of the PMR to be destroyed
*
* @retval 0 on success
* @retval <0 on failure
*/
-int odp_cls_pmr_destroy(odp_pmr_t pmr_id);
+int odp_cls_pmr_destroy(odp_pmr_t pmr);
/**
-* Assigns a packet pool for a specific class of service.
+* Assigns a packet pool for a specific class of service
+*
* All the packets belonging to the given class of service will
* be allocated from the assigned packet pool.
* The packet pool associated with class of service will supersede the
* packet pool associated with the pktio interface.
*
-* @param cos_id class of service handle
-* @param pool_id packet pool handle
+* @param cos CoS handle
+* @param pool_id Packet pool handle
*
* @retval 0 on success
* @retval <0 on failure
*/
-int odp_cls_cos_pool_set(odp_cos_t cos_id, odp_pool_t pool_id);
+int odp_cls_cos_pool_set(odp_cos_t cos, odp_pool_t pool_id);
/**
* Get the pool associated with the given class of service
*
-* @param cos_id class of service handle
+* @param cos CoS handle
*
* @retval pool handle of the associated pool
* @retval ODP_POOL_INVALID if no associated pool found or in case of an error
*/
-odp_pool_t odp_cls_cos_pool(odp_cos_t cos_id);
+odp_pool_t odp_cls_cos_pool(odp_cos_t cos);
/**
* Get printable value for an odp_cos_t
*
- * @param hdl odp_cos_t handle to be printed
+ * @param cos CoS handle to be printed
*
* @return uint64_t value that can be used to print/display this handle
*
@@ -802,12 +891,12 @@ odp_pool_t odp_cls_cos_pool(odp_cos_t cos_id);
* to enable applications to generate a printable value that represents
* an odp_cos_t handle.
*/
-uint64_t odp_cos_to_u64(odp_cos_t hdl);
+uint64_t odp_cos_to_u64(odp_cos_t cos);
/**
* Get printable value for an odp_pmr_t
*
- * @param hdl odp_pmr_t handle to be printed
+ * @param pmr odp_pmr_t handle to be printed
*
* @return uint64_t value that can be used to print/display this handle
*
@@ -815,7 +904,7 @@ uint64_t odp_cos_to_u64(odp_cos_t hdl);
* to enable applications to generate a printable value that represents
* an odp_pmr_t handle.
*/
-uint64_t odp_pmr_to_u64(odp_pmr_t hdl);
+uint64_t odp_pmr_to_u64(odp_pmr_t pmr);
/**
* Print classifier info
diff --git a/include/odp/api/spec/ipsec.h b/include/odp/api/spec/ipsec.h
index f1a9d4b7d..3441d83c9 100644
--- a/include/odp/api/spec/ipsec.h
+++ b/include/odp/api/spec/ipsec.h
@@ -1,4 +1,5 @@
/* Copyright (c) 2016-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -392,6 +393,8 @@ typedef struct odp_ipsec_auth_capability_t {
/** Key length in bytes */
uint32_t key_len;
+ /** ICV length in bytes */
+ uint32_t icv_len;
} odp_ipsec_auth_capability_t;
/**
@@ -543,6 +546,37 @@ typedef struct odp_ipsec_crypto_param_t {
*/
odp_crypto_key_t auth_key_extra;
+ /**
+ * Length of integrity check value (ICV) in bytes.
+ *
+ * Some algorithms support multiple ICV lengths when used with IPsec.
+ * This field can be used to select a non-default ICV length.
+ *
+ * Zero value indicates that the default ICV length shall be used.
+ * The default length depends on the selected algorithm as follows:
+ *
+ * Algorithm Default length Other lengths
+ * ----------------------------------------------------------------
+ * ODP_AUTH_ALG_NULL 0
+ * ODP_AUTH_ALG_MD5_HMAC 12
+ * ODP_AUTH_ALG_SHA1_HMAC 12
+ * ODP_AUTH_ALG_SHA256_HMAC 16
+ * ODP_AUTH_ALG_SHA384_HMAC 24
+ * ODP_AUTH_ALG_SHA512_HMAC 32
+ * ODP_AUTH_ALG_AES_GCM 16 8, 12
+ * ODP_AUTH_ALG_AES_GMAC 16
+ * ODP_AUTH_ALG_AES_CCM 16 8, 12
+ * ODP_AUTH_ALG_AES_CMAC 12
+ * ODP_AUTH_ALG_AES_XCBC_MAC 12
+ * ODP_AUTH_ALG_CHACHA20_POLY1305 16
+ *
+ * The requested ICV length must be supported for the selected
+ * algorithm as indicated by odp_ipsec_auth_capability().
+ *
+ * The default value is 0.
+ */
+ uint32_t icv_len;
+
} odp_ipsec_crypto_param_t;
/** IPv4 header parameters */
@@ -1065,11 +1099,9 @@ int odp_ipsec_cipher_capability(odp_cipher_alg_t cipher,
* Query supported IPSEC authentication algorithm capabilities
*
* Outputs all supported configuration options for the algorithm. Output is
- * sorted (from the smallest to the largest) first by digest length, then by key
+ * sorted (from the smallest to the largest) first by ICV length, then by key
* length. Use this information to select key lengths, etc authentication
- * algorithm options for SA creation (odp_ipsec_crypto_param_t). Application
- * must ignore values for AAD length capabilities as those are not relevant for
- * IPSEC API (fixed in IPSEC RFCs).
+ * algorithm options for SA creation (odp_ipsec_crypto_param_t).
*
* @param auth Authentication algorithm
* @param[out] capa Array of capability structures for output
diff --git a/include/odp/api/spec/packet.h b/include/odp/api/spec/packet.h
index eef7bae8a..2285a857d 100644
--- a/include/odp/api/spec/packet.h
+++ b/include/odp/api/spec/packet.h
@@ -19,7 +19,9 @@
extern "C" {
#endif
+#include <odp/api/proto_stats_types.h>
#include <odp/api/time.h>
+#include <odp/api/packet_types.h>
/** @defgroup odp_packet ODP PACKET
* Packet event metadata and operations.
@@ -27,259 +29,6 @@ extern "C" {
*/
/**
- * @typedef odp_packet_t
- * ODP packet
- */
-
-/**
- * @def ODP_PACKET_INVALID
- * Invalid packet
- */
-
-/**
- * @def ODP_PACKET_OFFSET_INVALID
- * Invalid packet offset
- */
-
-/**
- * @typedef odp_packet_seg_t
- * ODP packet segment
- */
-
-/**
- * @def ODP_PACKET_SEG_INVALID
- * Invalid packet segment
- */
-
-/**
- * @enum odp_packet_color_t
- * Color of packet for shaper/drop processing
- *
- * @var ODP_PACKET_GREEN
- * Packet is green
- *
- * @var ODP_PACKET_YELLOW
- * Packet is yellow
- *
- * @var ODP_PACKET_RED
- * Packet is red
- */
-
-/**
- * Maximum number of packet colors which accommodates ODP_PACKET_GREEN, ODP_PACKET_YELLOW and
- * ODP_PACKET_RED.
- */
-#define ODP_NUM_PACKET_COLORS 3
-
-/**
- * @typedef odp_proto_l2_type_t
- * Layer 2 protocol type
- */
-
-/**
- * @def ODP_PROTO_L2_TYPE_NONE
- * Layer 2 protocol type not defined
- *
- * @def ODP_PROTO_L2_TYPE_ETH
- * Layer 2 protocol is Ethernet
- */
-
-/**
- * @typedef odp_proto_l3_type_t
- * Layer 3 protocol type
- */
-
-/**
- * @def ODP_PROTO_L3_TYPE_NONE
- * Layer 3 protocol type not defined
- *
- * @def ODP_PROTO_L3_TYPE_ARP
- * Layer 3 protocol is ARP
- *
- * @def ODP_PROTO_L3_TYPE_RARP
- * Layer 3 protocol is RARP
- *
- * @def ODP_PROTO_L3_TYPE_MPLS
- * Layer 3 protocol is MPLS
- *
- * @def ODP_PROTO_L3_TYPE_IPV4
- * Layer 3 protocol type is IPv4
- *
- * @def ODP_PROTO_L3_TYPE_IPV6
- * Layer 3 protocol type is IPv6
- */
-
-/**
- * @typedef odp_proto_l4_type_t
- * Layer 4 protocol type
- */
-
-/**
- * @def ODP_PROTO_L4_TYPE_NONE
- * Layer 4 protocol type not defined
- *
- * @def ODP_PROTO_L4_TYPE_ICMPV4
- * Layer 4 protocol type is ICMPv4
- *
- * @def ODP_PROTO_L4_TYPE_IGMP
- * Layer 4 protocol type is IGMP
- *
- * @def ODP_PROTO_L4_TYPE_IPV4
- * Layer 4 protocol type is IPv4
- *
- * @def ODP_PROTO_L4_TYPE_TCP
- * Layer 4 protocol type is TCP
- *
- * @def ODP_PROTO_L4_TYPE_UDP
- * Layer 4 protocol type is UDP
- *
- * @def ODP_PROTO_L4_TYPE_IPV6
- * Layer 4 protocol type is IPv6
- *
- * @def ODP_PROTO_L4_TYPE_GRE
- * Layer 4 protocol type is GRE
- *
- * @def ODP_PROTO_L4_TYPE_ESP
- * Layer 4 protocol type is IPSEC ESP
- *
- * @def ODP_PROTO_L4_TYPE_AH
- * Layer 4 protocol type is IPSEC AH
- *
- * @def ODP_PROTO_L4_TYPE_ICMPV6
- * Layer 4 protocol type is ICMPv6
- *
- * @def ODP_PROTO_L4_TYPE_NO_NEXT
- * Layer 4 protocol type is "No Next Header".
- * Protocol / next header number is 59.
- *
- * @def ODP_PROTO_L4_TYPE_IPCOMP
- * Layer 4 protocol type is IP Payload Compression Protocol
- *
- * @def ODP_PROTO_L4_TYPE_SCTP
- * Layer 4 protocol type is SCTP
- *
- * @def ODP_PROTO_L4_TYPE_ROHC
- * Layer 4 protocol type is ROHC
- */
-
-/**
- * @enum odp_packet_chksum_status_t
- * Checksum check status in packet
- *
- * @var ODP_PACKET_CHKSUM_UNKNOWN
- * Checksum was not checked. Checksum check was not
- * attempted or the attempt failed.
- *
- * @var ODP_PACKET_CHKSUM_BAD
- * Checksum was checked and it was not correct.
- *
- * @var ODP_PACKET_CHKSUM_OK
- * Checksum was checked and it was correct.
- */
-
-/**
- * @typedef odp_packet_vector_t
- * ODP packet vector
- */
-
-/**
- * @def ODP_PACKET_VECTOR_INVALID
- * Invalid packet vector
- */
-
-/**
- * @typedef odp_packet_tx_compl_t
- * ODP Packet Tx completion
- */
-
-/**
- * @def ODP_PACKET_TX_COMPL_INVALID
- * Invalid packet Tx completion
- */
-
-/**
- * Protocol
- */
-typedef enum odp_proto_t {
- /** No protocol defined */
- ODP_PROTO_NONE = 0,
-
- /** Ethernet (including VLAN) */
- ODP_PROTO_ETH,
-
- /** IP version 4 */
- ODP_PROTO_IPV4,
-
- /** IP version 6 */
- ODP_PROTO_IPV6
-
-} odp_proto_t;
-
-/**
- * Protocol layer
- */
-typedef enum odp_proto_layer_t {
- /** No layers */
- ODP_PROTO_LAYER_NONE = 0,
-
- /** Layer L2 protocols (Ethernet, VLAN, etc) */
- ODP_PROTO_LAYER_L2,
-
- /** Layer L3 protocols (IPv4, IPv6, ICMP, IPSEC, etc) */
- ODP_PROTO_LAYER_L3,
-
- /** Layer L4 protocols (UDP, TCP, SCTP) */
- ODP_PROTO_LAYER_L4,
-
- /** All layers */
- ODP_PROTO_LAYER_ALL
-
-} odp_proto_layer_t;
-
-/**
- * Packet API data range specifier
- */
-typedef struct odp_packet_data_range {
- /** Offset from beginning of packet */
- uint32_t offset;
-
- /** Length of data to operate on */
- uint32_t length;
-
-} odp_packet_data_range_t;
-
-/**
- * Reassembly status of a packet
- */
-typedef enum odp_packet_reass_status_t {
- /** Reassembly was not attempted */
- ODP_PACKET_REASS_NONE = 0,
-
- /** Reassembly was attempted but is incomplete. Partial reassembly
- * result can be accessed using ``odp_packet_reass_partial_state()``.
- *
- * The packet does not contain valid packet data and cannot be used
- * in normal packet operations.
- */
- ODP_PACKET_REASS_INCOMPLETE,
-
- /** Reassembly was successfully done. The packet has been
- * reassembled from multiple received fragments. */
- ODP_PACKET_REASS_COMPLETE,
-} odp_packet_reass_status_t;
-
-/**
- * Result from odp_packet_reass_partial_state()
- */
-typedef struct odp_packet_reass_partial_state_t {
- /** Number of fragments returned */
- uint16_t num_frags;
-
- /** Time, in ns, since the reception of the first received fragment */
- uint64_t elapsed_time;
-} odp_packet_reass_partial_state_t;
-
-/**
* Event subtype of a packet
*
* Returns the subtype of a packet event. Subtype tells if the packet contains
@@ -1155,6 +904,135 @@ int odp_packet_concat(odp_packet_t *dst, odp_packet_t src);
*/
int odp_packet_split(odp_packet_t *pkt, uint32_t len, odp_packet_t *tail);
+/**
+ * Packet buffer head pointer
+ *
+ * Packet buffer start address. Buffer level headroom starts from here. For the first
+ * packet buffer of a packet this is equivalent to odp_packet_head().
+ *
+ * @param pkt_buf Packet buffer
+ *
+ * @return Packet buffer head pointer
+ */
+void *odp_packet_buf_head(odp_packet_buf_t pkt_buf);
+
+/**
+ * Packet buffer size in bytes
+ *
+ * Packet buffer size is calculated from the buffer head pointer (@see odp_packet_buf_head()).
+ * It contains all buffer level headroom, data, and tailroom. For a single segmented packet this is
+ * equivalent to odp_packet_buf_len().
+ *
+ * @param pkt_buf Packet buffer
+ *
+ * @return Packet buffer size
+ */
+uint32_t odp_packet_buf_size(odp_packet_buf_t pkt_buf);
+
+/**
+ * Packet buffer data offset
+ *
+ * Offset from the buffer head pointer to the first byte of packet data in the packet buffer.
+ * Valid values range from 0 to buf_size - 1. For the first packet buffer of a packet
+ * this is equivalent to odp_packet_headroom().
+ *
+ * @param pkt_buf Packet buffer
+ *
+ * @return Packet buffer data offset
+ */
+uint32_t odp_packet_buf_data_offset(odp_packet_buf_t pkt_buf);
+
+/**
+ * Packet buffer data length in bytes
+ *
+ * Packet buffer contains this many bytes of packet data. Valid values range from 1 to
+ * buf_size - data_offset. For the first packet buffer of a packet this is equivalent to
+ * odp_packet_seg_len().
+ *
+ * @param pkt_buf Packet buffer
+ *
+ * @return Packet buffer data length
+ */
+uint32_t odp_packet_buf_data_len(odp_packet_buf_t pkt_buf);
+
+/**
+ * Packet buffer data set
+ *
+ * Update packet data start offset and length in the packet buffer. Valid offset values range
+ * from 0 to buf_size - 1. Valid length values range from 1 to buf_size - data_offset.
+ *
+ * @param pkt_buf Packet buffer
+ * @param data_offset Packet buffer data offset in bytes (from the buffer head pointer)
+ * @param data_len Packet buffer data length in bytes
+ */
+void odp_packet_buf_data_set(odp_packet_buf_t pkt_buf, uint32_t data_offset, uint32_t data_len);
+
+/**
+ * Convert packet buffer head pointer to handle
+ *
+ * Converts a packet buffer head pointer (from a previous odp_packet_buf_head() call) to a packet
+ * buffer handle. This allows an application to save memory as it can store only buffer pointers
+ * (instead of pointers and handles) and convert those to handles when needed. This conversion
+ * may be done only for packet buffers that are not part of any packet (i.e. buffers between
+ * odp_packet_disassemble() and odp_packet_reassemble() calls).
+ *
+ * This call can be used only for packets of an external memory pool (@see odp_pool_ext_create()).
+ *
+ * @param pool Pool from which the packet buffer (disassembled packet) originate from
+ * @param head Head pointer
+ *
+ * @return Packet buffer handle on success
+ * @retval ODP_PACKET_BUF_INVALID on failure
+ */
+odp_packet_buf_t odp_packet_buf_from_head(odp_pool_t pool, void *head);
+
+/**
+ * Disassemble packet into packet buffers
+ *
+ * Breaks up a packet into a list of packet buffers. Outputs a packet buffer handle for each
+ * segment of the packet (@see odp_packet_num_segs()). After a successful operation the packet
+ * handle must not be referenced anymore. Packet buffers are reassembled into a new packet (or
+ * several new packets) with a later odp_packet_reassemble() call(s). All packet buffers must be
+ * reassembled into a packet and freed into the originating pool before the pool is destroyed.
+ *
+ * This call can be used only for packets of an external memory pool (@see odp_pool_ext_create()).
+ *
+ * @param pkt Packet to be disassembled
+ * @param[out] pkt_buf Packet buffer handle array for output
+ * @param num Number of elements in packet buffer handle array. Must be equal to or
+ * larger than number of segments in the packet.
+ *
+ * @return Number of handles written (equals the number of segments in the packet)
+ * @retval 0 on failure
+ */
+uint32_t odp_packet_disassemble(odp_packet_t pkt, odp_packet_buf_t pkt_buf[], uint32_t num);
+
+/**
+ * Reassemble packet from packet buffers
+ *
+ * Forms a new packet from packet buffers of a previous odp_packet_disassemble() call(s). Packet
+ * buffers from different disassembled packets may be used, but all buffers must be from packets of
+ * the same pool. Packet pool capability 'max_segs_per_pkt' defines the maximum number of
+ * packet buffers that can be reassembled to form a new packet.
+ *
+ * Application may use odp_packet_buf_data_set() to adjust data_offset and data_len values
+ * in each packet buffer to match the current packet data placement. The operation
+ * maintains packet data content and position. Each buffer becomes a segment in the new packet.
+ * Packet metadata related to data length and position are set according data layout
+ * in the buffers. All other packet metadata are set to their default values. After a successful
+ * operation packet buffer handles must not be referenced anymore.
+ *
+ * This call can be used only for packets of an external memory pool (@see odp_pool_ext_create()).
+ *
+ * @param pool Pool from which all packet buffers (disassembled packets) originate from
+ * @param pkt_buf Packet buffers to form a new packet
+ * @param num Number of packet buffers. Must not exceed max_segs_per_pkt pool capability.
+ *
+ * @return Handle of the newly formed packet
+ * @retval ODP_PACKET_INVALID on failure
+ */
+odp_packet_t odp_packet_reassemble(odp_pool_t pool, odp_packet_buf_t pkt_buf[], uint32_t num);
+
/*
*
* References
@@ -1421,52 +1299,6 @@ int odp_packet_move_data(odp_packet_t pkt, uint32_t dst_offset,
*/
/**
- * Flags to control packet data checksum checking
- */
-typedef union odp_proto_chksums_t {
- /** Individual checksum bits. */
- struct {
- /** IPv4 header checksum */
- uint32_t ipv4 : 1;
-
- /** UDP checksum */
- uint32_t udp : 1;
-
- /** TCP checksum */
- uint32_t tcp : 1;
-
- /** SCTP checksum */
- uint32_t sctp : 1;
-
- } chksum;
-
- /** All checksum bits. This can be used to set/clear all flags. */
- uint32_t all_chksum;
-
-} odp_proto_chksums_t;
-
-/**
- * Packet parse parameters
- */
-typedef struct odp_packet_parse_param_t {
- /** Protocol header at parse starting point. Valid values for this
- * field are: ODP_PROTO_ETH, ODP_PROTO_IPV4, ODP_PROTO_IPV6. */
- odp_proto_t proto;
-
- /** Continue parsing until this layer. Must be the same or higher
- * layer than the layer of 'proto'. */
- odp_proto_layer_t last_layer;
-
- /** Flags to control payload data checksums checks up to the selected
- * parse layer. Checksum checking status can be queried for each packet
- * with odp_packet_l3_chksum_status() and
- * odp_packet_l4_chksum_status().
- */
- odp_proto_chksums_t chksums;
-
-} odp_packet_parse_param_t;
-
-/**
* Parse packet
*
* Parse protocol headers in packet data and update layer/protocol specific
@@ -1510,35 +1342,6 @@ int odp_packet_parse(odp_packet_t pkt, uint32_t offset,
int odp_packet_parse_multi(const odp_packet_t pkt[], const uint32_t offset[],
int num, const odp_packet_parse_param_t *param);
-/** Packet parse results */
-typedef struct odp_packet_parse_result_t {
- /** Parse result flags */
- odp_packet_parse_result_flag_t flag;
-
- /** @see odp_packet_len() */
- uint32_t packet_len;
-
- /** @see odp_packet_l2_offset() */
- uint32_t l2_offset;
- /** @see odp_packet_l3_offset() */
- uint32_t l3_offset;
- /** @see odp_packet_l4_offset() */
- uint32_t l4_offset;
-
- /** @see odp_packet_l3_chksum_status() */
- odp_packet_chksum_status_t l3_chksum_status;
- /** @see odp_packet_l4_chksum_status() */
- odp_packet_chksum_status_t l4_chksum_status;
-
- /** @see odp_packet_l2_type() */
- odp_proto_l2_type_t l2_type;
- /** @see odp_packet_l3_type() */
- odp_proto_l3_type_t l3_type;
- /** @see odp_packet_l4_type() */
- odp_proto_l4_type_t l4_type;
-
-} odp_packet_parse_result_t;
-
/**
* Read parse results
*
@@ -2101,42 +1904,6 @@ void odp_packet_shaper_len_adjust_set(odp_packet_t pkt, int8_t adj);
uint64_t odp_packet_cls_mark(odp_packet_t pkt);
/**
- * LSO options
- */
-typedef struct odp_packet_lso_opt_t {
- /** LSO profile handle
- *
- * The selected LSO profile specifies details of the segmentation operation to be done.
- * Depending on LSO profile options, additional metadata (e.g. L3/L4 protocol header
- * offsets) may need to be set on the packet. See LSO documentation
- * (e.g. odp_pktout_send_lso() and odp_lso_protocol_t) for additional metadata
- * requirements.
- */
- odp_lso_profile_t lso_profile;
-
- /** LSO payload offset
- *
- * LSO operation considers packet data before 'payload_offset' as
- * protocol headers and copies those in front of every created segment. It will modify
- * protocol headers according to the LSO profile before segment transmission.
- *
- * When stored into a packet, this offset can be read with odp_packet_payload_offset() and
- * modified with odp_packet_payload_offset_set().
- */
- uint32_t payload_offset;
-
- /** Maximum payload length in an LSO segment
- *
- * Max_payload_len parameter defines the maximum number of payload bytes in each
- * created segment. Depending on the implementation, segments with less payload may be
- * created. However, this value is used typically to divide packet payload evenly over
- * all segments except the last one, which contains the remaining payload bytes.
- */
- uint32_t max_payload_len;
-
-} odp_packet_lso_opt_t;
-
-/**
* Request Large Send Offload (LSO) for a packet
*
* Setup packet metadata which requests LSO segmentation to be performed during packet output.
@@ -2234,32 +2001,6 @@ void odp_packet_aging_tmo_set(odp_packet_t pkt, uint64_t tmo_ns);
*/
uint64_t odp_packet_aging_tmo(odp_packet_t pkt);
-/** Packet Tx completion mode */
-typedef enum odp_packet_tx_compl_mode_t {
- /** Packet Tx completion event is disabled
- *
- * When mode is disabled, all other fields of odp_packet_tx_compl_opt_t are ignored.
- */
- ODP_PACKET_TX_COMPL_DISABLED,
- /** Packet Tx completion event is sent for all packets (both transmitted and dropped) */
- ODP_PACKET_TX_COMPL_ALL,
-} odp_packet_tx_compl_mode_t;
-
-/**
- * Tx completion request options
- */
-typedef struct odp_packet_tx_compl_opt_t {
- /** Queue handle
- *
- * Tx completion event will be posted to ODP queue identified by this handle.
- */
- odp_queue_t queue;
-
- /** Packet Tx completion event mode */
- odp_packet_tx_compl_mode_t mode;
-
-} odp_packet_tx_compl_opt_t;
-
/**
* Request Tx completion event.
*
@@ -2290,6 +2031,44 @@ int odp_packet_tx_compl_request(odp_packet_t pkt, const odp_packet_tx_compl_opt_
*/
int odp_packet_has_tx_compl_request(odp_packet_t pkt);
+/**
+ * Request packet proto stats.
+ *
+ * The statistics enabled in the proto stats object are updated for the packet in
+ * packet output when the packet is transmitted or dropped. The statistics update
+ * is done as the last step of output processing after possible packet
+ * transformations (e.g. fragmentation, IPsec) that may occur. If a packet is
+ * fragmented or segmented to multiple packets as part of output processing, all
+ * the resulting packets inherit the proto stats object association from the
+ * original packet.
+ *
+ * The relevant octet counts will be updated with the actual packet length at
+ * the time of transmission or drop plus the respective adjustment value passed
+ * in the 'opt' parameter. The octet counts thus include possible additional
+ * headers added by ODP during packet output (e.g. ESP header added by inline
+ * outbound IPsec processing). Ethernet padding and FCS are not included in the
+ * octet counts. The adjustment value is added only if the respective capability
+ * field is true and otherwise ignored.
+ *
+ * @param pkt Packet handle
+ * @param opt Proto stats options. If NULL, then proto stats update is
+ * disabled for this packet.
+ *
+ * @see odp_proto_stats_capability_t::tx
+ */
+void odp_packet_proto_stats_request(odp_packet_t pkt, odp_packet_proto_stats_opt_t *opt);
+
+/**
+ * Get proto stats object.
+ *
+ * Get the proto stats object associated with the given packet.
+ *
+ * @param pkt Packet handle
+ *
+ * @return Proto stats object handle or ODP_PROTO_STATS_INVALID if not set.
+ */
+odp_proto_stats_t odp_packet_proto_stats(odp_packet_t pkt);
+
/*
*
* Packet vector handling routines
diff --git a/include/odp/api/spec/packet_io.h b/include/odp/api/spec/packet_io.h
index d723ef11a..2ccd7b4c8 100644
--- a/include/odp/api/spec/packet_io.h
+++ b/include/odp/api/spec/packet_io.h
@@ -21,7 +21,7 @@ extern "C" {
#include <odp/api/deprecated.h>
#include <odp/api/packet_io_stats.h>
-#include <odp/api/queue.h>
+#include <odp/api/queue_types.h>
#include <odp/api/reassembly.h>
#include <odp/api/time.h>
#include <odp/api/packet.h>
@@ -506,6 +506,9 @@ typedef union odp_pktout_config_opt_t {
*/
uint64_t tx_compl_ena : 1;
+ /** Enable packet protocol stats update */
+ uint64_t proto_stats_ena : 1;
+
} bit;
/** All bits of the bit field structure
diff --git a/include/odp/api/spec/packet_types.h b/include/odp/api/spec/packet_types.h
new file mode 100644
index 000000000..5549f03aa
--- /dev/null
+++ b/include/odp/api/spec/packet_types.h
@@ -0,0 +1,466 @@
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP packet types
+ */
+
+#ifndef ODP_API_SPEC_PACKET_TYPES_H_
+#define ODP_API_SPEC_PACKET_TYPES_H_
+#include <odp/visibility_begin.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/proto_stats_types.h>
+#include <odp/api/queue_types.h>
+
+/** @addtogroup odp_packet
+ * @{
+ */
+
+/**
+ * @typedef odp_packet_t
+ * ODP packet
+ */
+
+/**
+ * @def ODP_PACKET_INVALID
+ * Invalid packet
+ */
+
+/**
+ * @def ODP_PACKET_OFFSET_INVALID
+ * Invalid packet offset
+ */
+
+/**
+ * @typedef odp_packet_seg_t
+ * ODP packet segment
+ *
+ * A packet segment refers to a contiguous part of packet data (in memory). Segments of a packet
+ * can be examined with odp_packet_seg_data(), odp_packet_seg_data_len() and other calls.
+ */
+
+/**
+ * @def ODP_PACKET_SEG_INVALID
+ * Invalid packet segment
+ */
+
+/**
+ * @typedef odp_packet_buf_t
+ * ODP packet buffer
+ *
+ * Packet buffers are not part of any packet, but they result from a previous
+ * odp_packet_disassemble() call. A new packet is formed from packet buffers with
+ * a odp_packet_reassemble() call.
+ */
+
+/**
+ * @def ODP_PACKET_BUF_INVALID
+ * Invalid packet buffer
+ */
+
+/**
+ * @typedef odp_packet_color_t
+ * Color of packet for shaper/drop processing
+ *
+ * @var ODP_PACKET_GREEN
+ * Packet is green
+ *
+ * @var ODP_PACKET_YELLOW
+ * Packet is yellow
+ *
+ * @var ODP_PACKET_RED
+ * Packet is red
+ */
+
+/**
+ * Maximum number of packet colors which accommodates ODP_PACKET_GREEN, ODP_PACKET_YELLOW and
+ * ODP_PACKET_RED.
+ */
+#define ODP_NUM_PACKET_COLORS 3
+
+/**
+ * @typedef odp_proto_l2_type_t
+ * Layer 2 protocol type
+ */
+
+/**
+ * @def ODP_PROTO_L2_TYPE_NONE
+ * Layer 2 protocol type not defined
+ *
+ * @def ODP_PROTO_L2_TYPE_ETH
+ * Layer 2 protocol is Ethernet
+ */
+
+/**
+ * @typedef odp_proto_l3_type_t
+ * Layer 3 protocol type
+ */
+
+/**
+ * @def ODP_PROTO_L3_TYPE_NONE
+ * Layer 3 protocol type not defined
+ *
+ * @def ODP_PROTO_L3_TYPE_ARP
+ * Layer 3 protocol is ARP
+ *
+ * @def ODP_PROTO_L3_TYPE_RARP
+ * Layer 3 protocol is RARP
+ *
+ * @def ODP_PROTO_L3_TYPE_MPLS
+ * Layer 3 protocol is MPLS
+ *
+ * @def ODP_PROTO_L3_TYPE_IPV4
+ * Layer 3 protocol type is IPv4
+ *
+ * @def ODP_PROTO_L3_TYPE_IPV6
+ * Layer 3 protocol type is IPv6
+ */
+
+/**
+ * @typedef odp_proto_l4_type_t
+ * Layer 4 protocol type
+ */
+
+/**
+ * @def ODP_PROTO_L4_TYPE_NONE
+ * Layer 4 protocol type not defined
+ *
+ * @def ODP_PROTO_L4_TYPE_ICMPV4
+ * Layer 4 protocol type is ICMPv4
+ *
+ * @def ODP_PROTO_L4_TYPE_IGMP
+ * Layer 4 protocol type is IGMP
+ *
+ * @def ODP_PROTO_L4_TYPE_IPV4
+ * Layer 4 protocol type is IPv4
+ *
+ * @def ODP_PROTO_L4_TYPE_TCP
+ * Layer 4 protocol type is TCP
+ *
+ * @def ODP_PROTO_L4_TYPE_UDP
+ * Layer 4 protocol type is UDP
+ *
+ * @def ODP_PROTO_L4_TYPE_IPV6
+ * Layer 4 protocol type is IPv6
+ *
+ * @def ODP_PROTO_L4_TYPE_GRE
+ * Layer 4 protocol type is GRE
+ *
+ * @def ODP_PROTO_L4_TYPE_ESP
+ * Layer 4 protocol type is IPSEC ESP
+ *
+ * @def ODP_PROTO_L4_TYPE_AH
+ * Layer 4 protocol type is IPSEC AH
+ *
+ * @def ODP_PROTO_L4_TYPE_ICMPV6
+ * Layer 4 protocol type is ICMPv6
+ *
+ * @def ODP_PROTO_L4_TYPE_NO_NEXT
+ * Layer 4 protocol type is "No Next Header".
+ * Protocol / next header number is 59.
+ *
+ * @def ODP_PROTO_L4_TYPE_IPCOMP
+ * Layer 4 protocol type is IP Payload Compression Protocol
+ *
+ * @def ODP_PROTO_L4_TYPE_SCTP
+ * Layer 4 protocol type is SCTP
+ *
+ * @def ODP_PROTO_L4_TYPE_ROHC
+ * Layer 4 protocol type is ROHC
+ */
+
+/**
+ * @typedef odp_packet_chksum_status_t
+ * Checksum check status in packet
+ *
+ * @var ODP_PACKET_CHKSUM_UNKNOWN
+ * Checksum was not checked. Checksum check was not
+ * attempted or the attempt failed.
+ *
+ * @var ODP_PACKET_CHKSUM_BAD
+ * Checksum was checked and it was not correct.
+ *
+ * @var ODP_PACKET_CHKSUM_OK
+ * Checksum was checked and it was correct.
+ */
+
+/**
+ * @typedef odp_packet_vector_t
+ * ODP packet vector
+ */
+
+/**
+ * @def ODP_PACKET_VECTOR_INVALID
+ * Invalid packet vector
+ */
+
+/**
+ * @typedef odp_packet_tx_compl_t
+ * ODP Packet Tx completion
+ */
+
+/**
+ * @def ODP_PACKET_TX_COMPL_INVALID
+ * Invalid packet Tx completion
+ */
+
+/**
+ * Protocol
+ */
+typedef enum odp_proto_t {
+ /** No protocol defined */
+ ODP_PROTO_NONE = 0,
+
+ /** Ethernet (including VLAN) */
+ ODP_PROTO_ETH,
+
+ /** IP version 4 */
+ ODP_PROTO_IPV4,
+
+ /** IP version 6 */
+ ODP_PROTO_IPV6
+
+} odp_proto_t;
+
+/**
+ * Protocol layer
+ */
+typedef enum odp_proto_layer_t {
+ /** No layers */
+ ODP_PROTO_LAYER_NONE = 0,
+
+ /** Layer L2 protocols (Ethernet, VLAN, etc) */
+ ODP_PROTO_LAYER_L2,
+
+ /** Layer L3 protocols (IPv4, IPv6, ICMP, IPSEC, etc) */
+ ODP_PROTO_LAYER_L3,
+
+ /** Layer L4 protocols (UDP, TCP, SCTP) */
+ ODP_PROTO_LAYER_L4,
+
+ /** All layers */
+ ODP_PROTO_LAYER_ALL
+
+} odp_proto_layer_t;
+
+/**
+ * Packet API data range specifier
+ */
+typedef struct odp_packet_data_range {
+ /** Offset from beginning of packet */
+ uint32_t offset;
+
+ /** Length of data to operate on */
+ uint32_t length;
+
+} odp_packet_data_range_t;
+
+/**
+ * Reassembly status of a packet
+ */
+typedef enum odp_packet_reass_status_t {
+ /** Reassembly was not attempted */
+ ODP_PACKET_REASS_NONE = 0,
+
+ /** Reassembly was attempted but is incomplete. Partial reassembly
+ * result can be accessed using ``odp_packet_reass_partial_state()``.
+ *
+ * The packet does not contain valid packet data and cannot be used
+ * in normal packet operations.
+ */
+ ODP_PACKET_REASS_INCOMPLETE,
+
+ /** Reassembly was successfully done. The packet has been
+ * reassembled from multiple received fragments. */
+ ODP_PACKET_REASS_COMPLETE,
+} odp_packet_reass_status_t;
+
+/**
+ * Result from odp_packet_reass_partial_state()
+ */
+typedef struct odp_packet_reass_partial_state_t {
+ /** Number of fragments returned */
+ uint16_t num_frags;
+
+ /** Time, in ns, since the reception of the first received fragment */
+ uint64_t elapsed_time;
+} odp_packet_reass_partial_state_t;
+
+/**
+ * Flags to control packet data checksum checking
+ */
+typedef union odp_proto_chksums_t {
+ /** Individual checksum bits. */
+ struct {
+ /** IPv4 header checksum */
+ uint32_t ipv4 : 1;
+
+ /** UDP checksum */
+ uint32_t udp : 1;
+
+ /** TCP checksum */
+ uint32_t tcp : 1;
+
+ /** SCTP checksum */
+ uint32_t sctp : 1;
+
+ } chksum;
+
+ /** All checksum bits. This can be used to set/clear all flags. */
+ uint32_t all_chksum;
+
+} odp_proto_chksums_t;
+
+/**
+ * Packet parse parameters
+ */
+typedef struct odp_packet_parse_param_t {
+ /** Protocol header at parse starting point. Valid values for this
+ * field are: ODP_PROTO_ETH, ODP_PROTO_IPV4, ODP_PROTO_IPV6. */
+ odp_proto_t proto;
+
+ /** Continue parsing until this layer. Must be the same or higher
+ * layer than the layer of 'proto'. */
+ odp_proto_layer_t last_layer;
+
+ /** Flags to control payload data checksums checks up to the selected
+ * parse layer. Checksum checking status can be queried for each packet
+ * with odp_packet_l3_chksum_status() and
+ * odp_packet_l4_chksum_status().
+ */
+ odp_proto_chksums_t chksums;
+
+} odp_packet_parse_param_t;
+
+/**
+ * Packet parse results
+ */
+typedef struct odp_packet_parse_result_t {
+ /** Parse result flags */
+ odp_packet_parse_result_flag_t flag;
+
+ /** @see odp_packet_len() */
+ uint32_t packet_len;
+
+ /** @see odp_packet_l2_offset() */
+ uint32_t l2_offset;
+ /** @see odp_packet_l3_offset() */
+ uint32_t l3_offset;
+ /** @see odp_packet_l4_offset() */
+ uint32_t l4_offset;
+
+ /** @see odp_packet_l3_chksum_status() */
+ odp_packet_chksum_status_t l3_chksum_status;
+ /** @see odp_packet_l4_chksum_status() */
+ odp_packet_chksum_status_t l4_chksum_status;
+
+ /** @see odp_packet_l2_type() */
+ odp_proto_l2_type_t l2_type;
+ /** @see odp_packet_l3_type() */
+ odp_proto_l3_type_t l3_type;
+ /** @see odp_packet_l4_type() */
+ odp_proto_l4_type_t l4_type;
+
+} odp_packet_parse_result_t;
+
+/**
+ * LSO options
+ */
+typedef struct odp_packet_lso_opt_t {
+ /** LSO profile handle
+ *
+ * The selected LSO profile specifies details of the segmentation operation to be done.
+ * Depending on LSO profile options, additional metadata (e.g. L3/L4 protocol header
+ * offsets) may need to be set on the packet. See LSO documentation
+ * (e.g. odp_pktout_send_lso() and odp_lso_protocol_t) for additional metadata
+ * requirements.
+ */
+ odp_lso_profile_t lso_profile;
+
+ /** LSO payload offset
+ *
+ * LSO operation considers packet data before 'payload_offset' as
+ * protocol headers and copies those in front of every created segment. It will modify
+ * protocol headers according to the LSO profile before segment transmission.
+ *
+ * When stored into a packet, this offset can be read with odp_packet_payload_offset() and
+ * modified with odp_packet_payload_offset_set().
+ */
+ uint32_t payload_offset;
+
+ /** Maximum payload length in an LSO segment
+ *
+ * Max_payload_len parameter defines the maximum number of payload bytes in each
+ * created segment. Depending on the implementation, segments with less payload may be
+ * created. However, this value is used typically to divide packet payload evenly over
+ * all segments except the last one, which contains the remaining payload bytes.
+ */
+ uint32_t max_payload_len;
+
+} odp_packet_lso_opt_t;
+
+/**
+ * Packet Tx completion mode
+ */
+typedef enum odp_packet_tx_compl_mode_t {
+ /** Packet Tx completion event is disabled
+ *
+ * When mode is disabled, all other fields of odp_packet_tx_compl_opt_t are ignored.
+ */
+ ODP_PACKET_TX_COMPL_DISABLED,
+ /** Packet Tx completion event is sent for all packets (both transmitted and dropped) */
+ ODP_PACKET_TX_COMPL_ALL,
+} odp_packet_tx_compl_mode_t;
+
+/**
+ * Tx completion request options
+ */
+typedef struct odp_packet_tx_compl_opt_t {
+ /** Queue handle
+ *
+ * Tx completion event will be posted to ODP queue identified by this handle.
+ */
+ odp_queue_t queue;
+
+ /** Packet Tx completion event mode */
+ odp_packet_tx_compl_mode_t mode;
+
+} odp_packet_tx_compl_opt_t;
+
+/**
+ * Packet proto stats options
+ */
+typedef struct odp_packet_proto_stats_opt_t {
+ /** Packet proto stats object handle
+ *
+ * Stats in the packet proto stats object will be updated.
+ */
+ odp_proto_stats_t stat;
+
+ /** Octet counter 0 adjust */
+ int32_t oct_count0_adj;
+
+ /** Octet counter 1 adjust */
+ int32_t oct_count1_adj;
+} odp_packet_proto_stats_opt_t;
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#include <odp/visibility_end.h>
+#endif
diff --git a/include/odp/api/spec/pool.h b/include/odp/api/spec/pool.h
index c30d937e5..6314b827c 100644
--- a/include/odp/api/spec/pool.h
+++ b/include/odp/api/spec/pool.h
@@ -1,5 +1,5 @@
/* Copyright (c) 2015-2018, Linaro Limited
- * Copyright (c) 2020, Nokia
+ * Copyright (c) 2020-2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -20,6 +20,7 @@ extern "C" {
#endif
#include <odp/api/std_types.h>
+#include <odp/api/spec/pool_types.h>
/** @defgroup odp_pool ODP POOL
* Packet and buffer (event) pools.
@@ -27,275 +28,6 @@ extern "C" {
*/
/**
- * @typedef odp_pool_t
- * ODP pool
- */
-
-/**
- * @def ODP_POOL_INVALID
- * Invalid pool
- */
-
-/**
- * @def ODP_POOL_NAME_LEN
- * Maximum pool name length in chars including null char
- */
-
-/** Maximum number of packet pool subparameters */
-#define ODP_POOL_MAX_SUBPARAMS 7
-
-/**
- * Pool statistics counters options
- *
- * Pool statistics counters listed in a bit field structure.
- */
-typedef union odp_pool_stats_opt_t {
- /** Option flags */
- struct {
- /** @see odp_pool_stats_t::available */
- uint64_t available : 1;
-
- /** @see odp_pool_stats_t::alloc_ops */
- uint64_t alloc_ops : 1;
-
- /** @see odp_pool_stats_t::alloc_fails */
- uint64_t alloc_fails : 1;
-
- /** @see odp_pool_stats_t::free_ops */
- uint64_t free_ops : 1;
-
- /** @see odp_pool_stats_t::total_ops */
- uint64_t total_ops : 1;
-
- /** @see odp_pool_stats_t::cache_available */
- uint64_t cache_available : 1;
-
- /** @see odp_pool_stats_t::cache_alloc_ops */
- uint64_t cache_alloc_ops : 1;
-
- /** @see odp_pool_stats_t::cache_free_ops */
- uint64_t cache_free_ops : 1;
- } bit;
-
- /** All bits of the bit field structure
- *
- * This field can be used to set/clear all flags, or for bitwise
- * operations over the entire structure. */
- uint64_t all;
-
-} odp_pool_stats_opt_t;
-
-/**
- * Pool statistics counters
- *
- * In addition to API alloc and free calls, statistics counters may be updated
- * by alloc/free operations from implementation internal software or hardware
- * components.
- */
-typedef struct odp_pool_stats_t {
- /** The number of available events in the pool */
- uint64_t available;
-
- /** The number of alloc operations from the pool. Includes both
- * successful and failed operations (pool empty). */
- uint64_t alloc_ops;
-
- /** The number of failed alloc operations (pool empty) */
- uint64_t alloc_fails;
-
- /** The number of free operations to the pool */
- uint64_t free_ops;
-
- /** The total number of alloc and free operations. Includes both
- * successful and failed operations (pool empty). */
- uint64_t total_ops;
-
- /** The number of available events in the local caches of all threads
- * using the pool */
- uint64_t cache_available;
-
- /** The number of successful alloc operations from pool caches (returned
- * at least one event). */
- uint64_t cache_alloc_ops;
-
- /** The number of free operations, which stored events to pool caches. */
- uint64_t cache_free_ops;
-
-} odp_pool_stats_t;
-
-/**
- * Pool capabilities
- */
-typedef struct odp_pool_capability_t {
- /** Maximum number of pools of any type */
- unsigned int max_pools;
-
- /** Buffer pool capabilities */
- struct {
- /** Maximum number of buffer pools */
- unsigned int max_pools;
-
- /** Maximum buffer data alignment in bytes */
- uint32_t max_align;
-
- /** Maximum buffer data size in bytes
- *
- * The value of zero means that size is limited only by the
- * available memory size for the pool. */
- uint32_t max_size;
-
- /** Maximum number of buffers of any size
- *
- * The value of zero means that limited only by the available
- * memory size for the pool. */
- uint32_t max_num;
-
- /** Minimum size of thread local cache */
- uint32_t min_cache_size;
-
- /** Maximum size of thread local cache */
- uint32_t max_cache_size;
-
- /** Supported statistics counters */
- odp_pool_stats_opt_t stats;
- } buf;
-
- /** Packet pool capabilities */
- struct {
- /** Maximum number of packet pools */
- unsigned int max_pools;
-
- /** Maximum packet data length in bytes
- *
- * This defines the maximum packet data length that can be
- * stored into a packet. Attempts to allocate or extend packets
- * to sizes larger than this limit will fail.
- *
- * The value of zero means that limited only by the available
- * memory size for the pool. */
- uint32_t max_len;
-
- /** Maximum number of packets of any length
- *
- * The value of zero means that limited only by the available
- * memory size for the pool. */
- uint32_t max_num;
-
- /** Maximum packet data alignment in bytes
- *
- * This is the maximum value of packet pool alignment
- * (pkt.align) parameter. */
- uint32_t max_align;
-
- /** Minimum packet level headroom length in bytes
- *
- * The minimum number of headroom bytes that newly created
- * packets have by default. The default apply to both ODP
- * packet input and user allocated packets.*/
- uint32_t min_headroom;
-
- /** Maximum packet level headroom length in bytes
- *
- * The maximum value of packet pool headroom parameter
- * that can be configured. This value applies to both ODP
- * packet input and user allocated packets.*/
- uint32_t max_headroom;
-
- /** Minimum packet level tailroom length in bytes
- *
- * The minimum number of tailroom bytes that newly created
- * packets have by default. The default apply to both ODP
- * packet input and user allocated packets.*/
- uint32_t min_tailroom;
-
- /** Maximum number of segments per packet */
- uint32_t max_segs_per_pkt;
-
- /** Minimum packet segment data length in bytes
- *
- * The user defined segment length (seg_len in
- * odp_pool_param_t) will be rounded up into this value. */
- uint32_t min_seg_len;
-
- /** Maximum packet segment data length in bytes
- *
- * The user defined segment length (seg_len in odp_pool_param_t)
- * must not be larger than this.
- *
- * The value of zero means that limited only by the available
- * memory size for the pool. */
- uint32_t max_seg_len;
-
- /** Maximum user area size in bytes
- *
- * The value of zero means that limited only by the available
- * memory size for the pool. */
- uint32_t max_uarea_size;
-
- /** Maximum number of subparameters
- *
- * Maximum number of packet pool subparameters. Valid range is
- * 0 ... ODP_POOL_MAX_SUBPARAMS. */
- uint8_t max_num_subparam;
-
- /** Minimum size of thread local cache */
- uint32_t min_cache_size;
-
- /** Maximum size of thread local cache */
- uint32_t max_cache_size;
-
- /** Supported statistics counters */
- odp_pool_stats_opt_t stats;
- } pkt;
-
- /** Timeout pool capabilities */
- struct {
- /** Maximum number of timeout pools */
- unsigned int max_pools;
-
- /** Maximum number of timeout events in a pool
- *
- * The value of zero means that limited only by the available
- * memory size for the pool. */
- uint32_t max_num;
-
- /** Minimum size of thread local cache */
- uint32_t min_cache_size;
-
- /** Maximum size of thread local cache */
- uint32_t max_cache_size;
-
- /** Supported statistics counters */
- odp_pool_stats_opt_t stats;
- } tmo;
-
- /** Vector pool capabilities */
- struct {
- /** Maximum number of vector pools */
- unsigned int max_pools;
-
- /** Maximum number of vector events in a pool
- *
- * The value of zero means that limited only by the available
- * memory size for the pool. */
- uint32_t max_num;
-
- /** Maximum number of general types, such as odp_packet_t, in a vector. */
- uint32_t max_size;
-
- /** Minimum size of thread local cache */
- uint32_t min_cache_size;
-
- /** Maximum size of thread local cache */
- uint32_t max_cache_size;
-
- /** Supported statistics counters */
- odp_pool_stats_opt_t stats;
- } vector;
-
-} odp_pool_capability_t;
-
-/**
* Query pool capabilities
*
* Outputs pool capabilities on success.
@@ -308,209 +40,6 @@ typedef struct odp_pool_capability_t {
int odp_pool_capability(odp_pool_capability_t *capa);
/**
- * Packet pool subparameters
- */
-typedef struct odp_pool_pkt_subparam_t {
- /** Number of 'len' byte packets. */
- uint32_t num;
-
- /** Packet length in bytes */
- uint32_t len;
-
-} odp_pool_pkt_subparam_t;
-
-/**
- * Pool parameters
- */
-typedef struct odp_pool_param_t {
- /** Pool type */
- int type;
-
- /** Parameters for buffer pools */
- struct {
- /** Number of buffers in the pool */
- uint32_t num;
-
- /** Buffer size in bytes. The maximum number of bytes
- * application will store in each buffer.
- */
- uint32_t size;
-
- /** Minimum buffer alignment in bytes. Valid values are
- * powers of two. Use 0 for default alignment.
- * Default will always be a multiple of 8.
- */
- uint32_t align;
-
- /** Maximum number of buffers cached locally per thread
- *
- * A non-zero value allows implementation to cache buffers
- * locally per each thread. Thread local caching may improve
- * performance, but requires application to take account that
- * some buffers may be stored locally per thread and thus are
- * not available for allocation from other threads.
- *
- * This is the maximum number of buffers to be cached per
- * thread. The actual cache size is implementation specific.
- * The value must not be less than 'min_cache_size' or exceed
- * 'max_cache_size' capability. The default value is
- * implementation specific and set by odp_pool_param_init().
- */
- uint32_t cache_size;
- } buf;
-
- /** Parameters for packet pools */
- struct {
- /** Minimum number of 'len' byte packets.
- *
- * The pool must contain at least this many packets that are
- * 'len' bytes or smaller. An implementation may round up the
- * value, as long as the 'max_num' parameter below is not
- * violated. The maximum value for this field is defined by
- * pool capability pkt.max_num.
- */
- uint32_t num;
-
- /** Maximum number of packets.
- *
- * This is the maximum number of packets of any length that can
- * be allocated from the pool. The maximum value is defined by
- * pool capability pkt.max_num. Use 0 when there's no
- * requirement for the maximum number of packets. The default
- * value is 0.
- */
- uint32_t max_num;
-
- /** Minimum length of 'num' packets.
- *
- * The pool must contain at least 'num' packets up to this
- * packet length (1 ... 'len' bytes). The maximum value for
- * this field is defined by pool capability pkt.max_len.
- * Use 0 for default.
- */
- uint32_t len;
-
- /** Maximum packet length that will be allocated from
- * the pool. The maximum value is defined by pool capability
- * pkt.max_len. Use 0 for default.
- */
- uint32_t max_len;
-
- /** Minimum packet data alignment in bytes.
- *
- * Valid values are powers of two. User allocated packets have
- * start of data (@see odp_packet_data()) aligned to this or
- * a higher alignment (power of two value). This parameter
- * does not apply to packets that ODP allocates internally
- * (e.g. packets from packet input).
- *
- * The maximum value is defined by pool capability
- * pkt.max_align. Use 0 for default alignment.
- */
- uint32_t align;
-
- /** Minimum number of packet data bytes that are stored in the
- * first segment of a packet. The maximum value is defined by
- * pool capability pkt.max_seg_len. Use 0 for default.
- */
- uint32_t seg_len;
-
- /** User area size in bytes. The maximum value is defined by
- * pool capability pkt.max_uarea_size. Specify as 0 if no user
- * area is needed.
- */
- uint32_t uarea_size;
-
- /** Minimum headroom size in bytes. Each newly allocated
- * packet from the pool must have at least this much headroom.
- * The maximum value is defined by pool capability
- * pkt.max_headroom. Use zero if headroom is not needed.
- */
- uint32_t headroom;
-
- /** Number of subparameters
- *
- * The number of subparameter table entries used. The maximum
- * value is defined by pool capability pkt.max_num_subparam.
- * The default value is 0.
- */
- uint8_t num_subparam;
-
- /** Subparameter table
- *
- * Subparameters continue pool configuration with additional
- * packet length requirements. The first table entry follows
- * the num/len specification above. So that, sub[0].len > 'len'
- * and sub[0].num refers to packet lengths between 'len' + 1
- * and sub[0].len. Similarly, sub[1] follows sub[0]
- * specification, and so on.
- *
- * Each requirement is supported separately and may be rounded
- * up, as long as the 'max_num' parameter is not violated. It's
- * implementation specific if some requirements are supported
- * simultaneously (e.g. due to subpool design).
- */
- odp_pool_pkt_subparam_t sub[ODP_POOL_MAX_SUBPARAMS];
-
- /** Maximum number of packets cached locally per thread
- *
- * See buf.cache_size documentation for details.
- */
- uint32_t cache_size;
- } pkt;
-
- /** Parameters for timeout pools */
- struct {
- /** Number of timeouts in the pool */
- uint32_t num;
-
- /** Maximum number of timeouts cached locally per thread
- *
- * See buf.cache_size documentation for details.
- */
- uint32_t cache_size;
- } tmo;
-
- /** Parameters for vector pools */
- struct {
- /** Number of vectors in the pool */
- uint32_t num;
-
- /** Maximum number of general types, such as odp_packet_t, in a vector. */
- uint32_t max_size;
-
- /** Maximum number of vectors cached locally per thread
- *
- * See buf.cache_size documentation for details.
- */
- uint32_t cache_size;
- } vector;
-
- /**
- * Configure statistics counters
- *
- * An application can read the enabled statistics counters using
- * odp_pool_stats(). For optimal performance an application should
- * enable only the required counters.
- */
- odp_pool_stats_opt_t stats;
-} odp_pool_param_t;
-
-/** Packet pool*/
-#define ODP_POOL_PACKET ODP_EVENT_PACKET
-/** Buffer pool */
-#define ODP_POOL_BUFFER ODP_EVENT_BUFFER
-/** Timeout pool */
-#define ODP_POOL_TIMEOUT ODP_EVENT_TIMEOUT
-/** Vector pool
- *
- * The pool to hold a vector of general type such as odp_packet_t.
- * Each vector holds an array of generic types of the same type.
- * @see ODP_EVENT_PACKET_VECTOR
- */
-#define ODP_POOL_VECTOR (ODP_POOL_TIMEOUT + 1)
-
-/**
* Create a pool
*
* This routine is used to create a pool. The use of pool name is optional.
@@ -553,49 +82,6 @@ int odp_pool_destroy(odp_pool_t pool);
odp_pool_t odp_pool_lookup(const char *name);
/**
- * Pool information struct
- * Used to get information about a pool.
- */
-typedef struct odp_pool_info_t {
- /** Pool name */
- const char *name;
-
- /** Copy of pool parameters */
- odp_pool_param_t params;
-
- /** Additional info for packet pools */
- struct {
- /** Maximum number of packets of any length
- *
- * This is the maximum number of packets that can be allocated
- * from the pool at anytime. Application can use this e.g.
- * to prepare enough per packet contexts.
- */
- uint32_t max_num;
-
- } pkt;
-
- /** Minimum data address.
- *
- * This is the minimum address that application accessible
- * data of any object (event) allocated from the pool may
- * locate. When there's no application accessible data
- * (e.g. ODP_POOL_TIMEOUT pools), the value may be zero.
- */
- uintptr_t min_data_addr;
-
- /** Maximum data address.
- *
- * This is the maximum address that application accessible
- * data of any object (event) allocated from the pool may
- * locate. When there's no application accessible data
- * (e.g. ODP_POOL_TIMEOUT pools), the value may be zero.
- */
- uintptr_t max_data_addr;
-
-} odp_pool_info_t;
-
-/**
* Retrieve information about a pool
*
* @param pool Pool handle
@@ -688,6 +174,108 @@ int odp_pool_stats(odp_pool_t pool, odp_pool_stats_t *stats);
int odp_pool_stats_reset(odp_pool_t pool);
/**
+ * Query capabilities of an external memory pool type
+ *
+ * Outputs pool capabilities on success. Returns failure if a bad pool type is used. When
+ * the requested pool type is valid but not supported, sets the value of 'max_pools' to zero.
+ *
+ * @param type Pool type
+ * @param[out] capa Pointer to capability structure for output
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
+ */
+int odp_pool_ext_capability(odp_pool_type_t type, odp_pool_ext_capability_t *capa);
+
+/**
+ * Initialize pool params
+ *
+ * Initialize an odp_pool_ext_param_t to its default values for all fields
+ * based on the selected pool type.
+ *
+ * @param type Pool type
+ * @param param odp_pool_ext_param_t to be initialized
+ */
+void odp_pool_ext_param_init(odp_pool_type_t type, odp_pool_ext_param_t *param);
+
+/**
+ * Create an external memory pool
+ *
+ * This routine is used to create a pool. The use of pool name is optional.
+ * Unique names are not required. However, odp_pool_lookup() returns only a
+ * single matching pool. Use odp_pool_ext_param_init() to initialize parameters
+ * into their default values.
+ *
+ * @param name Name of the pool or NULL. Maximum string length is ODP_POOL_NAME_LEN.
+ * @param param Pool parameters
+ *
+ * @return Pool handle on success
+ * @retval ODP_POOL_INVALID on failure
+ */
+odp_pool_t odp_pool_ext_create(const char *name, const odp_pool_ext_param_t *param);
+
+/**
+ * Populate external memory pool with buffer memory
+ *
+ * Populate can be called multiple times to add memory buffers into the pool. Application must
+ * populate the pool with the exact number of buffers specified in pool parameters. The pool is
+ * ready to be used for allocations only after all populate calls have returned successfully.
+ * Application marks the last populate call with ODP_POOL_POPULATE_DONE flag.
+ *
+ * Depending on pool usage (and ODP implementation), the memory may need to be accessible by
+ * HW accelerators. Application may use e.g. odp_shm_reserve() with ODP_SHM_HW_ACCESS flag to
+ * ensure HW access. The memory area used for one pool, starting from (or before) the lowest
+ * addressed buffer and extending to the end (or after) of the highest addressed buffer, must not
+ * overlap with the memory area used for any other pool. Pool capabilities
+ * (odp_pool_ext_capability_t) specify the minimum alignment of the memory area.
+ *
+ * Pool type defines memory buffer layout and where the buffer pointer (buf[N]) points
+ * in the layout. Pool capabilities specify requirements for buffer size, layout and
+ * pointer alignment.
+ *
+ * For packet pools, packet buffer layout is shown below. The packet headroom (odp_packet_head())
+ * starts immediately after the application header. For a segmented packet, each segment has this
+ * same layout. Buffer size includes all headers, headroom, data, tailroom and trailer.
+ *
+ * @code{.unparsed}
+ *
+ * +-------------------------------+ -- --
+ * buf[N] ---> | | | |
+ * | ODP header (optional) | > odp_header_size |
+ * | | | |
+ * +-------------------------------+ -- |
+ * | | | |
+ * | Application header (optional) | > app_header_size |
+ * | | | > buf_size
+ * +-------------------------------+ -- |
+ * odp_packet_head()--> | | |
+ * | Packet data | |
+ * | (headroom, data, tailroom) | |
+ * | | |
+ * | | |
+ * +-------------------------------+ -- |
+ * | | | |
+ * | ODP trailer (optional) | > odp_trailer_size |
+ * | | | |
+ * +-------------------------------+ -- --
+ *
+ * @endcode
+ *
+ * @param pool External memory pool
+ * @param buf Buffer pointers to be populated into the pool
+ * @param buf_size Buffer size
+ * @param num Number of buffer pointers
+ * @param flags 0: No flags
+ * ODP_POOL_POPULATE_DONE: Marks the last populate call and completes the pool
+ * population phase
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
+ */
+int odp_pool_ext_populate(odp_pool_t pool, void *buf[], uint32_t buf_size, uint32_t num,
+ uint32_t flags);
+
+/**
* @}
*/
diff --git a/include/odp/api/spec/pool_types.h b/include/odp/api/spec/pool_types.h
new file mode 100644
index 000000000..44d9297c1
--- /dev/null
+++ b/include/odp/api/spec/pool_types.h
@@ -0,0 +1,772 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * Type definitions for pools
+ */
+
+#ifndef ODP_API_SPEC_POOL_TYPES_H_
+#define ODP_API_SPEC_POOL_TYPES_H_
+#include <odp/visibility_begin.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/std_types.h>
+
+/** @addtogroup odp_pool
+ * @{
+ */
+
+/**
+ * @typedef odp_pool_t
+ * ODP pool
+ */
+
+/**
+ * @def ODP_POOL_INVALID
+ * Invalid pool
+ */
+
+/**
+ * @def ODP_POOL_NAME_LEN
+ * Maximum pool name length in chars including null char
+ */
+
+/** Maximum number of packet pool subparameters */
+#define ODP_POOL_MAX_SUBPARAMS 7
+
+/**
+ * Pool statistics counters options
+ *
+ * Pool statistics counters listed in a bit field structure.
+ */
+typedef union odp_pool_stats_opt_t {
+ /** Option flags */
+ struct {
+ /** @see odp_pool_stats_t::available */
+ uint64_t available : 1;
+
+ /** @see odp_pool_stats_t::alloc_ops */
+ uint64_t alloc_ops : 1;
+
+ /** @see odp_pool_stats_t::alloc_fails */
+ uint64_t alloc_fails : 1;
+
+ /** @see odp_pool_stats_t::free_ops */
+ uint64_t free_ops : 1;
+
+ /** @see odp_pool_stats_t::total_ops */
+ uint64_t total_ops : 1;
+
+ /** @see odp_pool_stats_t::cache_available */
+ uint64_t cache_available : 1;
+
+ /** @see odp_pool_stats_t::cache_alloc_ops */
+ uint64_t cache_alloc_ops : 1;
+
+ /** @see odp_pool_stats_t::cache_free_ops */
+ uint64_t cache_free_ops : 1;
+ } bit;
+
+ /** All bits of the bit field structure
+ *
+ * This field can be used to set/clear all flags, or for bitwise
+ * operations over the entire structure. */
+ uint64_t all;
+
+} odp_pool_stats_opt_t;
+
+/**
+ * Pool statistics counters
+ *
+ * In addition to API alloc and free calls, statistics counters may be updated
+ * by alloc/free operations from implementation internal software or hardware
+ * components.
+ */
+typedef struct odp_pool_stats_t {
+ /** The number of available events in the pool */
+ uint64_t available;
+
+ /** The number of alloc operations from the pool. Includes both
+ * successful and failed operations (pool empty). */
+ uint64_t alloc_ops;
+
+ /** The number of failed alloc operations (pool empty) */
+ uint64_t alloc_fails;
+
+ /** The number of free operations to the pool */
+ uint64_t free_ops;
+
+ /** The total number of alloc and free operations. Includes both
+ * successful and failed operations (pool empty). */
+ uint64_t total_ops;
+
+ /** The number of available events in the local caches of all threads
+ * using the pool */
+ uint64_t cache_available;
+
+ /** The number of successful alloc operations from pool caches (returned
+ * at least one event). */
+ uint64_t cache_alloc_ops;
+
+ /** The number of free operations, which stored events to pool caches. */
+ uint64_t cache_free_ops;
+
+} odp_pool_stats_t;
+
+/**
+ * Pool capabilities
+ */
+typedef struct odp_pool_capability_t {
+ /** Maximum number of pools of any type */
+ unsigned int max_pools;
+
+ /** Buffer pool capabilities */
+ struct {
+ /** Maximum number of buffer pools */
+ unsigned int max_pools;
+
+ /** Maximum buffer data alignment in bytes */
+ uint32_t max_align;
+
+ /** Maximum buffer data size in bytes
+ *
+ * The value of zero means that size is limited only by the
+ * available memory size for the pool. */
+ uint32_t max_size;
+
+ /** Maximum number of buffers of any size
+ *
+ * The value of zero means that limited only by the available
+ * memory size for the pool. */
+ uint32_t max_num;
+
+ /** Minimum size of thread local cache */
+ uint32_t min_cache_size;
+
+ /** Maximum size of thread local cache */
+ uint32_t max_cache_size;
+
+ /** Supported statistics counters */
+ odp_pool_stats_opt_t stats;
+ } buf;
+
+ /** Packet pool capabilities */
+ struct {
+ /** Maximum number of packet pools */
+ unsigned int max_pools;
+
+ /** Maximum packet data length in bytes
+ *
+ * This defines the maximum packet data length that can be
+ * stored into a packet. Attempts to allocate or extend packets
+ * to sizes larger than this limit will fail.
+ *
+ * The value of zero means that limited only by the available
+ * memory size for the pool. */
+ uint32_t max_len;
+
+ /** Maximum number of packets of any length
+ *
+ * The value of zero means that limited only by the available
+ * memory size for the pool. */
+ uint32_t max_num;
+
+ /** Maximum packet data alignment in bytes
+ *
+ * This is the maximum value of packet pool alignment
+ * (pkt.align) parameter. */
+ uint32_t max_align;
+
+ /** Minimum packet level headroom length in bytes
+ *
+ * The minimum number of headroom bytes that newly created
+ * packets have by default. The default apply to both ODP
+ * packet input and user allocated packets.*/
+ uint32_t min_headroom;
+
+ /** Maximum packet level headroom length in bytes
+ *
+ * The maximum value of packet pool headroom parameter
+ * that can be configured. This value applies to both ODP
+ * packet input and user allocated packets.*/
+ uint32_t max_headroom;
+
+ /** Minimum packet level tailroom length in bytes
+ *
+ * The minimum number of tailroom bytes that newly created
+ * packets have by default. The default apply to both ODP
+ * packet input and user allocated packets.*/
+ uint32_t min_tailroom;
+
+ /** Maximum number of segments per packet */
+ uint32_t max_segs_per_pkt;
+
+ /** Minimum packet segment data length in bytes
+ *
+ * The user defined segment length (seg_len in
+ * odp_pool_param_t) will be rounded up into this value. */
+ uint32_t min_seg_len;
+
+ /** Maximum packet segment data length in bytes
+ *
+ * The user defined segment length (seg_len in odp_pool_param_t)
+ * must not be larger than this.
+ *
+ * The value of zero means that limited only by the available
+ * memory size for the pool. */
+ uint32_t max_seg_len;
+
+ /** Maximum user area size in bytes
+ *
+ * The value of zero means that limited only by the available
+ * memory size for the pool. */
+ uint32_t max_uarea_size;
+
+ /** Maximum number of subparameters
+ *
+ * Maximum number of packet pool subparameters. Valid range is
+ * 0 ... ODP_POOL_MAX_SUBPARAMS. */
+ uint8_t max_num_subparam;
+
+ /** Minimum size of thread local cache */
+ uint32_t min_cache_size;
+
+ /** Maximum size of thread local cache */
+ uint32_t max_cache_size;
+
+ /** Supported statistics counters */
+ odp_pool_stats_opt_t stats;
+ } pkt;
+
+ /** Timeout pool capabilities */
+ struct {
+ /** Maximum number of timeout pools */
+ unsigned int max_pools;
+
+ /** Maximum number of timeout events in a pool
+ *
+ * The value of zero means that limited only by the available
+ * memory size for the pool. */
+ uint32_t max_num;
+
+ /** Minimum size of thread local cache */
+ uint32_t min_cache_size;
+
+ /** Maximum size of thread local cache */
+ uint32_t max_cache_size;
+
+ /** Supported statistics counters */
+ odp_pool_stats_opt_t stats;
+ } tmo;
+
+ /** Vector pool capabilities */
+ struct {
+ /** Maximum number of vector pools */
+ unsigned int max_pools;
+
+ /** Maximum number of vector events in a pool
+ *
+ * The value of zero means that limited only by the available
+ * memory size for the pool. */
+ uint32_t max_num;
+
+ /** Maximum number of general types, such as odp_packet_t, in a vector. */
+ uint32_t max_size;
+
+ /** Minimum size of thread local cache */
+ uint32_t min_cache_size;
+
+ /** Maximum size of thread local cache */
+ uint32_t max_cache_size;
+
+ /** Supported statistics counters */
+ odp_pool_stats_opt_t stats;
+ } vector;
+
+} odp_pool_capability_t;
+
+/**
+ * Packet pool subparameters
+ */
+typedef struct odp_pool_pkt_subparam_t {
+ /** Number of 'len' byte packets. */
+ uint32_t num;
+
+ /** Packet length in bytes */
+ uint32_t len;
+
+} odp_pool_pkt_subparam_t;
+
+/**
+ * Pool types
+ */
+typedef enum odp_pool_type_t {
+ /** Packet pool*/
+ ODP_POOL_PACKET = ODP_EVENT_PACKET,
+
+ /** Buffer pool */
+ ODP_POOL_BUFFER = ODP_EVENT_BUFFER,
+
+ /** Timeout pool */
+ ODP_POOL_TIMEOUT = ODP_EVENT_TIMEOUT,
+
+ /** Vector pool
+ *
+ * The pool to hold a vector of general type such as odp_packet_t.
+ * Each vector holds an array of generic types of the same type.
+ * @see ODP_EVENT_PACKET_VECTOR
+ */
+ ODP_POOL_VECTOR = (ODP_POOL_TIMEOUT + 1)
+
+} odp_pool_type_t;
+
+/**
+ * Pool parameters
+ */
+typedef struct odp_pool_param_t {
+ /** Pool type */
+ odp_pool_type_t type;
+
+ /** Parameters for buffer pools */
+ struct {
+ /** Number of buffers in the pool */
+ uint32_t num;
+
+ /** Buffer size in bytes. The maximum number of bytes
+ * application will store in each buffer.
+ */
+ uint32_t size;
+
+ /** Minimum buffer alignment in bytes. Valid values are
+ * powers of two. Use 0 for default alignment.
+ * Default will always be a multiple of 8.
+ */
+ uint32_t align;
+
+ /** Maximum number of buffers cached locally per thread
+ *
+ * A non-zero value allows implementation to cache buffers
+ * locally per each thread. Thread local caching may improve
+ * performance, but requires application to take account that
+ * some buffers may be stored locally per thread and thus are
+ * not available for allocation from other threads.
+ *
+ * This is the maximum number of buffers to be cached per
+ * thread. The actual cache size is implementation specific.
+ * The value must not be less than 'min_cache_size' or exceed
+ * 'max_cache_size' capability. The default value is
+ * implementation specific and set by odp_pool_param_init().
+ */
+ uint32_t cache_size;
+ } buf;
+
+ /** Parameters for packet pools */
+ struct {
+ /** Minimum number of 'len' byte packets.
+ *
+ * The pool must contain at least this many packets that are
+ * 'len' bytes or smaller. An implementation may round up the
+ * value, as long as the 'max_num' parameter below is not
+ * violated. The maximum value for this field is defined by
+ * pool capability pkt.max_num.
+ */
+ uint32_t num;
+
+ /** Maximum number of packets.
+ *
+ * This is the maximum number of packets of any length that can
+ * be allocated from the pool. The maximum value is defined by
+ * pool capability pkt.max_num. Use 0 when there's no
+ * requirement for the maximum number of packets. The default
+ * value is 0.
+ */
+ uint32_t max_num;
+
+ /** Minimum length of 'num' packets.
+ *
+ * The pool must contain at least 'num' packets up to this
+ * packet length (1 ... 'len' bytes). The maximum value for
+ * this field is defined by pool capability pkt.max_len.
+ * Use 0 for default.
+ */
+ uint32_t len;
+
+ /** Maximum packet length that will be allocated from
+ * the pool. The maximum value is defined by pool capability
+ * pkt.max_len. Use 0 for default.
+ */
+ uint32_t max_len;
+
+ /** Minimum packet data alignment in bytes.
+ *
+ * Valid values are powers of two. User allocated packets have
+ * start of data (@see odp_packet_data()) aligned to this or
+ * a higher alignment (power of two value). This parameter
+ * does not apply to packets that ODP allocates internally
+ * (e.g. packets from packet input).
+ *
+ * The maximum value is defined by pool capability
+ * pkt.max_align. Use 0 for default alignment.
+ */
+ uint32_t align;
+
+ /** Minimum number of packet data bytes that are stored in the
+ * first segment of a packet. The maximum value is defined by
+ * pool capability pkt.max_seg_len. Use 0 for default.
+ */
+ uint32_t seg_len;
+
+ /** User area size in bytes. The maximum value is defined by
+ * pool capability pkt.max_uarea_size. Specify as 0 if no user
+ * area is needed.
+ */
+ uint32_t uarea_size;
+
+ /** Minimum headroom size in bytes. Each newly allocated
+ * packet from the pool must have at least this much headroom.
+ * The maximum value is defined by pool capability
+ * pkt.max_headroom. Use zero if headroom is not needed.
+ */
+ uint32_t headroom;
+
+ /** Number of subparameters
+ *
+ * The number of subparameter table entries used. The maximum
+ * value is defined by pool capability pkt.max_num_subparam.
+ * The default value is 0.
+ */
+ uint8_t num_subparam;
+
+ /** Subparameter table
+ *
+ * Subparameters continue pool configuration with additional
+ * packet length requirements. The first table entry follows
+ * the num/len specification above. So that, sub[0].len > 'len'
+ * and sub[0].num refers to packet lengths between 'len' + 1
+ * and sub[0].len. Similarly, sub[1] follows sub[0]
+ * specification, and so on.
+ *
+ * Each requirement is supported separately and may be rounded
+ * up, as long as the 'max_num' parameter is not violated. It's
+ * implementation specific if some requirements are supported
+ * simultaneously (e.g. due to subpool design).
+ */
+ odp_pool_pkt_subparam_t sub[ODP_POOL_MAX_SUBPARAMS];
+
+ /** Maximum number of packets cached locally per thread
+ *
+ * See buf.cache_size documentation for details.
+ */
+ uint32_t cache_size;
+ } pkt;
+
+ /** Parameters for timeout pools */
+ struct {
+ /** Number of timeouts in the pool */
+ uint32_t num;
+
+ /** Maximum number of timeouts cached locally per thread
+ *
+ * See buf.cache_size documentation for details.
+ */
+ uint32_t cache_size;
+ } tmo;
+
+ /** Parameters for vector pools */
+ struct {
+ /** Number of vectors in the pool */
+ uint32_t num;
+
+ /** Maximum number of general types, such as odp_packet_t, in a vector. */
+ uint32_t max_size;
+
+ /** Maximum number of vectors cached locally per thread
+ *
+ * See buf.cache_size documentation for details.
+ */
+ uint32_t cache_size;
+ } vector;
+
+ /**
+ * Configure statistics counters
+ *
+ * An application can read the enabled statistics counters using
+ * odp_pool_stats(). For optimal performance an application should
+ * enable only the required counters.
+ */
+ odp_pool_stats_opt_t stats;
+
+} odp_pool_param_t;
+
+/**
+ * External memory pool population done
+ *
+ * Application uses this flag to mark the last odp_pool_ext_populate() call, which completes
+ * external memory pool population phase.
+ */
+#define ODP_POOL_POPULATE_DONE 0x1
+
+/**
+ * External memory pool capabilities
+ *
+ * Generic fields (not specific to a pool type) contain capabilities
+ * of the requested pool type.
+ */
+typedef struct odp_pool_ext_capability_t {
+ /** Requested pool type
+ *
+ * Pool type from the odp_pool_ext_capability() call is recorded here for reference. */
+ odp_pool_type_t type;
+
+ /** Maximum number of pools
+ *
+ * Maximum number of external memory pools of the requested type. */
+ uint32_t max_pools;
+
+ /** Minimum size of thread local cache */
+ uint32_t min_cache_size;
+
+ /** Maximum size of thread local cache */
+ uint32_t max_cache_size;
+
+ /** Supported statistics counters */
+ odp_pool_stats_opt_t stats;
+
+ /** Packet pool capabilities */
+ struct {
+ /** Maximum number of packet buffers */
+ uint32_t max_num_buf;
+
+ /** Maximum packet buffer size in bytes */
+ uint32_t max_buf_size;
+
+ /** ODP header size in bytes
+ *
+ * Application must reserve this many bytes from the start of a packet buffer
+ * for ODP implementation usage. When the value is zero, ODP implementation does
+ * not need header space to be reserved for it. Application will not modify this
+ * memory area (after buffer populate call).
+ */
+ uint32_t odp_header_size;
+
+ /** ODP trailer size in bytes
+ *
+ * Application must reserve this many bytes from the end of a packet buffer
+ * for ODP implementation usage. When the value is zero, ODP implementation does
+ * not need trailer space to be reserved for it. Application will not modify this
+ * memory area (after buffer populate call).
+ */
+ uint32_t odp_trailer_size;
+
+ /** Minimum packet pool memory area alignment in bytes
+ *
+ * The memory area used for a packet pool, starting from (or before) the lowest
+ * addressed buffer and extending to the end (or after) of the highest addressed
+ * buffer, must have at least this (power of two) alignment. The value is 1 when
+ * there is no alignment requirement.
+ */
+ uint32_t min_mem_align;
+
+ /** Minimum packet buffer pointer alignment in bytes
+ *
+ * Packet buffer pointers populated into a pool must be evenly divisible with
+ * this value. The value is 1 when there is no alignment requirement.
+ */
+ uint32_t min_buf_align;
+
+ /** Minimum packet headroom alignment in bytes
+ *
+ * Packet buffers populated into a pool must have their headroom start address
+ * evenly divisible with this value. The value is 1 when there is no alignment
+ * requirement.
+ */
+ uint32_t min_head_align;
+
+ /** Packet buffer alignment flags
+ *
+ * These flags specify additional alignment requirements for packet buffers.
+ * If not stated otherwise, min_buf_align and min_head_align alignment
+ * requirements apply also.
+ */
+ struct {
+ /** Packet buffers are size aligned
+ *
+ * When set, packet buffer pointers must be aligned to the buffer size.
+ * For example, if the buffer size would be 2304 bytes (0x900),
+ * each buffer start address must be a multiple of 0x900
+ * (e.g. 0x12000900, 0x12001200, 0x12004800, etc). */
+ uint16_t buf_size_aligned : 1;
+
+ };
+
+ /** Maximum headroom parameter value
+ *
+ * The packet pool headroom parameter may not exceed this value.
+ */
+ uint32_t max_headroom;
+
+ /** Maximum headroom size in bytes
+ *
+ * Any newly allocated packet will have at most this much headroom. Application
+ * may use this to ensure that packet buffer size is large enough to fit both
+ * buffer headers, headroom and data.
+ */
+ uint32_t max_headroom_size;
+
+ /** Maximum number of segments per packet */
+ uint32_t max_segs_per_pkt;
+
+ /** Maximum user area size in bytes */
+ uint32_t max_uarea_size;
+
+ } pkt;
+
+} odp_pool_ext_capability_t;
+
+/**
+ * External memory pool parameters
+ */
+typedef struct odp_pool_ext_param_t {
+ /** Pool type */
+ odp_pool_type_t type;
+
+ /** Maximum thread local cache size for the pool
+ *
+ * Valid value range is from min_cache_size to max_cache_size capability.
+ * The default value is implementation specific. See odp_pool_param_t (buf.cache_size)
+ * for more detailed documentation.
+ */
+ uint32_t cache_size;
+
+ /**
+ * Pool statistics configuration
+ *
+ * All pool statistics are disabled by default. For optimal performance, enable only those
+ * counters that are actually used. Counters may be read with odp_pool_stats().
+ */
+ odp_pool_stats_opt_t stats;
+
+ /** Parameters for packet pools */
+ struct {
+ /** Number of packet buffers
+ *
+ * The number of packet buffers application will populate into the pool.
+ * The maximum value is defined by pool capability pkt.max_num_buf.
+ */
+ uint32_t num_buf;
+
+ /** Packet buffer size
+ *
+ * Total buffer size in bytes including all headers, trailer, head-/tailroom
+ * and data. This is calculated from buffer start pointer to the end of buffer
+ * data area (including tailroom) or ODP trailer (see odp_trailer_size capability).
+ * All packet buffers application populates into the pool are of this size.
+ */
+ uint32_t buf_size;
+
+ /** Application header size
+ *
+ * Application reserves this many bytes for its own buffer header usage.
+ * The application header follows immediately the ODP buffer header
+ * (see odp_header_size capability). ODP implementation will not modify this
+ * memory area. The default value is 0.
+ */
+ uint32_t app_header_size;
+
+ /** User area size
+ *
+ * Per packet user area size in bytes. As with normal pools, user area location
+ * is ODP implementation specific. Use zero if no user area is needed.
+ * The maximum value is defined by pool capability pkt.max_uarea_size.
+ * The default value is 0.
+ */
+ uint32_t uarea_size;
+
+ /** Minimum headroom size
+ *
+ * Each newly allocated packet from the pool must have at least this much
+ * headroom in bytes. The configuration applies to both ODP packet input and
+ * application allocated packets. Use zero if headroom is not needed. The maximum
+ * value is defined by pool capability pkt.max_headroom. Implementation may
+ * round up the initial headroom size up to pool capability pkt.max_headroom_size.
+ */
+ uint32_t headroom;
+
+ } pkt;
+
+} odp_pool_ext_param_t;
+
+/**
+ * Pool information struct
+ * Used to get information about a pool.
+ */
+typedef struct odp_pool_info_t {
+ /** Pool name */
+ const char *name;
+
+ /** External memory pool
+ *
+ * 0: Pool is a normal pool
+ * 1: Pool is an external memory pool
+ */
+ odp_bool_t pool_ext;
+
+ /** Pool parameters union */
+ union {
+ /** Copy of pool parameters. This is set when pool_ext is 0. */
+ odp_pool_param_t params;
+
+ /** Copy of external memory pool parameters. This is set when pool_ext is 1. */
+ odp_pool_ext_param_t pool_ext_param;
+ };
+
+ /** Additional info for packet pools */
+ struct {
+ /** Maximum number of packets of any length
+ *
+ * This is the maximum number of packets that can be allocated
+ * from the pool at any time. Application can use this e.g.
+ * to prepare enough per packet contexts.
+ */
+ uint32_t max_num;
+
+ } pkt;
+
+ /** Minimum data address.
+ *
+ * This is the minimum address that application accessible
+ * data of any object (event) allocated from the pool may
+ * be located. When there's no application accessible data
+ * (e.g. ODP_POOL_TIMEOUT pools), the value may be zero.
+ */
+ uintptr_t min_data_addr;
+
+ /** Maximum data address.
+ *
+ * This is the maximum address that application accessible
+ * data of any object (event) allocated from the pool may
+ * be located. When there's no application accessible data
+ * (e.g. ODP_POOL_TIMEOUT pools), the value may be zero.
+ */
+ uintptr_t max_data_addr;
+
+} odp_pool_info_t;
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#include <odp/visibility_end.h>
+#endif
diff --git a/include/odp/api/spec/proto_stats.h b/include/odp/api/spec/proto_stats.h
new file mode 100644
index 000000000..0957c9e0e
--- /dev/null
+++ b/include/odp/api/spec/proto_stats.h
@@ -0,0 +1,132 @@
+/* Copyright (c) 2021, Marvell
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP Proto Stats
+ */
+
+#ifndef ODP_API_SPEC_PROTO_STATS_H_
+#define ODP_API_SPEC_PROTO_STATS_H_
+#include <odp/visibility_begin.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/proto_stats_types.h>
+
+/** @defgroup odp_proto_stats ODP PROTO STATS
+ * Flow specific packet statistics.
+ * @{
+ */
+
+/**
+ * Initialize proto stats parameters
+ *
+ * Initialize an odp_proto_stats_param_t to its default values.
+ * By default all the statistics are disabled.
+ *
+ * @param param Proto stats parameter pointer.
+ */
+void odp_proto_stats_param_init(odp_proto_stats_param_t *param);
+
+/**
+ * Get proto stats capability
+ *
+ * Get supported protocol statistics and metadata for a PKTIO.
+ *
+ * @param pktio Packet IO handle
+ * @param[out] capa Pointer where capabilities are updated
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
+ */
+int odp_proto_stats_capability(odp_pktio_t pktio, odp_proto_stats_capability_t *capa);
+
+/**
+ * Create a proto stats object
+ *
+ * Create a proto stats object with given name and parameters.
+ * A proto stats object can be created with any set of statistics but only the
+ * statistics that are supported by a PKTIO are updated in a proto stats object
+ * for that PKTIO associated packets. Same proto stats object can be used with
+ * any PKTIO.
+ *
+ * @param name Object name
+ * @param param Proto stats parameters
+ *
+ * @return Proto stats object handle
+ * @retval ODP_PROTO_STATS_INVALID on failure
+ */
+odp_proto_stats_t odp_proto_stats_create(const char *name, const odp_proto_stats_param_t *param);
+
+/**
+ * Lookup a proto stats object by name
+ *
+ * Lookup an already created proto stats object by name.
+ *
+ * @param name Proto stats object name
+ *
+ * @return Proto stats object handle
+ * @retval ODP_PROTO_STATS_INVALID on failure
+ */
+odp_proto_stats_t odp_proto_stats_lookup(const char *name);
+
+/**
+ * Destroy a proto stats object
+ *
+ * Destroy a proto stats object already created.
+ *
+ * Before destroying proto stats object having tx statistics enabled,
+ * for all PKTIO devices to which packets were Tx'ed earlier with
+ * this proto stats object, odp_pktio_stop() must be called. Additionally,
+ * existing packets that refer to the proto stats object being destroyed
+ * must not be sent at the same time as or after the proto stats object
+ * destruction.
+ *
+ * @param stat Proto stats handle
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
+ */
+int odp_proto_stats_destroy(odp_proto_stats_t stat);
+
+/**
+ * Get all proto stats counters
+ *
+ * Get current values of all counters of the proto stats object.
+ * The values of counters that are not enabled in the proto stats object are undefined.
+ *
+ * @param stat Proto stats object handle
+ * @param[out] data Pointer to a caller allocated structure where the statistics will
+ * be written to.
+ *
+ * @retval =0 on success
+ * @retval <0 on failure
+ */
+int odp_proto_stats(odp_proto_stats_t stat, odp_proto_stats_data_t *data);
+
+/**
+ * Print proto stats object info to ODP log.
+ *
+ * Print implementation-defined proto stats debug information to ODP log.
+ *
+ * @param stat Proto stats object handle
+ */
+void odp_proto_stats_print(odp_proto_stats_t stat);
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#include <odp/visibility_end.h>
+#endif
diff --git a/include/odp/api/spec/proto_stats_types.h b/include/odp/api/spec/proto_stats_types.h
new file mode 100644
index 000000000..ff5f352b9
--- /dev/null
+++ b/include/odp/api/spec/proto_stats_types.h
@@ -0,0 +1,126 @@
+/* Copyright(C) 2021, Marvell
+ * Copyright(C) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP proto stats types
+ */
+
+#ifndef ODP_API_SPEC_PROTO_STATS_TYPES_H_
+#define ODP_API_SPEC_PROTO_STATS_TYPES_H_
+#include <odp/visibility_begin.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/std_types.h>
+
+/** @addtogroup odp_proto_stats
+ * @{
+ */
+
+/**
+ * @def ODP_PROTO_STATS_INVALID
+ * Invalid proto stats handle
+ */
+
+/** ODP proto stats counters
+ *
+ * Statistics that can be enabled in proto stats object. For Tx stats counters,
+ * Pktout config `odp_pktout_config_opt_t::bit::proto_stats_ena` needs to be
+ * enabled.
+ *
+ * Tx packet and octet sent/drop statistics might include packets sent/dropped via
+ * Traffic Manager or Tx packet Aging or due to any other Tx errors. It is
+ * implementation specific as to what all Tx sent/drop events are accounted for.
+ */
+typedef union odp_proto_stats_counters_t {
+ /** Option flags */
+ struct {
+ /** Tx packet sent count */
+ uint64_t tx_pkts : 1;
+
+ /** Tx packet drop count */
+ uint64_t tx_pkt_drops : 1;
+
+ /** Tx packet sent Octet counter 0 */
+ uint64_t tx_oct_count0 : 1;
+
+ /** Tx packet drop Octet counter 0 */
+ uint64_t tx_oct_count0_drops : 1;
+
+ /** Tx packet sent octet counter 1 */
+ uint64_t tx_oct_count1 : 1;
+
+ /** Tx packet drop octet counter 1 */
+ uint64_t tx_oct_count1_drops : 1;
+ } bit;
+
+ /** All bits of the bit field structure
+ *
+ * This field can be used to set/clear all flags, or bitwise
+ * operations over the entire structure.
+ */
+ uint64_t all_bits;
+} odp_proto_stats_counters_t;
+
+/** ODP proto stats params */
+typedef struct odp_proto_stats_param_t {
+ /** Stats counters to enable */
+ odp_proto_stats_counters_t counters;
+} odp_proto_stats_param_t;
+
+/**
+ * Proto stats capabilities
+ */
+typedef struct odp_proto_stats_capability_t {
+ /** Tx capabilities */
+ struct {
+ /** Stats counters supported */
+ odp_proto_stats_counters_t counters;
+
+ /** Packet adjust support for Octet counter 0 */
+ odp_bool_t oct_count0_adj;
+
+ /** Packet adjust support for Octet counter 1 */
+ odp_bool_t oct_count1_adj;
+ } tx;
+} odp_proto_stats_capability_t;
+
+/** ODP proto stats counters */
+typedef struct odp_proto_stats_data_t {
+ /** Packet sent count */
+ uint64_t tx_pkts;
+
+ /** Packet drop count */
+ uint64_t tx_pkt_drops;
+
+ /** Packet sent Octet counter 0 */
+ uint64_t tx_oct_count0;
+
+ /** Packet drop Octet counter 0 */
+ uint64_t tx_oct_count0_drops;
+
+ /** Packet sent octet counter 1 */
+ uint64_t tx_oct_count1;
+
+ /** Packet drop octet counter 1 */
+ uint64_t tx_oct_count1_drops;
+} odp_proto_stats_data_t;
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#include <odp/visibility_end.h>
+#endif
diff --git a/include/odp/api/spec/queue.h b/include/odp/api/spec/queue.h
index 005229e35..9c677c926 100644
--- a/include/odp/api/spec/queue.h
+++ b/include/odp/api/spec/queue.h
@@ -19,7 +19,7 @@ extern "C" {
#endif
#include <odp/api/event.h>
-#include <odp/api/spec/queue_types.h>
+#include <odp/api/queue_types.h>
/** @defgroup odp_queue ODP QUEUE
* Queues for event passing and scheduling.
@@ -27,21 +27,6 @@ extern "C" {
*/
/**
- * @typedef odp_queue_t
- * ODP queue
- */
-
-/**
- * @def ODP_QUEUE_INVALID
- * Invalid queue
- */
-
-/**
- * @def ODP_QUEUE_NAME_LEN
- * Maximum queue name length in chars including null char
- */
-
-/**
* Queue create
*
* Create a queue according to the queue parameters. Queue type is specified by
diff --git a/include/odp/api/spec/queue_types.h b/include/odp/api/spec/queue_types.h
index a6899c31c..cff47ee60 100644
--- a/include/odp/api/spec/queue_types.h
+++ b/include/odp/api/spec/queue_types.h
@@ -26,6 +26,21 @@ extern "C" {
*/
/**
+ * @typedef odp_queue_t
+ * ODP queue
+ */
+
+/**
+ * @def ODP_QUEUE_INVALID
+ * Invalid queue
+ */
+
+/**
+ * @def ODP_QUEUE_NAME_LEN
+ * Maximum queue name length in chars including null char
+ */
+
+/**
* Queue type
*/
typedef enum odp_queue_type_t {
diff --git a/include/odp/api/spec/schedule.h b/include/odp/api/spec/schedule.h
index e07c92b7e..12da88f7d 100644
--- a/include/odp/api/spec/schedule.h
+++ b/include/odp/api/spec/schedule.h
@@ -20,7 +20,7 @@ extern "C" {
#include <odp/api/std_types.h>
#include <odp/api/event.h>
-#include <odp/api/queue.h>
+#include <odp/api/queue_types.h>
#include <odp/api/schedule_types.h>
#include <odp/api/thrmask.h>
diff --git a/include/odp/api/spec/std_clib.h b/include/odp/api/spec/std.h
index fd65e7f5e..0341f734e 100644
--- a/include/odp/api/spec/std_clib.h
+++ b/include/odp/api/spec/std.h
@@ -1,4 +1,5 @@
/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -7,23 +8,24 @@
/**
* @file
*
- * ODP version of often used C library calls
+ * ODP standard types and optimized C library functions
*/
-#ifndef ODP_API_SPEC_STD_CLIB_H_
-#define ODP_API_SPEC_STD_CLIB_H_
+#ifndef ODP_API_SPEC_STD_H_
+#define ODP_API_SPEC_STD_H_
#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
#endif
+#include <odp/api/std_types.h>
+
/**
- * @defgroup odp_std_clib ODP STD CLIB
- * Performance optimized versions of selected C library functions.
+ * @defgroup odp_std ODP STD
+ * Standard types and performance optimized versions of selected C library
+ * functions.
*
- * @details
- * ODP version of often used C library calls
* @{
*/
@@ -75,6 +77,18 @@ void *odp_memset(void *ptr, int value, size_t num);
int odp_memcmp(const void *ptr1, const void *ptr2, size_t num);
/**
+ * Convert fractional number (u64) to double
+ *
+ * Converts value of the unsigned 64 bit fractional number to a double-precision
+ * floating-point value.
+ *
+ * @param fract Pointer to a fractional number
+ *
+ * @return Value of the fractional number as double
+ */
+double odp_fract_u64_to_dbl(const odp_fract_u64_t *fract);
+
+/**
* @}
*/
diff --git a/include/odp/api/spec/std_types.h b/include/odp/api/spec/std_types.h
index 4b2af87ef..5dc350a24 100644
--- a/include/odp/api/spec/std_types.h
+++ b/include/odp/api/spec/std_types.h
@@ -24,7 +24,7 @@
extern "C" {
#endif
-/** @addtogroup odp_system ODP SYSTEM
+/** @addtogroup odp_std ODP STD
* @{
*/
@@ -96,18 +96,6 @@ typedef struct odp_fract_u64_t {
} odp_fract_u64_t;
/**
- * Convert fractional number (u64) to double
- *
- * Converts value of the unsigned 64 bit fractional number to a double-precision
- * floating-point value.
- *
- * @param fract Pointer to a fractional number
- *
- * @return Value of the fractional number as double
- */
-double odp_fract_u64_to_dbl(const odp_fract_u64_t *fract);
-
-/**
* @}
*/
diff --git a/include/odp/api/spec/traffic_mngr.h b/include/odp/api/spec/traffic_mngr.h
index 699d0bb26..57c11c486 100644
--- a/include/odp/api/spec/traffic_mngr.h
+++ b/include/odp/api/spec/traffic_mngr.h
@@ -1,4 +1,5 @@
-/** Copyright (c) 2015-2018, Linaro Limited
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -37,7 +38,7 @@ extern "C" {
* based systems or one or more hybrid systems - where because of
* hardware constraints some of the packet scheduling is done in hardware
* and some is done in software. In addition, there may also be additional
- * API's beyond those described here for (a) controlling advanced capabilities
+ * APIs beyond those described here for (a) controlling advanced capabilities
* supported by specific hardware, software or hybrid subsystems or (b)
* dealing with constraints and limitations of specific implementations.
*/
@@ -189,6 +190,63 @@ extern "C" {
* tree/hierarchy of nodes.
*/
+/**
+ * TM queue specific statistics counters
+ */
+typedef struct odp_tm_queue_stats_t {
+ /** Number of octets in successfully transmitted packets. In case of
+ * Ethernet, packet size includes MAC header. */
+ uint64_t octets;
+
+ /** Number of successfully transmitted packets. */
+ uint64_t packets;
+
+ /** Number of packets discarded due to other reasons (e.g. aging) than
+ * errors. */
+ uint64_t discards;
+
+ /** Number of octets in packets discarded due to other reasons (e.g.
+ * aging) than errors. */
+ uint64_t discard_octets;
+
+ /** Number of packets with transmission errors. */
+ uint64_t errors;
+
+} odp_tm_queue_stats_t;
+
+/**
+ * TM queue level statistics capabilities
+ */
+typedef struct odp_tm_queue_stats_capability_t {
+ /** Supported counters */
+ union {
+ /** Statistics counters in a bit field structure */
+ struct {
+ /** @see odp_tm_queue_stats_t::octets */
+ uint64_t octets : 1;
+
+ /** @see odp_tm_queue_stats_t::packets */
+ uint64_t packets : 1;
+
+ /** @see odp_tm_queue_stats_t::discards */
+ uint64_t discards : 1;
+
+ /** @see odp_tm_queue_stats_t::discard_octets */
+ uint64_t discard_octets : 1;
+
+ /** @see odp_tm_queue_stats_t::errors */
+ uint64_t errors : 1;
+
+ } counter;
+
+ /** All bits of the bit field structure
+ *
+ * This field can be used to set/clear all flags, or
+ * for bitwise operations over the entire structure. */
+ uint64_t all_counters;
+ };
+} odp_tm_queue_stats_capability_t;
+
/** Per Level Capabilities
*
* The odp_tm_level_capabilities_t record is used to describe the capabilities
@@ -213,12 +271,12 @@ typedef struct {
/** min_weight only has significance when the weights_supported field
* below is true, in which case it specifies the smallest value
* of the weights allowed at this level. */
- uint8_t min_weight;
+ uint32_t min_weight;
/** max_weight only has significance when the weights_supported field
* below is true, in which case it specifies the largest value
* of the weights allowed at this level. */
- uint8_t max_weight;
+ uint32_t max_weight;
/** tm_node_shaper_supported indicates that the tm_nodes at this level
* all support TM shaping, */
@@ -248,8 +306,39 @@ typedef struct {
* When true the min_weight and max_weight fields above specify
* the legal range of such weights. */
odp_bool_t weights_supported;
+
+ /** tm_node_threshold indicates that the tm_nodes at this
+ * level support threshold profiles. */
+ odp_bool_t tm_node_threshold;
} odp_tm_level_capabilities_t;
+/** The tm_pkt_prio_mode_t enumeration type is used to indicate different
+ * modes a tm system supports with respect to assigning priority to a packet
+ * and propagating it across TM system. All the nodes in a TM system can
+ * function only on single mode specified at time of odp_tm_create().
+ */
+typedef enum odp_tm_pkt_prio_mode {
+ /** Indicates Packet priority preserve mode. In this mode, a packet gets
+ * its priority based on a TM queue it gets enqueued to and then it
+ * carries the same priority along with it as long as it is in the TM
+ * system. At every TM node in the topology, that specific pkt is
+ * scheduled as per that priority.
+ */
+ ODP_TM_PKT_PRIO_MODE_PRESERVE,
+
+ /** Indicates Packet priority overwrite mode. In this mode, a packet
+ * gets a new priority every time it passes through a TM queue or a
+ * TM node. All the packets fed by a fan-in node will get the same
+ * priority and that will be valid until overwritten again by another TM
+ * node. This priority is part of the TM fan-in node parameters and is
+ * fixed at node creation time.
+ */
+ ODP_TM_PKT_PRIO_MODE_OVERWRITE,
+
+ /** Max enum of Packet priority mode */
+ ODP_TM_PKT_PRIO_MODE_MAX,
+} odp_tm_pkt_prio_mode_t;
+
/** TM Capabilities Record.
*
* The odp_tm_capabilities_t record type is used to describe the feature set
@@ -283,7 +372,7 @@ typedef struct {
* proper TM shaping. Note that TM Shaping is NOT the same thing as
* Ingress Metering/Policing as specified by RFC 2697 (A Single Rate
* Three Color Marker) or RFC 2698 (A Two Rate Three Color Marker).
- * These RFC's can be used for a Diffserv traffic conditioner, or
+ * These RFCs can be used for a Diffserv traffic conditioner, or
* other ingress policing. They make no mention of and have no
* algorithms for delaying packets - which is what TM shapers are
* expected to do. */
@@ -385,6 +474,44 @@ typedef struct {
* the parameters of the threshold profile of any TM node or TM queue.
*/
odp_bool_t dynamic_threshold_update;
+
+ /** TM queue statistics counter capabilities */
+ odp_tm_queue_stats_capability_t queue_stats;
+
+ /** tm_queue_threshold indicates support for threshold profile on a
+ * TM queue. When TRUE, users can set/clear/update threshold profile
+ * on a TM queue. When false none of it is supported.
+ */
+ odp_bool_t tm_queue_threshold;
+
+ /** tm_queue_query_flags indicates supported types of TM queue query.
+ * Types of TM queue query are same as query_flags that are passed to
+ * odp_tm_queue_query(), odp_tm_priority_query() and
+ * odp_tm_total_query(). When zero, none of the queue query APIs are
+ * supported. When non-zero, only the only supported types of passed
+ * query_flags are taken into account and corresponding fields updated.
+ *
+ * @see ODP_TM_QUERY_PKT_CNT, ODP_TM_QUERY_BYTE_CNT,
+ * ODP_TM_QUERY_THRESHOLDS.
+ */
+ uint32_t tm_queue_query_flags;
+
+ /** Indicates the packet priority modes supported by TM systems on a
+ * platform. A platform can support multiple packet priority modes. The
+ * actual mode a TM system runs with is defined by
+ * odp_tm_requirements_t.
+ */
+ odp_bool_t pkt_prio_modes[ODP_TM_PKT_PRIO_MODE_MAX];
+
+ /** Maximum number of schedulers supported by a TM node at any level.
+ * A TM node contains a WFQ/WRR scheduler for each packet priority level
+ * for which the node has more than one possible input. TM topology and
+ * priority configuration must be made so that the resulting number of
+ * WFQ/WRR schedulers does not exceed this capability in any TM node.
+ *
+ * The value can vary between 0 and ODP_TM_MAX_PRIORITIES.
+ */
+ uint8_t max_schedulers_per_node;
} odp_tm_capabilities_t;
/** Per Level Requirements
@@ -411,12 +538,12 @@ typedef struct {
/** min_weight only has significance when the weights_supported field
* below is true, in which case it specifies the smallest value
* of the weights that will be used at this level. */
- uint8_t min_weight;
+ uint32_t min_weight;
/** max_weight only has significance when the weights_supported field
* below is true, in which case it specifies the largest value
* of the weights that will be used at this level. */
- uint8_t max_weight;
+ uint32_t max_weight;
/** tm_node_shaper_needed indicates that the tm_nodes at this level
* are expected to do TM shaping, */
@@ -437,11 +564,15 @@ typedef struct {
* disciplines. */
odp_bool_t fair_queuing_needed;
- /** weights_needd indicates that the tm_node schedulers at this
+ /** weights_needed indicates that the tm_node schedulers at this
* level are expected have different weights for their different
* fanins. When true the min_weight and max_weight fields above
* specify the used range of such weights. */
odp_bool_t weights_needed;
+
+ /** tm_node_threshold_needed indicates that the tm_nodes at this
+ * level may use threshold profile support */
+ odp_bool_t tm_node_threshold_needed;
} odp_tm_level_requirements_t;
/** TM Requirements Record.
@@ -471,6 +602,10 @@ typedef struct {
* ignored if tm_queue_wred_needed above is false. */
odp_bool_t tm_queue_dual_slope_needed;
+ /** tm_queue_threshold_needed indicates that the tm_queues are
+ * expected to use threshold profile support */
+ odp_bool_t tm_queue_threshold_needed;
+
/** vlan_marking_needed indicates that the ODP application expects
* to use some form of VLAN egress marking using the
* odp_tm_vlan_marking() function. See also comments for
@@ -494,9 +629,15 @@ typedef struct {
* the application will not enable this color for vlan marking,
* ecn marking nor drop precedence marking. A value of TRUE means that
* the application expects to use this color in conjunction with one or
- * more of the marking API's. */
+ * more of the marking APIs. */
odp_bool_t marking_colors_needed[ODP_NUM_PACKET_COLORS];
+ /** Packet priority mode.
+ * TM capabilities indicate which modes are supported.
+ * The default value is ODP_TM_PKT_PRIO_MODE_PRESERVE.
+ */
+ odp_tm_pkt_prio_mode_t pkt_prio_mode;
+
/** The per_level array specifies the TM system requirements that
* can vary based upon the tm_node level. */
odp_tm_level_requirements_t per_level[ODP_TM_MAX_LEVELS];
@@ -556,19 +697,23 @@ void odp_tm_egress_init(odp_tm_egress_t *egress);
/** Query All TM Capabilities
*
- * The odp_tm_capabilities() function can be used to obtain the complete set of
- * TM limits supported by this implementation. The reason that this returns
- * a SET of capabilities and not just one, is because it is expected that
- * many HW based implementations may have one set of limits for the HW and
- * also support a SW TM implementation with a (presumably larger) different
- * set of limits. There are also cases where there could be more than
- * SW implementation (one supporting say tens of thousands of tm_queues and
- * a variant supporting tens of millions of tm_queues).
+ * @deprecated Use odp_tm_egress_capabilities() instead that also additionally
+ * takes egress as input to provide capabilities specific to a given egress.
+ *
+ * This function returns the set of TM capabilities that are common for all
+ * egresses. The reason that this returns a SET of capabilities and not just
+ * one, is because it is expected that many HW based implementations may have
+ * one set of limits for the HW and also support a SW TM implementation with a
+ * (presumably larger) different set of limits. There are also cases where
+ * there could be more than one SW implementation (one supporting say tens of
+ * thousands of tm_queues and a variant supporting tens of millions of
+ * tm_queues). It returns capabilities that are valid for all egresses.
* The caller passes in an array of odp_tm_capabilities_t records and the
- * number of such records. Then the first N of these records will be filled
- * in by the implementation and the number N will be returned. In the event
- * that N is larger than the capabilities_size, N will still be returned,
- * but only capabilities_size records will be filled in.
+ * maximum number of such records to output. If the number of records the
+ * implementation supports is larger than the number the caller requested,
+ * then only the requested number of records are written and the return
+ * value is the maximum number of records the implementation supports.
+ * Caller then may again call with larger number of records to be returned.
*
* @param[out] capabilities An array of odp_tm_capabilities_t records to
* be filled in.
@@ -580,8 +725,32 @@ void odp_tm_egress_init(odp_tm_egress_t *egress);
* implementations supports. *NOTE* that this
* number can be > capabilities_size!
*/
-int odp_tm_capabilities(odp_tm_capabilities_t capabilities[],
- uint32_t capabilities_size);
+int ODP_DEPRECATE(odp_tm_capabilities)(odp_tm_capabilities_t capabilities[],
+ uint32_t capabilities_size);
+
+/** Query TM Capabilities specific to an egress
+ *
+ * The function returns the set of TM limits supported by this implementation
+ * for a given egress. Unlike odp_tm_capability(), which returns capabilities
+ * of an already created TM system that are limited by its requirements, this
+ * function returns the maximum TM system limits.
+ *
+ * Lack of TM support in the given egress does not cause this
+ * function to return a failure. Lack of TM support is indicated
+ * by zero max_tm_queues capability.
+ *
+ * If the egress is of the pktio kind and its pktio has not been opened
+ * in the ODP_PKTOUT_MODE_TM pktout mode, the capabilities will
+ * indicate that TM is not supported.
+ *
+ * @param[out] capabilities odp_tm_capabilities_t record to be filled in.
+ * @param egress Only capabilities compatible with this egress
+ * are returned.
+ * @retval 0 on success
+ * @retval <0 on failure
+ */
+int odp_tm_egress_capabilities(odp_tm_capabilities_t *capabilities,
+ const odp_tm_egress_t *egress);
/** Create/instantiate a TM Packet Scheduling system.
*
@@ -645,8 +814,7 @@ odp_tm_t odp_tm_find(const char *name,
* In addition, ODP TM implementations should fail API requests that "exceed"
* the limits or features contracted for in the requirements.
*
- * @param odp_tm The odp_tm_t value of the TM system to be
- * queried.
+ * @param tm TM handle
* @param[out] capabilities A pointer to an odp_tm_capabilities_t record
* where the actual limits used by the TM system are
* copied into. Note that these limits do NOT
@@ -654,10 +822,11 @@ odp_tm_t odp_tm_find(const char *name,
* a TM system was created by odp_tm_create,
* but of course these limits in some cases could
* be larger.
+ *
* @return Returns 0 upon success, < 0 upon failure (which
* indicates that the odp_tm value did not exist).
*/
-int odp_tm_capability(odp_tm_t odp_tm, odp_tm_capabilities_t *capabilities);
+int odp_tm_capability(odp_tm_t tm, odp_tm_capabilities_t *capabilities);
/**
* Start a TM system
@@ -665,7 +834,7 @@ int odp_tm_capability(odp_tm_t odp_tm, odp_tm_capabilities_t *capabilities);
* odp_tm_start() needs to be used to start an already created or found TM
* system. By default, all the TM systems are in stopped state.
*
- * @param tm TM system to be started
+ * @param tm TM handle
*
* @retval 0 on success
* @retval <0 on failure
@@ -688,7 +857,7 @@ int odp_tm_start(odp_tm_t tm);
* A following call to odp_tm_start() restarts TM system and its scheduling/shaping
* on existing and new packets.
*
- * @param tm TM system to be stopped
+ * @param tm TM handle
*
* @retval 0 on success
* @retval <0 on failure
@@ -715,11 +884,11 @@ int odp_tm_stop(odp_tm_t tm);
* TM system, other than EVENTUALLY these packets will be either sent (in ANY
* order) or freed.
*
- * @param odp_tm The odp_tm_t value of the TM system to be destroyed (and
- * hence destroyed (and hence freed).
+ * @param tm The handle of the TM system to be destroyed (and hence freed).
+ *
* @return 0 upon success, < 0 upon failure.
*/
-int odp_tm_destroy(odp_tm_t odp_tm);
+int odp_tm_destroy(odp_tm_t tm);
/** Marking APIs */
@@ -736,15 +905,16 @@ int odp_tm_destroy(odp_tm_t odp_tm);
* calls to this function with drop_eligible_enabled == FALSE - i.e. must
* always return 0 when disabling this feature.
*
- * @param odp_tm Odp_tm is used to identify the TM system
- * whose egress behavior is being changed.
+ * @param tm Handle of the TM system whose egress behavior
+ * is being changed.
* @param color The packet color whose egress marking is
* being changed.
* @param drop_eligible_enabled If true then will set the DEI bit for
* egressed VLAN tagged pkts with this color.
+ *
* @return 0 upon success, < 0 upon failure.
*/
-int odp_tm_vlan_marking(odp_tm_t odp_tm,
+int odp_tm_vlan_marking(odp_tm_t tm,
odp_packet_color_t color,
odp_bool_t drop_eligible_enabled);
@@ -765,8 +935,8 @@ int odp_tm_vlan_marking(odp_tm_t odp_tm,
* calls to this function with ecn_ce_enabled == FALSE - i.e. must always
* return 0 when disabling this feature.
*
- * @param odp_tm Odp_tm is used to identify the TM system whose
- * egress behavior is being changed.
+ * @param tm Handle of the TM system whose egress behavior is being
+ * changed.
* @param color The packet color whose egress marking is
* being changed.
* @param ecn_ce_enabled If true then egressed IPv4/IPv6 pkts whose
@@ -774,9 +944,10 @@ int odp_tm_vlan_marking(odp_tm_t odp_tm,
* either one of the two values 1 or 2, will set this
* subfield to the value ECN_CE - i.e. Congestion
* Experienced (whose value is 3).
+ *
* @return 0 upon success, < 0 upon failure.
*/
-int odp_tm_ecn_marking(odp_tm_t odp_tm,
+int odp_tm_ecn_marking(odp_tm_t tm,
odp_packet_color_t color,
odp_bool_t ecn_ce_enabled);
@@ -805,17 +976,18 @@ int odp_tm_ecn_marking(odp_tm_t odp_tm,
* calls to this function with drop_prec_enabled == FALSE - i.e. must always
* return 0 when disabling this feature.
*
- * @param odp_tm Odp_tm is used to identify the TM system whose
- * egress behavior is being changed.
+ * @param tm Handle of the TM system whose egress behavior is
+ * being changed.
* @param color The packet color whose egress marking is
* being changed.
* @param drop_prec_enabled If true then egressed IPv4/IPv6 pkts with this
* color will have the pkt's Drop Precedence
* sub-subfield of the DSCP subfield set to
* LOW, MEDIUM or HIGH drop precedence.
+ *
* @return 0 upon success, < 0 upon failure.
*/
-int odp_tm_drop_prec_marking(odp_tm_t odp_tm,
+int odp_tm_drop_prec_marking(odp_tm_t tm,
odp_packet_color_t color,
odp_bool_t drop_prec_enabled);
@@ -1004,11 +1176,11 @@ typedef struct {
/** In the case that sched_modes for a given strict priority level
* indicates the use of weighted scheduling, this field supplies the
* weighting factors. The weights - when defined - are used such that
- * the (adjusted) frame lengths are divided by these 8-bit weights
+ * the (adjusted) frame lengths are divided by these weights
* (i.e. they are divisors and not multipliers). Consequently a
* weight of 0 (when sched_mode is ODP_TM_BYTE_BASED_WEIGHTS) is
* illegal. */
- uint8_t sched_weights[ODP_TM_MAX_PRIORITIES];
+ uint32_t sched_weights[ODP_TM_MAX_PRIORITIES];
} odp_tm_sched_params_t;
/** odp_tm_sched_params_init() must be called to initialize any
@@ -1351,6 +1523,22 @@ typedef struct {
* greater levels may be connected to the fan-in of tm_node's with
* numerically smaller levels. */
uint8_t level;
+
+ /** New strict priority level assigned to packets going through this
+ * node when packet priority mode is ODP_TM_PKT_PRIO_MODE_OVERWRITE.
+ * In other packet priority modes this field is ignored. The new
+ * priority does not affect packet processing in this node but in
+ * its destination node.
+ *
+ * The value must be in the range 0..ODP_TM_MAX_PRIORITIES-1.
+ * Additionally, the total number of possible priorities seen by
+ * the destination node must not exceed the max priority configured
+ * for the destination node.
+ *
+ * @see odp_tm_pkt_prio_mode_t
+ * @see odp_tm_level_requirements_t::max_priority
+ */
+ uint8_t priority;
} odp_tm_node_params_t;
/** odp_tm_node_params_init() must be called to initialize any
@@ -1368,17 +1556,18 @@ void odp_tm_node_params_init(odp_tm_node_params_t *params);
* strict priority levels for an tm_node cannot be changed after tm_node
* creation. The level parameter MUST be in the range 0..max_level - 1.
*
- * @param odp_tm Odp_tm is used to identify the TM system into which this
- * odp_tm_node object is created.
+ * @param tm Handle of the TM system into which this odp_tm_node object is
+ * created.
* @param name Optional name that can be used later later to find this
* same odp_tm_node_t. Can be NULL, otherwise must be
* unique across all odp_tm_node objects.
* @param params A pointer to a record holding (an extensible) set of
* properties/attributes of this tm_node.
+ *
* @return Returns ODP_TM_INVALID upon failure, otherwise returns
* a valid odp_tm_node_t handle if successful.
*/
-odp_tm_node_t odp_tm_node_create(odp_tm_t odp_tm, const char *name,
+odp_tm_node_t odp_tm_node_create(odp_tm_t tm, const char *name,
const odp_tm_node_params_t *params);
/** Destroy a tm_node object.
@@ -1455,14 +1644,13 @@ int odp_tm_node_wred_config(odp_tm_node_t tm_node,
/** odp_tm_node_lookup() can be used to find the tm_node object created with
* the specified name.
*
- * @param odp_tm Odp_tm is used to identify the TM system into which this
- * odp_tm_node object is created.
+ * @param tm TM handle
* @param name Name of a previously created tm_node. Cannot be NULL.
*
* @return Returns ODP_TM_INVALID upon failure, or the tm_node
* handle created with this name.
*/
-odp_tm_node_t odp_tm_node_lookup(odp_tm_t odp_tm, const char *name);
+odp_tm_node_t odp_tm_node_lookup(odp_tm_t tm, const char *name);
/** odp_tm_node_context() can be used to get the user_context value that is
* associated with the given tm_node.
@@ -1518,6 +1706,12 @@ typedef struct {
* have the same single strict priority level and this level must be
* in the range 0..max_priority. */
uint8_t priority;
+
+ /** Maintain original packet order of the source queue when enqueuing
+ * packets to this queue while holding ordered or atomic queue
+ * synchronization context. Default value of this flag is true.
+ */
+ odp_bool_t ordered_enqueue;
} odp_tm_queue_params_t;
/** odp_tm_queue_params_init() must be called to initialize any
@@ -1536,14 +1730,15 @@ void odp_tm_queue_params_init(odp_tm_queue_params_t *params);
* number of buffers and instead limit the queue memory usage by buffer counts
* versus strictly using byte counts.
*
- * @param odp_tm Odp_tm is used to identify the TM system into which this
- * odp_tm_queue object is created.
+ * @param tm Handle of the TM system into which this odp_tm_queue object is
+ * created.
* @param params A pointer to a record holding (an extensible) set of
* properties/attributes of this tm_queue.
+ *
* @return Returns ODP_TM_INVALID upon failure, otherwise a valid
* odp_tm_queue_t handle.
*/
-odp_tm_queue_t odp_tm_queue_create(odp_tm_t odp_tm,
+odp_tm_queue_t odp_tm_queue_create(odp_tm_t tm,
const odp_tm_queue_params_t *params);
/** Destroy an tm_queue object. The odp_tm_queue_destroy frees the resources
@@ -1561,7 +1756,7 @@ int odp_tm_queue_destroy(odp_tm_queue_t tm_queue);
* @param tm_queue Specifies the tm_queue whose user_context is to be
* returned.
* @return Returns the user_context pointer associated with this
- * tm_queue. Returns NULL if the tm_quue is not valid OR
+ * tm_queue. Returns NULL if the tm_queue is not valid OR
* if the user_context was NULL.
*/
void *odp_tm_queue_context(odp_tm_queue_t tm_queue);
@@ -1719,6 +1914,22 @@ int odp_tm_queue_disconnect(odp_tm_queue_t tm_queue);
*/
int odp_tm_enq(odp_tm_queue_t tm_queue, odp_packet_t pkt);
+/** The odp_tm_enq_multi() function is used to add packets to a given TM system.
+ * This function enqueues multiple packets but is otherwise similar to
+ * odp_tm_enq(). Packets dropped by WRED or other queue management action do not
+ * cause this function to return a failure. Such packets get consumed just like
+ * the packets that are not dropped.
+ *
+ * @param tm_queue Specifies the tm_queue (and indirectly the TM system).
+ * @param packets Array of packets to enqueue.
+ * @param num Number of packets to send.
+ *
+ * @retval >0 on success indicating number of packets consumed
+ * @retval <=0 on failure.
+ */
+int odp_tm_enq_multi(odp_tm_queue_t tm_queue, const odp_packet_t packets[],
+ int num);
+
/** The odp_tm_enq_with_cnt() function behaves identically to odp_tm_enq(),
* except that it also returns (an approximation to?) the current tm_queue
* packet queue count.
@@ -1862,7 +2073,7 @@ typedef struct {
odp_tm_wred_t wred_profile[ODP_NUM_PACKET_COLORS];
/** The next_tm_node is the "next" node in the tree - i.e. the fanout
- * of this tm_queu. Can be ODP_TM_ROOT if this tm_queue directly
+ * of this tm_queue. Can be ODP_TM_ROOT if this tm_queue directly
* connects to the egress spigot and can be ODP_TM_INVALID if this
* tm_queue is disconnected from the TM system tree. */
odp_tm_node_t next_tm_node;
@@ -1956,12 +2167,10 @@ typedef struct {
} odp_tm_query_info_t;
/** The odp_tm_queue_query() function can be used to check a single tm_queue's
- * queue utilization. The query_flags indicate whether or not packet counts,
- * byte counts or both are being requested. It is an error to request
- * neither. The implementation may still return both sets of counts
- * regardless of query_flags if the cost of returning all the counts is
- * comparable to the cost of checking the query_flags. The info structure is
- * written only on success.
+ * queue utilization. The query flags indicate which information is being
+ * requested.
+ * The implementation may choose to return additional information that was not
+ * requested. The info structure is written only on success.
*
* @param tm_queue Specifies the tm_queue (and indirectly the
* TM system).
@@ -1976,12 +2185,10 @@ int odp_tm_queue_query(odp_tm_queue_t tm_queue,
odp_tm_query_info_t *info);
/** The odp_tm_priority_query() function can be used to check the queue
- * utilization of all tm_queue's with the given priority. The query_flags
- * indicate whether or not packet counts, byte counts or both are being
- * requested. It is an error to request neither. The implementation may
- * still return both sets of counts regardless of query_flags if the cost of
- * returning all the counts is comparable to the cost of checking the
- * query_flags. The info structure is written only on success.
+ * utilization of all tm_queue's with the given priority. The query flags
+ * indicate which information is being requested. The implementation may
+ * choose to return additional information that was not requested.
+ * The info structure is written only on success.
*
* @param odp_tm Specifies the TM system.
* @param priority Supplies the strict priority level used to specify
@@ -1999,11 +2206,9 @@ int odp_tm_priority_query(odp_tm_t odp_tm,
/** The odp_tm_total_query() function can be used to check the queue
* utilization of all tm_queue's in a single TM system. The query_flags
- * indicate whether or not packet counts, byte counts or both are being
- * requested. It is an error to request neither. The implementation may
- * still return both sets of counts regardless of query_flags if the cost of
- * returning all the counts is comparable to the cost of checking the
- * query_flags. The info structure is written only on success.
+ * indicate which information is being requested. The implementation may
+ * choose to return additional information that was not requested.
+ * The info structure is written only on success.
*
* @param odp_tm Specifies the TM system.
* @param query_flags A set of flag bits indicating which counters are
@@ -2041,13 +2246,14 @@ int odp_tm_priority_threshold_config(odp_tm_t odp_tm,
* other than returning these queue threshold values in the
* odp_tm_query_info_t record.
*
- * @param odp_tm Specifies the TM system.
+ * @param tm TM handle
* @param thresholds_profile Specifies the queue threshold profile that
* should now be used for the entire TM
* system.
+ *
* @return Returns 0 upon success and < 0 upon failure.
*/
-int odp_tm_total_threshold_config(odp_tm_t odp_tm,
+int odp_tm_total_threshold_config(odp_tm_t tm,
odp_tm_threshold_t thresholds_profile);
/** The odp_tm_is_idle function is used to determine if the specified ODP
@@ -2059,23 +2265,41 @@ int odp_tm_total_threshold_config(odp_tm_t odp_tm,
* since for some implementations this call could take a fairly long time
* to execute!
*
- * @param odp_tm Specifies the TM system.
+ * @param tm TM handle
+ *
* @return Returns 1 if the TM system is idle and 0 otherwise.
*/
-odp_bool_t odp_tm_is_idle(odp_tm_t odp_tm);
+odp_bool_t odp_tm_is_idle(odp_tm_t tm);
/** The odp_tm_stats_print function is used to write implementation-defined
* information about the specified TM system to the ODP log. The intended use
* is for debugging.
*
- * @param odp_tm Specifies the TM system.
+ * @param tm TM handle
*/
-void odp_tm_stats_print(odp_tm_t odp_tm);
+void odp_tm_stats_print(odp_tm_t tm);
+
+/**
+ * Get statistics for a TM queue
+ *
+ * Counters not supported by the queue are set to zero.
+ *
+ * It's implementation defined if odp_pktio_stats_reset() call affects these
+ * counters.
+ *
+ * @param tm_queue TM queue handle
+ * @param[out] stats Statistics structure for output
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
+ */
+int odp_tm_queue_stats(odp_tm_queue_t tm_queue, odp_tm_queue_stats_t *stats);
/**
* Get printable value for an odp_tm_t
*
- * @param hdl odp_tm_t handle to be printed
+ * @param tm TM handle
+ *
* @return uint64_t value that can be used to print/display this
* handle
*
@@ -2083,7 +2307,7 @@ void odp_tm_stats_print(odp_tm_t odp_tm);
* to enable applications to generate a printable value that represents
* an odp_tm_t handle.
*/
-uint64_t odp_tm_to_u64(odp_tm_t hdl);
+uint64_t odp_tm_to_u64(odp_tm_t tm);
/**
* Get printable value for an odp_tm_queue_t
diff --git a/include/odp/api/std_clib.h b/include/odp/api/std.h
index 43ba7506e..6af95b55a 100644
--- a/include/odp/api/std_clib.h
+++ b/include/odp/api/std.h
@@ -4,17 +4,17 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-#ifndef ODP_API_STD_CLIB_H_
-#define ODP_API_STD_CLIB_H_
+#ifndef ODP_API_STD_H_
+#define ODP_API_STD_H_
#ifdef __cplusplus
extern "C" {
#endif
#include <odp/api/abi/std_types.h>
-#include <odp/api/abi/std_clib.h>
+#include <odp/api/abi/std.h>
-#include <odp/api/spec/std_clib.h>
+#include <odp/api/spec/std.h>
#ifdef __cplusplus
}
diff --git a/include/odp/api/timer.h b/include/odp/api/timer.h
index 12d3f999e..7c0dd95b6 100644
--- a/include/odp/api/timer.h
+++ b/include/odp/api/timer.h
@@ -20,7 +20,7 @@ extern "C" {
#include <odp/api/std_types.h>
#include <odp/api/abi/event.h>
#include <odp/api/abi/pool.h>
-#include <odp/api/abi/queue.h>
+#include <odp/api/abi/queue_types.h>
#include <odp/api/abi/timer.h>
#include <odp/api/spec/timer.h>
diff --git a/include/odp/arch/arm32-linux/odp/api/abi/packet_types.h b/include/odp/arch/arm32-linux/odp/api/abi/packet_types.h
new file mode 100644
index 000000000..c92ffd684
--- /dev/null
+++ b/include/odp/arch/arm32-linux/odp/api/abi/packet_types.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/packet_types.h>
diff --git a/include/odp/arch/arm32-linux/odp/api/abi/proto_stats.h b/include/odp/arch/arm32-linux/odp/api/abi/proto_stats.h
new file mode 100644
index 000000000..81108faa5
--- /dev/null
+++ b/include/odp/arch/arm32-linux/odp/api/abi/proto_stats.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include <odp/api/abi-default/proto_stats.h>
diff --git a/include/odp/arch/arm32-linux/odp/api/abi/proto_stats_types.h b/include/odp/arch/arm32-linux/odp/api/abi/proto_stats_types.h
new file mode 100644
index 000000000..1cb6128b6
--- /dev/null
+++ b/include/odp/arch/arm32-linux/odp/api/abi/proto_stats_types.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/proto_stats_types.h>
diff --git a/include/odp/arch/arm32-linux/odp/api/abi/queue_types.h b/include/odp/arch/arm32-linux/odp/api/abi/queue_types.h
new file mode 100644
index 000000000..51837734a
--- /dev/null
+++ b/include/odp/arch/arm32-linux/odp/api/abi/queue_types.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/queue_types.h>
diff --git a/include/odp/arch/mips64-linux/odp/api/abi/std_clib.h b/include/odp/arch/arm32-linux/odp/api/abi/std.h
index 249bfe712..a7243c4d4 100644
--- a/include/odp/arch/mips64-linux/odp/api/abi/std_clib.h
+++ b/include/odp/arch/arm32-linux/odp/api/abi/std.h
@@ -4,4 +4,4 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <odp/api/abi-default/std_clib.h>
+#include <odp/api/abi-default/std.h>
diff --git a/include/odp/arch/arm64-linux/odp/api/abi/packet_types.h b/include/odp/arch/arm64-linux/odp/api/abi/packet_types.h
new file mode 100644
index 000000000..c92ffd684
--- /dev/null
+++ b/include/odp/arch/arm64-linux/odp/api/abi/packet_types.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/packet_types.h>
diff --git a/include/odp/arch/arm64-linux/odp/api/abi/proto_stats.h b/include/odp/arch/arm64-linux/odp/api/abi/proto_stats.h
new file mode 100644
index 000000000..81108faa5
--- /dev/null
+++ b/include/odp/arch/arm64-linux/odp/api/abi/proto_stats.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include <odp/api/abi-default/proto_stats.h>
diff --git a/include/odp/arch/arm64-linux/odp/api/abi/proto_stats_types.h b/include/odp/arch/arm64-linux/odp/api/abi/proto_stats_types.h
new file mode 100644
index 000000000..1cb6128b6
--- /dev/null
+++ b/include/odp/arch/arm64-linux/odp/api/abi/proto_stats_types.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/proto_stats_types.h>
diff --git a/include/odp/arch/arm64-linux/odp/api/abi/queue_types.h b/include/odp/arch/arm64-linux/odp/api/abi/queue_types.h
new file mode 100644
index 000000000..51837734a
--- /dev/null
+++ b/include/odp/arch/arm64-linux/odp/api/abi/queue_types.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/queue_types.h>
diff --git a/include/odp/arch/arm64-linux/odp/api/abi/std_clib.h b/include/odp/arch/arm64-linux/odp/api/abi/std.h
index 249bfe712..a7243c4d4 100644
--- a/include/odp/arch/arm64-linux/odp/api/abi/std_clib.h
+++ b/include/odp/arch/arm64-linux/odp/api/abi/std.h
@@ -4,4 +4,4 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <odp/api/abi-default/std_clib.h>
+#include <odp/api/abi-default/std.h>
diff --git a/include/odp/arch/default-linux/odp/api/abi/packet_types.h b/include/odp/arch/default-linux/odp/api/abi/packet_types.h
new file mode 100644
index 000000000..c92ffd684
--- /dev/null
+++ b/include/odp/arch/default-linux/odp/api/abi/packet_types.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/packet_types.h>
diff --git a/include/odp/arch/default-linux/odp/api/abi/proto_stats.h b/include/odp/arch/default-linux/odp/api/abi/proto_stats.h
new file mode 100644
index 000000000..81108faa5
--- /dev/null
+++ b/include/odp/arch/default-linux/odp/api/abi/proto_stats.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include <odp/api/abi-default/proto_stats.h>
diff --git a/include/odp/arch/default-linux/odp/api/abi/proto_stats_types.h b/include/odp/arch/default-linux/odp/api/abi/proto_stats_types.h
new file mode 100644
index 000000000..1cb6128b6
--- /dev/null
+++ b/include/odp/arch/default-linux/odp/api/abi/proto_stats_types.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/proto_stats_types.h>
diff --git a/include/odp/arch/default-linux/odp/api/abi/queue_types.h b/include/odp/arch/default-linux/odp/api/abi/queue_types.h
new file mode 100644
index 000000000..51837734a
--- /dev/null
+++ b/include/odp/arch/default-linux/odp/api/abi/queue_types.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/queue_types.h>
diff --git a/include/odp/arch/default-linux/odp/api/abi/std_clib.h b/include/odp/arch/default-linux/odp/api/abi/std.h
index 2fa1a5953..bc1abf1a6 100644
--- a/include/odp/arch/default-linux/odp/api/abi/std_clib.h
+++ b/include/odp/arch/default-linux/odp/api/abi/std.h
@@ -4,4 +4,4 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <odp/api/abi-default/std_clib.h>
+#include <odp/api/abi-default/std.h>
diff --git a/include/odp/arch/mips64-linux/odp/api/abi/packet_types.h b/include/odp/arch/mips64-linux/odp/api/abi/packet_types.h
new file mode 100644
index 000000000..c92ffd684
--- /dev/null
+++ b/include/odp/arch/mips64-linux/odp/api/abi/packet_types.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/packet_types.h>
diff --git a/include/odp/arch/mips64-linux/odp/api/abi/proto_stats.h b/include/odp/arch/mips64-linux/odp/api/abi/proto_stats.h
new file mode 100644
index 000000000..81108faa5
--- /dev/null
+++ b/include/odp/arch/mips64-linux/odp/api/abi/proto_stats.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include <odp/api/abi-default/proto_stats.h>
diff --git a/include/odp/arch/mips64-linux/odp/api/abi/proto_stats_types.h b/include/odp/arch/mips64-linux/odp/api/abi/proto_stats_types.h
new file mode 100644
index 000000000..1cb6128b6
--- /dev/null
+++ b/include/odp/arch/mips64-linux/odp/api/abi/proto_stats_types.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/proto_stats_types.h>
diff --git a/include/odp/arch/mips64-linux/odp/api/abi/queue_types.h b/include/odp/arch/mips64-linux/odp/api/abi/queue_types.h
new file mode 100644
index 000000000..51837734a
--- /dev/null
+++ b/include/odp/arch/mips64-linux/odp/api/abi/queue_types.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/queue_types.h>
diff --git a/include/odp/arch/power64-linux/odp/api/abi/std_clib.h b/include/odp/arch/mips64-linux/odp/api/abi/std.h
index 249bfe712..a7243c4d4 100644
--- a/include/odp/arch/power64-linux/odp/api/abi/std_clib.h
+++ b/include/odp/arch/mips64-linux/odp/api/abi/std.h
@@ -4,4 +4,4 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <odp/api/abi-default/std_clib.h>
+#include <odp/api/abi-default/std.h>
diff --git a/include/odp/arch/power64-linux/odp/api/abi/packet_types.h b/include/odp/arch/power64-linux/odp/api/abi/packet_types.h
new file mode 100644
index 000000000..c92ffd684
--- /dev/null
+++ b/include/odp/arch/power64-linux/odp/api/abi/packet_types.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/packet_types.h>
diff --git a/include/odp/arch/power64-linux/odp/api/abi/proto_stats.h b/include/odp/arch/power64-linux/odp/api/abi/proto_stats.h
new file mode 100644
index 000000000..81108faa5
--- /dev/null
+++ b/include/odp/arch/power64-linux/odp/api/abi/proto_stats.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include <odp/api/abi-default/proto_stats.h>
diff --git a/include/odp/arch/power64-linux/odp/api/abi/proto_stats_types.h b/include/odp/arch/power64-linux/odp/api/abi/proto_stats_types.h
new file mode 100644
index 000000000..1cb6128b6
--- /dev/null
+++ b/include/odp/arch/power64-linux/odp/api/abi/proto_stats_types.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/proto_stats_types.h>
diff --git a/include/odp/arch/power64-linux/odp/api/abi/queue_types.h b/include/odp/arch/power64-linux/odp/api/abi/queue_types.h
new file mode 100644
index 000000000..51837734a
--- /dev/null
+++ b/include/odp/arch/power64-linux/odp/api/abi/queue_types.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/queue_types.h>
diff --git a/include/odp/arch/arm32-linux/odp/api/abi/std_clib.h b/include/odp/arch/power64-linux/odp/api/abi/std.h
index 249bfe712..a7243c4d4 100644
--- a/include/odp/arch/arm32-linux/odp/api/abi/std_clib.h
+++ b/include/odp/arch/power64-linux/odp/api/abi/std.h
@@ -4,4 +4,4 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <odp/api/abi-default/std_clib.h>
+#include <odp/api/abi-default/std.h>
diff --git a/include/odp/arch/x86_32-linux/odp/api/abi/packet_types.h b/include/odp/arch/x86_32-linux/odp/api/abi/packet_types.h
new file mode 100644
index 000000000..c92ffd684
--- /dev/null
+++ b/include/odp/arch/x86_32-linux/odp/api/abi/packet_types.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/packet_types.h>
diff --git a/include/odp/arch/x86_32-linux/odp/api/abi/proto_stats.h b/include/odp/arch/x86_32-linux/odp/api/abi/proto_stats.h
new file mode 100644
index 000000000..81108faa5
--- /dev/null
+++ b/include/odp/arch/x86_32-linux/odp/api/abi/proto_stats.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include <odp/api/abi-default/proto_stats.h>
diff --git a/include/odp/arch/x86_32-linux/odp/api/abi/proto_stats_types.h b/include/odp/arch/x86_32-linux/odp/api/abi/proto_stats_types.h
new file mode 100644
index 000000000..1cb6128b6
--- /dev/null
+++ b/include/odp/arch/x86_32-linux/odp/api/abi/proto_stats_types.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/proto_stats_types.h>
diff --git a/include/odp/arch/x86_32-linux/odp/api/abi/queue_types.h b/include/odp/arch/x86_32-linux/odp/api/abi/queue_types.h
new file mode 100644
index 000000000..51837734a
--- /dev/null
+++ b/include/odp/arch/x86_32-linux/odp/api/abi/queue_types.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/queue_types.h>
diff --git a/include/odp/arch/x86_32-linux/odp/api/abi/std.h b/include/odp/arch/x86_32-linux/odp/api/abi/std.h
new file mode 100644
index 000000000..a7243c4d4
--- /dev/null
+++ b/include/odp/arch/x86_32-linux/odp/api/abi/std.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/std.h>
diff --git a/include/odp/arch/x86_32-linux/odp/api/abi/std_clib.h b/include/odp/arch/x86_32-linux/odp/api/abi/std_clib.h
deleted file mode 100644
index 249bfe712..000000000
--- a/include/odp/arch/x86_32-linux/odp/api/abi/std_clib.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* Copyright (c) 2017-2018, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp/api/abi-default/std_clib.h>
diff --git a/include/odp/arch/x86_64-linux/odp/api/abi/packet_types.h b/include/odp/arch/x86_64-linux/odp/api/abi/packet_types.h
new file mode 100644
index 000000000..c92ffd684
--- /dev/null
+++ b/include/odp/arch/x86_64-linux/odp/api/abi/packet_types.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/packet_types.h>
diff --git a/include/odp/arch/x86_64-linux/odp/api/abi/proto_stats.h b/include/odp/arch/x86_64-linux/odp/api/abi/proto_stats.h
new file mode 100644
index 000000000..81108faa5
--- /dev/null
+++ b/include/odp/arch/x86_64-linux/odp/api/abi/proto_stats.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include <odp/api/abi-default/proto_stats.h>
diff --git a/include/odp/arch/x86_64-linux/odp/api/abi/proto_stats_types.h b/include/odp/arch/x86_64-linux/odp/api/abi/proto_stats_types.h
new file mode 100644
index 000000000..1cb6128b6
--- /dev/null
+++ b/include/odp/arch/x86_64-linux/odp/api/abi/proto_stats_types.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/proto_stats_types.h>
diff --git a/include/odp/arch/x86_64-linux/odp/api/abi/queue_types.h b/include/odp/arch/x86_64-linux/odp/api/abi/queue_types.h
new file mode 100644
index 000000000..51837734a
--- /dev/null
+++ b/include/odp/arch/x86_64-linux/odp/api/abi/queue_types.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/queue_types.h>
diff --git a/include/odp/arch/x86_64-linux/odp/api/abi/std.h b/include/odp/arch/x86_64-linux/odp/api/abi/std.h
new file mode 100644
index 000000000..a7243c4d4
--- /dev/null
+++ b/include/odp/arch/x86_64-linux/odp/api/abi/std.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/std.h>
diff --git a/include/odp/arch/x86_64-linux/odp/api/abi/std_clib.h b/include/odp/arch/x86_64-linux/odp/api/abi/std_clib.h
deleted file mode 100644
index 249bfe712..000000000
--- a/include/odp/arch/x86_64-linux/odp/api/abi/std_clib.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* Copyright (c) 2017-2018, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp/api/abi-default/std_clib.h>
diff --git a/include/odp_api.h b/include/odp_api.h
index 763c9ed7d..84530d2ab 100644
--- a/include/odp_api.h
+++ b/include/odp_api.h
@@ -49,6 +49,7 @@ extern "C" {
#include <odp/api/packet.h>
#include <odp/api/packet_flags.h>
#include <odp/api/packet_io.h>
+#include <odp/api/proto_stats.h>
#include <odp/api/crypto.h>
#include <odp/api/classification.h>
#include <odp/api/rwlock.h>
@@ -59,7 +60,7 @@ extern "C" {
#include <odp/api/traffic_mngr.h>
#include <odp/api/spinlock_recursive.h>
#include <odp/api/rwlock_recursive.h>
-#include <odp/api/std_clib.h>
+#include <odp/api/std.h>
#include <odp/api/support.h>
#include <odp/api/ipsec.h>
#include <odp/api/stash.h>
diff --git a/platform/linux-dpdk/Makefile.am b/platform/linux-dpdk/Makefile.am
index bff49c692..480851436 100644
--- a/platform/linux-dpdk/Makefile.am
+++ b/platform/linux-dpdk/Makefile.am
@@ -41,7 +41,7 @@ odpapiplatinclude_HEADERS = \
include/odp/api/plat/pool_inline_types.h \
include/odp/api/plat/queue_inlines.h \
include/odp/api/plat/queue_inline_types.h \
- include/odp/api/plat/std_clib_inlines.h \
+ include/odp/api/plat/std_inlines.h \
include/odp/api/plat/strong_types.h \
include/odp/api/plat/sync_inlines.h \
include/odp/api/plat/thread_inlines.h \
@@ -65,10 +65,14 @@ odpapiabiarchinclude_HEADERS += \
include-abi/odp/api/abi/init.h \
include-abi/odp/api/abi/ipsec.h \
include-abi/odp/api/abi/packet.h \
+ include-abi/odp/api/abi/packet_types.h \
include-abi/odp/api/abi/packet_flags.h \
include-abi/odp/api/abi/packet_io.h \
+ include-abi/odp/api/abi/proto_stats.h \
+ include-abi/odp/api/abi/proto_stats_types.h \
include-abi/odp/api/abi/pool.h \
include-abi/odp/api/abi/queue.h \
+ include-abi/odp/api/abi/queue_types.h \
include-abi/odp/api/abi/rwlock.h \
include-abi/odp/api/abi/rwlock_recursive.h \
include-abi/odp/api/abi/schedule.h \
@@ -77,7 +81,7 @@ odpapiabiarchinclude_HEADERS += \
include-abi/odp/api/abi/spinlock.h \
include-abi/odp/api/abi/spinlock_recursive.h \
include-abi/odp/api/abi/stash.h \
- include-abi/odp/api/abi/std_clib.h \
+ include-abi/odp/api/abi/std.h \
include-abi/odp/api/abi/std_types.h \
include-abi/odp/api/abi/sync.h \
include-abi/odp/api/abi/thread.h \
@@ -164,7 +168,6 @@ __LIB__libodp_dpdk_la_SOURCES = \
odp_crypto.c \
odp_errno.c \
../linux-generic/odp_event.c \
- ../linux-generic/odp_fractional.c \
../linux-generic/odp_hash_crc_gen.c \
odp_init.c \
../linux-generic/odp_impl.c \
@@ -202,6 +205,7 @@ __LIB__libodp_dpdk_la_SOURCES = \
../linux-generic/odp_spinlock.c \
../linux-generic/odp_spinlock_recursive.c \
../linux-generic/odp_stash.c \
+ ../linux-generic/odp_std.c \
odp_system_info.c \
../linux-generic/odp_pcapng.c \
odp_thread.c \
@@ -225,7 +229,7 @@ __LIB__libodp_dpdk_la_SOURCES += \
../linux-generic/odp_packet_flags_api.c \
../linux-generic/odp_pktio_api.c \
../linux-generic/odp_queue_api.c \
- odp_std_clib_api.c \
+ odp_std_api.c \
../linux-generic/odp_sync_api.c \
../linux-generic/odp_thread_api.c \
../linux-generic/odp_ticketlock_api.c \
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/packet.h b/platform/linux-dpdk/include-abi/odp/api/abi/packet.h
index 3b55e2693..913181b7a 100644
--- a/platform/linux-dpdk/include-abi/odp/api/abi/packet.h
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/packet.h
@@ -17,119 +17,9 @@
extern "C" {
#endif
-#include <odp/api/std_types.h>
-#include <odp/api/plat/strong_types.h>
-
-/** @ingroup odp_packet
- * @{
- */
-
-typedef ODP_HANDLE_T(odp_packet_t);
-
-#define ODP_PACKET_INVALID _odp_cast_scalar(odp_packet_t, 0)
-
-#define ODP_PACKET_OFFSET_INVALID 0xffff
-
-typedef ODP_HANDLE_T(odp_packet_seg_t);
-
-#define ODP_PACKET_SEG_INVALID _odp_cast_scalar(odp_packet_seg_t, 0)
-
-typedef ODP_HANDLE_T(odp_packet_vector_t);
-
-#define ODP_PACKET_VECTOR_INVALID _odp_cast_scalar(odp_packet_vector_t, 0)
-
-typedef ODP_HANDLE_T(odp_packet_tx_compl_t);
-
-#define ODP_PACKET_TX_COMPL_INVALID _odp_cast_scalar(odp_packet_tx_compl_t, 0)
-
-typedef uint8_t odp_proto_l2_type_t;
-
-#define ODP_PROTO_L2_TYPE_NONE 0
-#define ODP_PROTO_L2_TYPE_ETH 1
-
-typedef uint8_t odp_proto_l3_type_t;
-
-#define ODP_PROTO_L3_TYPE_NONE 0
-#define ODP_PROTO_L3_TYPE_ARP 1
-#define ODP_PROTO_L3_TYPE_RARP 2
-#define ODP_PROTO_L3_TYPE_MPLS 3
-#define ODP_PROTO_L3_TYPE_IPV4 4
-#define ODP_PROTO_L3_TYPE_IPV6 6
-
-typedef uint8_t odp_proto_l4_type_t;
-
-/* Numbers from IANA Assigned Internet Protocol Numbers list */
-#define ODP_PROTO_L4_TYPE_NONE 0
-#define ODP_PROTO_L4_TYPE_ICMPV4 1
-#define ODP_PROTO_L4_TYPE_IGMP 2
-#define ODP_PROTO_L4_TYPE_IPV4 4
-#define ODP_PROTO_L4_TYPE_TCP 6
-#define ODP_PROTO_L4_TYPE_UDP 17
-#define ODP_PROTO_L4_TYPE_IPV6 41
-#define ODP_PROTO_L4_TYPE_GRE 47
-#define ODP_PROTO_L4_TYPE_ESP 50
-#define ODP_PROTO_L4_TYPE_AH 51
-#define ODP_PROTO_L4_TYPE_ICMPV6 58
-#define ODP_PROTO_L4_TYPE_NO_NEXT 59
-#define ODP_PROTO_L4_TYPE_IPCOMP 108
-#define ODP_PROTO_L4_TYPE_SCTP 132
-#define ODP_PROTO_L4_TYPE_ROHC 142
-
-typedef enum {
- ODP_PACKET_GREEN = 0,
- ODP_PACKET_YELLOW = 1,
- ODP_PACKET_RED = 2,
- ODP_PACKET_ALL_COLORS = 3,
-} odp_packet_color_t;
-
-typedef enum {
- ODP_PACKET_CHKSUM_UNKNOWN = 0,
- ODP_PACKET_CHKSUM_BAD,
- ODP_PACKET_CHKSUM_OK
-} odp_packet_chksum_status_t;
-
-typedef struct odp_packet_parse_result_flag_t {
- union {
- uint64_t all;
-
- struct {
- uint64_t has_error : 1;
- uint64_t has_l2_error : 1;
- uint64_t has_l3_error : 1;
- uint64_t has_l4_error : 1;
- uint64_t has_l2 : 1;
- uint64_t has_l3 : 1;
- uint64_t has_l4 : 1;
- uint64_t has_eth : 1;
- uint64_t has_eth_bcast : 1;
- uint64_t has_eth_mcast : 1;
- uint64_t has_jumbo : 1;
- uint64_t has_vlan : 1;
- uint64_t has_vlan_qinq : 1;
- uint64_t has_arp : 1;
- uint64_t has_ipv4 : 1;
- uint64_t has_ipv6 : 1;
- uint64_t has_ip_bcast : 1;
- uint64_t has_ip_mcast : 1;
- uint64_t has_ipfrag : 1;
- uint64_t has_ipopt : 1;
- uint64_t has_ipsec : 1;
- uint64_t has_udp : 1;
- uint64_t has_tcp : 1;
- uint64_t has_sctp : 1;
- uint64_t has_icmp : 1;
- };
- };
-
-} odp_packet_parse_result_flag_t;
-
#include <odp/api/plat/packet_inlines.h>
#include <odp/api/plat/packet_vector_inlines.h>
-/**
- * @}
- */
-
#ifdef __cplusplus
}
#endif
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/packet_types.h b/platform/linux-dpdk/include-abi/odp/api/abi/packet_types.h
new file mode 100644
index 000000000..fadcaacb6
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/packet_types.h
@@ -0,0 +1,139 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP packet descriptor
+ */
+
+#ifndef ODP_API_ABI_PACKET_TYPES_H_
+#define ODP_API_ABI_PACKET_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/std_types.h>
+#include <odp/api/plat/strong_types.h>
+
+/** @ingroup odp_packet
+ * @{
+ */
+
+typedef ODP_HANDLE_T(odp_packet_t);
+
+#define ODP_PACKET_INVALID _odp_cast_scalar(odp_packet_t, 0)
+
+#define ODP_PACKET_OFFSET_INVALID 0xffff
+
+typedef ODP_HANDLE_T(odp_packet_seg_t);
+
+#define ODP_PACKET_SEG_INVALID _odp_cast_scalar(odp_packet_seg_t, 0)
+
+typedef ODP_HANDLE_T(odp_packet_buf_t);
+
+#define ODP_PACKET_BUF_INVALID _odp_cast_scalar(odp_packet_buf_t, 0)
+
+typedef ODP_HANDLE_T(odp_packet_vector_t);
+
+#define ODP_PACKET_VECTOR_INVALID _odp_cast_scalar(odp_packet_vector_t, 0)
+
+typedef ODP_HANDLE_T(odp_packet_tx_compl_t);
+
+#define ODP_PACKET_TX_COMPL_INVALID _odp_cast_scalar(odp_packet_tx_compl_t, 0)
+
+typedef uint8_t odp_proto_l2_type_t;
+
+#define ODP_PROTO_L2_TYPE_NONE 0
+#define ODP_PROTO_L2_TYPE_ETH 1
+
+typedef uint8_t odp_proto_l3_type_t;
+
+#define ODP_PROTO_L3_TYPE_NONE 0
+#define ODP_PROTO_L3_TYPE_ARP 1
+#define ODP_PROTO_L3_TYPE_RARP 2
+#define ODP_PROTO_L3_TYPE_MPLS 3
+#define ODP_PROTO_L3_TYPE_IPV4 4
+#define ODP_PROTO_L3_TYPE_IPV6 6
+
+typedef uint8_t odp_proto_l4_type_t;
+
+/* Numbers from IANA Assigned Internet Protocol Numbers list */
+#define ODP_PROTO_L4_TYPE_NONE 0
+#define ODP_PROTO_L4_TYPE_ICMPV4 1
+#define ODP_PROTO_L4_TYPE_IGMP 2
+#define ODP_PROTO_L4_TYPE_IPV4 4
+#define ODP_PROTO_L4_TYPE_TCP 6
+#define ODP_PROTO_L4_TYPE_UDP 17
+#define ODP_PROTO_L4_TYPE_IPV6 41
+#define ODP_PROTO_L4_TYPE_GRE 47
+#define ODP_PROTO_L4_TYPE_ESP 50
+#define ODP_PROTO_L4_TYPE_AH 51
+#define ODP_PROTO_L4_TYPE_ICMPV6 58
+#define ODP_PROTO_L4_TYPE_NO_NEXT 59
+#define ODP_PROTO_L4_TYPE_IPCOMP 108
+#define ODP_PROTO_L4_TYPE_SCTP 132
+#define ODP_PROTO_L4_TYPE_ROHC 142
+
+typedef enum {
+ ODP_PACKET_GREEN = 0,
+ ODP_PACKET_YELLOW = 1,
+ ODP_PACKET_RED = 2,
+ ODP_PACKET_ALL_COLORS = 3,
+} odp_packet_color_t;
+
+typedef enum {
+ ODP_PACKET_CHKSUM_UNKNOWN = 0,
+ ODP_PACKET_CHKSUM_BAD,
+ ODP_PACKET_CHKSUM_OK
+} odp_packet_chksum_status_t;
+
+typedef struct odp_packet_parse_result_flag_t {
+ union {
+ uint64_t all;
+
+ struct {
+ uint64_t has_error : 1;
+ uint64_t has_l2_error : 1;
+ uint64_t has_l3_error : 1;
+ uint64_t has_l4_error : 1;
+ uint64_t has_l2 : 1;
+ uint64_t has_l3 : 1;
+ uint64_t has_l4 : 1;
+ uint64_t has_eth : 1;
+ uint64_t has_eth_bcast : 1;
+ uint64_t has_eth_mcast : 1;
+ uint64_t has_jumbo : 1;
+ uint64_t has_vlan : 1;
+ uint64_t has_vlan_qinq : 1;
+ uint64_t has_arp : 1;
+ uint64_t has_ipv4 : 1;
+ uint64_t has_ipv6 : 1;
+ uint64_t has_ip_bcast : 1;
+ uint64_t has_ip_mcast : 1;
+ uint64_t has_ipfrag : 1;
+ uint64_t has_ipopt : 1;
+ uint64_t has_ipsec : 1;
+ uint64_t has_udp : 1;
+ uint64_t has_tcp : 1;
+ uint64_t has_sctp : 1;
+ uint64_t has_icmp : 1;
+ };
+ };
+
+} odp_packet_parse_result_flag_t;
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/proto_stats.h b/platform/linux-dpdk/include-abi/odp/api/abi/proto_stats.h
new file mode 120000
index 000000000..812d2a7a8
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/proto_stats.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/proto_stats.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/proto_stats_types.h b/platform/linux-dpdk/include-abi/odp/api/abi/proto_stats_types.h
new file mode 120000
index 000000000..5988e9a2e
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/proto_stats_types.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/proto_stats_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/queue_types.h b/platform/linux-dpdk/include-abi/odp/api/abi/queue_types.h
new file mode 120000
index 000000000..caf4bceb7
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/queue_types.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/queue_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/std.h b/platform/linux-dpdk/include-abi/odp/api/abi/std.h
new file mode 120000
index 000000000..8cc2509ea
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/std.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/std.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/std_clib.h b/platform/linux-dpdk/include-abi/odp/api/abi/std_clib.h
deleted file mode 120000
index 01f60436e..000000000
--- a/platform/linux-dpdk/include-abi/odp/api/abi/std_clib.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../linux-generic/include-abi/odp/api/abi/std_clib.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/packet_flag_inlines.h b/platform/linux-dpdk/include/odp/api/plat/packet_flag_inlines.h
index 4882e92df..e9e6cf028 100644
--- a/platform/linux-dpdk/include/odp/api/plat/packet_flag_inlines.h
+++ b/platform/linux-dpdk/include/odp/api/plat/packet_flag_inlines.h
@@ -17,7 +17,7 @@
extern "C" {
#endif
-#include <odp/api/abi/packet.h>
+#include <odp/api/abi/packet_types.h>
#include <odp/api/plat/packet_inline_types.h>
/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
diff --git a/platform/linux-dpdk/include/odp/api/plat/packet_inlines.h b/platform/linux-dpdk/include/odp/api/plat/packet_inlines.h
index 9a7e52dd4..ed2778789 100644
--- a/platform/linux-dpdk/include/odp/api/plat/packet_inlines.h
+++ b/platform/linux-dpdk/include/odp/api/plat/packet_inlines.h
@@ -19,6 +19,7 @@ extern "C" {
#endif
#include <odp/api/abi/packet.h>
+#include <odp/api/packet_types.h>
#include <odp/api/pool.h>
#include <odp/api/abi/packet_io.h>
#include <odp/api/hints.h>
diff --git a/platform/linux-dpdk/include/odp/api/plat/std_clib_inlines.h b/platform/linux-dpdk/include/odp/api/plat/std_inlines.h
index 7dbba2be8..b52b0512a 100644
--- a/platform/linux-dpdk/include/odp/api/plat/std_clib_inlines.h
+++ b/platform/linux-dpdk/include/odp/api/plat/std_inlines.h
@@ -4,8 +4,8 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-#ifndef ODP_PLAT_STD_CLIB_INLINE_H_
-#define ODP_PLAT_STD_CLIB_INLINE_H_
+#ifndef ODP_PLAT_STD_INLINE_H_
+#define ODP_PLAT_STD_INLINE_H_
/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
diff --git a/platform/linux-dpdk/include/odp_pool_internal.h b/platform/linux-dpdk/include/odp_pool_internal.h
index 0db8cf056..b5ce4cdd9 100644
--- a/platform/linux-dpdk/include/odp_pool_internal.h
+++ b/platform/linux-dpdk/include/odp_pool_internal.h
@@ -1,4 +1,5 @@
/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -61,11 +62,16 @@ typedef struct ODP_ALIGNED_CACHE {
#else
odp_spinlock_t lock ODP_ALIGNED_CACHE;
#endif
+ odp_pool_t pool_hdl;
+ uint32_t pool_idx;
+
+ /* Everything under this mark is memset() to zero on pool create */
+ uint8_t memset_mark;
+ uint8_t type;
+ uint8_t pool_ext;
char name[ODP_POOL_NAME_LEN];
odp_pool_param_t params;
- odp_pool_t pool_hdl;
struct rte_mempool *rte_mempool;
- uint32_t pool_idx;
uint32_t seg_len;
} pool_t;
diff --git a/platform/linux-dpdk/m4/odp_libconfig.m4 b/platform/linux-dpdk/m4/odp_libconfig.m4
index f2aa36946..b0e37e2ec 100644
--- a/platform/linux-dpdk/m4/odp_libconfig.m4
+++ b/platform/linux-dpdk/m4/odp_libconfig.m4
@@ -3,7 +3,7 @@
##########################################################################
m4_define([_odp_config_version_generation], [0])
m4_define([_odp_config_version_major], [1])
-m4_define([_odp_config_version_minor], [12])
+m4_define([_odp_config_version_minor], [13])
m4_define([_odp_config_version],
[_odp_config_version_generation._odp_config_version_major._odp_config_version_minor])
diff --git a/platform/linux-dpdk/odp_crypto.c b/platform/linux-dpdk/odp_crypto.c
index 3c9d8cc3e..a5846ef42 100644
--- a/platform/linux-dpdk/odp_crypto.c
+++ b/platform/linux-dpdk/odp_crypto.c
@@ -575,7 +575,14 @@ static void capability_process(struct rte_cryptodev_info *dev_info,
auths->bit.sha512_hmac = 1;
if (cap_auth_algo == RTE_CRYPTO_AUTH_AES_GMAC)
auths->bit.aes_gmac = 1;
- if (cap_auth_algo == RTE_CRYPTO_AUTH_AES_CMAC)
+
+ /* Using AES-CMAC with the aesni_mb driver for IPsec
+ * causes a crash inside the intel-mb library.
+ * As a workaround, we do not use AES-CMAC with
+ * the aesni_mb driver.
+ */
+ if (cap_auth_algo == RTE_CRYPTO_AUTH_AES_CMAC &&
+ !is_dev_aesni_mb(dev_info))
auths->bit.aes_cmac = 1;
/* Combination of (3)DES-CBC and AES-XCBC-MAC does not
diff --git a/platform/linux-dpdk/odp_packet.c b/platform/linux-dpdk/odp_packet.c
index b0225629d..79c20a92f 100644
--- a/platform/linux-dpdk/odp_packet.c
+++ b/platform/linux-dpdk/odp_packet.c
@@ -16,6 +16,7 @@
#include <odp/api/packet_io.h>
#include <odp/api/plat/pktio_inlines.h>
#include <odp_errno_define.h>
+#include <odp/api/proto_stats.h>
/* Inlined API functions */
#include <odp/api/plat/event_inlines.h>
@@ -170,7 +171,7 @@ odp_packet_t odp_packet_alloc(odp_pool_t pool_hdl, uint32_t len)
{
pool_t *pool = pool_entry_from_hdl(pool_hdl);
- if (odp_unlikely(pool->params.type != ODP_POOL_PACKET)) {
+ if (odp_unlikely(pool->type != ODP_POOL_PACKET)) {
_odp_errno = EINVAL;
return ODP_PACKET_INVALID;
}
@@ -187,7 +188,7 @@ int odp_packet_alloc_multi(odp_pool_t pool_hdl, uint32_t len,
int i;
pool_t *pool = pool_entry_from_hdl(pool_hdl);
- if (odp_unlikely(pool->params.type != ODP_POOL_PACKET)) {
+ if (odp_unlikely(pool->type != ODP_POOL_PACKET)) {
_odp_errno = EINVAL;
return -1;
}
@@ -2390,3 +2391,62 @@ int odp_packet_reass_partial_state(odp_packet_t pkt, odp_packet_t frags[],
(void)res;
return -ENOTSUP;
}
+
+void *odp_packet_buf_head(odp_packet_buf_t pkt_buf ODP_UNUSED)
+{
+ return NULL;
+}
+
+uint32_t odp_packet_buf_size(odp_packet_buf_t pkt_buf ODP_UNUSED)
+{
+ return 0;
+}
+
+uint32_t odp_packet_buf_data_offset(odp_packet_buf_t pkt_buf ODP_UNUSED)
+{
+ return 0;
+}
+
+uint32_t odp_packet_buf_data_len(odp_packet_buf_t pkt_buf ODP_UNUSED)
+{
+ return 0;
+}
+
+void odp_packet_buf_data_set(odp_packet_buf_t pkt_buf ODP_UNUSED,
+ uint32_t data_offset ODP_UNUSED,
+ uint32_t data_len ODP_UNUSED)
+{
+}
+
+odp_packet_buf_t odp_packet_buf_from_head(odp_pool_t pool_hdl ODP_UNUSED,
+ void *head ODP_UNUSED)
+{
+ return ODP_PACKET_BUF_INVALID;
+}
+
+uint32_t odp_packet_disassemble(odp_packet_t pkt ODP_UNUSED,
+ odp_packet_buf_t pkt_buf[] ODP_UNUSED,
+ uint32_t num ODP_UNUSED)
+{
+ return 0;
+}
+
+odp_packet_t odp_packet_reassemble(odp_pool_t pool_hdl ODP_UNUSED,
+ odp_packet_buf_t pkt_buf[] ODP_UNUSED,
+ uint32_t num ODP_UNUSED)
+{
+ return ODP_PACKET_INVALID;
+}
+
+void odp_packet_proto_stats_request(odp_packet_t pkt, odp_packet_proto_stats_opt_t *opt)
+{
+ (void)pkt;
+ (void)opt;
+}
+
+odp_proto_stats_t odp_packet_proto_stats(odp_packet_t pkt)
+{
+ (void)pkt;
+
+ return ODP_PROTO_STATS_INVALID;
+}
diff --git a/platform/linux-dpdk/odp_pool.c b/platform/linux-dpdk/odp_pool.c
index 3ac6abd03..a6f053b35 100644
--- a/platform/linux-dpdk/odp_pool.c
+++ b/platform/linux-dpdk/odp_pool.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2013-2018, Linaro Limited
- * Copyright (c) 2019-2020, Nokia
+ * Copyright (c) 2019-2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -24,6 +24,7 @@
#include <odp_event_vector_internal.h>
#include <string.h>
+#include <stddef.h>
#include <stdlib.h>
#include <math.h>
#include <inttypes.h>
@@ -293,7 +294,7 @@ struct mbuf_ctor_arg {
pool_t *pool;
uint16_t seg_buf_offset; /* To skip the ODP buf/pkt/tmo header */
uint16_t seg_buf_size; /* size of user data */
- int type; /* ODP pool type */
+ odp_pool_type_t type; /* ODP pool type */
int event_type; /* ODP event type */
int pkt_uarea_size; /* size of user area in bytes */
};
@@ -574,6 +575,7 @@ odp_pool_t odp_pool_create(const char *name, const odp_pool_param_t *params)
struct rte_pktmbuf_pool_private mbp_ctor_arg;
struct mbuf_ctor_arg mb_ctor_arg;
odp_pool_t pool_hdl = ODP_POOL_INVALID;
+ odp_pool_type_t type = params->type;
unsigned int mb_size, i, cache_size;
size_t hdr_size;
pool_t *pool;
@@ -606,7 +608,10 @@ odp_pool_t odp_pool_create(const char *name, const odp_pool_param_t *params)
continue;
}
- switch (params->type) {
+ memset(&pool->memset_mark, 0,
+ sizeof(pool_t) - offsetof(pool_t, memset_mark));
+
+ switch (type) {
case ODP_POOL_BUFFER:
buf_align = params->buf.align;
blk_size = params->buf.size;
@@ -702,8 +707,7 @@ odp_pool_t odp_pool_create(const char *name, const odp_pool_param_t *params)
ODP_DBG("type: vector, name: %s, num: %u\n", pool_name, num);
break;
default:
- ODP_ERR("Bad type %i\n",
- params->type);
+ ODP_ERR("Bad pool type %i\n", (int)type);
UNLOCK(&pool->lock);
return ODP_POOL_INVALID;
}
@@ -711,7 +715,7 @@ odp_pool_t odp_pool_create(const char *name, const odp_pool_param_t *params)
mb_ctor_arg.seg_buf_offset =
(uint16_t)ROUNDUP_CACHE_LINE(hdr_size);
mb_ctor_arg.seg_buf_size = mbp_ctor_arg.mbuf_data_room_size;
- mb_ctor_arg.type = params->type;
+ mb_ctor_arg.type = type;
mb_ctor_arg.event_type = event_type;
mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
mb_ctor_arg.pool = pool;
@@ -725,7 +729,7 @@ odp_pool_t odp_pool_create(const char *name, const odp_pool_param_t *params)
format_pool_name(pool_name, rte_name);
- if (params->type == ODP_POOL_PACKET) {
+ if (type == ODP_POOL_PACKET) {
uint16_t data_room_size, priv_size;
data_room_size = mbp_ctor_arg.mbuf_data_room_size;
@@ -763,6 +767,7 @@ odp_pool_t odp_pool_create(const char *name, const odp_pool_param_t *params)
}
pool->rte_mempool = mp;
+ pool->type = type;
pool->params = *params;
ODP_DBG("Header/element/trailer size: %u/%u/%u, "
"total pool size: %lu\n",
@@ -803,9 +808,9 @@ static inline int buffer_alloc_multi(pool_t *pool, odp_buffer_hdr_t *buf_hdr[],
int i;
struct rte_mempool *mp = pool->rte_mempool;
- ODP_ASSERT(pool->params.type == ODP_POOL_BUFFER ||
- pool->params.type == ODP_POOL_TIMEOUT ||
- pool->params.type == ODP_POOL_VECTOR);
+ ODP_ASSERT(pool->type == ODP_POOL_BUFFER ||
+ pool->type == ODP_POOL_TIMEOUT ||
+ pool->type == ODP_POOL_VECTOR);
for (i = 0; i < num; i++) {
struct rte_mbuf *mbuf;
@@ -890,7 +895,7 @@ int odp_pool_info(odp_pool_t pool_hdl, odp_pool_info_t *info)
info->name = pool->name;
info->params = pool->params;
- if (pool->params.type == ODP_POOL_PACKET)
+ if (pool->type == ODP_POOL_PACKET)
info->pkt.max_num = pool->rte_mempool->size;
memset(&args, 0, sizeof(struct mem_cb_arg_t));
@@ -987,3 +992,37 @@ int odp_pool_stats_reset(odp_pool_t pool_hdl ODP_UNUSED)
{
return 0;
}
+
+int odp_pool_ext_capability(odp_pool_type_t type, odp_pool_ext_capability_t *capa)
+{
+ if (type != ODP_POOL_PACKET)
+ return -1;
+
+ memset(capa, 0, sizeof(odp_pool_ext_capability_t));
+
+ capa->type = type;
+ capa->max_pools = 0;
+
+ return 0;
+}
+
+void odp_pool_ext_param_init(odp_pool_type_t type ODP_UNUSED,
+ odp_pool_ext_param_t *param)
+{
+ memset(param, 0, sizeof(odp_pool_ext_param_t));
+}
+
+odp_pool_t odp_pool_ext_create(const char *name ODP_UNUSED,
+ const odp_pool_ext_param_t *param ODP_UNUSED)
+{
+ return ODP_POOL_INVALID;
+}
+
+int odp_pool_ext_populate(odp_pool_t pool_hdl ODP_UNUSED,
+ void *buf[] ODP_UNUSED,
+ uint32_t buf_size ODP_UNUSED,
+ uint32_t num ODP_UNUSED,
+ uint32_t flags ODP_UNUSED)
+{
+ return -1;
+}
diff --git a/platform/linux-dpdk/odp_std_clib_api.c b/platform/linux-dpdk/odp_std_api.c
index 0846e69ec..251e6f874 100644
--- a/platform/linux-dpdk/odp_std_clib_api.c
+++ b/platform/linux-dpdk/odp_std_api.c
@@ -4,8 +4,8 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <odp/api/std_clib.h>
+#include <odp/api/std.h>
/* Include non-inlined versions of API functions */
#define _ODP_NO_INLINE
-#include <odp/api/plat/std_clib_inlines.h>
+#include <odp/api/plat/std_inlines.h>
diff --git a/platform/linux-dpdk/test/crypto.conf b/platform/linux-dpdk/test/crypto.conf
index 25e0652bf..995c2ee0e 100644
--- a/platform/linux-dpdk/test/crypto.conf
+++ b/platform/linux-dpdk/test/crypto.conf
@@ -1,6 +1,6 @@
# Mandatory fields
odp_implementation = "linux-dpdk"
-config_file_version = "0.1.12"
+config_file_version = "0.1.13"
system: {
# One crypto queue pair is required per thread for lockless operation
diff --git a/platform/linux-dpdk/test/sched-basic.conf b/platform/linux-dpdk/test/sched-basic.conf
index 8a46a3ccc..19a550b66 100644
--- a/platform/linux-dpdk/test/sched-basic.conf
+++ b/platform/linux-dpdk/test/sched-basic.conf
@@ -1,8 +1,9 @@
# Mandatory fields
odp_implementation = "linux-dpdk"
-config_file_version = "0.1.12"
+config_file_version = "0.1.13"
+# Test scheduler with an odd spread value and without dynamic load balance
sched_basic: {
- # Test scheduler with an odd spread value
prio_spread = 3
+ load_balance = 0
}
diff --git a/platform/linux-generic/Makefile.am b/platform/linux-generic/Makefile.am
index b6721dce4..8c75e5ec0 100644
--- a/platform/linux-generic/Makefile.am
+++ b/platform/linux-generic/Makefile.am
@@ -43,7 +43,7 @@ odpapiplatinclude_HEADERS = \
include/odp/api/plat/pool_inline_types.h \
include/odp/api/plat/queue_inlines.h \
include/odp/api/plat/queue_inline_types.h \
- include/odp/api/plat/std_clib_inlines.h \
+ include/odp/api/plat/std_inlines.h \
include/odp/api/plat/strong_types.h \
include/odp/api/plat/sync_inlines.h \
include/odp/api/plat/thread_inlines.h \
@@ -67,10 +67,14 @@ odpapiabiarchinclude_HEADERS += \
include-abi/odp/api/abi/init.h \
include-abi/odp/api/abi/ipsec.h \
include-abi/odp/api/abi/packet.h \
+ include-abi/odp/api/abi/packet_types.h \
include-abi/odp/api/abi/packet_flags.h \
include-abi/odp/api/abi/packet_io.h \
+ include-abi/odp/api/abi/proto_stats.h \
+ include-abi/odp/api/abi/proto_stats_types.h \
include-abi/odp/api/abi/pool.h \
include-abi/odp/api/abi/queue.h \
+ include-abi/odp/api/abi/queue_types.h \
include-abi/odp/api/abi/rwlock.h \
include-abi/odp/api/abi/rwlock_recursive.h \
include-abi/odp/api/abi/schedule.h \
@@ -79,7 +83,7 @@ odpapiabiarchinclude_HEADERS += \
include-abi/odp/api/abi/spinlock.h \
include-abi/odp/api/abi/spinlock_recursive.h \
include-abi/odp/api/abi/stash.h \
- include-abi/odp/api/abi/std_clib.h \
+ include-abi/odp/api/abi/std.h \
include-abi/odp/api/abi/std_types.h \
include-abi/odp/api/abi/sync.h \
include-abi/odp/api/abi/thread.h \
@@ -174,7 +178,6 @@ __LIB__libodp_linux_la_SOURCES = \
odp_errno.c \
odp_event.c \
odp_fdserver.c \
- odp_fractional.c \
odp_hash_crc_gen.c \
odp_impl.c \
odp_init.c \
@@ -212,6 +215,7 @@ __LIB__libodp_linux_la_SOURCES = \
odp_spinlock.c \
odp_spinlock_recursive.c \
odp_stash.c \
+ odp_std.c \
odp_system_info.c \
odp_pcapng.c \
odp_thread.c \
@@ -258,7 +262,7 @@ __LIB__libodp_linux_la_SOURCES += \
odp_packet_flags_api.c \
odp_pktio_api.c \
odp_queue_api.c \
- odp_std_clib_api.c \
+ odp_std_api.c \
odp_sync_api.c \
odp_thread_api.c \
odp_ticketlock_api.c \
diff --git a/platform/linux-generic/include-abi/odp/api/abi/packet.h b/platform/linux-generic/include-abi/odp/api/abi/packet.h
index 28e97637c..5703141d4 100644
--- a/platform/linux-generic/include-abi/odp/api/abi/packet.h
+++ b/platform/linux-generic/include-abi/odp/api/abi/packet.h
@@ -18,121 +18,9 @@
extern "C" {
#endif
-#include <odp/api/std_types.h>
-#include <odp/api/plat/strong_types.h>
-
-/** @ingroup odp_packet
- * @{
- */
-
-typedef ODP_HANDLE_T(odp_packet_t);
-
-#define ODP_PACKET_INVALID _odp_cast_scalar(odp_packet_t, 0)
-
-#define ODP_PACKET_OFFSET_INVALID 0xffff
-
-typedef ODP_HANDLE_T(odp_packet_seg_t);
-
-#define ODP_PACKET_SEG_INVALID _odp_cast_scalar(odp_packet_seg_t, 0)
-
-typedef ODP_HANDLE_T(odp_packet_vector_t);
-
-#define ODP_PACKET_VECTOR_INVALID _odp_cast_scalar(odp_packet_vector_t, 0)
-
-typedef ODP_HANDLE_T(odp_packet_tx_compl_t);
-
-#define ODP_PACKET_TX_COMPL_INVALID _odp_cast_scalar(odp_packet_tx_compl_t, 0)
-
-#define ODP_PACKET_OFFSET_INVALID 0xffff
-
-typedef uint8_t odp_proto_l2_type_t;
-
-#define ODP_PROTO_L2_TYPE_NONE 0
-#define ODP_PROTO_L2_TYPE_ETH 1
-
-typedef uint8_t odp_proto_l3_type_t;
-
-#define ODP_PROTO_L3_TYPE_NONE 0
-#define ODP_PROTO_L3_TYPE_ARP 1
-#define ODP_PROTO_L3_TYPE_RARP 2
-#define ODP_PROTO_L3_TYPE_MPLS 3
-#define ODP_PROTO_L3_TYPE_IPV4 4
-#define ODP_PROTO_L3_TYPE_IPV6 6
-
-typedef uint8_t odp_proto_l4_type_t;
-
-/* Numbers from IANA Assigned Internet Protocol Numbers list */
-#define ODP_PROTO_L4_TYPE_NONE 0
-#define ODP_PROTO_L4_TYPE_ICMPV4 1
-#define ODP_PROTO_L4_TYPE_IGMP 2
-#define ODP_PROTO_L4_TYPE_IPV4 4
-#define ODP_PROTO_L4_TYPE_TCP 6
-#define ODP_PROTO_L4_TYPE_UDP 17
-#define ODP_PROTO_L4_TYPE_IPV6 41
-#define ODP_PROTO_L4_TYPE_GRE 47
-#define ODP_PROTO_L4_TYPE_ESP 50
-#define ODP_PROTO_L4_TYPE_AH 51
-#define ODP_PROTO_L4_TYPE_ICMPV6 58
-#define ODP_PROTO_L4_TYPE_NO_NEXT 59
-#define ODP_PROTO_L4_TYPE_IPCOMP 108
-#define ODP_PROTO_L4_TYPE_SCTP 132
-#define ODP_PROTO_L4_TYPE_ROHC 142
-
-typedef enum {
- ODP_PACKET_GREEN = 0,
- ODP_PACKET_YELLOW = 1,
- ODP_PACKET_RED = 2,
- ODP_PACKET_ALL_COLORS = 3,
-} odp_packet_color_t;
-
-typedef enum {
- ODP_PACKET_CHKSUM_UNKNOWN = 0,
- ODP_PACKET_CHKSUM_BAD,
- ODP_PACKET_CHKSUM_OK
-} odp_packet_chksum_status_t;
-
-typedef struct odp_packet_parse_result_flag_t {
- union {
- uint64_t all;
-
- struct {
- uint64_t has_error : 1;
- uint64_t has_l2_error : 1;
- uint64_t has_l3_error : 1;
- uint64_t has_l4_error : 1;
- uint64_t has_l2 : 1;
- uint64_t has_l3 : 1;
- uint64_t has_l4 : 1;
- uint64_t has_eth : 1;
- uint64_t has_eth_bcast : 1;
- uint64_t has_eth_mcast : 1;
- uint64_t has_jumbo : 1;
- uint64_t has_vlan : 1;
- uint64_t has_vlan_qinq : 1;
- uint64_t has_arp : 1;
- uint64_t has_ipv4 : 1;
- uint64_t has_ipv6 : 1;
- uint64_t has_ip_bcast : 1;
- uint64_t has_ip_mcast : 1;
- uint64_t has_ipfrag : 1;
- uint64_t has_ipopt : 1;
- uint64_t has_ipsec : 1;
- uint64_t has_udp : 1;
- uint64_t has_tcp : 1;
- uint64_t has_sctp : 1;
- uint64_t has_icmp : 1;
- };
- };
-
-} odp_packet_parse_result_flag_t;
-
#include <odp/api/plat/packet_inlines.h>
#include <odp/api/plat/packet_vector_inlines.h>
-/**
- * @}
- */
-
#ifdef __cplusplus
}
#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/packet_types.h b/platform/linux-generic/include-abi/odp/api/abi/packet_types.h
new file mode 100644
index 000000000..be2cb9df6
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/packet_types.h
@@ -0,0 +1,141 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2019-2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP packet descriptor
+ */
+
+#ifndef ODP_API_ABI_PACKET_TYPES_H_
+#define ODP_API_ABI_PACKET_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/std_types.h>
+#include <odp/api/plat/strong_types.h>
+
+/** @ingroup odp_packet
+ * @{
+ */
+
+typedef ODP_HANDLE_T(odp_packet_t);
+
+#define ODP_PACKET_INVALID _odp_cast_scalar(odp_packet_t, 0)
+
+#define ODP_PACKET_OFFSET_INVALID 0xffff
+
+typedef ODP_HANDLE_T(odp_packet_seg_t);
+
+#define ODP_PACKET_SEG_INVALID _odp_cast_scalar(odp_packet_seg_t, 0)
+
+typedef ODP_HANDLE_T(odp_packet_buf_t);
+
+#define ODP_PACKET_BUF_INVALID _odp_cast_scalar(odp_packet_buf_t, 0)
+
+typedef ODP_HANDLE_T(odp_packet_vector_t);
+
+#define ODP_PACKET_VECTOR_INVALID _odp_cast_scalar(odp_packet_vector_t, 0)
+
+typedef ODP_HANDLE_T(odp_packet_tx_compl_t);
+
+#define ODP_PACKET_TX_COMPL_INVALID _odp_cast_scalar(odp_packet_tx_compl_t, 0)
+
+#define ODP_PACKET_OFFSET_INVALID 0xffff
+
+typedef uint8_t odp_proto_l2_type_t;
+
+#define ODP_PROTO_L2_TYPE_NONE 0
+#define ODP_PROTO_L2_TYPE_ETH 1
+
+typedef uint8_t odp_proto_l3_type_t;
+
+#define ODP_PROTO_L3_TYPE_NONE 0
+#define ODP_PROTO_L3_TYPE_ARP 1
+#define ODP_PROTO_L3_TYPE_RARP 2
+#define ODP_PROTO_L3_TYPE_MPLS 3
+#define ODP_PROTO_L3_TYPE_IPV4 4
+#define ODP_PROTO_L3_TYPE_IPV6 6
+
+typedef uint8_t odp_proto_l4_type_t;
+
+/* Numbers from IANA Assigned Internet Protocol Numbers list */
+#define ODP_PROTO_L4_TYPE_NONE 0
+#define ODP_PROTO_L4_TYPE_ICMPV4 1
+#define ODP_PROTO_L4_TYPE_IGMP 2
+#define ODP_PROTO_L4_TYPE_IPV4 4
+#define ODP_PROTO_L4_TYPE_TCP 6
+#define ODP_PROTO_L4_TYPE_UDP 17
+#define ODP_PROTO_L4_TYPE_IPV6 41
+#define ODP_PROTO_L4_TYPE_GRE 47
+#define ODP_PROTO_L4_TYPE_ESP 50
+#define ODP_PROTO_L4_TYPE_AH 51
+#define ODP_PROTO_L4_TYPE_ICMPV6 58
+#define ODP_PROTO_L4_TYPE_NO_NEXT 59
+#define ODP_PROTO_L4_TYPE_IPCOMP 108
+#define ODP_PROTO_L4_TYPE_SCTP 132
+#define ODP_PROTO_L4_TYPE_ROHC 142
+
+typedef enum {
+ ODP_PACKET_GREEN = 0,
+ ODP_PACKET_YELLOW = 1,
+ ODP_PACKET_RED = 2,
+ ODP_PACKET_ALL_COLORS = 3,
+} odp_packet_color_t;
+
+typedef enum {
+ ODP_PACKET_CHKSUM_UNKNOWN = 0,
+ ODP_PACKET_CHKSUM_BAD,
+ ODP_PACKET_CHKSUM_OK
+} odp_packet_chksum_status_t;
+
+typedef struct odp_packet_parse_result_flag_t {
+ union {
+ uint64_t all;
+
+ struct {
+ uint64_t has_error : 1;
+ uint64_t has_l2_error : 1;
+ uint64_t has_l3_error : 1;
+ uint64_t has_l4_error : 1;
+ uint64_t has_l2 : 1;
+ uint64_t has_l3 : 1;
+ uint64_t has_l4 : 1;
+ uint64_t has_eth : 1;
+ uint64_t has_eth_bcast : 1;
+ uint64_t has_eth_mcast : 1;
+ uint64_t has_jumbo : 1;
+ uint64_t has_vlan : 1;
+ uint64_t has_vlan_qinq : 1;
+ uint64_t has_arp : 1;
+ uint64_t has_ipv4 : 1;
+ uint64_t has_ipv6 : 1;
+ uint64_t has_ip_bcast : 1;
+ uint64_t has_ip_mcast : 1;
+ uint64_t has_ipfrag : 1;
+ uint64_t has_ipopt : 1;
+ uint64_t has_ipsec : 1;
+ uint64_t has_udp : 1;
+ uint64_t has_tcp : 1;
+ uint64_t has_sctp : 1;
+ uint64_t has_icmp : 1;
+ };
+ };
+
+} odp_packet_parse_result_flag_t;
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/proto_stats.h b/platform/linux-generic/include-abi/odp/api/abi/proto_stats.h
new file mode 100644
index 000000000..d81035df2
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/proto_stats.h
@@ -0,0 +1,27 @@
+/* Copyright (c) 2021, Marvell
+ * Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP proto stats
+ */
+
+#ifndef ODP_API_ABI_PROTO_STATS_H_
+#define ODP_API_ABI_PROTO_STATS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Placeholder for inlined functions for non-ABI compat mode */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/proto_stats_types.h b/platform/linux-generic/include-abi/odp/api/abi/proto_stats_types.h
new file mode 100644
index 000000000..2ebddce62
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/proto_stats_types.h
@@ -0,0 +1,40 @@
+/* Copyright (c) 2021, Marvell
+ * Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP proto stats types
+ */
+
+#ifndef ODP_API_ABI_PROTO_STATS_TYPES_H_
+#define ODP_API_ABI_PROTO_STATS_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/std_types.h>
+#include <odp/api/plat/strong_types.h>
+
+/** @ingroup odp_proto_stats
+ * @{
+ */
+
+typedef ODP_HANDLE_T(odp_proto_stats_t);
+
+#define ODP_PROTO_STATS_INVALID _odp_cast_scalar(odp_proto_stats_t, 0)
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/queue.h b/platform/linux-generic/include-abi/odp/api/abi/queue.h
index 6ec922600..6c34123df 100644
--- a/platform/linux-generic/include-abi/odp/api/abi/queue.h
+++ b/platform/linux-generic/include-abi/odp/api/abi/queue.h
@@ -17,26 +17,9 @@
extern "C" {
#endif
-#include <odp/api/std_types.h>
-#include <odp/api/plat/strong_types.h>
-
-/** @ingroup odp_queue
- * @{
- */
-
-typedef ODP_HANDLE_T(odp_queue_t);
-
-#define ODP_QUEUE_INVALID _odp_cast_scalar(odp_queue_t, 0)
-
-#define ODP_QUEUE_NAME_LEN 32
-
/* Inlined functions for non-ABI compat mode */
#include <odp/api/plat/queue_inlines.h>
-/**
- * @}
- */
-
#ifdef __cplusplus
}
#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/queue_types.h b/platform/linux-generic/include-abi/odp/api/abi/queue_types.h
new file mode 100644
index 000000000..1a56c7682
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/queue_types.h
@@ -0,0 +1,42 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP queue types
+ */
+
+#ifndef ODP_API_ABI_QUEUE_TYPES_H_
+#define ODP_API_ABI_QUEUE_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/std_types.h>
+#include <odp/api/plat/strong_types.h>
+
+/** @ingroup odp_queue
+ * @{
+ */
+
+typedef ODP_HANDLE_T(odp_queue_t);
+
+#define ODP_QUEUE_INVALID _odp_cast_scalar(odp_queue_t, 0)
+
+#define ODP_QUEUE_NAME_LEN 32
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/std_clib.h b/platform/linux-generic/include-abi/odp/api/abi/std.h
index d41dd1403..175b606c5 100644
--- a/platform/linux-generic/include-abi/odp/api/abi/std_clib.h
+++ b/platform/linux-generic/include-abi/odp/api/abi/std.h
@@ -10,15 +10,15 @@
* ODP barrier
*/
-#ifndef ODP_API_ABI_STD_CLIB_H_
-#define ODP_API_ABI_STD_CLIB_H_
+#ifndef ODP_API_ABI_STD_H_
+#define ODP_API_ABI_STD_H_
#ifdef __cplusplus
extern "C" {
#endif
#define _ODP_INLINE static inline
-#include <odp/api/plat/std_clib_inlines.h>
+#include <odp/api/plat/std_inlines.h>
#ifdef __cplusplus
}
diff --git a/platform/linux-generic/include/odp/api/plat/packet_flag_inlines.h b/platform/linux-generic/include/odp/api/plat/packet_flag_inlines.h
index 6f32b46c6..6eb34a39b 100644
--- a/platform/linux-generic/include/odp/api/plat/packet_flag_inlines.h
+++ b/platform/linux-generic/include/odp/api/plat/packet_flag_inlines.h
@@ -13,7 +13,7 @@
#ifndef _ODP_PLAT_PACKET_FLAG_INLINES_H_
#define _ODP_PLAT_PACKET_FLAG_INLINES_H_
-#include <odp/api/abi/packet.h>
+#include <odp/api/abi/packet_types.h>
#include <odp/api/plat/packet_inline_types.h>
#include <odp/api/hints.h>
diff --git a/platform/linux-generic/include/odp/api/plat/packet_inlines.h b/platform/linux-generic/include/odp/api/plat/packet_inlines.h
index 08048a993..8a0f54134 100644
--- a/platform/linux-generic/include/odp/api/plat/packet_inlines.h
+++ b/platform/linux-generic/include/odp/api/plat/packet_inlines.h
@@ -15,6 +15,7 @@
#define _ODP_PLAT_PACKET_INLINES_H_
#include <odp/api/abi/packet.h>
+#include <odp/api/packet_types.h>
#include <odp/api/pool.h>
#include <odp/api/abi/packet_io.h>
#include <odp/api/hints.h>
diff --git a/platform/linux-generic/include/odp/api/plat/queue_inline_types.h b/platform/linux-generic/include/odp/api/plat/queue_inline_types.h
index e59c7f55a..5ce767a16 100644
--- a/platform/linux-generic/include/odp/api/plat/queue_inline_types.h
+++ b/platform/linux-generic/include/odp/api/plat/queue_inline_types.h
@@ -12,7 +12,7 @@ extern "C" {
#endif
#include <stdint.h>
-#include <odp/api/spec/queue_types.h>
+#include <odp/api/queue_types.h>
/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
diff --git a/platform/linux-generic/include/odp/api/plat/std_clib_inlines.h b/platform/linux-generic/include/odp/api/plat/std_inlines.h
index 4265eaf1d..3f6a7e9d4 100644
--- a/platform/linux-generic/include/odp/api/plat/std_clib_inlines.h
+++ b/platform/linux-generic/include/odp/api/plat/std_inlines.h
@@ -4,8 +4,8 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-#ifndef ODP_PLAT_STD_CLIB_INLINE_H_
-#define ODP_PLAT_STD_CLIB_INLINE_H_
+#ifndef ODP_PLAT_STD_INLINE_H_
+#define ODP_PLAT_STD_INLINE_H_
/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
diff --git a/platform/linux-generic/include/odp_classification_datamodel.h b/platform/linux-generic/include/odp_classification_datamodel.h
index ebd0107f9..cc0e7f081 100644
--- a/platform/linux-generic/include/odp_classification_datamodel.h
+++ b/platform/linux-generic/include/odp_classification_datamodel.h
@@ -145,6 +145,10 @@ struct cos_s {
odp_queue_param_t queue_param;
char name[ODP_COS_NAME_LEN]; /* name */
uint8_t index;
+ struct {
+ odp_atomic_u64_t discards;
+ odp_atomic_u64_t packets;
+ } stats[CLS_COS_QUEUE_MAX];
};
typedef union cos_u {
@@ -230,6 +234,17 @@ typedef struct pmr_tbl {
pmr_t pmr[CLS_PMR_MAX_ENTRY];
} pmr_tbl_t;
+/**
+ * Classifier global data
+ */
+typedef struct cls_global_t {
+ cos_tbl_t cos_tbl;
+ pmr_tbl_t pmr_tbl;
+ _cls_queue_grp_tbl_t queue_grp_tbl;
+ odp_shm_t shm;
+
+} cls_global_t;
+
#ifdef __cplusplus
}
#endif
diff --git a/platform/linux-generic/include/odp_classification_internal.h b/platform/linux-generic/include/odp_classification_internal.h
index 48ee0526e..bc0a12f8f 100644
--- a/platform/linux-generic/include/odp_classification_internal.h
+++ b/platform/linux-generic/include/odp_classification_internal.h
@@ -1,4 +1,5 @@
/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -18,14 +19,58 @@
extern "C" {
#endif
+#include <odp/api/atomic.h>
#include <odp/api/classification.h>
+#include <odp/api/hints.h>
#include <odp/api/queue.h>
#include <odp_packet_internal.h>
#include <odp/api/packet_io.h>
#include <odp_packet_io_internal.h>
#include <odp_classification_datamodel.h>
-cos_t *_odp_cos_entry_from_idx(uint32_t ndx);
+extern cls_global_t *_odp_cls_global;
+
+static inline cos_t *_odp_cos_entry_from_idx(uint32_t ndx)
+{
+ return &_odp_cls_global->cos_tbl.cos_entry[ndx];
+}
+
+static inline int _odp_cos_queue_idx(const cos_t *cos, odp_queue_t queue)
+{
+ uint32_t i, tbl_idx;
+ int queue_idx = -1;
+
+ if (cos->s.num_queue == 1) {
+ if (odp_unlikely(cos->s.queue != queue))
+ return -1;
+ return 0;
+ }
+
+ tbl_idx = cos->s.index * CLS_COS_QUEUE_MAX;
+ for (i = 0; i < cos->s.num_queue; i++) {
+ if (_odp_cls_global->queue_grp_tbl.s.queue[tbl_idx + i] == queue) {
+ queue_idx = i;
+ break;
+ }
+ }
+ return queue_idx;
+}
+
+static inline void _odp_cos_queue_stats_add(cos_t *cos, odp_queue_t queue,
+ uint64_t packets, uint64_t discards)
+{
+ int queue_idx = _odp_cos_queue_idx(cos, queue);
+
+ if (odp_unlikely(queue_idx < 0)) {
+ ODP_ERR("Queue not attached to the CoS\n");
+ return;
+ }
+
+ if (packets)
+ odp_atomic_add_u64(&cos->s.stats[queue_idx].packets, packets);
+ if (discards)
+ odp_atomic_add_u64(&cos->s.stats[queue_idx].discards, discards);
+}
/** Classification Internal function **/
diff --git a/platform/linux-generic/include/odp_packet_internal.h b/platform/linux-generic/include/odp_packet_internal.h
index 4af4bf062..497ea4aee 100644
--- a/platform/linux-generic/include/odp_packet_internal.h
+++ b/platform/linux-generic/include/odp_packet_internal.h
@@ -230,8 +230,8 @@ static inline void packet_init(odp_packet_hdr_t *pkt_hdr, uint32_t len)
* segment occupied by the allocated length.
*/
pkt_hdr->frame_len = len;
- pkt_hdr->headroom = CONFIG_PACKET_HEADROOM;
- pkt_hdr->tailroom = pool->seg_len - seg_len + CONFIG_PACKET_TAILROOM;
+ pkt_hdr->headroom = pool->headroom;
+ pkt_hdr->tailroom = pool->seg_len - seg_len + pool->tailroom;
if (odp_unlikely(pkt_hdr->subtype != ODP_EVENT_PACKET_BASIC))
pkt_hdr->subtype = ODP_EVENT_PACKET_BASIC;
diff --git a/platform/linux-generic/include/odp_pool_internal.h b/platform/linux-generic/include/odp_pool_internal.h
index a0e4c5c65..dc4754710 100644
--- a/platform/linux-generic/include/odp_pool_internal.h
+++ b/platform/linux-generic/include/odp_pool_internal.h
@@ -1,5 +1,5 @@
-/* Copyright (c) 2019, Nokia
- * Copyright (c) 2013-2018, Linaro Limited
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2019-2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -42,6 +42,9 @@ typedef struct ODP_ALIGNED_CACHE {
/* Ring data: buffer handles */
odp_buffer_hdr_t *buf_hdr[CONFIG_POOL_MAX_NUM + 1];
+ /* Index to pointer look-up table for external memory pool */
+ odp_buffer_hdr_t *buf_hdr_by_index[0];
+
} pool_ring_t;
/* Callback function for pool destroy */
@@ -49,11 +52,16 @@ typedef void (*pool_destroy_cb_fn)(void *pool);
typedef struct pool_t {
odp_ticketlock_t lock ODP_ALIGNED_CACHE;
+ odp_pool_t pool_hdl;
+ uint32_t pool_idx;
+ uint8_t reserved;
+ /* Everything under this mark are memset() to zero on pool create */
+ uint8_t memset_mark;
+ uint8_t type;
+ uint8_t pool_ext;
char name[ODP_POOL_NAME_LEN];
odp_pool_param_t params;
- odp_pool_t pool_hdl;
- uint32_t pool_idx;
uint32_t ring_mask;
uint32_t cache_size;
uint32_t burst_size;
@@ -61,7 +69,6 @@ typedef struct pool_t {
odp_shm_t uarea_shm;
uint64_t shm_size;
uint64_t uarea_shm_size;
- int reserved;
uint32_t num;
uint32_t align;
uint32_t headroom;
@@ -69,12 +76,15 @@ typedef struct pool_t {
uint32_t seg_len;
uint32_t max_seg_len;
uint32_t max_len;
+ uint32_t param_uarea_size;
uint32_t uarea_size;
uint32_t block_size;
uint32_t block_offset;
+ uint32_t num_populated;
uint8_t *base_addr;
uint8_t *max_addr;
uint8_t *uarea_base_addr;
+ odp_pool_ext_param_t ext_param;
/* Used by DPDK zero-copy pktio */
uint32_t dpdk_elt_size;
diff --git a/platform/linux-generic/include/odp_traffic_mngr_internal.h b/platform/linux-generic/include/odp_traffic_mngr_internal.h
index 8a65a1685..a54847319 100644
--- a/platform/linux-generic/include/odp_traffic_mngr_internal.h
+++ b/platform/linux-generic/include/odp_traffic_mngr_internal.h
@@ -284,8 +284,15 @@ struct tm_queue_obj_s {
uint8_t tm_idx;
uint8_t delayed_cnt;
uint8_t blocked_cnt;
+ odp_bool_t ordered_enqueue;
tm_status_t status;
odp_queue_t queue;
+ /* Statistics for odp_tm_queue_stats_t */
+ struct {
+ odp_atomic_u64_t discards;
+ odp_atomic_u64_t errors;
+ odp_atomic_u64_t packets;
+ } stats;
};
struct tm_node_obj_s {
diff --git a/platform/linux-generic/m4/odp_libconfig.m4 b/platform/linux-generic/m4/odp_libconfig.m4
index ecfb28b7f..ccbf1d6f5 100644
--- a/platform/linux-generic/m4/odp_libconfig.m4
+++ b/platform/linux-generic/m4/odp_libconfig.m4
@@ -3,7 +3,7 @@
##########################################################################
m4_define([_odp_config_version_generation], [0])
m4_define([_odp_config_version_major], [1])
-m4_define([_odp_config_version_minor], [16])
+m4_define([_odp_config_version_minor], [18])
m4_define([_odp_config_version],
[_odp_config_version_generation._odp_config_version_major._odp_config_version_minor])
diff --git a/platform/linux-generic/odp_classification.c b/platform/linux-generic/odp_classification.c
index bc31d01eb..3b50232d6 100644
--- a/platform/linux-generic/odp_classification.c
+++ b/platform/linux-generic/odp_classification.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2014-2018, Linaro Limited
- * Copyright (c) 2019-2020, Nokia
+ * Copyright (c) 2019-2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -42,15 +42,7 @@ static cos_tbl_t *cos_tbl;
static pmr_tbl_t *pmr_tbl;
static _cls_queue_grp_tbl_t *queue_grp_tbl;
-typedef struct cls_global_t {
- cos_tbl_t cos_tbl;
- pmr_tbl_t pmr_tbl;
- _cls_queue_grp_tbl_t queue_grp_tbl;
- odp_shm_t shm;
-
-} cls_global_t;
-
-static cls_global_t *cls_global;
+cls_global_t *_odp_cls_global;
static const rss_key default_rss = {
.u8 = {
@@ -62,11 +54,6 @@ static const rss_key default_rss = {
}
};
-cos_t *_odp_cos_entry_from_idx(uint32_t ndx)
-{
- return &cos_tbl->cos_entry[ndx];
-}
-
static inline uint32_t _odp_cos_to_ndx(odp_cos_t cos)
{
return _odp_typeval(cos) - 1;
@@ -109,13 +96,13 @@ int _odp_classification_init_global(void)
if (shm == ODP_SHM_INVALID)
return -1;
- cls_global = odp_shm_addr(shm);
- memset(cls_global, 0, sizeof(cls_global_t));
+ _odp_cls_global = odp_shm_addr(shm);
+ memset(_odp_cls_global, 0, sizeof(cls_global_t));
- cls_global->shm = shm;
- cos_tbl = &cls_global->cos_tbl;
- pmr_tbl = &cls_global->pmr_tbl;
- queue_grp_tbl = &cls_global->queue_grp_tbl;
+ _odp_cls_global->shm = shm;
+ cos_tbl = &_odp_cls_global->cos_tbl;
+ pmr_tbl = &_odp_cls_global->pmr_tbl;
+ queue_grp_tbl = &_odp_cls_global->queue_grp_tbl;
for (i = 0; i < CLS_COS_MAX_ENTRY; i++) {
/* init locks */
@@ -136,7 +123,7 @@ int _odp_classification_init_global(void)
int _odp_classification_term_global(void)
{
- if (cls_global && odp_shm_free(cls_global->shm)) {
+ if (_odp_cls_global && odp_shm_free(_odp_cls_global->shm)) {
ODP_ERR("shm free failed\n");
return -1;
}
@@ -163,7 +150,9 @@ void odp_cls_pmr_param_init(odp_pmr_param_t *param)
int odp_cls_capability(odp_cls_capability_t *capability)
{
- unsigned count = 0;
+ unsigned int count = 0;
+
+ memset(capability, 0, sizeof(odp_cls_capability_t));
for (int i = 0; i < CLS_PMR_MAX_ENTRY; i++)
if (!pmr_tbl->pmr[i].s.valid)
@@ -197,6 +186,9 @@ int odp_cls_capability(odp_cls_capability_t *capability)
capability->threshold_bp.all_bits = 0;
capability->max_hash_queues = CLS_COS_QUEUE_MAX;
capability->max_mark = MAX_MARK;
+ capability->stats.queue.counter.discards = 1;
+ capability->stats.queue.counter.packets = 1;
+
return 0;
}
@@ -306,6 +298,11 @@ odp_cos_t odp_cls_cos_create(const char *name, const odp_cls_cos_param_t *param)
} else {
cos->s.queue = param->queue;
}
+ /* Initialize statistics counters */
+ for (j = 0; j < cos->s.num_queue; j++) {
+ odp_atomic_init_u64(&cos->s.stats[j].discards, 0);
+ odp_atomic_init_u64(&cos->s.stats[j].packets, 0);
+ }
cos->s.pool = param->pool;
cos->s.headroom = 0;
@@ -1783,6 +1780,35 @@ uint64_t odp_pmr_to_u64(odp_pmr_t hdl)
return _odp_pri(hdl);
}
+int odp_cls_queue_stats(odp_cos_t hdl, odp_queue_t queue,
+ odp_cls_queue_stats_t *stats)
+{
+ cos_t *cos = get_cos_entry(hdl);
+ int queue_idx;
+
+ if (odp_unlikely(cos == NULL)) {
+ ODP_ERR("Invalid odp_cos_t handle\n");
+ return -1;
+ }
+
+ if (odp_unlikely(stats == NULL)) {
+ ODP_ERR("Output structure NULL\n");
+ return -1;
+ }
+
+ queue_idx = _odp_cos_queue_idx(cos, queue);
+ if (odp_unlikely(queue_idx < 0)) {
+ ODP_ERR("Invalid odp_queue_t handle\n");
+ return -1;
+ }
+
+ memset(stats, 0, sizeof(odp_cls_queue_stats_t));
+ stats->discards = odp_atomic_load_u64(&cos->s.stats[queue_idx].discards);
+ stats->packets = odp_atomic_load_u64(&cos->s.stats[queue_idx].packets);
+
+ return 0;
+}
+
static
void print_cos_ident(struct cos_s *cos)
{
diff --git a/platform/linux-generic/odp_ipsec.c b/platform/linux-generic/odp_ipsec.c
index 15590523c..137e7b435 100644
--- a/platform/linux-generic/odp_ipsec.c
+++ b/platform/linux-generic/odp_ipsec.c
@@ -244,8 +244,10 @@ int odp_ipsec_auth_capability(odp_auth_alg_t auth,
continue;
}
- if (out < num)
+ if (out < num) {
capa[out].key_len = crypto_capa[i].key_len;
+ capa[out].icv_len = crypto_capa[i].digest_len;
+ }
out++;
}
diff --git a/platform/linux-generic/odp_ipsec_sad.c b/platform/linux-generic/odp_ipsec_sad.c
index 407192dcf..0eea57a10 100644
--- a/platform/linux-generic/odp_ipsec_sad.c
+++ b/platform/linux-generic/odp_ipsec_sad.c
@@ -430,6 +430,8 @@ uint32_t _odp_ipsec_auth_digest_len(odp_auth_alg_t auth)
return 16;
case ODP_AUTH_ALG_AES_CCM:
return 16;
+ case ODP_AUTH_ALG_AES_CMAC:
+ return 12;
case ODP_AUTH_ALG_CHACHA20_POLY1305:
return 16;
default:
@@ -629,6 +631,10 @@ odp_ipsec_sa_t odp_ipsec_sa_create(const odp_ipsec_sa_param_t *param)
crypto_param.auth_digest_len =
_odp_ipsec_auth_digest_len(crypto_param.auth_alg);
+ if (param->crypto.icv_len != 0 &&
+ param->crypto.icv_len != crypto_param.auth_digest_len)
+ goto error;
+
if ((uint32_t)-1 == crypto_param.cipher_iv.length ||
(uint32_t)-1 == crypto_param.auth_digest_len)
goto error;
diff --git a/platform/linux-generic/odp_packet.c b/platform/linux-generic/odp_packet.c
index fdf711735..0986056e6 100644
--- a/platform/linux-generic/odp_packet.c
+++ b/platform/linux-generic/odp_packet.c
@@ -18,6 +18,7 @@
#include <odp/api/plat/byteorder_inlines.h>
#include <odp/api/packet_io.h>
#include <odp/api/plat/pktio_inlines.h>
+#include <odp/api/proto_stats.h>
/* Inlined API functions */
#include <odp/api/plat/event_inlines.h>
@@ -134,10 +135,11 @@ static inline void *packet_tail(odp_packet_hdr_t *pkt_hdr)
static inline uint32_t seg_headroom(odp_packet_hdr_t *pkt_seg)
{
odp_buffer_hdr_t *hdr = &pkt_seg->buf_hdr;
+ pool_t *pool = hdr->pool_ptr;
uint8_t *base = hdr->base_data;
uint8_t *head = pkt_seg->seg_data;
- return CONFIG_PACKET_HEADROOM + (head - base);
+ return pool->headroom + (head - base);
}
static inline uint32_t seg_tailroom(odp_packet_hdr_t *pkt_seg)
@@ -690,7 +692,7 @@ odp_packet_t odp_packet_alloc(odp_pool_t pool_hdl, uint32_t len)
odp_packet_t pkt;
int num, num_seg;
- if (odp_unlikely(pool->params.type != ODP_POOL_PACKET)) {
+ if (odp_unlikely(pool->type != ODP_POOL_PACKET)) {
_odp_errno = EINVAL;
return ODP_PACKET_INVALID;
}
@@ -713,7 +715,7 @@ int odp_packet_alloc_multi(odp_pool_t pool_hdl, uint32_t len,
pool_t *pool = pool_entry_from_hdl(pool_hdl);
int num, num_seg;
- if (odp_unlikely(pool->params.type != ODP_POOL_PACKET)) {
+ if (odp_unlikely(pool->type != ODP_POOL_PACKET)) {
_odp_errno = EINVAL;
return -1;
}
@@ -1747,8 +1749,8 @@ int _odp_packet_copy_md_to_packet(odp_packet_t srcpkt, odp_packet_t dstpkt)
odp_packet_hdr_t *dsthdr = packet_hdr(dstpkt);
pool_t *src_pool = srchdr->buf_hdr.pool_ptr;
pool_t *dst_pool = dsthdr->buf_hdr.pool_ptr;
- uint32_t src_uarea_size = src_pool->params.pkt.uarea_size;
- uint32_t dst_uarea_size = dst_pool->params.pkt.uarea_size;
+ uint32_t src_uarea_size = src_pool->param_uarea_size;
+ uint32_t dst_uarea_size = dst_pool->param_uarea_size;
dsthdr->input = srchdr->input;
dsthdr->dst_queue = srchdr->dst_queue;
@@ -2949,3 +2951,172 @@ odp_packet_reass_partial_state(odp_packet_t pkt, odp_packet_t frags[],
(void)res;
return -ENOTSUP;
}
+
+static inline odp_packet_hdr_t *packet_buf_to_hdr(odp_packet_buf_t pkt_buf)
+{
+ return (odp_packet_hdr_t *)(uintptr_t)pkt_buf;
+}
+
+void *odp_packet_buf_head(odp_packet_buf_t pkt_buf)
+{
+ odp_packet_hdr_t *pkt_hdr = packet_buf_to_hdr(pkt_buf);
+ pool_t *pool = pkt_hdr->buf_hdr.pool_ptr;
+ uint32_t head_offset = sizeof(odp_packet_hdr_t) + pool->ext_param.pkt.app_header_size;
+
+ if (odp_unlikely(pool->pool_ext == 0)) {
+ ODP_ERR("Not an external memory pool\n");
+ return NULL;
+ }
+
+ return (uint8_t *)pkt_hdr + head_offset;
+}
+
+uint32_t odp_packet_buf_size(odp_packet_buf_t pkt_buf)
+{
+ odp_packet_hdr_t *pkt_hdr = packet_buf_to_hdr(pkt_buf);
+ pool_t *pool = pkt_hdr->buf_hdr.pool_ptr;
+ uint32_t head_offset = sizeof(odp_packet_hdr_t) + pool->ext_param.pkt.app_header_size;
+
+ return pool->ext_param.pkt.buf_size - head_offset;
+}
+
+uint32_t odp_packet_buf_data_offset(odp_packet_buf_t pkt_buf)
+{
+ odp_packet_hdr_t *pkt_hdr = packet_buf_to_hdr(pkt_buf);
+
+ return (uintptr_t)pkt_hdr->seg_data - (uintptr_t)odp_packet_buf_head(pkt_buf);
+}
+
+uint32_t odp_packet_buf_data_len(odp_packet_buf_t pkt_buf)
+{
+ odp_packet_hdr_t *pkt_hdr = packet_buf_to_hdr(pkt_buf);
+
+ return pkt_hdr->seg_len;
+}
+
+void odp_packet_buf_data_set(odp_packet_buf_t pkt_buf, uint32_t data_offset, uint32_t data_len)
+{
+ odp_packet_hdr_t *pkt_hdr = packet_buf_to_hdr(pkt_buf);
+ uint8_t *head = odp_packet_buf_head(pkt_buf);
+
+ pkt_hdr->seg_len = data_len;
+ pkt_hdr->seg_data = head + data_offset;
+}
+
+odp_packet_buf_t odp_packet_buf_from_head(odp_pool_t pool_hdl, void *head)
+{
+ pool_t *pool = pool_entry_from_hdl(pool_hdl);
+ uint32_t head_offset = sizeof(odp_packet_hdr_t) + pool->ext_param.pkt.app_header_size;
+
+ if (odp_unlikely(pool->type != ODP_POOL_PACKET)) {
+ ODP_ERR("Not a packet pool\n");
+ return ODP_PACKET_BUF_INVALID;
+ }
+
+ if (odp_unlikely(pool->pool_ext == 0)) {
+ ODP_ERR("Not an external memory pool\n");
+ return ODP_PACKET_BUF_INVALID;
+ }
+
+ return (odp_packet_buf_t)((uintptr_t)head - head_offset);
+}
+
+uint32_t odp_packet_disassemble(odp_packet_t pkt, odp_packet_buf_t pkt_buf[], uint32_t num)
+{
+ uint32_t i;
+ odp_packet_seg_t seg;
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+ pool_t *pool = pkt_hdr->buf_hdr.pool_ptr;
+ uint32_t num_segs = odp_packet_num_segs(pkt);
+
+ if (odp_unlikely(pool->type != ODP_POOL_PACKET)) {
+ ODP_ERR("Not a packet pool\n");
+ return 0;
+ }
+
+ if (odp_unlikely(pool->pool_ext == 0)) {
+ ODP_ERR("Not an external memory pool\n");
+ return 0;
+ }
+
+ if (odp_unlikely(num < num_segs)) {
+ ODP_ERR("Not enough buffer handles %u. Packet has %u segments.\n", num, num_segs);
+ return 0;
+ }
+
+ seg = odp_packet_first_seg(pkt);
+
+ for (i = 0; i < num_segs; i++) {
+ pkt_buf[i] = (odp_packet_buf_t)(uintptr_t)packet_seg_to_hdr(seg);
+ seg = odp_packet_next_seg(pkt, seg);
+ }
+
+ return num_segs;
+}
+
+odp_packet_t odp_packet_reassemble(odp_pool_t pool_hdl, odp_packet_buf_t pkt_buf[], uint32_t num)
+{
+ uint32_t i, data_len, tailroom;
+ odp_packet_hdr_t *cur_seg, *next_seg;
+ odp_packet_hdr_t *pkt_hdr = (odp_packet_hdr_t *)(uintptr_t)pkt_buf[0];
+ uint32_t headroom = odp_packet_buf_data_offset(pkt_buf[0]);
+
+ pool_t *pool = pool_entry_from_hdl(pool_hdl);
+
+ if (odp_unlikely(pool->type != ODP_POOL_PACKET)) {
+ ODP_ERR("Not a packet pool\n");
+ return ODP_PACKET_INVALID;
+ }
+
+ if (odp_unlikely(pool->pool_ext == 0)) {
+ ODP_ERR("Not an external memory pool\n");
+ return ODP_PACKET_INVALID;
+ }
+
+ if (odp_unlikely(num == 0)) {
+ ODP_ERR("Bad number of buffers: %u\n", num);
+ return ODP_PACKET_INVALID;
+ }
+
+ cur_seg = pkt_hdr;
+ data_len = 0;
+
+ for (i = 0; i < num; i++) {
+ next_seg = NULL;
+ if (i < num - 1)
+ next_seg = (odp_packet_hdr_t *)(uintptr_t)pkt_buf[i + 1];
+
+ data_len += cur_seg->seg_len;
+ cur_seg->seg_next = next_seg;
+ cur_seg = next_seg;
+ }
+
+ tailroom = pool->ext_param.pkt.buf_size - sizeof(odp_packet_hdr_t);
+ tailroom -= pool->ext_param.pkt.app_header_size;
+ tailroom -= odp_packet_buf_data_len(pkt_buf[num - 1]);
+
+ pkt_hdr->seg_count = num;
+ pkt_hdr->frame_len = data_len;
+ pkt_hdr->headroom = headroom;
+ pkt_hdr->tailroom = tailroom;
+
+ /* Reset metadata */
+ pkt_hdr->subtype = ODP_EVENT_PACKET_BASIC;
+ pkt_hdr->input = ODP_PKTIO_INVALID;
+ packet_parse_reset(pkt_hdr, 1);
+
+ return packet_handle(pkt_hdr);
+}
+
+void odp_packet_proto_stats_request(odp_packet_t pkt, odp_packet_proto_stats_opt_t *opt)
+{
+ (void)pkt;
+ (void)opt;
+}
+
+odp_proto_stats_t odp_packet_proto_stats(odp_packet_t pkt)
+{
+ (void)pkt;
+
+ return ODP_PROTO_STATS_INVALID;
+}
diff --git a/platform/linux-generic/odp_packet_io.c b/platform/linux-generic/odp_packet_io.c
index 39cbd72f6..bd8bb58e8 100644
--- a/platform/linux-generic/odp_packet_io.c
+++ b/platform/linux-generic/odp_packet_io.c
@@ -30,6 +30,7 @@
#include <odp/api/plat/queue_inlines.h>
#include <odp_libconfig_internal.h>
#include <odp_event_vector_internal.h>
+#include <odp/api/proto_stats.h>
#include <string.h>
#include <inttypes.h>
@@ -727,7 +728,7 @@ odp_pktio_t odp_pktio_lookup(const char *name)
}
static void packet_vector_enq_cos(odp_queue_t queue, odp_event_t events[],
- uint32_t num, const cos_t *cos_hdr)
+ uint32_t num, cos_t *cos_hdr)
{
odp_packet_vector_t pktv;
odp_pool_t pool = cos_hdr->s.vector.pool;
@@ -748,6 +749,7 @@ static void packet_vector_enq_cos(odp_queue_t queue, odp_event_t events[],
}
if (odp_unlikely(i == 0)) {
odp_event_free_multi(events, num);
+ _odp_cos_queue_stats_add(cos_hdr, queue, 0, num);
return;
}
num_pktv = i;
@@ -768,9 +770,15 @@ static void packet_vector_enq_cos(odp_queue_t queue, odp_event_t events[],
}
ret = odp_queue_enq_multi(queue, event_tbl, num_pktv);
- if (odp_unlikely(ret != num_pktv)) {
+ if (odp_likely(ret == num_pktv)) {
+ _odp_cos_queue_stats_add(cos_hdr, queue, num_enq, num - num_enq);
+ } else {
+ uint32_t enqueued;
+
if (ret < 0)
ret = 0;
+ enqueued = max_size * ret;
+ _odp_cos_queue_stats_add(cos_hdr, queue, enqueued, num - enqueued);
odp_event_free_multi(&event_tbl[ret], num_pktv - ret);
}
}
@@ -900,6 +908,7 @@ static inline int pktin_recv_buf(pktio_entry_t *entry, int pktin_index,
}
for (i = 0; i < num_dst; i++) {
+ cos_t *cos_hdr = NULL;
int num_enq, ret;
int idx = dst_idx[i];
@@ -910,7 +919,7 @@ static inline int pktin_recv_buf(pktio_entry_t *entry, int pktin_index,
if (cos[i] != CLS_COS_IDX_NONE) {
/* Packets from classifier */
- cos_t *cos_hdr = _odp_cos_entry_from_idx(cos[i]);
+ cos_hdr = _odp_cos_entry_from_idx(cos[i]);
if (cos_hdr->s.vector.enable) {
packet_vector_enq_cos(dst[i], &ev[idx], num_enq, cos_hdr);
@@ -929,8 +938,11 @@ static inline int pktin_recv_buf(pktio_entry_t *entry, int pktin_index,
if (ret < num_enq)
odp_event_free_multi(&ev[idx + ret], num_enq - ret);
- }
+ /* Update CoS statistics */
+ if (cos[i] != CLS_COS_IDX_NONE)
+ _odp_cos_queue_stats_add(cos_hdr, dst[i], ret, num_enq - ret);
+ }
return num_rx;
}
@@ -1136,6 +1148,7 @@ int _odp_sched_cb_pktin_poll_one(int pktio_index,
odp_queue_t queue;
odp_bool_t vector_enabled = entry->s.in_queue[rx_queue].vector.enable;
uint32_t num = QUEUE_MULTI_MAX;
+ cos_t *cos_hdr = NULL;
if (odp_unlikely(entry->s.state != PKTIO_STATE_STARTED)) {
if (entry->s.state < PKTIO_STATE_ACTIVE ||
@@ -1162,12 +1175,13 @@ int _odp_sched_cb_pktin_poll_one(int pktio_index,
pkt_hdr = packet_hdr(pkt);
if (odp_unlikely(pkt_hdr->p.input_flags.dst_queue)) {
odp_event_t event = odp_packet_to_event(pkt);
+ uint16_t cos_idx = pkt_hdr->cos;
queue = pkt_hdr->dst_queue;
- if (pkt_hdr->cos != CLS_COS_IDX_NONE) {
+ if (cos_idx != CLS_COS_IDX_NONE) {
/* Packets from classifier */
- cos_t *cos_hdr = _odp_cos_entry_from_idx(pkt_hdr->cos);
+ cos_hdr = _odp_cos_entry_from_idx(cos_idx);
if (cos_hdr->s.vector.enable) {
packet_vector_enq_cos(queue, &event, 1, cos_hdr);
@@ -1182,7 +1196,13 @@ int _odp_sched_cb_pktin_poll_one(int pktio_index,
if (odp_unlikely(odp_queue_enq(queue, event))) {
/* Queue full? */
odp_packet_free(pkt);
- odp_atomic_inc_u64(&entry->s.stats_extra.in_discards);
+ if (cos_idx != CLS_COS_IDX_NONE)
+ _odp_cos_queue_stats_add(cos_hdr, queue, 0, 1);
+ else
+ odp_atomic_inc_u64(&entry->s.stats_extra.in_discards);
+ } else {
+ if (cos_idx != CLS_COS_IDX_NONE)
+ _odp_cos_queue_stats_add(cos_hdr, queue, 1, 0);
}
} else {
evt_tbl[num_rx++] = odp_packet_to_event(pkt);
@@ -3328,3 +3348,64 @@ int odp_pktout_send_lso(odp_pktout_queue_t queue, const odp_packet_t packet[], i
return i;
}
+
+void
+odp_proto_stats_param_init(odp_proto_stats_param_t *param)
+{
+ if (param)
+ memset(param, 0, sizeof(*param));
+}
+
+int
+odp_proto_stats_capability(odp_pktio_t pktio, odp_proto_stats_capability_t *capa)
+{
+ (void)pktio;
+
+ if (capa == NULL)
+ return -EINVAL;
+
+ memset(capa, 0, sizeof(*capa));
+
+ return 0;
+}
+
+odp_proto_stats_t
+odp_proto_stats_lookup(const char *name)
+{
+ (void)name;
+
+ return ODP_PROTO_STATS_INVALID;
+}
+
+odp_proto_stats_t
+odp_proto_stats_create(const char *name, const odp_proto_stats_param_t *param)
+{
+ (void)name;
+ (void)param;
+
+ return ODP_PROTO_STATS_INVALID;
+}
+
+int
+odp_proto_stats_destroy(odp_proto_stats_t stat)
+{
+ (void)stat;
+
+ return 0;
+}
+
+int
+odp_proto_stats(odp_proto_stats_t stat, odp_proto_stats_data_t *data)
+{
+ (void)stat;
+
+ memset(data, 0, sizeof(odp_proto_stats_data_t));
+
+ return 0;
+}
+
+void
+odp_proto_stats_print(odp_proto_stats_t stat)
+{
+ (void)stat;
+}
diff --git a/platform/linux-generic/odp_packet_vector.c b/platform/linux-generic/odp_packet_vector.c
index 98f373814..d97bb96a1 100644
--- a/platform/linux-generic/odp_packet_vector.c
+++ b/platform/linux-generic/odp_packet_vector.c
@@ -39,7 +39,7 @@ odp_packet_vector_t odp_packet_vector_alloc(odp_pool_t pool)
{
odp_buffer_t buf;
- ODP_ASSERT(pool_entry_from_hdl(pool)->params.type == ODP_POOL_VECTOR);
+ ODP_ASSERT(pool_entry_from_hdl(pool)->type == ODP_POOL_VECTOR);
buf = odp_buffer_alloc(pool);
if (odp_unlikely(buf == ODP_BUFFER_INVALID))
diff --git a/platform/linux-generic/odp_pool.c b/platform/linux-generic/odp_pool.c
index 07da3d9cc..d1fc94369 100644
--- a/platform/linux-generic/odp_pool.c
+++ b/platform/linux-generic/odp_pool.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2013-2018, Linaro Limited
- * Copyright (c) 2019-2020, Nokia
+ * Copyright (c) 2019-2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -27,6 +27,7 @@
#include <string.h>
#include <stdio.h>
+#include <stddef.h>
#include <inttypes.h>
#include <odp/api/plat/pool_inline_types.h>
@@ -66,7 +67,7 @@ static __thread pool_local_t local;
/* Fill in pool header field offsets for inline functions */
const _odp_pool_inline_offset_t _odp_pool_inline ODP_ALIGNED_CACHE = {
.pool_hdl = offsetof(pool_t, pool_hdl),
- .uarea_size = offsetof(pool_t, params.pkt.uarea_size)
+ .uarea_size = offsetof(pool_t, param_uarea_size)
};
#include <odp/visibility_end.h>
@@ -369,9 +370,11 @@ int _odp_pool_term_local(void)
return 0;
}
-static pool_t *reserve_pool(uint32_t shmflags)
+static pool_t *reserve_pool(uint32_t shmflags, uint8_t pool_ext, uint32_t num)
{
int i;
+ odp_shm_t shm;
+ uint32_t mem_size;
pool_t *pool;
char ring_name[ODP_POOL_NAME_LEN];
@@ -382,19 +385,30 @@ static pool_t *reserve_pool(uint32_t shmflags)
if (pool->reserved == 0) {
pool->reserved = 1;
UNLOCK(&pool->lock);
+
+ memset(&pool->memset_mark, 0,
+ sizeof(pool_t) - offsetof(pool_t, memset_mark));
sprintf(ring_name, "_odp_pool_ring_%d", i);
- pool->ring_shm =
- odp_shm_reserve(ring_name,
- sizeof(pool_ring_t),
- ODP_CACHE_LINE_SIZE, shmflags);
- if (odp_unlikely(pool->ring_shm == ODP_SHM_INVALID)) {
+
+ /* Reserve memory for the ring, and for lookup table in case of pool ext */
+ mem_size = sizeof(pool_ring_t);
+ if (pool_ext)
+ mem_size += num * sizeof(odp_buffer_hdr_t *);
+
+ shm = odp_shm_reserve(ring_name, mem_size, ODP_CACHE_LINE_SIZE, shmflags);
+
+ if (odp_unlikely(shm == ODP_SHM_INVALID)) {
ODP_ERR("Unable to alloc pool ring %d\n", i);
LOCK(&pool->lock);
pool->reserved = 0;
UNLOCK(&pool->lock);
break;
}
- pool->ring = odp_shm_addr(pool->ring_shm);
+
+ pool->ring_shm = shm;
+ pool->ring = odp_shm_addr(shm);
+ pool->pool_ext = pool_ext;
+
return pool;
}
UNLOCK(&pool->lock);
@@ -403,20 +417,59 @@ static pool_t *reserve_pool(uint32_t shmflags)
return NULL;
}
+static void init_buffer_hdr(pool_t *pool, odp_buffer_hdr_t *buf_hdr, uint32_t buf_index,
+ uint32_t hdr_len, uint8_t *data_ptr, void *uarea)
+{
+ odp_pool_type_t type = pool->type;
+
+ memset(buf_hdr, 0, hdr_len);
+
+ /* Initialize buffer metadata */
+ buf_hdr->index.u32 = 0;
+ buf_hdr->index.pool = pool->pool_idx;
+ buf_hdr->index.buffer = buf_index;
+ buf_hdr->type = type;
+ buf_hdr->event_type = type;
+ buf_hdr->pool_ptr = pool;
+ buf_hdr->uarea_addr = uarea;
+ odp_atomic_init_u32(&buf_hdr->ref_cnt, 0);
+
+ /* Store base values for fast init */
+ buf_hdr->base_data = data_ptr;
+ buf_hdr->buf_end = data_ptr + pool->seg_len + pool->tailroom;
+
+ /* Initialize segmentation metadata */
+ if (type == ODP_POOL_PACKET) {
+ odp_packet_hdr_t *pkt_hdr = (void *)buf_hdr;
+
+ pkt_hdr->seg_data = data_ptr;
+ pkt_hdr->seg_len = pool->seg_len;
+ pkt_hdr->seg_count = 1;
+ pkt_hdr->seg_next = NULL;
+ }
+
+ /* Initialize event vector metadata */
+ if (type == ODP_POOL_VECTOR) {
+ odp_event_vector_hdr_t *vect_hdr = (void *)buf_hdr;
+
+ vect_hdr->size = 0;
+ buf_hdr->event_type = ODP_EVENT_PACKET_VECTOR;
+ }
+}
+
static void init_buffers(pool_t *pool)
{
uint64_t i;
odp_buffer_hdr_t *buf_hdr;
odp_packet_hdr_t *pkt_hdr;
- odp_event_vector_hdr_t *vect_hdr;
odp_shm_info_t shm_info;
void *addr;
void *uarea = NULL;
uint8_t *data;
- uint32_t offset;
+ uint32_t offset, hdr_len;
ring_ptr_t *ring;
uint32_t mask;
- int type;
+ odp_pool_type_t type;
uint64_t page_size;
int skipped_blocks = 0;
@@ -426,7 +479,7 @@ static void init_buffers(pool_t *pool)
page_size = shm_info.page_size;
ring = &pool->ring->hdr;
mask = pool->ring_mask;
- type = pool->params.type;
+ type = pool->type;
for (i = 0; i < pool->num + skipped_blocks ; i++) {
int skip = 0;
@@ -435,11 +488,10 @@ static void init_buffers(pool_t *pool)
pool->block_offset];
buf_hdr = addr;
pkt_hdr = addr;
- vect_hdr = addr;
+
/* Skip packet buffers which cross huge page boundaries. Some
* NICs cannot handle buffers which cross page boundaries. */
- if (pool->params.type == ODP_POOL_PACKET &&
- page_size >= FIRST_HP_SIZE) {
+ if (type == ODP_POOL_PACKET && page_size >= FIRST_HP_SIZE) {
uint64_t first_page;
uint64_t last_page;
@@ -467,37 +519,8 @@ static void init_buffers(pool_t *pool)
while (((uintptr_t)&data[offset]) % pool->align != 0)
offset++;
- memset(buf_hdr, 0, (uintptr_t)data - (uintptr_t)buf_hdr);
-
- /* Initialize buffer metadata */
- buf_hdr->index.u32 = 0;
- buf_hdr->index.pool = pool->pool_idx;
- buf_hdr->index.buffer = i;
- buf_hdr->type = type;
- buf_hdr->event_type = type;
- if (type == ODP_POOL_VECTOR)
- buf_hdr->event_type = ODP_EVENT_PACKET_VECTOR;
- buf_hdr->pool_ptr = pool;
- buf_hdr->uarea_addr = uarea;
-
- /* Initialize segmentation metadata */
- if (type == ODP_POOL_PACKET) {
- pkt_hdr->seg_data = &data[offset];
- pkt_hdr->seg_len = pool->seg_len;
- pkt_hdr->seg_count = 1;
- pkt_hdr->seg_next = NULL;
- }
-
- odp_atomic_init_u32(&buf_hdr->ref_cnt, 0);
-
- /* Initialize event vector metadata */
- if (type == ODP_POOL_VECTOR)
- vect_hdr->size = 0;
-
- /* Store base values for fast init */
- buf_hdr->base_data = &data[offset];
- buf_hdr->buf_end = &data[offset + pool->seg_len +
- pool->tailroom];
+ hdr_len = (uintptr_t)data - (uintptr_t)buf_hdr;
+ init_buffer_hdr(pool, buf_hdr, i, hdr_len, &data[offset], uarea);
/* Store buffer into the global pool */
if (!skip)
@@ -522,6 +545,67 @@ static bool shm_is_from_huge_pages(odp_shm_t shm)
return (info.page_size >= huge_page_size);
}
+static void set_pool_name(pool_t *pool, const char *name)
+{
+ if (name == NULL) {
+ pool->name[0] = 0;
+ } else {
+ strncpy(pool->name, name, ODP_POOL_NAME_LEN - 1);
+ pool->name[ODP_POOL_NAME_LEN - 1] = 0;
+ }
+}
+
+static void set_pool_cache_size(pool_t *pool, uint32_t cache_size)
+{
+ uint32_t burst_size;
+
+ pool->cache_size = 0;
+ pool->burst_size = 1;
+
+ if (cache_size > 1) {
+ cache_size = (cache_size / 2) * 2;
+ burst_size = _odp_pool_glb->config.burst_size;
+
+ if ((cache_size / burst_size) < 2)
+ burst_size = cache_size / 2;
+
+ pool->cache_size = cache_size;
+ pool->burst_size = burst_size;
+ }
+}
+
+static int reserve_uarea(pool_t *pool, uint32_t uarea_size, uint32_t num_pkt, uint32_t shmflags)
+{
+ odp_shm_t shm;
+ const char *max_prefix = "pool_000_uarea_";
+ int max_prefix_len = strlen(max_prefix);
+ char uarea_name[ODP_POOL_NAME_LEN + max_prefix_len];
+
+ pool->uarea_shm = ODP_SHM_INVALID;
+
+ if (uarea_size == 0) {
+ pool->param_uarea_size = 0;
+ pool->uarea_size = 0;
+ pool->uarea_shm_size = 0;
+ return 0;
+ }
+
+ sprintf(uarea_name, "pool_%03i_uarea_%s", pool->pool_idx, pool->name);
+
+ pool->param_uarea_size = uarea_size;
+ pool->uarea_size = ROUNDUP_CACHE_LINE(uarea_size);
+ pool->uarea_shm_size = num_pkt * (uint64_t)pool->uarea_size;
+
+ shm = odp_shm_reserve(uarea_name, pool->uarea_shm_size, ODP_PAGE_SIZE, shmflags);
+
+ if (shm == ODP_SHM_INVALID)
+ return -1;
+
+ pool->uarea_shm = shm;
+ pool->uarea_base_addr = odp_shm_addr(shm);
+ return 0;
+}
+
static odp_pool_t pool_create(const char *name, const odp_pool_param_t *params,
uint32_t shmflags)
{
@@ -529,17 +613,17 @@ static odp_pool_t pool_create(const char *name, const odp_pool_param_t *params,
uint32_t uarea_size, headroom, tailroom;
odp_shm_t shm;
uint32_t seg_len, align, num, hdr_size, block_size;
- uint32_t max_len, cache_size, burst_size;
+ uint32_t max_len, cache_size;
uint32_t ring_size;
+ odp_pool_type_t type = params->type;
uint32_t num_extra = 0;
- const char *max_prefix = "pool_000_uarea_";
+ const char *max_prefix = "pool_000_";
int max_prefix_len = strlen(max_prefix);
char shm_name[ODP_POOL_NAME_LEN + max_prefix_len];
- char uarea_name[ODP_POOL_NAME_LEN + max_prefix_len];
align = 0;
- if (params->type == ODP_POOL_PACKET) {
+ if (type == ODP_POOL_PACKET) {
uint32_t align_req = params->pkt.align;
if (align_req &&
@@ -551,7 +635,7 @@ static odp_pool_t pool_create(const char *name, const odp_pool_param_t *params,
align = _odp_pool_glb->config.pkt_base_align;
} else {
- if (params->type == ODP_POOL_BUFFER)
+ if (type == ODP_POOL_BUFFER)
align = params->buf.align;
if (align < _odp_pool_glb->config.buf_min_align)
@@ -572,7 +656,7 @@ static odp_pool_t pool_create(const char *name, const odp_pool_param_t *params,
uarea_size = 0;
cache_size = 0;
- switch (params->type) {
+ switch (type) {
case ODP_POOL_BUFFER:
num = params->buf.num;
seg_len = params->buf.size;
@@ -630,32 +714,23 @@ static odp_pool_t pool_create(const char *name, const odp_pool_param_t *params,
return ODP_POOL_INVALID;
}
- if (uarea_size)
- uarea_size = ROUNDUP_CACHE_LINE(uarea_size);
-
- pool = reserve_pool(shmflags);
+ pool = reserve_pool(shmflags, 0, num);
if (pool == NULL) {
ODP_ERR("No more free pools\n");
return ODP_POOL_INVALID;
}
- if (name == NULL) {
- pool->name[0] = 0;
- } else {
- strncpy(pool->name, name,
- ODP_POOL_NAME_LEN - 1);
- pool->name[ODP_POOL_NAME_LEN - 1] = 0;
- }
+ set_pool_name(pool, name);
/* Format SHM names from prefix, pool index and pool name. */
sprintf(shm_name, "pool_%03i_%s", pool->pool_idx, pool->name);
- sprintf(uarea_name, "pool_%03i_uarea_%s", pool->pool_idx, pool->name);
+ pool->type = type;
pool->params = *params;
pool->block_offset = 0;
- if (params->type == ODP_POOL_PACKET) {
+ if (type == ODP_POOL_PACKET) {
uint32_t dpdk_obj_size;
hdr_size = ROUNDUP_CACHE_LINE(sizeof(odp_packet_hdr_t));
@@ -681,9 +756,9 @@ static odp_pool_t pool_create(const char *name, const odp_pool_param_t *params,
uint32_t align_pad = (align > ODP_CACHE_LINE_SIZE) ?
align - ODP_CACHE_LINE_SIZE : 0;
- if (params->type == ODP_POOL_BUFFER)
+ if (type == ODP_POOL_BUFFER)
hdr_size = ROUNDUP_CACHE_LINE(sizeof(odp_buffer_hdr_t));
- else if (params->type == ODP_POOL_TIMEOUT)
+ else if (type == ODP_POOL_TIMEOUT)
hdr_size = ROUNDUP_CACHE_LINE(sizeof(odp_timeout_hdr_t));
else
hdr_size = ROUNDUP_CACHE_LINE(sizeof(odp_event_vector_hdr_t));
@@ -693,7 +768,7 @@ static odp_pool_t pool_create(const char *name, const odp_pool_param_t *params,
/* Allocate extra memory for skipping packet buffers which cross huge
* page boundaries. */
- if (params->type == ODP_POOL_PACKET) {
+ if (type == ODP_POOL_PACKET) {
num_extra = ((((uint64_t)num * block_size) +
FIRST_HP_SIZE - 1) / FIRST_HP_SIZE);
num_extra += ((((uint64_t)num_extra * block_size) +
@@ -715,25 +790,11 @@ static odp_pool_t pool_create(const char *name, const odp_pool_param_t *params,
pool->max_len = max_len;
pool->tailroom = tailroom;
pool->block_size = block_size;
- pool->uarea_size = uarea_size;
pool->shm_size = (num + num_extra) * (uint64_t)block_size;
- pool->uarea_shm_size = num * (uint64_t)uarea_size;
pool->ext_desc = NULL;
pool->ext_destroy = NULL;
- pool->cache_size = 0;
- pool->burst_size = 1;
-
- if (cache_size > 1) {
- cache_size = (cache_size / 2) * 2;
- burst_size = _odp_pool_glb->config.burst_size;
-
- if ((cache_size / burst_size) < 2)
- burst_size = cache_size / 2;
-
- pool->cache_size = cache_size;
- pool->burst_size = burst_size;
- }
+ set_pool_cache_size(pool, cache_size);
shm = odp_shm_reserve(shm_name, pool->shm_size, ODP_PAGE_SIZE,
shmflags);
@@ -750,26 +811,16 @@ static odp_pool_t pool_create(const char *name, const odp_pool_param_t *params,
pool->base_addr = odp_shm_addr(pool->shm);
pool->max_addr = pool->base_addr + pool->shm_size - 1;
- pool->uarea_shm = ODP_SHM_INVALID;
- if (uarea_size) {
- shm = odp_shm_reserve(uarea_name, pool->uarea_shm_size,
- ODP_PAGE_SIZE, shmflags);
-
- pool->uarea_shm = shm;
-
- if (shm == ODP_SHM_INVALID) {
- ODP_ERR("SHM reserve failed (uarea)\n");
- goto error;
- }
-
- pool->uarea_base_addr = odp_shm_addr(pool->uarea_shm);
+ if (reserve_uarea(pool, uarea_size, num, shmflags)) {
+ ODP_ERR("User area SHM reserve failed\n");
+ goto error;
}
ring_ptr_init(&pool->ring->hdr);
init_buffers(pool);
/* Create zero-copy DPDK memory pool. NOP if zero-copy is disabled. */
- if (params->type == ODP_POOL_PACKET && _odp_dpdk_pool_create(pool)) {
+ if (type == ODP_POOL_PACKET && _odp_dpdk_pool_create(pool)) {
ODP_ERR("Creating DPDK packet pool failed\n");
goto error;
}
@@ -994,7 +1045,8 @@ int odp_pool_destroy(odp_pool_t pool_hdl)
for (i = 0; i < ODP_THREAD_COUNT_MAX; i++)
cache_flush(&pool->local_cache[i], pool);
- odp_shm_free(pool->shm);
+ if (pool->pool_ext == 0)
+ odp_shm_free(pool->shm);
if (pool->uarea_shm != ODP_SHM_INVALID)
odp_shm_free(pool->uarea_shm);
@@ -1044,10 +1096,18 @@ int odp_pool_info(odp_pool_t pool_hdl, odp_pool_info_t *info)
if (pool == NULL || info == NULL)
return -1;
+ memset(info, 0, sizeof(odp_pool_info_t));
+
info->name = pool->name;
- info->params = pool->params;
- if (pool->params.type == ODP_POOL_PACKET)
+ if (pool->pool_ext) {
+ info->pool_ext = 1;
+ info->pool_ext_param = pool->ext_param;
+ } else {
+ info->params = pool->params;
+ }
+
+ if (pool->type == ODP_POOL_PACKET)
info->pkt.max_num = pool->num;
info->min_data_addr = (uintptr_t)pool->base_addr;
@@ -1314,10 +1374,10 @@ void odp_pool_print(odp_pool_t pool_hdl)
odp_pool_to_u64(pool->pool_hdl));
ODP_PRINT(" name %s\n", pool->name);
ODP_PRINT(" pool type %s\n",
- pool->params.type == ODP_POOL_BUFFER ? "buffer" :
- (pool->params.type == ODP_POOL_PACKET ? "packet" :
- (pool->params.type == ODP_POOL_TIMEOUT ? "timeout" :
- (pool->params.type == ODP_POOL_VECTOR ? "vector" :
+ pool->type == ODP_POOL_BUFFER ? "buffer" :
+ (pool->type == ODP_POOL_PACKET ? "packet" :
+ (pool->type == ODP_POOL_TIMEOUT ? "timeout" :
+ (pool->type == ODP_POOL_VECTOR ? "vector" :
"unknown"))));
ODP_PRINT(" pool shm %" PRIu64 "\n",
odp_shm_to_u64(pool->shm));
@@ -1492,3 +1552,250 @@ int odp_buffer_is_valid(odp_buffer_t buf)
return 1;
}
+
+/* No actual head pointer alignment requirement. Anyway, require even byte address. */
+#define MIN_HEAD_ALIGN 2
+
+int odp_pool_ext_capability(odp_pool_type_t type, odp_pool_ext_capability_t *capa)
+{
+ odp_pool_stats_opt_t supported_stats;
+
+ if (type != ODP_POOL_PACKET)
+ return -1;
+
+ supported_stats.all = 0;
+
+ memset(capa, 0, sizeof(odp_pool_ext_capability_t));
+
+ capa->type = type;
+ capa->max_pools = ODP_CONFIG_POOLS - 1;
+ capa->min_cache_size = 0;
+ capa->max_cache_size = CONFIG_POOL_CACHE_MAX_SIZE;
+ capa->stats.all = supported_stats.all;
+
+ capa->pkt.max_num_buf = _odp_pool_glb->config.pkt_max_num;
+ capa->pkt.max_buf_size = MAX_SIZE;
+ capa->pkt.odp_header_size = sizeof(odp_packet_hdr_t);
+ capa->pkt.odp_trailer_size = 0;
+ capa->pkt.min_mem_align = ODP_CACHE_LINE_SIZE;
+ capa->pkt.min_buf_align = ODP_CACHE_LINE_SIZE;
+ capa->pkt.min_head_align = MIN_HEAD_ALIGN;
+ capa->pkt.buf_size_aligned = 0;
+ capa->pkt.max_headroom = CONFIG_PACKET_HEADROOM;
+ capa->pkt.max_headroom_size = CONFIG_PACKET_HEADROOM;
+ capa->pkt.max_segs_per_pkt = PKT_MAX_SEGS;
+ capa->pkt.max_uarea_size = MAX_SIZE;
+
+ return 0;
+}
+
+void odp_pool_ext_param_init(odp_pool_type_t type, odp_pool_ext_param_t *param)
+{
+ uint32_t default_cache_size = _odp_pool_glb->config.local_cache_size;
+
+ memset(param, 0, sizeof(odp_pool_ext_param_t));
+
+ if (type != ODP_POOL_PACKET)
+ return;
+
+ param->type = ODP_POOL_PACKET;
+ param->cache_size = default_cache_size;
+ param->pkt.headroom = CONFIG_PACKET_HEADROOM;
+}
+
+static int check_pool_ext_param(const odp_pool_ext_param_t *param)
+{
+ odp_pool_ext_capability_t capa;
+ uint32_t head_offset = sizeof(odp_packet_hdr_t) + param->pkt.app_header_size;
+
+ if (param->type != ODP_POOL_PACKET) {
+ ODP_ERR("Pool type not supported\n");
+ return -1;
+ }
+
+ if (odp_pool_ext_capability(param->type, &capa)) {
+ ODP_ERR("Capa failed\n");
+ return -1;
+ }
+
+ if (param->cache_size > capa.max_cache_size) {
+ ODP_ERR("Too large cache size %u\n", param->cache_size);
+ return -1;
+ }
+
+ if (param->stats.all != capa.stats.all) {
+ ODP_ERR("Pool statistics not supported\n");
+ return -1;
+ }
+
+ if (param->pkt.num_buf > capa.pkt.max_num_buf) {
+ ODP_ERR("Too many packet buffers\n");
+ return -1;
+ }
+
+ if (param->pkt.buf_size > capa.pkt.max_buf_size) {
+ ODP_ERR("Too large packet buffer size %u\n", param->pkt.buf_size);
+ return -1;
+ }
+
+ if (param->pkt.uarea_size > capa.pkt.max_uarea_size) {
+ ODP_ERR("Too large user area size %u\n", param->pkt.uarea_size);
+ return -1;
+ }
+
+ if (param->pkt.headroom > capa.pkt.max_headroom) {
+ ODP_ERR("Too large headroom size\n");
+ return -1;
+ }
+
+ if (head_offset % capa.pkt.min_head_align) {
+ ODP_ERR("Head pointer not %u byte aligned\n", capa.pkt.min_head_align);
+ return -1;
+ }
+
+ return 0;
+}
+
+odp_pool_t odp_pool_ext_create(const char *name, const odp_pool_ext_param_t *param)
+{
+ pool_t *pool;
+ uint32_t ring_size;
+ uint32_t num_buf = param->pkt.num_buf;
+ uint32_t buf_size = param->pkt.buf_size;
+ uint32_t head_offset = sizeof(odp_packet_hdr_t) + param->pkt.app_header_size;
+ uint32_t headroom = param->pkt.headroom;
+ uint32_t shm_flags = 0;
+
+ if (check_pool_ext_param(param)) {
+ ODP_ERR("Bad pool ext param\n");
+ return ODP_POOL_INVALID;
+ }
+
+ if (odp_global_ro.shm_single_va)
+ shm_flags |= ODP_SHM_SINGLE_VA;
+
+ pool = reserve_pool(shm_flags, 1, num_buf);
+
+ if (pool == NULL) {
+ ODP_ERR("No more free pools\n");
+ return ODP_POOL_INVALID;
+ }
+
+ pool->ext_param = *param;
+ set_pool_name(pool, name);
+ set_pool_cache_size(pool, param->cache_size);
+
+ if (reserve_uarea(pool, param->pkt.uarea_size, num_buf, shm_flags)) {
+ ODP_ERR("User area SHM reserve failed\n");
+ goto error;
+ }
+
+ /* Ring size must be larger than the number of items stored */
+ if (num_buf + 1 <= RING_SIZE_MIN)
+ ring_size = RING_SIZE_MIN;
+ else
+ ring_size = ROUNDUP_POWER2_U32(num_buf + 1);
+
+ pool->ring_mask = ring_size - 1;
+ pool->type = param->type;
+ pool->num = num_buf;
+ pool->headroom = headroom;
+ pool->tailroom = 0;
+ pool->seg_len = buf_size - head_offset - headroom - pool->tailroom;
+ pool->max_seg_len = headroom + pool->seg_len + pool->tailroom;
+ pool->max_len = PKT_MAX_SEGS * pool->seg_len;
+
+ ring_ptr_init(&pool->ring->hdr);
+
+ return pool->pool_hdl;
+
+error:
+ if (pool->ring_shm != ODP_SHM_INVALID)
+ odp_shm_free(pool->ring_shm);
+
+ LOCK(&pool->lock);
+ pool->reserved = 0;
+ UNLOCK(&pool->lock);
+
+ return ODP_POOL_INVALID;
+}
+
+int odp_pool_ext_populate(odp_pool_t pool_hdl, void *buf[], uint32_t buf_size, uint32_t num,
+ uint32_t flags)
+{
+ pool_t *pool;
+ odp_buffer_hdr_t *buf_hdr;
+ ring_ptr_t *ring;
+ uint32_t i, ring_mask, buf_index, head_offset;
+ uint32_t num_populated;
+ uint8_t *data_ptr;
+ uint32_t hdr_size = sizeof(odp_packet_hdr_t);
+ void *uarea = NULL;
+
+ if (pool_hdl == ODP_POOL_INVALID) {
+ ODP_ERR("Bad pool handle\n");
+ return -1;
+ }
+
+ pool = pool_entry_from_hdl(pool_hdl);
+
+ if (pool->type != ODP_POOL_PACKET || pool->pool_ext == 0) {
+ ODP_ERR("Bad pool type\n");
+ return -1;
+ }
+
+ if (buf_size != pool->ext_param.pkt.buf_size) {
+ ODP_ERR("Bad buffer size\n");
+ return -1;
+ }
+
+ num_populated = pool->num_populated;
+
+ if (num_populated + num > pool->num) {
+ ODP_ERR("Trying to over populate the pool\n");
+ return -1;
+ }
+
+ if ((num_populated + num == pool->num) && !(flags & ODP_POOL_POPULATE_DONE)) {
+ ODP_ERR("Missing ODP_POOL_POPULATE_DONE flag\n");
+ return -1;
+ }
+
+ if ((num_populated + num < pool->num) && flags) {
+ ODP_ERR("Unexpected flags: 0x%x\n", flags);
+ return -1;
+ }
+
+ ring = &pool->ring->hdr;
+ ring_mask = pool->ring_mask;
+ buf_index = pool->num_populated;
+ head_offset = sizeof(odp_packet_hdr_t) + pool->ext_param.pkt.app_header_size;
+
+ for (i = 0; i < num; i++) {
+ buf_hdr = buf[i];
+
+ if ((uintptr_t)buf_hdr & (ODP_CACHE_LINE_SIZE - 1)) {
+ ODP_ERR("Bad packet buffer align: buf[%u]\n", i);
+ return -1;
+ }
+
+ if (((uintptr_t)buf_hdr + head_offset) & (MIN_HEAD_ALIGN - 1)) {
+ ODP_ERR("Bad head pointer align: buf[%u]\n", i);
+ return -1;
+ }
+
+ if (pool->uarea_size)
+ uarea = &pool->uarea_base_addr[buf_index * pool->uarea_size];
+
+ data_ptr = (uint8_t *)buf_hdr + head_offset + pool->headroom;
+ init_buffer_hdr(pool, buf_hdr, buf_index, hdr_size, data_ptr, uarea);
+ pool->ring->buf_hdr_by_index[buf_index] = buf_hdr;
+ buf_index++;
+
+ ring_ptr_enq(ring, ring_mask, buf_hdr);
+ }
+
+ pool->num_populated += num;
+
+ return 0;
+}
diff --git a/platform/linux-generic/odp_schedule_basic.c b/platform/linux-generic/odp_schedule_basic.c
index 5d328b84c..479d6f956 100644
--- a/platform/linux-generic/odp_schedule_basic.c
+++ b/platform/linux-generic/odp_schedule_basic.c
@@ -22,7 +22,7 @@
#include <odp/api/plat/thread_inlines.h>
#include <odp/api/time.h>
#include <odp/api/plat/time_inlines.h>
-#include <odp/api/spinlock.h>
+#include <odp/api/ticketlock.h>
#include <odp/api/hints.h>
#include <odp/api/cpu.h>
#include <odp/api/thrmask.h>
@@ -51,6 +51,18 @@
/* Group weight table size */
#define GRP_WEIGHT_TBL_SIZE NUM_SCHED_GRPS
+/* Spread balancing frequency. Balance every BALANCE_ROUNDS_M1 + 1 scheduling rounds. */
+#define BALANCE_ROUNDS_M1 0xfffff
+
+/* Load of a queue */
+#define QUEUE_LOAD 256
+
+/* Margin for load balance hysteresis */
+#define QUEUE_LOAD_MARGIN 8
+
+/* Ensure that load calculation does not wrap around */
+ODP_STATIC_ASSERT((QUEUE_LOAD * CONFIG_MAX_SCHED_QUEUES) < UINT32_MAX, "Load_value_too_large");
+
/* Maximum priority queue spread */
#define MAX_SPREAD 8
@@ -123,10 +135,12 @@ ODP_STATIC_ASSERT(sizeof(lock_called_t) == sizeof(uint32_t),
/* Scheduler local data */
typedef struct ODP_ALIGNED_CACHE {
+ uint32_t sched_round;
uint16_t thr;
uint8_t pause;
uint8_t sync_ctx;
- uint16_t grp_round;
+ uint8_t balance_on;
+ uint16_t balance_start;
uint16_t spread_round;
struct {
@@ -188,11 +202,12 @@ typedef struct {
uint8_t prefer_ratio;
} config;
+ uint8_t load_balance;
uint16_t max_spread;
uint32_t ring_mask;
- odp_spinlock_t mask_lock;
odp_atomic_u32_t grp_epoch;
odp_shm_t shm;
+ odp_ticketlock_t mask_lock[NUM_SCHED_GRPS];
prio_q_mask_t prio_q_mask[NUM_SCHED_GRPS][NUM_PRIO];
struct {
@@ -213,7 +228,7 @@ typedef struct {
uint32_t prio_q_count[NUM_SCHED_GRPS][NUM_PRIO][MAX_SPREAD];
odp_thrmask_t mask_all;
- odp_spinlock_t grp_lock;
+ odp_ticketlock_t grp_lock;
struct {
char name[ODP_SCHED_GROUP_NAME_LEN];
@@ -225,7 +240,7 @@ typedef struct {
struct {
int num_pktin;
} pktio[NUM_PKTIO];
- odp_spinlock_t pktio_lock;
+ odp_ticketlock_t pktio_lock;
order_context_t order[CONFIG_MAX_SCHED_QUEUES];
@@ -289,6 +304,22 @@ static int read_config_file(sched_global_t *sched)
sched->config.prefer_ratio = val + 1;
ODP_PRINT(" %s: %i\n", str, val);
+ str = "sched_basic.load_balance";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+
+ if (val > 1 || val < 0) {
+ ODP_ERR("Bad value %s = %i\n", str, val);
+ return -1;
+ }
+ ODP_PRINT(" %s: %i\n", str, val);
+
+ sched->load_balance = 1;
+ if (val == 0 || sched->config.num_spread == 1)
+ sched->load_balance = 0;
+
str = "sched_basic.burst_size_default";
if (_odp_libconfig_lookup_array(str, burst_val, NUM_PRIO) !=
NUM_PRIO) {
@@ -357,14 +388,16 @@ static int read_config_file(sched_global_t *sched)
sched->config_if.group_enable.control = val;
ODP_PRINT(" %s: %i\n", str, val);
+ ODP_PRINT(" dynamic load balance: %s\n", sched->load_balance ? "ON" : "OFF");
+
ODP_PRINT("\n");
return 0;
}
-static inline uint8_t spread_index(uint32_t index)
+/* Spread from thread or other index */
+static inline uint8_t spread_from_index(uint32_t index)
{
- /* thread/queue index to spread index */
return index % sched->config.num_spread;
}
@@ -381,15 +414,14 @@ static void sched_local_init(void)
sched_local.sync_ctx = NO_SYNC_CONTEXT;
sched_local.stash.queue = ODP_QUEUE_INVALID;
- spread = spread_index(sched_local.thr);
+ spread = spread_from_index(sched_local.thr);
prefer_ratio = sched->config.prefer_ratio;
for (i = 0; i < SPREAD_TBL_SIZE; i++) {
sched_local.spread_tbl[i] = spread;
if (num_spread > 1 && (i % prefer_ratio) == 0) {
- sched_local.spread_tbl[i] = spread_index(spread +
- offset);
+ sched_local.spread_tbl[i] = spread_from_index(spread + offset);
offset++;
if (offset == num_spread)
offset = 1;
@@ -402,7 +434,7 @@ static int schedule_init_global(void)
odp_shm_t shm;
int i, j, grp;
int prefer_ratio;
- uint32_t ring_size;
+ uint32_t ring_size, num_rings;
ODP_DBG("Schedule init ... ");
@@ -429,20 +461,29 @@ static int schedule_init_global(void)
/* When num_spread == 1, only spread_tbl[0] is used. */
sched->max_spread = (sched->config.num_spread - 1) * prefer_ratio;
- ring_size = MAX_RING_SIZE / sched->config.num_spread;
+ /* Dynamic load balance may move all queues into a single ring.
+ * Ring size can be smaller with fixed spreading. */
+ if (sched->load_balance) {
+ ring_size = MAX_RING_SIZE;
+ num_rings = 1;
+ } else {
+ ring_size = MAX_RING_SIZE / sched->config.num_spread;
+ num_rings = sched->config.num_spread;
+ }
+
ring_size = ROUNDUP_POWER2_U32(ring_size);
ODP_ASSERT(ring_size <= MAX_RING_SIZE);
sched->ring_mask = ring_size - 1;
/* Each ring can hold in maximum ring_size-1 queues. Due to ring size round up,
* total capacity of rings may be larger than CONFIG_MAX_SCHED_QUEUES. */
- sched->max_queues = sched->ring_mask * sched->config.num_spread;
+ sched->max_queues = sched->ring_mask * num_rings;
if (sched->max_queues > CONFIG_MAX_SCHED_QUEUES)
sched->max_queues = CONFIG_MAX_SCHED_QUEUES;
- odp_spinlock_init(&sched->mask_lock);
-
for (grp = 0; grp < NUM_SCHED_GRPS; grp++) {
+ odp_ticketlock_init(&sched->mask_lock[grp]);
+
for (i = 0; i < NUM_PRIO; i++) {
for (j = 0; j < MAX_SPREAD; j++) {
prio_queue_t *prio_q;
@@ -453,11 +494,11 @@ static int schedule_init_global(void)
}
}
- odp_spinlock_init(&sched->pktio_lock);
+ odp_ticketlock_init(&sched->pktio_lock);
for (i = 0; i < NUM_PKTIO; i++)
sched->pktio[i].num_pktin = 0;
- odp_spinlock_init(&sched->grp_lock);
+ odp_ticketlock_init(&sched->grp_lock);
odp_atomic_init_u32(&sched->grp_epoch, 0);
for (i = 0; i < NUM_SCHED_GRPS; i++) {
@@ -538,7 +579,7 @@ static inline int grp_update_tbl(void)
int num = 0;
int thr = sched_local.thr;
- odp_spinlock_lock(&sched->grp_lock);
+ odp_ticketlock_lock(&sched->grp_lock);
for (i = 0; i < NUM_SCHED_GRPS; i++) {
if (sched->sched_grp[i].allocated == 0)
@@ -550,7 +591,7 @@ static inline int grp_update_tbl(void)
}
}
- odp_spinlock_unlock(&sched->grp_lock);
+ odp_ticketlock_unlock(&sched->grp_lock);
if (odp_unlikely(num == 0))
return 0;
@@ -593,13 +634,68 @@ static inline int prio_level_from_api(int api_prio)
return schedule_max_prio() - api_prio;
}
+static inline void dec_queue_count(int grp, int prio, int spr)
+{
+ odp_ticketlock_lock(&sched->mask_lock[grp]);
+
+ sched->prio_q_count[grp][prio][spr]--;
+
+ /* Clear mask bit only when the last queue is removed */
+ if (sched->prio_q_count[grp][prio][spr] == 0)
+ sched->prio_q_mask[grp][prio] &= (uint8_t)(~(1 << spr));
+
+ odp_ticketlock_unlock(&sched->mask_lock[grp]);
+}
+
+static inline void update_queue_count(int grp, int prio, int old_spr, int new_spr)
+{
+ odp_ticketlock_lock(&sched->mask_lock[grp]);
+
+ sched->prio_q_mask[grp][prio] |= 1 << new_spr;
+ sched->prio_q_count[grp][prio][new_spr]++;
+
+ sched->prio_q_count[grp][prio][old_spr]--;
+
+ if (sched->prio_q_count[grp][prio][old_spr] == 0)
+ sched->prio_q_mask[grp][prio] &= (uint8_t)(~(1 << old_spr));
+
+ odp_ticketlock_unlock(&sched->mask_lock[grp]);
+}
+
+/* Select the spread that has least queues */
+static uint8_t allocate_spread(int grp, int prio)
+{
+ uint8_t i;
+ uint32_t num;
+ uint32_t min = UINT32_MAX;
+ uint8_t num_spread = sched->config.num_spread;
+ uint8_t spr = 0;
+
+ odp_ticketlock_lock(&sched->mask_lock[grp]);
+
+ for (i = 0; i < num_spread; i++) {
+ num = sched->prio_q_count[grp][prio][i];
+ if (num < min) {
+ spr = i;
+ min = num;
+ }
+ }
+
+ sched->prio_q_mask[grp][prio] |= 1 << spr;
+ sched->prio_q_count[grp][prio][spr]++;
+
+ odp_ticketlock_unlock(&sched->mask_lock[grp]);
+
+ return spr;
+}
+
static int schedule_create_queue(uint32_t queue_index,
const odp_schedule_param_t *sched_param)
{
int i;
+ uint8_t spread;
int grp = sched_param->group;
int prio = prio_level_from_api(sched_param->prio);
- uint8_t spread = spread_index(queue_index);
if (odp_global_rw->schedule_configured == 0) {
ODP_ERR("Scheduler has not been configured\n");
@@ -623,13 +719,7 @@ static int schedule_create_queue(uint32_t queue_index,
return -1;
}
- odp_spinlock_lock(&sched->mask_lock);
-
- /* update scheduler prio queue usage status */
- sched->prio_q_mask[grp][prio] |= 1 << spread;
- sched->prio_q_count[grp][prio][spread]++;
-
- odp_spinlock_unlock(&sched->mask_lock);
+ spread = allocate_spread(grp, prio);
sched->queue[queue_index].grp = grp;
sched->queue[queue_index].prio = prio;
@@ -658,17 +748,9 @@ static void schedule_destroy_queue(uint32_t queue_index)
{
int grp = sched->queue[queue_index].grp;
int prio = sched->queue[queue_index].prio;
- uint8_t spread = spread_index(queue_index);
-
- odp_spinlock_lock(&sched->mask_lock);
-
- /* Clear mask bit when last queue is removed*/
- sched->prio_q_count[grp][prio][spread]--;
-
- if (sched->prio_q_count[grp][prio][spread] == 0)
- sched->prio_q_mask[grp][prio] &= (uint8_t)(~(1 << spread));
+ int spread = sched->queue[queue_index].spread;
- odp_spinlock_unlock(&sched->mask_lock);
+ dec_queue_count(grp, prio, spread);
sched->queue[queue_index].grp = 0;
sched->queue[queue_index].prio = 0;
@@ -860,7 +942,7 @@ static void schedule_group_clear(odp_schedule_group_t group)
static int schedule_config(const odp_schedule_config_t *config)
{
- odp_spinlock_lock(&sched->grp_lock);
+ odp_ticketlock_lock(&sched->grp_lock);
sched->config_if.group_enable.all = config->sched_group.all;
sched->config_if.group_enable.control = config->sched_group.control;
@@ -876,11 +958,51 @@ static int schedule_config(const odp_schedule_config_t *config)
if (!config->sched_group.control)
schedule_group_clear(ODP_SCHED_GROUP_CONTROL);
- odp_spinlock_unlock(&sched->grp_lock);
+ odp_ticketlock_unlock(&sched->grp_lock);
return 0;
}
+/* Spread load after adding 'num' queues */
+static inline uint32_t spread_load(int grp, int prio, int spr, int num)
+{
+ uint32_t num_q, num_thr;
+
+ num_q = sched->prio_q_count[grp][prio][spr];
+ num_thr = sched->sched_grp[grp].spread_thrs[spr];
+
+ if (num_thr == 0)
+ return UINT32_MAX;
+
+ return ((num_q + num) * QUEUE_LOAD) / num_thr;
+}
+
+static inline int balance_spread(int grp, int prio, int cur_spr)
+{
+ int spr;
+ uint64_t cur_load, min_load, load;
+ int num_spread = sched->config.num_spread;
+ int new_spr = cur_spr;
+
+ cur_load = spread_load(grp, prio, cur_spr, 0);
+ min_load = cur_load;
+
+ for (spr = 0; spr < num_spread; spr++) {
+ if (spr == cur_spr)
+ continue;
+
+ load = spread_load(grp, prio, spr, 1);
+
+ /* Move queue if improvement is larger than marginal */
+ if ((load + QUEUE_LOAD_MARGIN) < min_load) {
+ new_spr = spr;
+ min_load = load;
+ }
+ }
+
+ return new_spr;
+}
+
static inline int copy_from_stash(odp_event_t out_ev[], unsigned int max)
{
int i = 0;
@@ -986,10 +1108,10 @@ static inline int poll_pktin(uint32_t qi, int direct_recv,
/* Pktio stopped or closed. Call stop_finalize when we have stopped
* polling all pktin queues of the pktio. */
if (odp_unlikely(num < 0)) {
- odp_spinlock_lock(&sched->pktio_lock);
+ odp_ticketlock_lock(&sched->pktio_lock);
sched->pktio[pktio_index].num_pktin--;
num_pktin = sched->pktio[pktio_index].num_pktin;
- odp_spinlock_unlock(&sched->pktio_lock);
+ odp_ticketlock_unlock(&sched->pktio_lock);
_odp_sched_queue_set_status(qi, QUEUE_STATUS_NOTSCHED);
@@ -1021,9 +1143,9 @@ static inline int poll_pktin(uint32_t qi, int direct_recv,
}
static inline int do_schedule_grp(odp_queue_t *out_queue, odp_event_t out_ev[],
- unsigned int max_num, int grp, int first_spr)
+ unsigned int max_num, int grp, int first_spr, int balance)
{
- int prio, spr, i, ret;
+ int prio, spr, new_spr, i, ret;
uint32_t qi;
uint16_t burst_def;
int num_spread = sched->config.num_spread;
@@ -1091,6 +1213,19 @@ static inline int do_schedule_grp(odp_queue_t *out_queue, odp_event_t out_ev[],
pktin = queue_is_pktin(qi);
+ /* Update queue spread before dequeue. Dequeue changes status of an empty
+ * queue, which enables a following enqueue operation to insert the queue
+ * back into scheduling (with new spread). */
+ if (odp_unlikely(balance)) {
+ new_spr = balance_spread(grp, prio, spr);
+
+ if (new_spr != spr) {
+ sched->queue[qi].spread = new_spr;
+ ring = &sched->prio_q[grp][prio][new_spr].ring;
+ update_queue_count(grp, prio, spr, new_spr);
+ }
+ }
+
num = _odp_sched_queue_deq(qi, ev_tbl, max_deq, !pktin);
if (odp_unlikely(num < 0)) {
@@ -1186,8 +1321,10 @@ static inline int do_schedule(odp_queue_t *out_queue, odp_event_t out_ev[],
unsigned int max_num)
{
int i, num_grp, ret, spr, grp_id;
+ uint32_t sched_round;
uint16_t spread_round, grp_round;
uint32_t epoch;
+ int balance = 0;
if (sched_local.stash.num_ev) {
ret = copy_from_stash(out_ev, max_num);
@@ -1207,10 +1344,28 @@ static inline int do_schedule(odp_queue_t *out_queue, odp_event_t out_ev[],
if (odp_unlikely(sched_local.pause))
return 0;
+ sched_round = sched_local.sched_round++;
+ grp_round = sched_round & (GRP_WEIGHT_TBL_SIZE - 1);
+
/* Each thread prefers a priority queue. Spread weight table avoids
* starvation of other priority queues on low thread counts. */
spread_round = sched_local.spread_round;
- grp_round = (sched_local.grp_round++) & (GRP_WEIGHT_TBL_SIZE - 1);
+
+ if (odp_likely(sched->load_balance)) {
+ /* Spread balance is checked max_spread times in every BALANCE_ROUNDS_M1 + 1
+ * scheduling rounds. */
+ if (odp_unlikely(sched_local.balance_on)) {
+ balance = 1;
+
+ if (sched_local.balance_start == spread_round)
+ sched_local.balance_on = 0;
+ }
+
+ if (odp_unlikely((sched_round & BALANCE_ROUNDS_M1) == 0)) {
+ sched_local.balance_start = spread_round;
+ sched_local.balance_on = 1;
+ }
+ }
if (odp_unlikely(spread_round + 1 >= sched->max_spread))
sched_local.spread_round = 0;
@@ -1234,7 +1389,7 @@ static inline int do_schedule(odp_queue_t *out_queue, odp_event_t out_ev[],
int grp;
grp = sched_local.grp[grp_id];
- ret = do_schedule_grp(out_queue, out_ev, max_num, grp, spr);
+ ret = do_schedule_grp(out_queue, out_ev, max_num, grp, spr, balance);
if (odp_likely(ret))
return ret;
@@ -1424,7 +1579,7 @@ static odp_schedule_group_t schedule_group_create(const char *name,
odp_schedule_group_t group = ODP_SCHED_GROUP_INVALID;
int i;
- odp_spinlock_lock(&sched->grp_lock);
+ odp_ticketlock_lock(&sched->grp_lock);
for (i = SCHED_GROUP_NAMED; i < NUM_SCHED_GRPS; i++) {
if (!sched->sched_grp[i].allocated) {
@@ -1445,7 +1600,7 @@ static odp_schedule_group_t schedule_group_create(const char *name,
}
}
- odp_spinlock_unlock(&sched->grp_lock);
+ odp_ticketlock_unlock(&sched->grp_lock);
return group;
}
@@ -1456,7 +1611,7 @@ static int schedule_group_destroy(odp_schedule_group_t group)
odp_thrmask_zero(&zero);
- odp_spinlock_lock(&sched->grp_lock);
+ odp_ticketlock_lock(&sched->grp_lock);
if (group < NUM_SCHED_GRPS && group >= SCHED_GROUP_NAMED &&
sched->sched_grp[group].allocated) {
@@ -1469,7 +1624,7 @@ static int schedule_group_destroy(odp_schedule_group_t group)
ret = -1;
}
- odp_spinlock_unlock(&sched->grp_lock);
+ odp_ticketlock_unlock(&sched->grp_lock);
return ret;
}
@@ -1478,7 +1633,7 @@ static odp_schedule_group_t schedule_group_lookup(const char *name)
odp_schedule_group_t group = ODP_SCHED_GROUP_INVALID;
int i;
- odp_spinlock_lock(&sched->grp_lock);
+ odp_ticketlock_lock(&sched->grp_lock);
for (i = SCHED_GROUP_NAMED; i < NUM_SCHED_GRPS; i++) {
if (strcmp(name, sched->sched_grp[i].name) == 0) {
@@ -1487,7 +1642,7 @@ static odp_schedule_group_t schedule_group_lookup(const char *name)
}
}
- odp_spinlock_unlock(&sched->grp_lock);
+ odp_ticketlock_unlock(&sched->grp_lock);
return group;
}
@@ -1521,23 +1676,23 @@ static int schedule_group_join(odp_schedule_group_t group, const odp_thrmask_t *
thr = odp_thrmask_next(mask, thr);
}
- odp_spinlock_lock(&sched->grp_lock);
+ odp_ticketlock_lock(&sched->grp_lock);
if (sched->sched_grp[group].allocated == 0) {
- odp_spinlock_unlock(&sched->grp_lock);
+ odp_ticketlock_unlock(&sched->grp_lock);
ODP_ERR("Bad group status\n");
return -1;
}
for (i = 0; i < count; i++) {
- spread = spread_index(thr_tbl[i]);
+ spread = spread_from_index(thr_tbl[i]);
sched->sched_grp[group].spread_thrs[spread]++;
}
odp_thrmask_or(&new_mask, &sched->sched_grp[group].mask, mask);
grp_update_mask(group, &new_mask);
- odp_spinlock_unlock(&sched->grp_lock);
+ odp_ticketlock_unlock(&sched->grp_lock);
return 0;
}
@@ -1573,23 +1728,23 @@ static int schedule_group_leave(odp_schedule_group_t group, const odp_thrmask_t
odp_thrmask_xor(&new_mask, mask, &sched->mask_all);
- odp_spinlock_lock(&sched->grp_lock);
+ odp_ticketlock_lock(&sched->grp_lock);
if (sched->sched_grp[group].allocated == 0) {
- odp_spinlock_unlock(&sched->grp_lock);
+ odp_ticketlock_unlock(&sched->grp_lock);
ODP_ERR("Bad group status\n");
return -1;
}
for (i = 0; i < count; i++) {
- spread = spread_index(thr_tbl[i]);
+ spread = spread_from_index(thr_tbl[i]);
sched->sched_grp[group].spread_thrs[spread]--;
}
odp_thrmask_and(&new_mask, &sched->sched_grp[group].mask, &new_mask);
grp_update_mask(group, &new_mask);
- odp_spinlock_unlock(&sched->grp_lock);
+ odp_ticketlock_unlock(&sched->grp_lock);
return 0;
}
@@ -1598,7 +1753,7 @@ static int schedule_group_thrmask(odp_schedule_group_t group,
{
int ret;
- odp_spinlock_lock(&sched->grp_lock);
+ odp_ticketlock_lock(&sched->grp_lock);
if (group < NUM_SCHED_GRPS && sched->sched_grp[group].allocated) {
*thrmask = sched->sched_grp[group].mask;
@@ -1607,7 +1762,7 @@ static int schedule_group_thrmask(odp_schedule_group_t group,
ret = -1;
}
- odp_spinlock_unlock(&sched->grp_lock);
+ odp_ticketlock_unlock(&sched->grp_lock);
return ret;
}
@@ -1616,7 +1771,7 @@ static int schedule_group_info(odp_schedule_group_t group,
{
int ret;
- odp_spinlock_lock(&sched->grp_lock);
+ odp_ticketlock_lock(&sched->grp_lock);
if (group < NUM_SCHED_GRPS && sched->sched_grp[group].allocated) {
info->name = sched->sched_grp[group].name;
@@ -1626,7 +1781,7 @@ static int schedule_group_info(odp_schedule_group_t group,
ret = -1;
}
- odp_spinlock_unlock(&sched->grp_lock);
+ odp_ticketlock_unlock(&sched->grp_lock);
return ret;
}
@@ -1634,7 +1789,7 @@ static int schedule_thr_add(odp_schedule_group_t group, int thr)
{
odp_thrmask_t mask;
odp_thrmask_t new_mask;
- uint8_t spread = spread_index(thr);
+ uint8_t spread = spread_from_index(thr);
if (group < 0 || group >= SCHED_GROUP_NAMED)
return -1;
@@ -1642,10 +1797,10 @@ static int schedule_thr_add(odp_schedule_group_t group, int thr)
odp_thrmask_zero(&mask);
odp_thrmask_set(&mask, thr);
- odp_spinlock_lock(&sched->grp_lock);
+ odp_ticketlock_lock(&sched->grp_lock);
if (!sched->sched_grp[group].allocated) {
- odp_spinlock_unlock(&sched->grp_lock);
+ odp_ticketlock_unlock(&sched->grp_lock);
return 0;
}
@@ -1653,7 +1808,7 @@ static int schedule_thr_add(odp_schedule_group_t group, int thr)
sched->sched_grp[group].spread_thrs[spread]++;
grp_update_mask(group, &new_mask);
- odp_spinlock_unlock(&sched->grp_lock);
+ odp_ticketlock_unlock(&sched->grp_lock);
return 0;
}
@@ -1662,7 +1817,7 @@ static int schedule_thr_rem(odp_schedule_group_t group, int thr)
{
odp_thrmask_t mask;
odp_thrmask_t new_mask;
- uint8_t spread = spread_index(thr);
+ uint8_t spread = spread_from_index(thr);
if (group < 0 || group >= SCHED_GROUP_NAMED)
return -1;
@@ -1671,10 +1826,10 @@ static int schedule_thr_rem(odp_schedule_group_t group, int thr)
odp_thrmask_set(&mask, thr);
odp_thrmask_xor(&new_mask, &mask, &sched->mask_all);
- odp_spinlock_lock(&sched->grp_lock);
+ odp_ticketlock_lock(&sched->grp_lock);
if (!sched->sched_grp[group].allocated) {
- odp_spinlock_unlock(&sched->grp_lock);
+ odp_ticketlock_unlock(&sched->grp_lock);
return 0;
}
@@ -1682,7 +1837,7 @@ static int schedule_thr_rem(odp_schedule_group_t group, int thr)
sched->sched_grp[group].spread_thrs[spread]--;
grp_update_mask(group, &new_mask);
- odp_spinlock_unlock(&sched->grp_lock);
+ odp_ticketlock_unlock(&sched->grp_lock);
return 0;
}
diff --git a/platform/linux-generic/odp_fractional.c b/platform/linux-generic/odp_std.c
index c98f3a4b2..9db5a35b3 100644
--- a/platform/linux-generic/odp_fractional.c
+++ b/platform/linux-generic/odp_std.c
@@ -4,7 +4,7 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <odp/api/std_types.h>
+#include <odp/api/std.h>
double odp_fract_u64_to_dbl(const odp_fract_u64_t *fract)
{
diff --git a/platform/linux-generic/odp_std_clib_api.c b/platform/linux-generic/odp_std_api.c
index a0ba25ae8..0bcd68de2 100644
--- a/platform/linux-generic/odp_std_clib_api.c
+++ b/platform/linux-generic/odp_std_api.c
@@ -4,8 +4,8 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <odp/api/std_clib.h>
+#include <odp/api/std.h>
/* Include non-inlined versions of API functions */
#define _ODP_NO_INLINE
-#include <odp/api/plat/std_clib_inlines.h>
+#include <odp/api/plat/std_inlines.h>
diff --git a/platform/linux-generic/odp_traffic_mngr.c b/platform/linux-generic/odp_traffic_mngr.c
index cd7a9ecd9..de01af96c 100644
--- a/platform/linux-generic/odp_traffic_mngr.c
+++ b/platform/linux-generic/odp_traffic_mngr.c
@@ -2024,14 +2024,18 @@ static int tm_enqueue(tm_system_t *tm_system,
drop = random_early_discard(tm_system, tm_queue_obj,
initial_tm_wred_node, pkt_color);
if (drop)
- return -1;
+ return -2;
}
work_item.queue_num = tm_queue_obj->queue_num;
work_item.pkt = pkt;
- _odp_sched_fn->order_lock();
+ if (tm_queue_obj->ordered_enqueue)
+ _odp_sched_fn->order_lock();
+
rc = input_work_queue_append(tm_system, &work_item);
- _odp_sched_fn->order_unlock();
+
+ if (tm_queue_obj->ordered_enqueue)
+ _odp_sched_fn->order_unlock();
if (rc < 0) {
ODP_DBG("%s work queue full\n", __func__);
@@ -2230,6 +2234,7 @@ static void tm_send_pkt(tm_system_t *tm_system, uint32_t max_sends)
odp_packet_t odp_pkt;
pkt_desc_t *pkt_desc;
uint32_t cnt;
+ int ret;
for (cnt = 1; cnt <= max_sends; cnt++) {
pkt_desc = &tm_system->egress_pkt_desc;
@@ -2248,8 +2253,16 @@ static void tm_send_pkt(tm_system_t *tm_system, uint32_t max_sends)
tm_system->egress_pkt_desc = EMPTY_PKT_DESC;
if (tm_system->egress.egress_kind == ODP_TM_EGRESS_PKT_IO) {
- if (odp_pktout_send(tm_system->pktout, &odp_pkt, 1) != 1)
+ ret = odp_pktout_send(tm_system->pktout, &odp_pkt, 1);
+ if (odp_unlikely(ret != 1)) {
odp_packet_free(odp_pkt);
+ if (odp_unlikely(ret < 0))
+ odp_atomic_inc_u64(&tm_queue_obj->stats.errors);
+ else
+ odp_atomic_inc_u64(&tm_queue_obj->stats.discards);
+ } else {
+ odp_atomic_inc_u64(&tm_queue_obj->stats.packets);
+ }
} else if (tm_system->egress.egress_kind == ODP_TM_EGRESS_FN) {
tm_system->egress.egress_fcn(odp_pkt);
} else {
@@ -2535,6 +2548,8 @@ odp_bool_t odp_tm_is_idle(odp_tm_t odp_tm)
void odp_tm_requirements_init(odp_tm_requirements_t *requirements)
{
memset(requirements, 0, sizeof(odp_tm_requirements_t));
+
+ requirements->pkt_prio_mode = ODP_TM_PKT_PRIO_MODE_PRESERVE;
}
void odp_tm_egress_init(odp_tm_egress_t *egress)
@@ -2542,8 +2557,8 @@ void odp_tm_egress_init(odp_tm_egress_t *egress)
memset(egress, 0, sizeof(odp_tm_egress_t));
}
-int odp_tm_capabilities(odp_tm_capabilities_t capabilities[] ODP_UNUSED,
- uint32_t capabilities_size)
+static int tm_capabilities(odp_tm_capabilities_t capabilities[],
+ uint32_t capabilities_size)
{
odp_tm_level_capabilities_t *per_level_cap;
odp_tm_capabilities_t *cap_ptr;
@@ -2565,6 +2580,11 @@ int odp_tm_capabilities(odp_tm_capabilities_t capabilities[] ODP_UNUSED,
cap_ptr->vlan_marking_supported = true;
cap_ptr->ecn_marking_supported = true;
cap_ptr->drop_prec_marking_supported = true;
+ cap_ptr->tm_queue_threshold = true;
+ cap_ptr->tm_queue_query_flags = (ODP_TM_QUERY_PKT_CNT |
+ ODP_TM_QUERY_BYTE_CNT |
+ ODP_TM_QUERY_THRESHOLDS);
+ cap_ptr->max_schedulers_per_node = ODP_TM_MAX_PRIORITIES;
cap_ptr->dynamic_topology_update = true;
cap_ptr->dynamic_shaper_update = true;
@@ -2572,6 +2592,9 @@ int odp_tm_capabilities(odp_tm_capabilities_t capabilities[] ODP_UNUSED,
cap_ptr->dynamic_wred_update = true;
cap_ptr->dynamic_threshold_update = true;
+ /* We only support pkt priority mode preserve */
+ cap_ptr->pkt_prio_modes[ODP_TM_PKT_PRIO_MODE_PRESERVE] = true;
+
for (color = 0; color < ODP_NUM_PACKET_COLORS; color++)
cap_ptr->marking_colors_supported[color] = true;
@@ -2589,11 +2612,48 @@ int odp_tm_capabilities(odp_tm_capabilities_t capabilities[] ODP_UNUSED,
per_level_cap->tm_node_dual_slope_supported = true;
per_level_cap->fair_queuing_supported = true;
per_level_cap->weights_supported = true;
+ per_level_cap->tm_node_threshold = true;
}
+ cap_ptr->queue_stats.counter.discards = 1;
+ cap_ptr->queue_stats.counter.errors = 1;
+ cap_ptr->queue_stats.counter.packets = 1;
+
return 1;
}
+int ODP_DEPRECATE(odp_tm_capabilities)(odp_tm_capabilities_t capabilities[],
+ uint32_t capabilities_size)
+{
+ return tm_capabilities(capabilities, capabilities_size);
+}
+
+int odp_tm_egress_capabilities(odp_tm_capabilities_t *capabilities,
+ const odp_tm_egress_t *egress)
+{
+ pktio_entry_t *entry;
+ int ret;
+
+ memset(capabilities, 0, sizeof(odp_tm_capabilities_t));
+ if (egress->egress_kind == ODP_TM_EGRESS_PKT_IO) {
+ entry = get_pktio_entry(egress->pktio);
+ if (entry == NULL) {
+ ODP_DBG("pktio entry %" PRIuPTR " does not exist\n",
+ (uintptr_t)egress->pktio);
+ return -1;
+ }
+
+ /* Report not capable if pktout mode is not TM */
+ if (entry->s.param.out_mode != ODP_PKTOUT_MODE_TM)
+ return 0;
+ }
+
+ ret = tm_capabilities(capabilities, 1);
+ if (ret <= 0)
+ return -1;
+ return 0;
+}
+
static void tm_system_capabilities_set(odp_tm_capabilities_t *cap_ptr,
odp_tm_requirements_t *req_ptr)
{
@@ -2601,10 +2661,11 @@ static void tm_system_capabilities_set(odp_tm_capabilities_t *cap_ptr,
odp_tm_level_capabilities_t *per_level_cap;
odp_packet_color_t color;
odp_bool_t shaper_supported, wred_supported;
- odp_bool_t dual_slope;
+ odp_bool_t dual_slope, threshold;
uint32_t num_levels, level_idx, max_nodes;
uint32_t max_queues, max_fanin;
- uint8_t max_priority, min_weight, max_weight;
+ uint32_t min_weight, max_weight;
+ uint8_t max_priority;
num_levels = MAX(MIN(req_ptr->num_levels, ODP_TM_MAX_LEVELS), 1);
memset(cap_ptr, 0, sizeof(odp_tm_capabilities_t));
@@ -2614,6 +2675,7 @@ static void tm_system_capabilities_set(odp_tm_capabilities_t *cap_ptr,
shaper_supported = req_ptr->tm_queue_shaper_needed;
wred_supported = req_ptr->tm_queue_wred_needed;
dual_slope = req_ptr->tm_queue_dual_slope_needed;
+ threshold = req_ptr->tm_queue_threshold_needed;
cap_ptr->max_tm_queues = max_queues;
cap_ptr->max_levels = num_levels;
@@ -2624,6 +2686,11 @@ static void tm_system_capabilities_set(odp_tm_capabilities_t *cap_ptr,
cap_ptr->ecn_marking_supported = req_ptr->ecn_marking_needed;
cap_ptr->drop_prec_marking_supported =
req_ptr->drop_prec_marking_needed;
+ cap_ptr->tm_queue_threshold = threshold;
+ cap_ptr->tm_queue_query_flags = (ODP_TM_QUERY_PKT_CNT |
+ ODP_TM_QUERY_BYTE_CNT |
+ ODP_TM_QUERY_THRESHOLDS);
+ cap_ptr->max_schedulers_per_node = ODP_TM_MAX_PRIORITIES;
cap_ptr->dynamic_topology_update = true;
cap_ptr->dynamic_shaper_update = true;
@@ -2631,6 +2698,8 @@ static void tm_system_capabilities_set(odp_tm_capabilities_t *cap_ptr,
cap_ptr->dynamic_wred_update = true;
cap_ptr->dynamic_threshold_update = true;
+ cap_ptr->pkt_prio_modes[ODP_TM_PKT_PRIO_MODE_PRESERVE] = true;
+
for (color = 0; color < ODP_NUM_PACKET_COLORS; color++)
cap_ptr->marking_colors_supported[color] =
req_ptr->marking_colors_needed[color];
@@ -2652,6 +2721,7 @@ static void tm_system_capabilities_set(odp_tm_capabilities_t *cap_ptr,
shaper_supported = per_level_req->tm_node_shaper_needed;
wred_supported = per_level_req->tm_node_wred_needed;
dual_slope = per_level_req->tm_node_dual_slope_needed;
+ threshold = per_level_req->tm_node_threshold_needed;
per_level_cap->max_num_tm_nodes = max_nodes;
per_level_cap->max_fanin_per_node = max_fanin;
@@ -2664,7 +2734,12 @@ static void tm_system_capabilities_set(odp_tm_capabilities_t *cap_ptr,
per_level_cap->tm_node_dual_slope_supported = dual_slope;
per_level_cap->fair_queuing_supported = true;
per_level_cap->weights_supported = true;
+ per_level_cap->tm_node_threshold = threshold;
}
+
+ cap_ptr->queue_stats.counter.discards = 1;
+ cap_ptr->queue_stats.counter.errors = 1;
+ cap_ptr->queue_stats.counter.packets = 1;
}
static int affinitize_main_thread(void)
@@ -2932,6 +3007,11 @@ odp_tm_t odp_tm_create(const char *name,
return ODP_TM_INVALID;
}
+ /* We only support global pkt priority mode */
+ if (requirements->pkt_prio_mode != ODP_TM_PKT_PRIO_MODE_PRESERVE) {
+ ODP_ERR("Unsupported Packet priority mode\n");
+ return ODP_TM_INVALID;
+ }
odp_ticketlock_lock(&tm_glb->create_lock);
/* If we are using pktio output (usual case) get the first associated
@@ -3960,6 +4040,8 @@ int odp_tm_node_context_set(odp_tm_node_t tm_node, void *user_context)
void odp_tm_queue_params_init(odp_tm_queue_params_t *params)
{
memset(params, 0, sizeof(odp_tm_queue_params_t));
+
+ params->ordered_enqueue = true;
}
odp_tm_queue_t odp_tm_queue_create(odp_tm_t odp_tm,
@@ -3996,11 +4078,15 @@ odp_tm_queue_t odp_tm_queue_create(odp_tm_t odp_tm,
memset(queue_obj, 0, sizeof(tm_queue_obj_t));
queue_obj->user_context = params->user_context;
queue_obj->priority = params->priority;
+ queue_obj->ordered_enqueue = params->ordered_enqueue;
queue_obj->tm_idx = tm_system->tm_idx;
queue_obj->queue_num = (uint32_t)_odp_int_pkt_queue;
queue_obj->_odp_int_pkt_queue = _odp_int_pkt_queue;
queue_obj->pkt = ODP_PACKET_INVALID;
odp_ticketlock_init(&queue_obj->tm_wred_node.tm_wred_node_lock);
+ odp_atomic_init_u64(&queue_obj->stats.discards, 0);
+ odp_atomic_init_u64(&queue_obj->stats.errors, 0);
+ odp_atomic_init_u64(&queue_obj->stats.packets, 0);
queue = odp_queue_create(NULL, NULL);
if (queue == ODP_QUEUE_INVALID) {
@@ -4438,6 +4524,40 @@ int odp_tm_enq_with_cnt(odp_tm_queue_t tm_queue, odp_packet_t pkt)
return pkt_cnt;
}
+int odp_tm_enq_multi(odp_tm_queue_t tm_queue, const odp_packet_t packets[],
+ int num)
+{
+ tm_queue_obj_t *tm_queue_obj;
+ tm_system_t *tm_system;
+ int i, rc;
+
+ tm_queue_obj = GET_TM_QUEUE_OBJ(tm_queue);
+ if (!tm_queue_obj)
+ return -1;
+
+ tm_system = &tm_glb->system[tm_queue_obj->tm_idx];
+ if (!tm_system)
+ return -1;
+
+ if (odp_atomic_load_u64(&tm_system->destroying))
+ return -1;
+
+ for (i = 0; i < num; i++) {
+ rc = tm_enqueue(tm_system, tm_queue_obj, packets[i]);
+ if (rc < 0 && rc != -2)
+ break;
+ /* For RED failure, just drop current pkt but
+ * continue with next pkts.
+ */
+ if (rc == -2) {
+ odp_packet_free(packets[i]);
+ odp_atomic_inc_u64(&tm_queue_obj->stats.discards);
+ }
+ }
+
+ return i;
+}
+
int odp_tm_node_info(odp_tm_node_t tm_node, odp_tm_node_info_t *info)
{
tm_queue_thresholds_t *threshold_params;
@@ -4754,6 +4874,23 @@ void odp_tm_stats_print(odp_tm_t odp_tm)
}
}
+int odp_tm_queue_stats(odp_tm_queue_t tm_queue, odp_tm_queue_stats_t *stats)
+{
+ tm_queue_obj_t *tm_queue_obj = GET_TM_QUEUE_OBJ(tm_queue);
+
+ if (!tm_queue_obj) {
+ ODP_ERR("Invalid TM queue handle\n");
+ return -1;
+ }
+
+ memset(stats, 0, sizeof(odp_tm_queue_stats_t));
+ stats->discards = odp_atomic_load_u64(&tm_queue_obj->stats.discards);
+ stats->errors = odp_atomic_load_u64(&tm_queue_obj->stats.errors);
+ stats->packets = odp_atomic_load_u64(&tm_queue_obj->stats.packets);
+
+ return 0;
+}
+
uint64_t odp_tm_to_u64(odp_tm_t hdl)
{
return _odp_pri(hdl);
diff --git a/platform/linux-generic/pktio/dpdk.c b/platform/linux-generic/pktio/dpdk.c
index 4841402aa..a98324ee4 100644
--- a/platform/linux-generic/pktio/dpdk.c
+++ b/platform/linux-generic/pktio/dpdk.c
@@ -116,6 +116,7 @@ ODP_STATIC_ASSERT((DPDK_NB_MBUF % DPDK_MEMPOOL_CACHE_SIZE == 0) &&
typedef struct {
int num_rx_desc;
int num_tx_desc;
+ uint8_t multicast_en;
uint8_t rx_drop_en;
uint8_t set_flow_hash;
} dpdk_opt_t;
@@ -227,8 +228,13 @@ static int init_options(pktio_entry_t *pktio_entry,
return -1;
opt->set_flow_hash = !!val;
+ if (!lookup_opt("multicast_en", NULL, &val))
+ return -1;
+ opt->multicast_en = !!val;
+
ODP_DBG("DPDK interface (%s): %" PRIu16 "\n", dev_info->driver_name,
pkt_priv(pktio_entry)->port_id);
+ ODP_DBG(" multicast_en: %d\n", opt->multicast_en);
ODP_DBG(" num_rx_desc: %d\n", opt->num_rx_desc);
ODP_DBG(" num_tx_desc: %d\n", opt->num_tx_desc);
ODP_DBG(" rx_drop_en: %d\n", opt->rx_drop_en);
@@ -1669,8 +1675,9 @@ static int dpdk_open(odp_pktio_t id ODP_UNUSED,
char pool_name[RTE_MEMPOOL_NAMESIZE];
uint16_t data_room;
uint32_t mtu;
- int i;
+ int i, ret;
pool_t *pool_entry;
+ uint16_t port_id;
if (disable_pktio)
return -1;
@@ -1679,8 +1686,15 @@ static int dpdk_open(odp_pktio_t id ODP_UNUSED,
return -1;
pool_entry = pool_entry_from_hdl(pool);
- if (!dpdk_netdev_is_valid(netdev)) {
- ODP_ERR("Invalid dpdk netdev: %s\n", netdev);
+ /* Init pktio entry */
+ memset(pkt_dpdk, 0, sizeof(*pkt_dpdk));
+
+ if (!rte_eth_dev_get_port_by_name(netdev, &port_id))
+ pkt_dpdk->port_id = port_id;
+ else if (dpdk_netdev_is_valid(netdev))
+ pkt_dpdk->port_id = atoi(netdev);
+ else {
+ ODP_ERR("Invalid DPDK interface name: %s\n", netdev);
return -1;
}
@@ -1691,11 +1705,7 @@ static int dpdk_open(odp_pktio_t id ODP_UNUSED,
odp_global_rw->dpdk_initialized = 1;
}
- /* Init pktio entry */
- memset(pkt_dpdk, 0, sizeof(*pkt_dpdk));
-
pkt_dpdk->pool = pool;
- pkt_dpdk->port_id = atoi(netdev);
/* rte_eth_dev_count() was removed in v18.05 */
#if RTE_VERSION < RTE_VERSION_NUM(18, 5, 0, 0)
@@ -1727,6 +1737,23 @@ static int dpdk_open(odp_pktio_t id ODP_UNUSED,
promisc_mode_check(pkt_dpdk);
+#if RTE_VERSION < RTE_VERSION_NUM(19, 11, 0, 0)
+ ret = 0;
+ if (pkt_dpdk->opt.multicast_en)
+ rte_eth_allmulticast_enable(pkt_dpdk->port_id);
+ else
+ rte_eth_allmulticast_disable(pkt_dpdk->port_id);
+#else
+ if (pkt_dpdk->opt.multicast_en)
+ ret = rte_eth_allmulticast_enable(pkt_dpdk->port_id);
+ else
+ ret = rte_eth_allmulticast_disable(pkt_dpdk->port_id);
+#endif
+
+ /* Not supported by all PMDs, so ignore the return value */
+ if (ret)
+ ODP_DBG("Configuring multicast reception not supported by the PMD\n");
+
/* Drivers requiring minimum burst size. Supports also *_vf versions
* of the drivers. */
if (!strncmp(dev_info.driver_name, IXGBE_DRV_NAME,
diff --git a/platform/linux-generic/test/inline-timer.conf b/platform/linux-generic/test/inline-timer.conf
index 93195f5a8..90709d86d 100644
--- a/platform/linux-generic/test/inline-timer.conf
+++ b/platform/linux-generic/test/inline-timer.conf
@@ -1,6 +1,6 @@
# Mandatory fields
odp_implementation = "linux-generic"
-config_file_version = "0.1.16"
+config_file_version = "0.1.18"
timer: {
# Enable inline timer implementation
diff --git a/platform/linux-generic/test/packet_align.conf b/platform/linux-generic/test/packet_align.conf
index 58a73f2df..f9b39abf6 100644
--- a/platform/linux-generic/test/packet_align.conf
+++ b/platform/linux-generic/test/packet_align.conf
@@ -1,6 +1,6 @@
# Mandatory fields
odp_implementation = "linux-generic"
-config_file_version = "0.1.16"
+config_file_version = "0.1.18"
pool: {
pkt: {
diff --git a/platform/linux-generic/test/process-mode.conf b/platform/linux-generic/test/process-mode.conf
index a6e6080d2..a4b5d3f39 100644
--- a/platform/linux-generic/test/process-mode.conf
+++ b/platform/linux-generic/test/process-mode.conf
@@ -1,6 +1,6 @@
# Mandatory fields
odp_implementation = "linux-generic"
-config_file_version = "0.1.16"
+config_file_version = "0.1.18"
# Shared memory options
shm: {
diff --git a/platform/linux-generic/test/sched-basic.conf b/platform/linux-generic/test/sched-basic.conf
index 79537b454..4ef0ab044 100644
--- a/platform/linux-generic/test/sched-basic.conf
+++ b/platform/linux-generic/test/sched-basic.conf
@@ -1,8 +1,9 @@
# Mandatory fields
odp_implementation = "linux-generic"
-config_file_version = "0.1.16"
+config_file_version = "0.1.18"
+# Test scheduler with an odd spread value and without dynamic load balance
sched_basic: {
- # Test scheduler with an odd spread value
prio_spread = 3
+ load_balance = 0
}
diff --git a/test/common/Makefile.am b/test/common/Makefile.am
index 37582d55a..745b1d16a 100644
--- a/test/common/Makefile.am
+++ b/test/common/Makefile.am
@@ -14,7 +14,8 @@ libthrmask_common_la_CFLAGS = $(AM_CFLAGS) -DTEST_THRMASK
endif
-noinst_HEADERS = test_packet_custom.h \
+noinst_HEADERS = test_common_macros.h \
+ test_packet_custom.h \
test_packet_ipsec.h \
test_packet_ipv4.h \
test_packet_ipv4_with_crc.h \
diff --git a/test/common/test_common_macros.h b/test/common/test_common_macros.h
new file mode 100644
index 000000000..344ac8159
--- /dev/null
+++ b/test/common/test_common_macros.h
@@ -0,0 +1,17 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TEST_COMMON_MACROS_H_
+#define TEST_COMMON_MACROS_H_
+
+/*
+ * Common macros for validation tests
+ */
+
+/* Check if 'x' is a power of two value */
+#define TEST_CHECK_POW2(x) ((((x) - 1) & (x)) == 0)
+
+#endif
diff --git a/test/m4/configure.m4 b/test/m4/configure.m4
index 67db257b7..96a48832c 100644
--- a/test/m4/configure.m4
+++ b/test/m4/configure.m4
@@ -40,7 +40,7 @@ AC_CONFIG_FILES([test/common/Makefile
test/validation/api/scheduler/Makefile
test/validation/api/shmem/Makefile
test/validation/api/stash/Makefile
- test/validation/api/std_clib/Makefile
+ test/validation/api/std/Makefile
test/validation/api/system/Makefile
test/validation/api/thread/Makefile
test/validation/api/time/Makefile
diff --git a/test/performance/odp_sched_perf.c b/test/performance/odp_sched_perf.c
index 4ec4f4352..da818931c 100644
--- a/test/performance/odp_sched_perf.c
+++ b/test/performance/odp_sched_perf.c
@@ -30,6 +30,8 @@
typedef struct test_options_t {
uint32_t num_cpu;
uint32_t num_queue;
+ uint32_t num_low;
+ uint32_t num_high;
uint32_t num_dummy;
uint32_t num_event;
uint32_t num_sched;
@@ -48,6 +50,7 @@ typedef struct test_options_t {
uint32_t ctx_rd_words;
uint32_t ctx_rw_words;
uint64_t wait_ns;
+ int verbose;
} test_options_t;
@@ -94,6 +97,10 @@ static void print_usage(void)
"\n"
" -c, --num_cpu Number of CPUs (worker threads). 0: all available CPUs. Default: 1.\n"
" -q, --num_queue Number of queues. Default: 1.\n"
+ " -L, --num_low Number of lowest priority queues out of '--num_queue' queues. Rest of\n"
+ " the queues are default (or highest) priority. Default: 0.\n"
+ " -H, --num_high Number of highest priority queues out of '--num_queue' queues. Rest of\n"
+ " the queues are default (or lowest) priority. Default: 0.\n"
" -d, --num_dummy Number of empty queues. Default: 0.\n"
" -e, --num_event Number of events per queue. Default: 100.\n"
" -s, --num_sched Number of events to schedule per thread. Default: 100 000.\n"
@@ -111,6 +118,7 @@ static void print_usage(void)
" -l, --ctx_rw_words Number of queue context words (uint64_t) to modify on every event. Default: 0.\n"
" -n, --rd_words Number of event data words (uint64_t) to read before enqueueing it. Default: 0.\n"
" -m, --rw_words Number of event data words (uint64_t) to modify before enqueueing it. Default: 0.\n"
+ " -v, --verbose Verbose output.\n"
" -h, --help This help\n"
"\n");
}
@@ -124,6 +132,8 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
static const struct option longopts[] = {
{"num_cpu", required_argument, NULL, 'c'},
{"num_queue", required_argument, NULL, 'q'},
+ {"num_low", required_argument, NULL, 'L'},
+ {"num_high", required_argument, NULL, 'H'},
{"num_dummy", required_argument, NULL, 'd'},
{"num_event", required_argument, NULL, 'e'},
{"num_sched", required_argument, NULL, 's'},
@@ -137,14 +147,17 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
{"ctx_rw_words", required_argument, NULL, 'l'},
{"rd_words", required_argument, NULL, 'n'},
{"rw_words", required_argument, NULL, 'm'},
+ {"verbose", no_argument, NULL, 'v'},
{"help", no_argument, NULL, 'h'},
{NULL, 0, NULL, 0}
};
- static const char *shortopts = "+c:q:d:e:s:g:j:b:t:f:w:k:l:n:m:h";
+ static const char *shortopts = "+c:q:L:H:d:e:s:g:j:b:t:f:w:k:l:n:m:vh";
test_options->num_cpu = 1;
test_options->num_queue = 1;
+ test_options->num_low = 0;
+ test_options->num_high = 0;
test_options->num_dummy = 0;
test_options->num_event = 100;
test_options->num_sched = 100000;
@@ -158,6 +171,7 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
test_options->rd_words = 0;
test_options->rw_words = 0;
test_options->wait_ns = 0;
+ test_options->verbose = 0;
while (1) {
opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
@@ -172,6 +186,12 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
case 'q':
test_options->num_queue = atoi(optarg);
break;
+ case 'L':
+ test_options->num_low = atoi(optarg);
+ break;
+ case 'H':
+ test_options->num_high = atoi(optarg);
+ break;
case 'd':
test_options->num_dummy = atoi(optarg);
break;
@@ -211,6 +231,9 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
case 'w':
test_options->wait_ns = atoll(optarg);
break;
+ case 'v':
+ test_options->verbose = 1;
+ break;
case 'h':
/* fall through */
default:
@@ -224,8 +247,13 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
test_options->rw_words;
if ((test_options->num_queue + test_options->num_dummy) > MAX_QUEUES) {
- printf("Error: Too many queues. Max supported %i.\n",
- MAX_QUEUES);
+ ODPH_ERR("Too many queues. Max supported %i.\n", MAX_QUEUES);
+ ret = -1;
+ }
+
+ if ((test_options->num_low + test_options->num_high) > test_options->num_queue) {
+ ODPH_ERR("Number of low/high prio %u/%u exceed number of queues %u.\n",
+ test_options->num_low, test_options->num_high, test_options->num_queue);
ret = -1;
}
@@ -344,12 +372,14 @@ static int create_pool(test_global_t *global)
}
printf("\nScheduler performance test\n");
- printf(" num sched %u\n", num_sched);
- printf(" num cpu %u\n", num_cpu);
- printf(" num queues %u\n", num_queue);
- printf(" num empty queues %u\n", num_dummy);
- printf(" total queues %u\n", tot_queue);
- printf(" num groups %i", num_group);
+ printf(" num sched %u\n", num_sched);
+ printf(" num cpu %u\n", num_cpu);
+ printf(" num queues %u\n", num_queue);
+ printf(" num lowest prio queues %u\n", test_options->num_low);
+ printf(" num highest prio queues %u\n", test_options->num_high);
+ printf(" num empty queues %u\n", num_dummy);
+ printf(" total queues %u\n", tot_queue);
+ printf(" num groups %i", num_group);
if (num_group == -1)
printf(" (ODP_SCHED_GROUP_WORKER)\n");
else if (num_group == 0)
@@ -357,14 +387,14 @@ static int create_pool(test_global_t *global)
else
printf("\n");
- printf(" num join %u\n", num_join);
- printf(" forward events %i\n", forward ? 1 : 0);
- printf(" wait nsec %" PRIu64 "\n", wait_ns);
- printf(" events per queue %u\n", num_event);
- printf(" queue size %u\n", queue_size);
- printf(" max burst size %u\n", max_burst);
- printf(" total events %u\n", tot_event);
- printf(" event size %u bytes", event_size);
+ printf(" num join %u\n", num_join);
+ printf(" forward events %i\n", forward ? 1 : 0);
+ printf(" wait nsec %" PRIu64 "\n", wait_ns);
+ printf(" events per queue %u\n", num_event);
+ printf(" queue size %u\n", queue_size);
+ printf(" max burst size %u\n", max_burst);
+ printf(" total events %u\n", tot_event);
+ printf(" event size %u bytes", event_size);
if (touch_data) {
printf(" (rd: %u, rw: %u)\n",
8 * test_options->rd_words,
@@ -373,7 +403,7 @@ static int create_pool(test_global_t *global)
printf("\n");
}
- printf(" context size %u bytes", ctx_size);
+ printf(" context size %u bytes", ctx_size);
if (test_options->ctx_rd_words || test_options->ctx_rw_words) {
printf(" (rd: %u, rw: %u)\n",
8 * test_options->ctx_rd_words,
@@ -464,12 +494,16 @@ static int create_queues(test_global_t *global)
odp_queue_t queue;
odp_buffer_t buf;
odp_schedule_sync_t sync;
+ odp_schedule_prio_t prio;
const char *type_str;
uint32_t i, j, first;
test_options_t *test_options = &global->test_options;
uint32_t num_event = test_options->num_event;
uint32_t queue_size = test_options->queue_size;
uint32_t tot_queue = test_options->tot_queue;
+ uint32_t num_low = test_options->num_low;
+ uint32_t num_high = test_options->num_high;
+ uint32_t num_default = test_options->num_queue - num_low - num_high;
int num_group = test_options->num_group;
int type = test_options->queue_type;
odp_pool_t pool = global->pool;
@@ -487,7 +521,7 @@ static int create_queues(test_global_t *global)
sync = ODP_SCHED_SYNC_ORDERED;
}
- printf(" queue type %s\n\n", type_str);
+ printf(" queue type %s\n\n", type_str);
if (tot_queue > global->schedule_config.num_queues) {
printf("Max queues supported %u\n",
@@ -512,7 +546,6 @@ static int create_queues(test_global_t *global)
odp_queue_param_init(&queue_param);
queue_param.type = ODP_QUEUE_TYPE_SCHED;
- queue_param.sched.prio = ODP_SCHED_PRIO_DEFAULT;
queue_param.sched.sync = sync;
queue_param.size = queue_size;
if (num_group == -1)
@@ -520,6 +553,8 @@ static int create_queues(test_global_t *global)
else
queue_param.sched.group = ODP_SCHED_GROUP_ALL;
+ first = test_options->num_dummy;
+
for (i = 0; i < tot_queue; i++) {
if (num_group > 0) {
odp_schedule_group_t group;
@@ -529,6 +564,49 @@ static int create_queues(test_global_t *global)
queue_param.sched.group = group;
}
+ /* Create low, high and default queues in a mixed order. Dummy queues are created
+ * first and with default priority. */
+ prio = ODP_SCHED_PRIO_DEFAULT;
+ if (i >= first) {
+ switch (i % 3) {
+ case 0:
+ if (num_low) {
+ num_low--;
+ prio = ODP_SCHED_PRIO_LOWEST;
+ } else if (num_high) {
+ num_high--;
+ prio = ODP_SCHED_PRIO_HIGHEST;
+ } else {
+ num_default--;
+ }
+ break;
+ case 1:
+ if (num_high) {
+ num_high--;
+ prio = ODP_SCHED_PRIO_HIGHEST;
+ } else if (num_low) {
+ num_low--;
+ prio = ODP_SCHED_PRIO_LOWEST;
+ } else {
+ num_default--;
+ }
+ break;
+ default:
+ if (num_default) {
+ num_default--;
+ } else if (num_high) {
+ num_high--;
+ prio = ODP_SCHED_PRIO_HIGHEST;
+ } else {
+ num_low--;
+ prio = ODP_SCHED_PRIO_LOWEST;
+ }
+ break;
+ }
+ }
+
+ queue_param.sched.prio = prio;
+
queue = odp_queue_create(NULL, &queue_param);
global->queue[i] = queue;
@@ -539,8 +617,6 @@ static int create_queues(test_global_t *global)
}
}
- first = test_options->num_dummy;
-
/* Store events into queues. Dummy queues are allocated from
* the beginning of the array, so that usage of those affect allocation
* of active queues. Dummy queues are left empty. */
@@ -903,6 +979,11 @@ static int test_sched(void *arg)
odp_event_t event;
uint64_t sched_wait = odp_schedule_wait_time(200 * ODP_TIME_MSEC_IN_NS);
+ /* Print schedule status at the end of the test, before any queues
+ * are emptied or destroyed. */
+ if (test_options->verbose)
+ odp_schedule_print();
+
while ((event = odp_schedule(NULL, sched_wait)) != ODP_EVENT_INVALID)
odp_event_free(event);
}
diff --git a/test/validation/api/Makefile.am b/test/validation/api/Makefile.am
index ce29781d6..591fe8a82 100644
--- a/test/validation/api/Makefile.am
+++ b/test/validation/api/Makefile.am
@@ -19,7 +19,7 @@ ODP_MODULES = atomic \
random \
scheduler \
stash \
- std_clib \
+ std \
thread \
time \
timer \
@@ -64,7 +64,7 @@ TESTS = \
scheduler/scheduler_main$(EXEEXT) \
scheduler/scheduler_no_predef_groups$(EXEEXT) \
stash/stash_main$(EXEEXT) \
- std_clib/std_clib_main$(EXEEXT) \
+ std/std_main$(EXEEXT) \
thread/thread_main$(EXEEXT) \
time/time_main$(EXEEXT) \
timer/timer_main$(EXEEXT) \
diff --git a/test/validation/api/classification/odp_classification_tests.c b/test/validation/api/classification/odp_classification_tests.c
index d9c44aea7..41b4ab02f 100644
--- a/test/validation/api/classification/odp_classification_tests.c
+++ b/test/validation/api/classification/odp_classification_tests.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2015-2018, Linaro Limited
- * Copyright (c) 2020, Nokia
+ * Copyright (c) 2020-2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -453,6 +453,80 @@ void test_pktio_default_cos(odp_bool_t enable_pktv)
odp_packet_free(pkt);
}
+static int classification_check_queue_stats(void)
+{
+ odp_cls_capability_t capa;
+
+ if (odp_cls_capability(&capa))
+ return ODP_TEST_INACTIVE;
+
+ if (capa.stats.queue.all_counters)
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
+}
+
+static void classification_test_queue_stats(odp_bool_t enable_pktv)
+{
+ odp_cls_capability_t capa;
+ odp_cls_queue_stats_t stats_start;
+ odp_cls_queue_stats_t stats_stop;
+ odp_cos_t cos;
+ odp_queue_t queue;
+
+ /* Default CoS used for test packets */
+ if (!tc.default_cos || !TEST_DEFAULT) {
+ printf("Default CoS not supported, skipping test\n");
+ return;
+ }
+
+ cos = cos_list[CLS_DEFAULT];
+ CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
+ queue = odp_cos_queue(cos);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ CU_ASSERT_FATAL(odp_cls_capability(&capa) == 0);
+
+ CU_ASSERT(odp_cls_queue_stats(cos, queue, &stats_start) == 0);
+
+ test_pktio_default_cos(enable_pktv);
+
+ CU_ASSERT(odp_cls_queue_stats(cos, queue, &stats_stop) == 0);
+
+ if (capa.stats.queue.counter.packets)
+ CU_ASSERT(stats_stop.packets > stats_start.packets);
+ if (capa.stats.queue.counter.octets)
+ CU_ASSERT(stats_stop.octets > stats_start.octets);
+ CU_ASSERT((stats_stop.discards - stats_start.discards) == 0);
+ CU_ASSERT((stats_stop.errors - stats_start.errors) == 0);
+
+ printf("\nQueue statistics\n----------------\n");
+ printf(" discards: %" PRIu64 "\n", stats_stop.discards);
+ printf(" errors: %" PRIu64 "\n", stats_stop.errors);
+ printf(" octets: %" PRIu64 "\n", stats_stop.octets);
+ printf(" packets: %" PRIu64 "\n", stats_stop.packets);
+
+ /* Check that all unsupported counters are still zero */
+ if (!capa.stats.queue.counter.discards)
+ CU_ASSERT(stats_stop.discards == 0);
+ if (!capa.stats.queue.counter.errors)
+ CU_ASSERT(stats_stop.errors == 0);
+ if (!capa.stats.queue.counter.octets)
+ CU_ASSERT(stats_stop.octets == 0);
+ if (!capa.stats.queue.counter.packets)
+ CU_ASSERT(stats_stop.packets == 0);
+}
+
+static void classification_test_queue_stats_pkt(void)
+{
+ classification_test_queue_stats(false);
+}
+
+static void classification_test_queue_stats_pktv(void)
+{
+ classification_test_queue_stats(true);
+}
+
void configure_pktio_error_cos(odp_bool_t enable_pktv)
{
int retval;
@@ -933,6 +1007,8 @@ odp_testinfo_t classification_suite[] = {
ODP_TEST_INFO(classification_test_pktio_set_headroom),
ODP_TEST_INFO(classification_test_pktio_configure),
ODP_TEST_INFO(classification_test_pktio_test),
+ ODP_TEST_INFO_CONDITIONAL(classification_test_queue_stats_pkt,
+ classification_check_queue_stats),
ODP_TEST_INFO_NULL,
};
@@ -941,5 +1017,7 @@ odp_testinfo_t classification_suite_pktv[] = {
classification_check_pktv),
ODP_TEST_INFO_CONDITIONAL(classification_test_pktio_test_pktv,
classification_check_pktv),
+ ODP_TEST_INFO_CONDITIONAL(classification_test_queue_stats_pktv,
+ classification_check_queue_stats),
ODP_TEST_INFO_NULL,
};
diff --git a/test/validation/api/ipsec/ipsec.c b/test/validation/api/ipsec/ipsec.c
index 981bc9155..5bbc9c025 100644
--- a/test/validation/api/ipsec/ipsec.c
+++ b/test/validation/api/ipsec/ipsec.c
@@ -1175,7 +1175,7 @@ int ipsec_config(odp_instance_t ODP_UNUSED inst)
if (ipsec_config.inbound.reassembly.max_wait_time > capa.reassembly.max_wait_time)
ipsec_config.inbound.reassembly.max_wait_time = capa.reassembly.max_wait_time;
- ipsec_config.inbound.reassembly.max_num_frags = capa.reassembly.max_num_frags;
+ ipsec_config.inbound.reassembly.max_num_frags = MAX_FRAGS;
if (capa.reassembly.ip) {
ipsec_config.inbound.reassembly.en_ipv4 = true;
@@ -1205,7 +1205,7 @@ int ipsec_config(odp_instance_t ODP_UNUSED inst)
ipsec_config.inbound.reassembly.en_ipv6 = false;
}
- if (ipsec_config.inbound.reassembly.max_num_frags > MAX_FRAGS) {
+ if (capa.reassembly.max_num_frags < MAX_FRAGS) {
ipsec_config.inbound.reassembly.en_ipv4 = false;
ipsec_config.inbound.reassembly.en_ipv6 = false;
}
diff --git a/test/validation/api/ipsec/ipsec_test_in.c b/test/validation/api/ipsec/ipsec_test_in.c
index 08512c8fb..ef6996f1d 100644
--- a/test/validation/api/ipsec/ipsec_test_in.c
+++ b/test/validation/api/ipsec/ipsec_test_in.c
@@ -1782,7 +1782,7 @@ static void test_multi_out_in(odp_ipsec_sa_t out_sa,
ipsec_test_part test_out;
ipsec_test_part test_in;
ipsec_test_packet test_pkt;
- odp_packet_t pkt;
+ odp_packet_t pkt = ODP_PACKET_INVALID;
/*
* Convert plain text packet to IPsec packet through
@@ -1875,27 +1875,15 @@ static void test_in_ipv4_esp_reass_success_four_frags_ooo(odp_ipsec_sa_t out_sa,
static void test_in_ipv4_esp_reass_incomp_missing(odp_ipsec_sa_t out_sa,
odp_ipsec_sa_t in_sa)
{
- ipsec_test_part test_out[MAX_FRAGS], test_in[MAX_FRAGS];
- ipsec_test_packet test_pkt;
- odp_packet_t pkt;
-
- memset(test_in, 0, sizeof(test_in));
-
- CU_ASSERT(MAX_FRAGS >= 1);
-
- part_prep_esp(test_out, 1, false);
-
- test_out[0].pkt_in = &pkt_ipv4_udp_p1_f1;
-
- part_prep_plain(&test_in[0], 1, false, true);
- test_in[0].out[0].pkt_res = &pkt_ipv4_udp_p1_f1;
-
- CU_ASSERT_EQUAL(ipsec_check_out(&test_out[0], out_sa, &pkt), 1);
-
- ipsec_test_packet_from_pkt(&test_pkt, &pkt);
- test_in[0].pkt_in = &test_pkt;
+ ipsec_test_packet *input_packets[] = {
+ &pkt_ipv4_udp_p1_f1,
+ };
+ ipsec_test_packet *result_packet = &pkt_ipv4_udp_p1_f1;
- ipsec_check_in_one(&test_in[0], in_sa);
+ test_multi_out_in(out_sa, in_sa, ODPH_IPV4,
+ ARRAY_SIZE(input_packets),
+ input_packets,
+ result_packet);
}
static void test_in_ipv4_esp_reass_success(void)
@@ -2068,27 +2056,15 @@ static void test_in_ipv6_esp_reass_success_four_frags_ooo(odp_ipsec_sa_t out_sa,
static void test_in_ipv6_esp_reass_incomp_missing(odp_ipsec_sa_t out_sa,
odp_ipsec_sa_t in_sa)
{
- ipsec_test_part test_out[MAX_FRAGS], test_in[MAX_FRAGS];
- ipsec_test_packet test_pkt;
- odp_packet_t pkt;
-
- memset(test_in, 0, sizeof(test_in));
-
- CU_ASSERT(MAX_FRAGS >= 1);
-
- part_prep_esp(test_out, 1, true);
-
- test_out[0].pkt_in = &pkt_ipv6_udp_p1_f1;
-
- part_prep_plain(&test_in[0], 1, true, true);
- test_in[0].out[0].pkt_res = &pkt_ipv6_udp_p1_f1;
-
- CU_ASSERT_EQUAL(ipsec_check_out(&test_out[0], out_sa, &pkt), 1);
-
- ipsec_test_packet_from_pkt(&test_pkt, &pkt);
- test_in[0].pkt_in = &test_pkt;
+ ipsec_test_packet *input_packets[] = {
+ &pkt_ipv6_udp_p1_f1,
+ };
+ ipsec_test_packet *result_packet = &pkt_ipv6_udp_p1_f1;
- ipsec_check_in_one(&test_in[0], in_sa);
+ test_multi_out_in(out_sa, in_sa, ODPH_IPV6,
+ ARRAY_SIZE(input_packets),
+ input_packets,
+ result_packet);
}
static void test_in_ipv6_esp_reass_success(void)
diff --git a/test/validation/api/ipsec/ipsec_test_out.c b/test/validation/api/ipsec/ipsec_test_out.c
index 3349ded99..733db10b9 100644
--- a/test/validation/api/ipsec/ipsec_test_out.c
+++ b/test/validation/api/ipsec/ipsec_test_out.c
@@ -68,6 +68,7 @@ static struct auth_param auths[] = {
ALG(ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256, NULL),
ALG(ODP_AUTH_ALG_SHA384_HMAC, &key_5a_384, NULL),
ALG(ODP_AUTH_ALG_SHA512_HMAC, &key_5a_512, NULL),
+ ALG(ODP_AUTH_ALG_AES_CMAC, &key_5a_128, NULL),
ALG(ODP_AUTH_ALG_AES_XCBC_MAC, &key_5a_128, NULL)
};
@@ -1583,6 +1584,7 @@ static void ipsec_test_default_values(void)
CU_ASSERT(sa_param.proto == ODP_IPSEC_ESP);
CU_ASSERT(sa_param.crypto.cipher_alg == ODP_CIPHER_ALG_NULL);
CU_ASSERT(sa_param.crypto.auth_alg == ODP_AUTH_ALG_NULL);
+ CU_ASSERT(sa_param.crypto.icv_len == 0);
CU_ASSERT(sa_param.opt.esn == 0);
CU_ASSERT(sa_param.opt.udp_encap == 0);
CU_ASSERT(sa_param.opt.copy_dscp == 0);
diff --git a/test/validation/api/pktio/pktio.c b/test/validation/api/pktio/pktio.c
index 838d50fd8..3b6cf52dd 100644
--- a/test/validation/api/pktio/pktio.c
+++ b/test/validation/api/pktio/pktio.c
@@ -2578,6 +2578,186 @@ static void pktio_test_extra_stats(void)
CU_ASSERT(odp_pktio_close(pktio) == 0);
}
+static int pktio_check_proto_statistics_counters(void)
+{
+ odp_proto_stats_capability_t capa;
+ odp_pktio_param_t pktio_param;
+ odp_pktio_t pktio;
+ int ret;
+
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
+
+ pktio = odp_pktio_open(iface_name[0], pool[0], &pktio_param);
+ if (pktio == ODP_PKTIO_INVALID)
+ return ODP_TEST_INACTIVE;
+
+ ret = odp_proto_stats_capability(pktio, &capa);
+ (void)odp_pktio_close(pktio);
+
+ if (ret < 0 || capa.tx.counters.all_bits == 0)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+static void validate_proto_stats(odp_proto_stats_t stat, odp_packet_proto_stats_opt_t opt,
+ odp_proto_stats_capability_t capa, uint64_t pkts)
+{
+ odp_proto_stats_data_t data;
+ int ret;
+
+ ret = odp_proto_stats(stat, &data);
+ CU_ASSERT(ret == 0);
+
+ CU_ASSERT(!(capa.tx.counters.bit.tx_pkt_drops && (data.tx_pkt_drops > 0)));
+ CU_ASSERT(!(capa.tx.counters.bit.tx_oct_count0_drops && (data.tx_oct_count0_drops > 0)));
+ CU_ASSERT(!(capa.tx.counters.bit.tx_oct_count1_drops && (data.tx_oct_count1_drops > 0)));
+ CU_ASSERT(!(capa.tx.counters.bit.tx_pkts && (data.tx_pkts != pkts)));
+
+ if (capa.tx.counters.bit.tx_oct_count0) {
+ int64_t counted_bytes = PKT_LEN_NORMAL;
+
+ if (capa.tx.oct_count0_adj)
+ counted_bytes += opt.oct_count0_adj;
+ CU_ASSERT(data.tx_oct_count0 == counted_bytes * pkts);
+ }
+
+ if (capa.tx.counters.bit.tx_oct_count1) {
+ int64_t counted_bytes = PKT_LEN_NORMAL;
+
+ if (capa.tx.oct_count1_adj)
+ counted_bytes += opt.oct_count1_adj;
+ CU_ASSERT(data.tx_oct_count1 == counted_bytes * pkts);
+ }
+}
+
+static void pktio_test_proto_statistics_counters(void)
+{
+ odp_pktio_t pktio_rx, pktio_tx;
+ odp_pktio_t pktio[MAX_NUM_IFACES] = {
+ ODP_PKTIO_INVALID, ODP_PKTIO_INVALID
+ };
+ odp_packet_t pkt;
+ const uint32_t num_pkts = 10;
+ odp_packet_t tx_pkt[num_pkts];
+ uint32_t pkt_seq[num_pkts];
+ odp_event_t ev;
+ int i, pkts, tx_pkts, ret, alloc = 0;
+ odp_pktout_queue_t pktout;
+ uint64_t wait = odp_schedule_wait_time(ODP_TIME_MSEC_IN_NS);
+ uint64_t flow0_pkts = 0, flow1_pkts = 0;
+ odp_proto_stats_capability_t capa;
+ odp_packet_proto_stats_opt_t opt0;
+ odp_packet_proto_stats_opt_t opt1;
+ odp_proto_stats_param_t param;
+ odp_pktio_config_t config;
+ odp_proto_stats_t stat0;
+ odp_proto_stats_t stat1;
+
+ memset(&pktout, 0, sizeof(pktout));
+
+ for (i = 0; i < num_ifaces; i++) {
+ pktio[i] = create_pktio(i, ODP_PKTIN_MODE_SCHED,
+ ODP_PKTOUT_MODE_DIRECT);
+
+ CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
+ }
+ pktio_tx = pktio[0];
+ pktio_rx = (num_ifaces > 1) ? pktio[1] : pktio_tx;
+
+ /* Enable protocol stats on Tx interface */
+ odp_pktio_config_init(&config);
+ config.pktout.bit.proto_stats_ena = 1;
+ ret = odp_pktio_config(pktio_tx, &config);
+ CU_ASSERT(ret == 0);
+
+ CU_ASSERT_FATAL(odp_pktout_queue(pktio_tx, &pktout, 1) == 1);
+
+ ret = odp_pktio_start(pktio_tx);
+ CU_ASSERT(ret == 0);
+ if (num_ifaces > 1) {
+ ret = odp_pktio_start(pktio_rx);
+ CU_ASSERT(ret == 0);
+ }
+
+ odp_proto_stats_param_init(&param);
+ odp_proto_stats_capability(pktio_tx, &capa);
+ CU_ASSERT(capa.tx.counters.all_bits != 0);
+ param.counters.all_bits = capa.tx.counters.all_bits;
+ /* Create statistics object with all supported counters */
+ stat0 = odp_proto_stats_create("flow0_stat", &param);
+ CU_ASSERT_FATAL(stat0 != ODP_PROTO_STATS_INVALID);
+ stat1 = odp_proto_stats_create("flow1_stat", &param);
+ CU_ASSERT_FATAL(stat1 != ODP_PROTO_STATS_INVALID);
+
+ /* Flow-0 options */
+ opt0.stat = stat0;
+ opt0.oct_count0_adj = 0;
+ /* oct1 contains byte count of packets excluding Ethernet header */
+ opt0.oct_count1_adj = -14;
+
+ /* Flow-1 options */
+ opt1.stat = stat1;
+ opt1.oct_count0_adj = -8;
+ opt1.oct_count1_adj = 14;
+
+ alloc = create_packets(tx_pkt, pkt_seq, num_pkts, pktio_tx, pktio_rx);
+
+ /* Attach statistics object to all Tx packets */
+ for (pkts = 0; pkts < alloc; pkts++) {
+ if ((pkts % 2) == 0) {
+ odp_packet_proto_stats_request(tx_pkt[pkts], &opt0);
+ flow0_pkts++;
+ } else {
+ odp_packet_proto_stats_request(tx_pkt[pkts], &opt1);
+ flow1_pkts++;
+ }
+ }
+
+ /* send */
+ for (pkts = 0; pkts != alloc; ) {
+ ret = odp_pktout_send(pktout, &tx_pkt[pkts], alloc - pkts);
+ if (ret < 0) {
+ CU_FAIL("unable to send packet\n");
+ break;
+ }
+ pkts += ret;
+ }
+ tx_pkts = pkts;
+
+ /* get */
+ for (i = 0, pkts = 0; i < (int)num_pkts && pkts != tx_pkts; i++) {
+ ev = odp_schedule(NULL, wait);
+ if (ev != ODP_EVENT_INVALID) {
+ if (odp_event_type(ev) == ODP_EVENT_PACKET) {
+ pkt = odp_packet_from_event(ev);
+ if (pktio_pkt_seq(pkt) != TEST_SEQ_INVALID)
+ pkts++;
+ }
+ odp_event_free(ev);
+ }
+ }
+
+ CU_ASSERT(pkts == tx_pkts);
+
+ /* Validate Flow-0 packet statistics */
+ validate_proto_stats(stat0, opt0, capa, flow0_pkts);
+
+ /* Validate Flow-1 packet statistics */
+ validate_proto_stats(stat1, opt1, capa, flow1_pkts);
+
+ for (i = 0; i < num_ifaces; i++) {
+ CU_ASSERT(odp_pktio_stop(pktio[i]) == 0);
+ flush_input_queue(pktio[i], ODP_PKTIN_MODE_SCHED);
+ CU_ASSERT(odp_pktio_close(pktio[i]) == 0);
+ }
+
+ /* Destroy proto statistics object */
+ CU_ASSERT(odp_proto_stats_destroy(stat0) == 0);
+ CU_ASSERT(odp_proto_stats_destroy(stat1) == 0);
+}
+
static int pktio_check_start_stop(void)
{
if (getenv("ODP_PKTIO_TEST_DISABLE_START_STOP"))
@@ -4617,6 +4797,8 @@ odp_testinfo_t pktio_suite_unsegmented[] = {
ODP_TEST_INFO_CONDITIONAL(pktio_test_event_queue_statistics_counters,
pktio_check_event_queue_statistics_counters),
ODP_TEST_INFO(pktio_test_extra_stats),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_proto_statistics_counters,
+ pktio_check_proto_statistics_counters),
ODP_TEST_INFO_CONDITIONAL(pktio_test_pktin_ts,
pktio_check_pktin_ts),
ODP_TEST_INFO_CONDITIONAL(pktio_test_pktout_ts,
diff --git a/test/validation/api/pool/pool.c b/test/validation/api/pool/pool.c
index 78037c65c..d791063e2 100644
--- a/test/validation/api/pool/pool.c
+++ b/test/validation/api/pool/pool.c
@@ -1,6 +1,6 @@
/* Copyright (c) 2014-2018, Linaro Limited
* Copyright (c) 2020, Marvell
- * Copyright (c) 2020, Nokia
+ * Copyright (c) 2020-2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -8,6 +8,8 @@
#include <odp_api.h>
#include "odp_cunit_common.h"
+#include "test_common_macros.h"
+#include <odp/helper/odph_api.h>
#define BUF_SIZE 1500
#define BUF_NUM 1000
@@ -19,6 +21,14 @@
#define CACHE_SIZE 32
#define MAX_NUM_DEFAULT (10 * 1024 * 1024)
+#define EXT_NUM_BUF 10
+#define EXT_BUF_SIZE 2048
+#define EXT_BUF_ALIGN 64
+#define EXT_APP_HDR_SIZE 128
+#define EXT_UAREA_SIZE 32
+#define EXT_HEADROOM 16
+#define MAGIC_U8 0x7a
+
typedef struct {
odp_barrier_t init_barrier;
odp_atomic_u32_t index;
@@ -30,6 +40,7 @@ static global_shared_mem_t *global_mem;
static odp_pool_capability_t global_pool_capa;
static odp_pool_param_t default_pool_param;
+static odp_pool_ext_capability_t global_pool_ext_capa;
static void pool_create_destroy(odp_pool_param_t *param)
{
@@ -914,7 +925,7 @@ static int pool_check_timeout_pool_statistics(void)
return ODP_TEST_ACTIVE;
}
-static void pool_test_pool_statistics(int pool_type)
+static void pool_test_pool_statistics(odp_pool_type_t pool_type)
{
odp_pool_stats_t stats;
odp_pool_param_t param;
@@ -1121,6 +1132,504 @@ static void pool_test_timeout_pool_statistics(void)
pool_test_pool_statistics(ODP_POOL_TIMEOUT);
}
+static void pool_ext_init_packet_pool_param(odp_pool_ext_param_t *param)
+{
+ odp_pool_ext_capability_t capa;
+ uint32_t head_offset, head_align, trailer_size;
+ odp_pool_type_t type = ODP_POOL_PACKET;
+ uint32_t num_buf = EXT_NUM_BUF;
+ uint32_t buf_size = EXT_BUF_SIZE;
+ uint32_t uarea_size = EXT_UAREA_SIZE;
+ uint32_t headroom = EXT_HEADROOM;
+ uint32_t app_hdr_size = EXT_APP_HDR_SIZE;
+
+ CU_ASSERT_FATAL(odp_pool_ext_capability(type, &capa) == 0);
+
+ odp_pool_ext_param_init(type, param);
+
+ if (num_buf > capa.pkt.max_num_buf)
+ num_buf = capa.pkt.max_num_buf;
+
+ if (buf_size > capa.pkt.max_buf_size)
+ buf_size = capa.pkt.max_buf_size;
+
+ if (uarea_size > capa.pkt.max_uarea_size)
+ uarea_size = capa.pkt.max_uarea_size;
+
+ if (headroom > capa.pkt.max_headroom)
+ headroom = capa.pkt.max_headroom;
+
+ head_align = capa.pkt.min_head_align;
+ head_offset = capa.pkt.odp_header_size + app_hdr_size;
+ trailer_size = capa.pkt.odp_trailer_size;
+
+ CU_ASSERT_FATAL(head_offset < buf_size);
+ CU_ASSERT_FATAL((head_offset + trailer_size) < buf_size);
+
+ while (head_offset % head_align) {
+ app_hdr_size++;
+ head_offset = capa.pkt.odp_header_size + app_hdr_size;
+
+ if (head_offset >= buf_size) {
+ ODPH_ERR("Head align too large (%u). No room for data.\n", head_align);
+ break;
+ }
+ }
+
+ CU_ASSERT_FATAL(head_offset < buf_size);
+ CU_ASSERT_FATAL((head_offset + trailer_size) < buf_size);
+ CU_ASSERT_FATAL((head_offset % head_align) == 0);
+
+ param->pkt.num_buf = num_buf;
+ param->pkt.buf_size = buf_size;
+ param->pkt.app_header_size = app_hdr_size;
+ param->pkt.uarea_size = uarea_size;
+ param->pkt.headroom = headroom;
+}
+
+static void test_packet_pool_ext_capa(void)
+{
+ odp_pool_ext_capability_t capa;
+ odp_pool_type_t type = ODP_POOL_PACKET;
+
+ CU_ASSERT_FATAL(odp_pool_ext_capability(type, &capa) == 0);
+
+ CU_ASSERT(capa.type == type);
+
+ /* External memory pools not supported */
+ if (capa.max_pools == 0)
+ return;
+
+ CU_ASSERT(capa.max_pools > 0);
+ CU_ASSERT(capa.min_cache_size <= capa.max_cache_size);
+ CU_ASSERT(capa.pkt.max_num_buf > 0);
+ CU_ASSERT(capa.pkt.max_buf_size > 0);
+ CU_ASSERT(capa.pkt.min_mem_align > 0);
+ CU_ASSERT(TEST_CHECK_POW2(capa.pkt.min_mem_align));
+ CU_ASSERT(capa.pkt.min_buf_align > 0);
+ CU_ASSERT(capa.pkt.min_head_align > 0);
+ CU_ASSERT(capa.pkt.max_headroom > 0);
+ CU_ASSERT(capa.pkt.max_headroom_size > 0);
+ CU_ASSERT(capa.pkt.max_headroom_size >= capa.pkt.max_headroom);
+ CU_ASSERT(capa.pkt.max_segs_per_pkt > 0);
+ CU_ASSERT(capa.pkt.max_uarea_size > 0);
+}
+
+static void test_packet_pool_ext_param_init(void)
+{
+ odp_pool_ext_param_t param;
+
+ odp_pool_ext_param_init(ODP_POOL_PACKET, &param);
+
+ CU_ASSERT(param.type == ODP_POOL_PACKET);
+ CU_ASSERT(param.cache_size >= global_pool_ext_capa.min_cache_size &&
+ param.cache_size <= global_pool_ext_capa.max_cache_size);
+ CU_ASSERT(param.stats.all == 0);
+ CU_ASSERT(param.pkt.app_header_size == 0);
+ CU_ASSERT(param.pkt.uarea_size == 0);
+}
+
+static void test_packet_pool_ext_create(void)
+{
+ odp_pool_t pool;
+ odp_pool_ext_param_t param;
+
+ pool_ext_init_packet_pool_param(&param);
+
+ pool = odp_pool_ext_create("pool_ext_0", &param);
+
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+static void test_packet_pool_ext_lookup(void)
+{
+ odp_pool_t pool, pool_1;
+ odp_pool_ext_param_t param;
+ const char *name = "pool_ext_0";
+
+ pool_ext_init_packet_pool_param(&param);
+
+ pool = odp_pool_ext_create(name, &param);
+
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ pool_1 = odp_pool_lookup(name);
+
+ CU_ASSERT_FATAL(pool_1 != ODP_POOL_INVALID);
+ CU_ASSERT(pool == pool_1);
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+static void test_packet_pool_ext_info(void)
+{
+ odp_pool_t pool;
+ odp_pool_ext_param_t param;
+ odp_pool_info_t info;
+ const char *name = "pool_ext_0";
+
+ pool_ext_init_packet_pool_param(&param);
+
+ pool = odp_pool_ext_create(name, &param);
+
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ memset(&info, 0, sizeof(odp_pool_info_t));
+ CU_ASSERT_FATAL(odp_pool_info(pool, &info) == 0);
+
+ CU_ASSERT(info.pool_ext);
+ CU_ASSERT(strncmp(name, info.name, strlen(name)) == 0);
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+static odp_shm_t populate_pool(odp_pool_t pool, odp_pool_ext_capability_t *capa,
+ void *buf[], uint32_t num, uint32_t buf_size)
+{
+ odp_shm_t shm;
+ uint8_t *buf_ptr;
+ uint32_t i;
+ uint32_t shm_size, mem_align;
+ uint32_t flags = 0;
+ uint32_t buf_align = EXT_BUF_ALIGN;
+ uint32_t min_align = capa->pkt.min_buf_align;
+
+ CU_ASSERT_FATAL(min_align > 0);
+
+ if (min_align > buf_align)
+ buf_align = min_align;
+
+ if (capa->pkt.buf_size_aligned) {
+ buf_align = buf_size;
+ CU_ASSERT_FATAL((buf_size % min_align) == 0);
+ }
+
+ mem_align = buf_align;
+ if (capa->pkt.min_mem_align > mem_align)
+ mem_align = capa->pkt.min_mem_align;
+
+ /* Prepare to align every buffer */
+ shm_size = (num + 1) * (buf_size + buf_align);
+
+ shm = odp_shm_reserve("test_pool_ext_populate", shm_size, mem_align, 0);
+ if (shm == ODP_SHM_INVALID)
+ return ODP_SHM_INVALID;
+
+ buf_ptr = odp_shm_addr(shm);
+ CU_ASSERT_FATAL((uintptr_t)buf_ptr % mem_align == 0);
+
+ /* initialize entire pool memory with a pattern */
+ memset(buf_ptr, MAGIC_U8, shm_size);
+
+ /* Move from mem_align to buf_align */
+ while ((uintptr_t)buf_ptr % buf_align)
+ buf_ptr++;
+
+ for (i = 0; i < num; i++) {
+ if (i == num - 1)
+ flags = ODP_POOL_POPULATE_DONE;
+
+ buf[i] = buf_ptr;
+ CU_ASSERT_FATAL(odp_pool_ext_populate(pool, &buf[i], buf_size, 1, flags) == 0);
+
+ buf_ptr += buf_size;
+ while ((uintptr_t)buf_ptr % buf_align)
+ buf_ptr++;
+ }
+
+ return shm;
+}
+
+static void test_packet_pool_ext_populate(void)
+{
+ odp_shm_t shm;
+ odp_pool_t pool;
+ odp_pool_ext_param_t param;
+ odp_pool_ext_capability_t capa;
+ uint32_t buf_size, num_buf;
+ void *buf[EXT_NUM_BUF];
+
+ CU_ASSERT_FATAL(odp_pool_ext_capability(ODP_POOL_PACKET, &capa) == 0);
+
+ pool_ext_init_packet_pool_param(&param);
+ num_buf = param.pkt.num_buf;
+ buf_size = param.pkt.buf_size;
+
+ CU_ASSERT_FATAL(capa.pkt.min_head_align > 0);
+
+ pool = odp_pool_ext_create("pool_ext_0", &param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ shm = populate_pool(pool, &capa, buf, num_buf, buf_size);
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+ CU_ASSERT(odp_shm_free(shm) == 0);
+}
+
+static uint32_t find_buf(odp_packet_t pkt, void *buf[], uint32_t num, uint32_t head_offset)
+{
+ uint32_t i;
+ uint8_t *ptr;
+ uint8_t *head = odp_packet_head(pkt);
+
+ for (i = 0; i < num; i++) {
+ ptr = buf[i];
+ ptr += head_offset;
+
+ if (head == ptr)
+ break;
+ }
+
+ return i;
+}
+
+#define PKT_LEN_NORMAL 0
+#define PKT_LEN_MAX 1
+#define PKT_LEN_SEGMENTED 2
+
+static void packet_pool_ext_alloc(int len_test)
+{
+ odp_shm_t shm;
+ odp_pool_t pool;
+ odp_pool_ext_param_t param;
+ odp_pool_ext_capability_t capa;
+ uint32_t i, j, buf_size, num_buf, num_pkt, num_alloc, buf_index;
+ uint32_t pkt_len, head_offset, trailer_size, headroom, max_headroom;
+ uint32_t hr, tr, uarea_size, max_payload, buf_data_size, app_hdr_size;
+ int num_seg;
+ uint8_t *app_hdr;
+ void *buf[EXT_NUM_BUF];
+ odp_packet_t pkt[EXT_NUM_BUF];
+ uint32_t seg_len = 0;
+
+ CU_ASSERT_FATAL(odp_pool_ext_capability(ODP_POOL_PACKET, &capa) == 0);
+
+ pool_ext_init_packet_pool_param(&param);
+ num_buf = param.pkt.num_buf;
+ buf_size = param.pkt.buf_size;
+ uarea_size = param.pkt.uarea_size;
+
+ pool = odp_pool_ext_create("pool_ext_0", &param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ shm = populate_pool(pool, &capa, buf, num_buf, buf_size);
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+
+ app_hdr_size = param.pkt.app_header_size;
+ head_offset = capa.pkt.odp_header_size + app_hdr_size;
+ max_headroom = capa.pkt.max_headroom_size;
+ headroom = param.pkt.headroom;
+ trailer_size = capa.pkt.odp_trailer_size;
+ buf_data_size = buf_size - head_offset - trailer_size;
+ max_payload = buf_data_size - max_headroom;
+ num_pkt = num_buf;
+ num_seg = 1;
+
+ if (len_test == PKT_LEN_NORMAL) {
+ pkt_len = (buf_data_size - headroom) / 2;
+ } else if (len_test == PKT_LEN_MAX) {
+ pkt_len = max_payload;
+ } else {
+ CU_ASSERT_FATAL(capa.pkt.max_segs_per_pkt > 1);
+ /* length that results 2 segments */
+ pkt_len = max_payload + (buf_size / 2);
+ num_seg = 2;
+ num_pkt = num_buf / num_seg;
+ }
+
+ for (i = 0; i < num_pkt; i++) {
+ pkt[i] = odp_packet_alloc(pool, pkt_len);
+ CU_ASSERT(pkt[i] != ODP_PACKET_INVALID);
+ if (pkt[i] == ODP_PACKET_INVALID)
+ break;
+
+ CU_ASSERT(odp_packet_len(pkt[i]) == pkt_len);
+ CU_ASSERT(odp_packet_headroom(pkt[i]) >= headroom);
+ buf_index = find_buf(pkt[i], buf, num_buf, head_offset);
+ CU_ASSERT(buf_index < num_buf);
+ hr = (uintptr_t)odp_packet_data(pkt[i]) - (uintptr_t)odp_packet_head(pkt[i]);
+ CU_ASSERT(hr == odp_packet_headroom(pkt[i]));
+ CU_ASSERT(num_seg == odp_packet_num_segs(pkt[i]));
+ CU_ASSERT(odp_packet_data(pkt[i]) == odp_packet_data_seg_len(pkt[i], &seg_len));
+ CU_ASSERT(odp_packet_seg_len(pkt[i]) == seg_len);
+
+ if (num_seg == 1) {
+ tr = buf_data_size - hr - pkt_len;
+ CU_ASSERT(tr == odp_packet_tailroom(pkt[i]));
+ CU_ASSERT(odp_packet_seg_len(pkt[i]) == pkt_len);
+ } else {
+ odp_packet_seg_t seg = odp_packet_last_seg(pkt[i]);
+ uint32_t last_seg_len = odp_packet_seg_data_len(pkt[i], seg);
+ uint32_t max_tr = buf_data_size - last_seg_len;
+
+ CU_ASSERT(odp_packet_tailroom(pkt[i]) <= max_tr);
+ CU_ASSERT(pkt_len == (odp_packet_seg_len(pkt[i]) + last_seg_len));
+ }
+
+ CU_ASSERT(odp_packet_buf_len(pkt[i]) == num_seg * buf_data_size);
+
+ if (uarea_size) {
+ CU_ASSERT(odp_packet_user_area(pkt[i]) != NULL);
+ CU_ASSERT(odp_packet_user_area_size(pkt[i]) == uarea_size);
+ }
+
+ /* Check that application header content has not changed */
+ app_hdr = (uint8_t *)odp_packet_head(pkt[i]) - app_hdr_size;
+ for (j = 0; j < app_hdr_size; j++)
+ CU_ASSERT(app_hdr[j] == MAGIC_U8);
+ }
+
+ num_alloc = i;
+ CU_ASSERT(num_alloc == num_pkt);
+
+ /* Pool is now empty */
+ CU_ASSERT(odp_packet_alloc(pool, pkt_len) == ODP_PACKET_INVALID);
+
+ for (i = 0; i < num_alloc; i++)
+ odp_packet_free(pkt[i]);
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+ CU_ASSERT(odp_shm_free(shm) == 0);
+}
+
+static void test_packet_pool_ext_alloc(void)
+{
+ packet_pool_ext_alloc(PKT_LEN_NORMAL);
+}
+
+static void test_packet_pool_ext_alloc_max(void)
+{
+ packet_pool_ext_alloc(PKT_LEN_MAX);
+}
+
+static void test_packet_pool_ext_alloc_seg(void)
+{
+ packet_pool_ext_alloc(PKT_LEN_SEGMENTED);
+}
+
+static void test_packet_pool_ext_disassemble(void)
+{
+ odp_shm_t shm;
+ odp_pool_t pool;
+ odp_pool_ext_param_t param;
+ odp_pool_ext_capability_t capa;
+ uint32_t i, j, buf_size, num_buf, num_pkt, num_alloc, buf_index;
+ uint32_t pkt_len, head_offset, trailer_size, headroom, max_headroom;
+ uint32_t hr, max_payload, buf_data_size;
+ uint32_t num_seg;
+ void *buf[EXT_NUM_BUF];
+ odp_packet_t pkt_tbl[EXT_NUM_BUF];
+
+ CU_ASSERT_FATAL(odp_pool_ext_capability(ODP_POOL_PACKET, &capa) == 0);
+ CU_ASSERT_FATAL(capa.pkt.max_segs_per_pkt > 1);
+
+ pool_ext_init_packet_pool_param(&param);
+ num_buf = param.pkt.num_buf;
+ buf_size = param.pkt.buf_size;
+
+ pool = odp_pool_ext_create("pool_ext_0", &param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ shm = populate_pool(pool, &capa, buf, num_buf, buf_size);
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+
+ head_offset = capa.pkt.odp_header_size + param.pkt.app_header_size;
+ max_headroom = capa.pkt.max_headroom_size;
+ headroom = param.pkt.headroom;
+ trailer_size = capa.pkt.odp_trailer_size;
+ buf_data_size = buf_size - head_offset - trailer_size;
+ max_payload = buf_data_size - max_headroom;
+
+ /* length that results 2 segments */
+ pkt_len = max_payload + (buf_size / 2);
+ num_seg = 2;
+ num_pkt = num_buf / num_seg;
+
+ for (i = 0; i < num_pkt; i++) {
+ odp_packet_t pkt;
+ odp_packet_seg_t seg;
+ uint32_t num_pkt_buf, data_offset, data_len;
+ void *head, *data, *pkt_head;
+ odp_packet_buf_t pkt_buf[num_seg];
+ void *seg_data[num_seg];
+ uint32_t seg_len[num_seg];
+
+ pkt = odp_packet_alloc(pool, pkt_len);
+ pkt_tbl[i] = pkt;
+ CU_ASSERT(pkt != ODP_PACKET_INVALID);
+ if (pkt == ODP_PACKET_INVALID)
+ break;
+
+ CU_ASSERT(odp_packet_len(pkt) == pkt_len);
+ CU_ASSERT(odp_packet_headroom(pkt) >= headroom);
+ buf_index = find_buf(pkt, buf, num_buf, head_offset);
+ CU_ASSERT(buf_index < num_buf);
+ pkt_head = odp_packet_head(pkt);
+ hr = (uintptr_t)odp_packet_data(pkt) - (uintptr_t)pkt_head;
+ CU_ASSERT(hr == odp_packet_headroom(pkt));
+ CU_ASSERT((int)num_seg == odp_packet_num_segs(pkt));
+
+ seg = odp_packet_first_seg(pkt);
+ for (j = 0; j < num_seg; j++) {
+ seg_data[j] = odp_packet_seg_data(pkt, seg);
+ seg_len[j] = odp_packet_seg_data_len(pkt, seg);
+ seg = odp_packet_next_seg(pkt, seg);
+ }
+
+ CU_ASSERT(odp_packet_data(pkt) == seg_data[0]);
+ CU_ASSERT(odp_packet_seg_len(pkt) == seg_len[0])
+
+ /* Disassemble packet */
+ num_pkt_buf = odp_packet_disassemble(pkt, pkt_buf, num_seg);
+ CU_ASSERT_FATAL(num_pkt_buf == num_seg);
+
+ CU_ASSERT(odp_packet_buf_head(pkt_buf[0]) == pkt_head);
+ CU_ASSERT(odp_packet_buf_data_offset(pkt_buf[0]) == hr);
+
+ for (j = 0; j < num_seg; j++) {
+ CU_ASSERT(odp_packet_buf_size(pkt_buf[j]) == buf_data_size);
+
+ head = odp_packet_buf_head(pkt_buf[j]);
+ data_offset = odp_packet_buf_data_offset(pkt_buf[j]);
+ data = (uint8_t *)head + data_offset;
+ CU_ASSERT(seg_data[j] == data);
+ data_len = odp_packet_buf_data_len(pkt_buf[j]);
+ CU_ASSERT(seg_len[j] == data_len);
+
+ CU_ASSERT(odp_packet_buf_from_head(pool, head) == pkt_buf[j]);
+
+ /* Pull in head and tail by one byte */
+ odp_packet_buf_data_set(pkt_buf[j], data_offset + 1, data_len - 2);
+ CU_ASSERT(odp_packet_buf_data_offset(pkt_buf[j]) == data_offset + 1);
+ CU_ASSERT(odp_packet_buf_data_len(pkt_buf[j]) == data_len - 2);
+ }
+
+ /* Reassemble packet, each segment is now 2 bytes shorter */
+ pkt = odp_packet_reassemble(pool, pkt_buf, num_seg);
+
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(odp_packet_num_segs(pkt) == (int)num_seg);
+ pkt_tbl[i] = pkt;
+
+ CU_ASSERT(odp_packet_len(pkt) == (pkt_len - (num_seg * 2)));
+ }
+
+ num_alloc = i;
+ CU_ASSERT(num_alloc == num_pkt);
+
+ /* Pool is now empty */
+ CU_ASSERT(odp_packet_alloc(pool, pkt_len) == ODP_PACKET_INVALID);
+
+ for (i = 0; i < num_alloc; i++)
+ odp_packet_free(pkt_tbl[i]);
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+ CU_ASSERT(odp_shm_free(shm) == 0);
+}
+
static int pool_suite_init(void)
{
memset(&global_pool_capa, 0, sizeof(odp_pool_capability_t));
@@ -1136,6 +1645,39 @@ static int pool_suite_init(void)
return 0;
}
+static int pool_ext_suite_init(void)
+{
+ memset(&global_pool_ext_capa, 0, sizeof(odp_pool_ext_capability_t));
+
+ if (odp_pool_ext_capability(ODP_POOL_PACKET, &global_pool_ext_capa)) {
+ printf("Pool ext capa failed in suite init\n");
+ return -1;
+ }
+
+ if (global_pool_ext_capa.type != ODP_POOL_PACKET) {
+ printf("Bad type from pool ext capa in suite init\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int check_pool_ext_support(void)
+{
+ if (global_pool_ext_capa.max_pools == 0)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+static int check_pool_ext_segment_support(void)
+{
+ if (global_pool_ext_capa.max_pools == 0 || global_pool_ext_capa.pkt.max_segs_per_pkt < 2)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
odp_testinfo_t pool_suite[] = {
ODP_TEST_INFO(pool_test_create_destroy_buffer),
ODP_TEST_INFO(pool_test_create_destroy_packet),
@@ -1175,11 +1717,29 @@ odp_testinfo_t pool_suite[] = {
ODP_TEST_INFO_NULL,
};
+odp_testinfo_t pool_ext_suite[] = {
+ ODP_TEST_INFO(test_packet_pool_ext_capa),
+ ODP_TEST_INFO_CONDITIONAL(test_packet_pool_ext_param_init, check_pool_ext_support),
+ ODP_TEST_INFO_CONDITIONAL(test_packet_pool_ext_create, check_pool_ext_support),
+ ODP_TEST_INFO_CONDITIONAL(test_packet_pool_ext_lookup, check_pool_ext_support),
+ ODP_TEST_INFO_CONDITIONAL(test_packet_pool_ext_info, check_pool_ext_support),
+ ODP_TEST_INFO_CONDITIONAL(test_packet_pool_ext_populate, check_pool_ext_support),
+ ODP_TEST_INFO_CONDITIONAL(test_packet_pool_ext_alloc, check_pool_ext_support),
+ ODP_TEST_INFO_CONDITIONAL(test_packet_pool_ext_alloc_max, check_pool_ext_support),
+ ODP_TEST_INFO_CONDITIONAL(test_packet_pool_ext_alloc_seg, check_pool_ext_segment_support),
+ ODP_TEST_INFO_CONDITIONAL(test_packet_pool_ext_disassemble, check_pool_ext_segment_support),
+ ODP_TEST_INFO_NULL,
+};
+
odp_suiteinfo_t pool_suites[] = {
{ .name = "Pool tests",
.testinfo_tbl = pool_suite,
.init_func = pool_suite_init,
},
+ { .name = "Ext mem pool tests",
+ .testinfo_tbl = pool_ext_suite,
+ .init_func = pool_ext_suite_init,
+ },
ODP_SUITE_INFO_NULL,
};
diff --git a/test/validation/api/std/.gitignore b/test/validation/api/std/.gitignore
new file mode 100644
index 000000000..51fbc1d95
--- /dev/null
+++ b/test/validation/api/std/.gitignore
@@ -0,0 +1 @@
+std_main
diff --git a/test/validation/api/std/Makefile.am b/test/validation/api/std/Makefile.am
new file mode 100644
index 000000000..7cebadb83
--- /dev/null
+++ b/test/validation/api/std/Makefile.am
@@ -0,0 +1,4 @@
+include ../Makefile.inc
+
+test_PROGRAMS = std_main
+std_main_SOURCES = std.c
diff --git a/test/validation/api/std_clib/std_clib.c b/test/validation/api/std/std.c
index 35ad6f92b..56d05a4b4 100644
--- a/test/validation/api/std_clib/std_clib.c
+++ b/test/validation/api/std/std.c
@@ -11,7 +11,7 @@
#define PATTERN 0x5e
-static void std_clib_test_memcpy(void)
+static void std_test_memcpy(void)
{
uint8_t src[] = {0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15};
@@ -27,7 +27,7 @@ static void std_clib_test_memcpy(void)
CU_ASSERT(ret == 0);
}
-static void std_clib_test_memset(void)
+static void std_test_memset(void)
{
uint8_t data[] = {0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15};
@@ -43,7 +43,7 @@ static void std_clib_test_memset(void)
CU_ASSERT(ret == 0);
}
-static void std_clib_test_memcmp(void)
+static void std_test_memcmp(void)
{
uint8_t data[] = {1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16};
@@ -80,15 +80,15 @@ static void std_clib_test_memcmp(void)
}
}
-odp_testinfo_t std_clib_suite[] = {
- ODP_TEST_INFO(std_clib_test_memcpy),
- ODP_TEST_INFO(std_clib_test_memset),
- ODP_TEST_INFO(std_clib_test_memcmp),
+odp_testinfo_t std_suite[] = {
+ ODP_TEST_INFO(std_test_memcpy),
+ ODP_TEST_INFO(std_test_memset),
+ ODP_TEST_INFO(std_test_memcmp),
ODP_TEST_INFO_NULL,
};
-odp_suiteinfo_t std_clib_suites[] = {
- {"Std C library", NULL, NULL, std_clib_suite},
+odp_suiteinfo_t std_suites[] = {
+ {"Std", NULL, NULL, std_suite},
ODP_SUITE_INFO_NULL
};
@@ -100,7 +100,7 @@ int main(int argc, char *argv[])
if (odp_cunit_parse_options(argc, argv))
return -1;
- ret = odp_cunit_register(std_clib_suites);
+ ret = odp_cunit_register(std_suites);
if (ret == 0)
ret = odp_cunit_run();
diff --git a/test/validation/api/std_clib/.gitignore b/test/validation/api/std_clib/.gitignore
deleted file mode 100644
index 37828330a..000000000
--- a/test/validation/api/std_clib/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-std_clib_main
diff --git a/test/validation/api/std_clib/Makefile.am b/test/validation/api/std_clib/Makefile.am
deleted file mode 100644
index 9d3b32d3f..000000000
--- a/test/validation/api/std_clib/Makefile.am
+++ /dev/null
@@ -1,4 +0,0 @@
-include ../Makefile.inc
-
-test_PROGRAMS = std_clib_main
-std_clib_main_SOURCES = std_clib.c
diff --git a/test/validation/api/system/system.c b/test/validation/api/system/system.c
index e511582dc..de49c3a5a 100644
--- a/test/validation/api/system/system.c
+++ b/test/validation/api/system/system.c
@@ -9,6 +9,7 @@
#include <odp/helper/odph_api.h>
#include "odp_cunit_common.h"
+#include "test_common_macros.h"
#define PERIODS_100_MSEC 160
#define RES_TRY_NUM 10
@@ -20,9 +21,6 @@
/* 10 usec wait time assumes >100kHz resolution on CPU cycles counter */
#define WAIT_TIME (10 * ODP_TIME_USEC_IN_NS)
-/* Check if value is power of two */
-#define IS_POW2(x) ((((x) - 1) & (x)) == 0)
-
static void test_version_api_str(void)
{
int char_ok = 0;
@@ -252,8 +250,8 @@ static void system_test_odp_sys_cache_line_size(void)
cache_size = odp_sys_cache_line_size();
CU_ASSERT(0 < cache_size);
CU_ASSERT(0 < ODP_CACHE_LINE_SIZE);
- CU_ASSERT(IS_POW2(cache_size));
- CU_ASSERT(IS_POW2(ODP_CACHE_LINE_SIZE));
+ CU_ASSERT(TEST_CHECK_POW2(cache_size));
+ CU_ASSERT(TEST_CHECK_POW2(ODP_CACHE_LINE_SIZE));
if (ODP_CACHE_LINE_SIZE != cache_size)
printf("WARNING: ODP_CACHE_LINE_SIZE and odp_sys_cache_line_size() not matching\n");
diff --git a/test/validation/api/traffic_mngr/traffic_mngr.c b/test/validation/api/traffic_mngr/traffic_mngr.c
index 55004692d..2ba0d1c9d 100644
--- a/test/validation/api/traffic_mngr/traffic_mngr.c
+++ b/test/validation/api/traffic_mngr/traffic_mngr.c
@@ -20,7 +20,6 @@
#define TM_DEBUG 0
-#define MAX_CAPABILITIES 16
#define MAX_NUM_IFACES 2
#define MAX_TM_SYSTEMS 3
#define NUM_LEVELS 3
@@ -386,35 +385,30 @@ static odp_bool_t approx_eq64(uint64_t val, uint64_t correct)
static int test_overall_capabilities(void)
{
odp_tm_level_capabilities_t *per_level;
- odp_tm_capabilities_t capabilities_array[MAX_CAPABILITIES];
+ odp_tm_capabilities_t capabilities_array[2];
odp_tm_capabilities_t *cap_ptr;
+ odp_tm_egress_t egress;
+ odp_bool_t *prio_modes;
uint32_t num_records, idx, num_levels, level;
int rc;
- rc = odp_tm_capabilities(capabilities_array, MAX_CAPABILITIES);
- if (rc < 0) {
- CU_ASSERT(rc < 0);
- return -1;
- }
+ odp_tm_egress_init(&egress);
+ egress.egress_kind = ODP_TM_EGRESS_PKT_IO;
+ egress.pktio = xmt_pktio;
- /* Now test the return code (which did not indicate a failure code)
- * to make sure that there is at least ONE capabilities record
- * returned */
- if (rc == 0) {
- CU_ASSERT(rc != 0);
- return -1;
- }
+ rc = odp_tm_egress_capabilities(&capabilities_array[0], &egress);
+ CU_ASSERT_FATAL(rc == 0);
+ num_records = 1;
- /* Now test the return code to see if there were more capabilities
- * records than the call above allowed for. This is not an error,
- * just an interesting fact.
- */
- num_records = MAX_CAPABILITIES;
- if (MAX_CAPABILITIES < rc)
- ODPH_DBG("There were more than %u capabilities (%u)\n",
- MAX_CAPABILITIES, rc);
- else
- num_records = rc;
+ /* Get capabilities for egress kind function. */
+ odp_tm_egress_init(&egress);
+ egress.egress_kind = ODP_TM_EGRESS_FN;
+ rc = odp_tm_egress_capabilities(&capabilities_array[1], &egress);
+ CU_ASSERT_FATAL(rc == 0);
+
+ /* Validate this record only if egress function is supported */
+ if (capabilities_array[1].max_tm_queues)
+ num_records++;
/* Loop through the returned capabilities (there MUST be at least one)
* and do some basic checks to prove that it isn't just an empty
@@ -450,6 +444,11 @@ static int test_overall_capabilities(void)
return -1;
}
}
+
+ /* At least one pkt priority mode needs to be supported */
+ prio_modes = cap_ptr->pkt_prio_modes;
+	CU_ASSERT((prio_modes[ODP_TM_PKT_PRIO_MODE_PRESERVE] != 0) ||
+		  (prio_modes[ODP_TM_PKT_PRIO_MODE_OVERWRITE] != 0));
}
return 0;
@@ -1098,6 +1097,44 @@ static int make_pkts(uint32_t num_pkts,
return 0;
}
+static uint32_t send_pkts_multi(odp_tm_queue_t tm_queue, uint32_t num_pkts)
+{
+ xmt_pkt_desc_t *xmt_pkt_desc;
+ odp_packet_t odp_pkt;
+ uint32_t xmt_pkt_idx, pkts_sent;
+ int64_t rc, i = 0;
+
+ /* Now send the pkts as fast as we can. RED drops are internally
+ * consumed by odp_tm_enq_multi().
+ */
+ xmt_pkt_idx = num_pkts_sent;
+ rc = odp_tm_enq_multi(tm_queue, &xmt_pkts[xmt_pkt_idx], num_pkts);
+ CU_ASSERT(rc <= num_pkts);
+
+ /* Record consumed packets */
+ pkts_sent = 0;
+ for (i = 0; i < rc; i++) {
+ xmt_pkt_desc = &xmt_pkt_descs[xmt_pkt_idx + i];
+ xmt_pkt_desc->xmt_idx = xmt_pkt_idx + i;
+ xmt_pkt_desc->xmt_time = odp_time_local();
+ xmt_pkt_desc->tm_queue = tm_queue;
+ pkts_sent++;
+ }
+
+ /* Free rejected pkts */
+ for (; i < num_pkts; i++) {
+ xmt_pkt_desc = &xmt_pkt_descs[xmt_pkt_idx + i];
+ xmt_pkt_desc->xmt_idx = xmt_pkt_idx + i;
+
+ odp_pkt = xmt_pkts[xmt_pkt_idx + i];
+ odp_packet_free(odp_pkt);
+ xmt_pkts[xmt_pkt_idx + i] = ODP_PACKET_INVALID;
+ }
+ num_pkts_sent += num_pkts;
+
+ return pkts_sent;
+}
+
static uint32_t send_pkts(odp_tm_queue_t tm_queue, uint32_t num_pkts)
{
xmt_pkt_desc_t *xmt_pkt_desc;
@@ -1341,6 +1378,7 @@ static int create_tm_queue(odp_tm_t odp_tm,
queue_params.wred_profile[PKT_GREEN] = green_profile;
queue_params.wred_profile[PKT_YELLOW] = yellow_profile;
queue_params.wred_profile[PKT_RED] = red_profile;
+ queue_params.ordered_enqueue = true;
}
tm_queue = odp_tm_queue_create(odp_tm, &queue_params);
@@ -1404,6 +1442,9 @@ static tm_node_desc_t *create_tm_node(odp_tm_t odp_tm,
node_params.max_fanin = FANIN_RATIO;
node_params.level = level;
+ /* This is ignored when pkt priority mode is not overwrite */
+ node_params.priority = 0;
+
if (parent_node_desc == NULL)
snprintf(node_name, sizeof(node_name), "node_%" PRIu32,
node_idx + 1);
@@ -1610,12 +1651,60 @@ static uint32_t find_child_queues(uint8_t tm_system_idx,
return num_queues;
}
+static void
+set_reqs_based_on_capas(odp_tm_requirements_t *req)
+{
+ odp_packet_color_t color;
+ int j;
+
+ /* Use tm capabilities identified based on egress capabilities
+ * to see what can be enabled.
+ */
+ if (tm_capabilities.ecn_marking_supported)
+ req->ecn_marking_needed = true;
+ if (tm_capabilities.drop_prec_marking_supported)
+ req->drop_prec_marking_needed = true;
+ if (tm_capabilities.tm_queue_wred_supported)
+ req->tm_queue_wred_needed = true;
+ if (tm_capabilities.tm_queue_dual_slope_supported)
+ req->tm_queue_dual_slope_needed = true;
+ if (tm_capabilities.vlan_marking_supported)
+ req->vlan_marking_needed = true;
+ if (tm_capabilities.tm_queue_threshold)
+ req->tm_queue_threshold_needed = true;
+
+ for (j = 0; j < tm_capabilities.max_levels; j++) {
+ if (tm_capabilities.per_level[j].tm_node_threshold)
+ req->per_level[j].tm_node_threshold_needed = true;
+ }
+
+ /* Mark colors as needed if at least one of the marking
+ * feature is needed.
+ * */
+ if (req->ecn_marking_needed || req->drop_prec_marking_needed) {
+ for (color = 0; color < ODP_NUM_PACKET_COLORS; color++)
+ req->marking_colors_needed[color] = true;
+ }
+
+ if (tm_capabilities.tm_queue_shaper_supported)
+ req->tm_queue_shaper_needed = true;
+
+ /* We can use any packet priority mode since it does not affect
+ * our tests. Our scheduler test tests scheduling only in a node
+ * directly connected to TM queues and such nodes see the original
+ * packet priority before it could have been overwritten by any node.
+ */
+ req->pkt_prio_mode = ODP_TM_PKT_PRIO_MODE_PRESERVE;
+ if (!tm_capabilities.pkt_prio_modes[ODP_TM_PKT_PRIO_MODE_PRESERVE])
+ req->pkt_prio_mode = ODP_TM_PKT_PRIO_MODE_OVERWRITE;
+
+}
+
static int create_tm_system(void)
{
odp_tm_level_requirements_t *per_level;
odp_tm_requirements_t requirements;
odp_tm_egress_t egress;
- odp_packet_color_t color;
tm_node_desc_t *root_node_desc;
uint32_t level, max_nodes[ODP_TM_MAX_LEVELS];
odp_tm_t odp_tm, found_odp_tm;
@@ -1625,16 +1714,10 @@ static int create_tm_system(void)
odp_tm_requirements_init(&requirements);
odp_tm_egress_init(&egress);
- requirements.max_tm_queues = NUM_TM_QUEUES + 1;
+ requirements.max_tm_queues = NUM_TM_QUEUES;
requirements.num_levels = NUM_LEVELS;
- requirements.tm_queue_shaper_needed = true;
- requirements.tm_queue_wred_needed = true;
- requirements.tm_queue_dual_slope_needed = true;
- requirements.vlan_marking_needed = false;
- requirements.ecn_marking_needed = true;
- requirements.drop_prec_marking_needed = true;
- for (color = 0; color < ODP_NUM_PACKET_COLORS; color++)
- requirements.marking_colors_needed[color] = true;
+
+ set_reqs_based_on_capas(&requirements);
/* Set the max_num_tm_nodes to be double the expected number of nodes
* at that level */
@@ -1662,10 +1745,8 @@ static int create_tm_system(void)
snprintf(tm_name, sizeof(tm_name), "TM_system_%" PRIu32,
num_odp_tm_systems);
odp_tm = odp_tm_create(tm_name, &requirements, &egress);
- if (odp_tm == ODP_TM_INVALID) {
- ODPH_ERR("odp_tm_create() failed\n");
- return -1;
- }
+ CU_ASSERT_FATAL(odp_tm != ODP_TM_INVALID);
+
odp_tm_systems[num_odp_tm_systems] = odp_tm;
@@ -2105,9 +2186,10 @@ static int destroy_tm_systems(void)
static int traffic_mngr_suite_init(void)
{
- odp_tm_capabilities_t capabilities_array[MAX_CAPABILITIES];
+ odp_tm_capabilities_t egress_capa;
uint32_t payload_len, copy_len;
- int ret, i;
+ odp_tm_egress_t egress;
+ int j, ret;
/* Initialize some global variables. */
num_pkts_made = 0;
@@ -2156,27 +2238,58 @@ static int traffic_mngr_suite_init(void)
if (ret > 0)
goto skip_tests;
- /* Fetch initial dynamic update capabilities, it will be updated
- * later after TM system is created.
- */
- ret = odp_tm_capabilities(capabilities_array, MAX_CAPABILITIES);
- if (ret <= 0)
- return -1;
+ odp_tm_egress_init(&egress);
+ egress.egress_kind = ODP_TM_EGRESS_PKT_IO;
+ egress.pktio = xmt_pktio;
- for (i = 0; i < ret; i++) {
- if (!capabilities_array[i].dynamic_shaper_update)
- dynamic_shaper_update = false;
+ /* Get TM capabilities */
+ ret = odp_tm_egress_capabilities(&egress_capa, &egress);
+ if (ret) {
+ ODPH_ERR("Failed to retrieve tm capabilities");
+ return ret;
+ }
- if (!capabilities_array[i].dynamic_sched_update)
- dynamic_sched_update = false;
+ /* Check for sufficient TM queues */
+ if (egress_capa.max_tm_queues < NUM_TM_QUEUES)
+ goto skip_tests;
- if (!capabilities_array[i].dynamic_threshold_update)
- dynamic_threshold_update = false;
+ /* Check for sufficient TM levels */
+ if (egress_capa.max_levels < NUM_LEVELS)
+ goto skip_tests;
- if (!capabilities_array[i].dynamic_wred_update)
- dynamic_wred_update = false;
+ for (j = 0; j < NUM_LEVELS; j++) {
+ /* Per node fanin */
+ if (egress_capa.per_level[j].max_fanin_per_node <
+ FANIN_RATIO)
+ break;
}
+ if (j != NUM_LEVELS)
+ goto skip_tests;
+
+ if (egress_capa.pkt_prio_modes[ODP_TM_PKT_PRIO_MODE_PRESERVE] &&
+ egress_capa.max_schedulers_per_node < NUM_QUEUES_PER_NODE)
+ goto skip_tests;
+
+ if (!egress_capa.pkt_prio_modes[ODP_TM_PKT_PRIO_MODE_PRESERVE] &&
+ egress_capa.max_schedulers_per_node < 1)
+ goto skip_tests;
+
+ /* Init tm capabilities with matching egress capa until tm is created */
+ tm_capabilities = egress_capa;
+
+ if (!tm_capabilities.dynamic_shaper_update)
+ dynamic_shaper_update = false;
+
+ if (!tm_capabilities.dynamic_sched_update)
+ dynamic_sched_update = false;
+
+ if (!tm_capabilities.dynamic_threshold_update)
+ dynamic_threshold_update = false;
+
+ if (!tm_capabilities.dynamic_wred_update)
+ dynamic_wred_update = false;
+
return 0;
skip_tests:
/* Mark all tests as inactive under this suite */
@@ -2820,13 +2933,13 @@ static int test_sched_queue_priority(const char *shaper_name,
/* Send the low priority dummy pkts first. The arrival order of
* these pkts will be ignored. */
- pkts_sent = send_pkts(tm_queues[NUM_PRIORITIES - 1], 4);
+ pkts_sent = send_pkts_multi(tm_queues[NUM_PRIORITIES - 1], 4);
/* Now send "num_pkts" first at the lowest priority, then "num_pkts"
* at the second lowest priority, etc until "num_pkts" are sent last
* at the highest priority. */
for (priority = NUM_PRIORITIES - 1; 0 <= priority; priority--)
- pkts_sent += send_pkts(tm_queues[priority], num_pkts);
+ pkts_sent += send_pkts_multi(tm_queues[priority], num_pkts);
busy_wait(100 * ODP_TIME_MSEC_IN_NS);
@@ -4108,6 +4221,17 @@ static void traffic_mngr_test_scheduler(void)
INCREASING_WEIGHTS) == 0);
}
+static int traffic_mngr_check_thresholds(void)
+{
+ /* Check only for tm queue threshold support as
+ * we only test queue threshold.
+ */
+ if (!tm_capabilities.tm_queue_threshold)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
static void traffic_mngr_test_thresholds(void)
{
CU_ASSERT(test_threshold("thresh_A", "shaper_A", "node_1_2_1", 0,
@@ -4116,14 +4240,86 @@ static void traffic_mngr_test_thresholds(void)
0, 6400) == 0);
}
-static void traffic_mngr_test_byte_wred(void)
+static int traffic_mngr_check_queue_stats(void)
{
- if (!tm_capabilities.tm_queue_wred_supported) {
- ODPH_DBG("\nwas not run because tm_capabilities indicates"
- " no WRED support\n");
- return;
- }
+ if (tm_capabilities.queue_stats.all_counters == 0)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+static void traffic_mngr_test_queue_stats(void)
+{
+ odp_tm_queue_stats_t stats_start, stats_stop;
+ odp_tm_queue_t tm_queue;
+ odp_tm_capabilities_t capa;
+ pkt_info_t pkt_info;
+ uint32_t pkts_sent;
+ uint32_t num_pkts = MIN(50, MAX_PKTS);
+ uint32_t pkt_len = 256;
+
+ CU_ASSERT_FATAL(odp_tm_capability(odp_tm_systems[0], &capa) == 0);
+
+ /* Reuse threshold test node */
+ tm_queue = find_tm_queue(0, "node_1_2_1", 0);
+ CU_ASSERT_FATAL(tm_queue != ODP_TM_INVALID);
+
+ init_xmt_pkts(&pkt_info);
+ pkt_info.drop_eligible = false;
+ pkt_info.pkt_class = 1;
+ CU_ASSERT_FATAL(make_pkts(num_pkts, pkt_len, &pkt_info) == 0);
+
+ CU_ASSERT(odp_tm_queue_stats(tm_queue, &stats_start) == 0);
+
+ pkts_sent = send_pkts(tm_queue, num_pkts);
+
+ num_rcv_pkts = receive_pkts(odp_tm_systems[0], rcv_pktin, pkts_sent,
+ 1 * GBPS);
+
+ CU_ASSERT(odp_tm_queue_stats(tm_queue, &stats_stop) == 0);
+
+ if (capa.queue_stats.counter.packets)
+ CU_ASSERT(stats_stop.packets >= stats_start.packets + num_rcv_pkts);
+ if (capa.queue_stats.counter.octets)
+ CU_ASSERT(stats_stop.octets >= stats_start.octets + (num_rcv_pkts * pkt_len));
+ CU_ASSERT((stats_stop.discards - stats_start.discards) == 0);
+ CU_ASSERT((stats_stop.discard_octets - stats_start.discard_octets) == 0);
+ CU_ASSERT((stats_stop.errors - stats_start.errors) == 0);
+
+ printf("\nTM queue statistics\n-------------------\n");
+ printf(" discards: %" PRIu64 "\n", stats_stop.discards);
+ printf(" discard octets: %" PRIu64 "\n", stats_stop.discard_octets);
+ printf(" errors: %" PRIu64 "\n", stats_stop.errors);
+ printf(" octets: %" PRIu64 "\n", stats_stop.octets);
+ printf(" packets: %" PRIu64 "\n", stats_stop.packets);
+
+ /* Check that all unsupported counters are still zero */
+ if (!capa.queue_stats.counter.discards)
+ CU_ASSERT(stats_stop.discards == 0);
+ if (!capa.queue_stats.counter.discard_octets)
+ CU_ASSERT(stats_stop.discard_octets == 0);
+ if (!capa.queue_stats.counter.errors)
+ CU_ASSERT(stats_stop.errors == 0);
+ if (!capa.queue_stats.counter.octets)
+ CU_ASSERT(stats_stop.octets == 0);
+ if (!capa.queue_stats.counter.packets)
+ CU_ASSERT(stats_stop.packets == 0);
+
+ flush_leftover_pkts(odp_tm_systems[0], rcv_pktin);
+ CU_ASSERT(odp_tm_is_idle(odp_tm_systems[0]));
+}
+
+static int traffic_mngr_check_wred(void)
+{
+ /* Check if wred is part of created odp_tm_t capabilities */
+ if (!tm_capabilities.tm_queue_wred_supported)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+static void traffic_mngr_test_byte_wred(void)
+{
CU_ASSERT(test_byte_wred("byte_wred_30G", "byte_bw_30G",
"byte_thresh_30G", "node_1_3_1", 1,
ODP_PACKET_GREEN, TM_PERCENT(30), true) == 0);
@@ -4143,12 +4339,6 @@ static void traffic_mngr_test_pkt_wred(void)
{
int rc;
- if (!tm_capabilities.tm_queue_wred_supported) {
- ODPH_DBG("\ntest_pkt_wred was not run because tm_capabilities "
- "indicates no WRED support\n");
- return;
- }
-
rc = test_pkt_wred("pkt_wred_40G", "pkt_bw_40G",
"pkt_thresh_40G", "node_1_3_2", 1,
ODP_PACKET_GREEN, TM_PERCENT(30), false);
@@ -4180,59 +4370,75 @@ static void traffic_mngr_test_pkt_wred(void)
CU_FAIL("70Y test failed\n");
}
+static int traffic_mngr_check_query(void)
+{
+ uint32_t query_flags = (ODP_TM_QUERY_PKT_CNT | ODP_TM_QUERY_BYTE_CNT);
+
+ /* We need both pkt count and byte count query support */
+ if ((tm_capabilities.tm_queue_query_flags & query_flags) != query_flags)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
static void traffic_mngr_test_query(void)
{
CU_ASSERT(test_query_functions("query_shaper", "node_1_3_3", 3, 10)
== 0);
}
-static void traffic_mngr_test_marking(void)
+static int traffic_mngr_check_vlan_marking(void)
{
- odp_packet_color_t color;
- odp_bool_t test_ecn, test_drop_prec;
- int rc;
+ if (!tm_capabilities.vlan_marking_supported)
+ return ODP_TEST_INACTIVE;
+ return ODP_TEST_ACTIVE;
+}
- if (tm_capabilities.vlan_marking_supported) {
- for (color = 0; color < ODP_NUM_PKT_COLORS; color++) {
- rc = test_vlan_marking("node_1_3_1", color);
- CU_ASSERT(rc == 0);
- }
- } else {
- ODPH_DBG("\ntest_vlan_marking was not run because "
- "tm_capabilities indicates no vlan marking support\n");
- }
+static int traffic_mngr_check_ecn_marking(void)
+{
+ if (!tm_capabilities.ecn_marking_supported)
+ return ODP_TEST_INACTIVE;
+ return ODP_TEST_ACTIVE;
+}
- if (tm_capabilities.ecn_marking_supported) {
- test_ecn = true;
- test_drop_prec = false;
+static int traffic_mngr_check_drop_prec_marking(void)
+{
+ if (!tm_capabilities.drop_prec_marking_supported)
+ return ODP_TEST_INACTIVE;
+ return ODP_TEST_ACTIVE;
+}
- rc = ip_marking_tests("node_1_3_2", test_ecn, test_drop_prec);
- CU_ASSERT(rc == 0);
- } else {
- ODPH_DBG("\necn_marking tests were not run because "
- "tm_capabilities indicates no ecn marking support\n");
- }
+static int traffic_mngr_check_ecn_drop_prec_marking(void)
+{
+ if (!tm_capabilities.ecn_marking_supported ||
+ !tm_capabilities.drop_prec_marking_supported)
+ return ODP_TEST_INACTIVE;
+ return ODP_TEST_ACTIVE;
+}
- if (tm_capabilities.drop_prec_marking_supported) {
- test_ecn = false;
- test_drop_prec = true;
+static void traffic_mngr_test_vlan_marking(void)
+{
+ odp_packet_color_t color;
- rc = ip_marking_tests("node_1_4_2", test_ecn, test_drop_prec);
- CU_ASSERT(rc == 0);
- } else {
- ODPH_DBG("\ndrop_prec marking tests were not run because "
- "tm_capabilities indicates no drop precedence "
- "marking support\n");
+ for (color = 0; color < ODP_NUM_PKT_COLORS; color++) {
+ /* Tree is 3 level */
+ CU_ASSERT(test_vlan_marking("node_1_3_1", color) == 0);
}
+}
- if (tm_capabilities.ecn_marking_supported &&
- tm_capabilities.drop_prec_marking_supported) {
- test_ecn = true;
- test_drop_prec = true;
+static void traffic_mngr_test_ecn_marking(void)
+{
+ CU_ASSERT(ip_marking_tests("node_1_3_2", true, false) == 0);
+}
- rc = ip_marking_tests("node_1_4_2", test_ecn, test_drop_prec);
- CU_ASSERT(rc == 0);
- }
+static void traffic_mngr_test_drop_prec_marking(void)
+{
+ CU_ASSERT(ip_marking_tests("node_1_4_2", false, true) == 0);
+}
+
+static void traffic_mngr_test_ecn_drop_prec_marking(void)
+{
+ CU_ASSERT(ip_marking_tests("node_1_4_2", true, true) == 0);
}
static void traffic_mngr_test_fanin_info(void)
@@ -4252,17 +4458,32 @@ odp_testinfo_t traffic_mngr_suite[] = {
ODP_TEST_INFO(traffic_mngr_test_tm_create),
ODP_TEST_INFO(traffic_mngr_test_shaper_profile),
ODP_TEST_INFO(traffic_mngr_test_sched_profile),
- ODP_TEST_INFO(traffic_mngr_test_threshold_profile),
- ODP_TEST_INFO(traffic_mngr_test_wred_profile),
+ ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_threshold_profile,
+ traffic_mngr_check_thresholds),
+ ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_wred_profile,
+ traffic_mngr_check_wred),
ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_shaper,
traffic_mngr_check_shaper),
ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_scheduler,
traffic_mngr_check_scheduler),
- ODP_TEST_INFO(traffic_mngr_test_thresholds),
- ODP_TEST_INFO(traffic_mngr_test_byte_wred),
- ODP_TEST_INFO(traffic_mngr_test_pkt_wred),
- ODP_TEST_INFO(traffic_mngr_test_query),
- ODP_TEST_INFO(traffic_mngr_test_marking),
+ ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_thresholds,
+ traffic_mngr_check_thresholds),
+ ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_byte_wred,
+ traffic_mngr_check_wred),
+ ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_pkt_wred,
+ traffic_mngr_check_wred),
+ ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_query,
+ traffic_mngr_check_query),
+ ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_queue_stats,
+ traffic_mngr_check_queue_stats),
+ ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_vlan_marking,
+ traffic_mngr_check_vlan_marking),
+ ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_ecn_marking,
+ traffic_mngr_check_ecn_marking),
+ ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_drop_prec_marking,
+ traffic_mngr_check_drop_prec_marking),
+ ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_ecn_drop_prec_marking,
+ traffic_mngr_check_ecn_drop_prec_marking),
ODP_TEST_INFO(traffic_mngr_test_fanin_info),
ODP_TEST_INFO(traffic_mngr_test_destroy),
ODP_TEST_INFO_NULL,