author     Matias Elo <matias.elo@nokia.com>  2021-01-15 10:45:42 +0200
committer  GitHub <noreply@github.com>        2021-01-15 10:45:42 +0200
commit     0ecc932423ba63ef10239f64f1b2cde16e155732 (patch)
tree       abbee3e3f4ba6fdda18aa2d5eed9eadb0d4db51e
parent     041cfc475dcd48de182e8225aaec76ed55257fd7 (diff)
parent     8bdd456481101189ba80e18b21b4e7a4295cec70 (diff)
Merge ODP linux-generic v1.25.2.0 (tag: v1.25.2.0_DPDK_19.11)
Merge ODP linux-generic v1.25.2.0 into ODP-DPDK.
-rw-r--r--  .github/workflows/ci-pipeline.yml | 2
-rw-r--r--  configure.ac | 4
-rw-r--r--  example/classifier/.gitignore | 1
-rw-r--r--  example/classifier/Makefile.am | 29
-rw-r--r--  example/classifier/odp_classifier.c | 95
-rwxr-xr-x  example/classifier/odp_classifier_run.sh | 34
-rw-r--r--  example/classifier/udp64.pcap | bin 0 -> 18544 bytes
-rw-r--r--  example/generator/.gitignore | 1
-rw-r--r--  example/generator/Makefile.am | 7
-rwxr-xr-x  example/generator/generator_null_test.sh | 24
-rwxr-xr-x  example/generator/generator_run.sh | 31
-rw-r--r--  example/generator/odp_generator.c | 11
-rw-r--r--  example/packet/odp_packet_dump.c | 8
-rw-r--r--  example/sysinfo/odp_sysinfo.c | 154
-rw-r--r--  example/timer/odp_timer_accuracy.c | 83
-rw-r--r--  example/timer/odp_timer_simple.c | 1
-rw-r--r--  helper/Makefile.am | 2
-rw-r--r--  helper/include/odp/helper/gtp.h | 38
-rw-r--r--  helper/include/odp/helper/igmp.h | 39
-rw-r--r--  helper/include/odp/helper/ip.h | 1
-rw-r--r--  helper/include/odp/helper/odph_api.h | 2
-rw-r--r--  include/Makefile.am | 2
-rw-r--r--  include/odp/api/abi-default/event.h | 11
-rw-r--r--  include/odp/api/abi-default/packet.h | 5
-rw-r--r--  include/odp/api/protocols.h | 26
-rw-r--r--  include/odp/api/spec/buffer.h | 14
-rw-r--r--  include/odp/api/spec/classification.h | 44
-rw-r--r--  include/odp/api/spec/event.h | 16
-rw-r--r--  include/odp/api/spec/ipsec.h | 175
-rw-r--r--  include/odp/api/spec/packet.h | 193
-rw-r--r--  include/odp/api/spec/packet_flags.h | 6
-rw-r--r--  include/odp/api/spec/packet_io.h | 83
-rw-r--r--  include/odp/api/spec/pool.h | 189
-rw-r--r--  include/odp/api/spec/protocols.h | 44
-rw-r--r--  include/odp/api/spec/timer.h | 21
-rw-r--r--  platform/linux-dpdk/Makefile.am | 6
-rw-r--r--  platform/linux-dpdk/include-abi/odp/api/abi/packet.h | 5
l---------  platform/linux-dpdk/include/event_vector_internal.h | 1
l---------  platform/linux-dpdk/include/odp/api/plat/event_vector_inline_types.h | 1
-rw-r--r--  platform/linux-dpdk/include/odp/api/plat/packet_inline_types.h | 7
-rw-r--r--  platform/linux-dpdk/include/odp/api/plat/packet_inlines.h | 6
l---------  platform/linux-dpdk/include/odp/api/plat/packet_vector_inlines.h | 1
-rw-r--r--  platform/linux-dpdk/include/odp/api/plat/std_clib_inlines.h | 6
-rw-r--r--  platform/linux-dpdk/include/odp_buffer_internal.h | 6
-rw-r--r--  platform/linux-dpdk/include/odp_config_internal.h | 3
-rw-r--r--  platform/linux-dpdk/include/odp_eventdev_internal.h | 6
-rw-r--r--  platform/linux-dpdk/include/odp_packet_internal.h | 22
-rw-r--r--  platform/linux-dpdk/include/odp_packet_io_internal.h | 24
-rw-r--r--  platform/linux-dpdk/include/odp_pool_internal.h | 9
-rw-r--r--  platform/linux-dpdk/include/odp_queue_basic_internal.h | 14
-rw-r--r--  platform/linux-dpdk/m4/configure.m4 | 2
-rw-r--r--  platform/linux-dpdk/odp_buffer.c | 51
-rw-r--r--  platform/linux-dpdk/odp_init.c | 8
-rw-r--r--  platform/linux-dpdk/odp_packet.c | 48
-rw-r--r--  platform/linux-dpdk/odp_packet_dpdk.c | 24
-rw-r--r--  platform/linux-dpdk/odp_pool.c | 242
-rw-r--r--  platform/linux-dpdk/odp_queue_basic.c | 88
-rw-r--r--  platform/linux-dpdk/odp_queue_eventdev.c | 132
-rw-r--r--  platform/linux-dpdk/odp_queue_if.c | 10
-rw-r--r--  platform/linux-dpdk/odp_queue_spsc.c | 2
-rw-r--r--  platform/linux-dpdk/odp_schedule_eventdev.c | 267
-rw-r--r--  platform/linux-dpdk/odp_schedule_if.c | 90
-rw-r--r--  platform/linux-dpdk/odp_system_info.c | 5
-rw-r--r--  platform/linux-dpdk/odp_thread.c | 20
-rw-r--r--  platform/linux-dpdk/odp_timer.c | 6
-rw-r--r--  platform/linux-dpdk/test/example/Makefile.am | 2
-rw-r--r--  platform/linux-dpdk/test/example/classifier/Makefile.am | 1
-rw-r--r--  platform/linux-dpdk/test/example/classifier/pktio_env | 47
-rw-r--r--  platform/linux-dpdk/test/example/generator/Makefile.am | 1
-rw-r--r--  platform/linux-dpdk/test/example/generator/pktio_env | 34
-rw-r--r--  platform/linux-dpdk/test/example/switch/pktio_env | 1
-rw-r--r--  platform/linux-generic/Makefile.am | 6
-rw-r--r--  platform/linux-generic/arch/aarch64/odp_sysinfo_parse.c | 45
-rw-r--r--  platform/linux-generic/arch/arm/odp_sysinfo_parse.c | 4
-rw-r--r--  platform/linux-generic/arch/default/odp_sysinfo_parse.c | 4
-rw-r--r--  platform/linux-generic/arch/mips64/odp_sysinfo_parse.c | 4
-rw-r--r--  platform/linux-generic/arch/powerpc/odp_sysinfo_parse.c | 4
-rw-r--r--  platform/linux-generic/arch/x86/cpu_flags.c | 4
-rw-r--r--  platform/linux-generic/arch/x86/cpu_flags.h | 4
-rw-r--r--  platform/linux-generic/arch/x86/odp_cpu_cycles.c | 2
-rw-r--r--  platform/linux-generic/arch/x86/odp_sysinfo_parse.c | 6
-rw-r--r--  platform/linux-generic/include-abi/odp/api/abi/event.h | 9
-rw-r--r--  platform/linux-generic/include-abi/odp/api/abi/packet.h | 5
-rw-r--r--  platform/linux-generic/include/odp/api/plat/event_vector_inline_types.h | 37
-rw-r--r--  platform/linux-generic/include/odp/api/plat/packet_inline_types.h | 7
-rw-r--r--  platform/linux-generic/include/odp/api/plat/packet_vector_inlines.h | 83
-rw-r--r--  platform/linux-generic/include/odp_bitmap_internal.h | 315
-rw-r--r--  platform/linux-generic/include/odp_buffer_internal.h | 1
-rw-r--r--  platform/linux-generic/include/odp_classification_datamodel.h | 9
-rw-r--r--  platform/linux-generic/include/odp_classification_internal.h | 10
-rw-r--r--  platform/linux-generic/include/odp_config_internal.h | 6
-rw-r--r--  platform/linux-generic/include/odp_ethtool_rss.h | 14
-rw-r--r--  platform/linux-generic/include/odp_ethtool_stats.h | 2
-rw-r--r--  platform/linux-generic/include/odp_event_vector_internal.h | 56
-rw-r--r--  platform/linux-generic/include/odp_ipsec_internal.h | 31
-rw-r--r--  platform/linux-generic/include/odp_packet_internal.h | 18
-rw-r--r--  platform/linux-generic/include/odp_packet_io_internal.h | 30
-rw-r--r--  platform/linux-generic/include/odp_packet_io_stats.h | 10
-rw-r--r--  platform/linux-generic/include/odp_pool_internal.h | 10
-rw-r--r--  platform/linux-generic/include/odp_queue_basic_internal.h | 14
-rw-r--r--  platform/linux-generic/include/odp_queue_if.h | 2
-rw-r--r--  platform/linux-generic/include/odp_queue_lf.h | 14
-rw-r--r--  platform/linux-generic/include/odp_queue_scalable_internal.h | 4
-rw-r--r--  platform/linux-generic/include/odp_ring_internal.h | 11
-rw-r--r--  platform/linux-generic/include/odp_schedule_if.h | 10
-rw-r--r--  platform/linux-generic/include/odp_schedule_scalable.h | 8
-rw-r--r--  platform/linux-generic/include/odp_schedule_scalable_ordered.h | 20
-rw-r--r--  platform/linux-generic/include/odp_socket_common.h | 14
-rw-r--r--  platform/linux-generic/include/odp_sysfs_stats.h | 4
-rw-r--r--  platform/linux-generic/include/odp_sysinfo_internal.h | 4
-rw-r--r--  platform/linux-generic/m4/configure.m4 | 2
-rw-r--r--  platform/linux-generic/odp_bitmap.c | 315
-rw-r--r--  platform/linux-generic/odp_buffer.c | 38
-rw-r--r--  platform/linux-generic/odp_classification.c | 50
-rw-r--r--  platform/linux-generic/odp_event.c | 37
-rw-r--r--  platform/linux-generic/odp_init.c | 8
-rw-r--r--  platform/linux-generic/odp_ipsec.c | 312
-rw-r--r--  platform/linux-generic/odp_ipsec_sad.c | 156
-rw-r--r--  platform/linux-generic/odp_packet.c | 52
-rw-r--r--  platform/linux-generic/odp_packet_api.c | 1
-rw-r--r--  platform/linux-generic/odp_packet_io.c | 402
-rw-r--r--  platform/linux-generic/odp_packet_vector.c | 133
-rw-r--r--  platform/linux-generic/odp_pool.c | 285
-rw-r--r--  platform/linux-generic/odp_queue_basic.c | 102
-rw-r--r--  platform/linux-generic/odp_queue_if.c | 22
-rw-r--r--  platform/linux-generic/odp_queue_lf.c | 14
-rw-r--r--  platform/linux-generic/odp_queue_scalable.c | 56
-rw-r--r--  platform/linux-generic/odp_queue_spsc.c | 6
-rw-r--r--  platform/linux-generic/odp_schedule_basic.c | 27
-rw-r--r--  platform/linux-generic/odp_schedule_if.c | 90
-rw-r--r--  platform/linux-generic/odp_schedule_scalable.c | 95
-rw-r--r--  platform/linux-generic/odp_schedule_scalable_ordered.c | 36
-rw-r--r--  platform/linux-generic/odp_schedule_sp.c | 50
-rw-r--r--  platform/linux-generic/odp_system_info.c | 5
-rw-r--r--  platform/linux-generic/odp_thread.c | 20
-rw-r--r--  platform/linux-generic/odp_timer.c | 6
-rw-r--r--  platform/linux-generic/odp_traffic_mngr.c | 8
-rw-r--r--  platform/linux-generic/pktio/dpdk.c | 22
-rw-r--r--  platform/linux-generic/pktio/dpdk_parse.c | 4
-rw-r--r--  platform/linux-generic/pktio/ethtool_rss.c | 16
-rw-r--r--  platform/linux-generic/pktio/io_ops.c | 14
-rw-r--r--  platform/linux-generic/pktio/ipc.c | 2
-rw-r--r--  platform/linux-generic/pktio/loop.c | 14
-rw-r--r--  platform/linux-generic/pktio/netmap.c | 62
-rw-r--r--  platform/linux-generic/pktio/null.c | 2
-rw-r--r--  platform/linux-generic/pktio/pcap.c | 33
-rw-r--r--  platform/linux-generic/pktio/pktio_common.c | 8
-rw-r--r--  platform/linux-generic/pktio/socket.c | 42
-rw-r--r--  platform/linux-generic/pktio/socket_common.c | 16
-rw-r--r--  platform/linux-generic/pktio/socket_mmap.c | 54
-rw-r--r--  platform/linux-generic/pktio/stats/ethtool_stats.c | 2
-rw-r--r--  platform/linux-generic/pktio/stats/packet_io_stats.c | 30
-rw-r--r--  platform/linux-generic/pktio/stats/sysfs_stats.c | 4
-rw-r--r--  platform/linux-generic/pktio/tap.c | 36
-rw-r--r--  platform/linux-generic/test/Makefile.am | 16
-rw-r--r--  platform/linux-generic/test/example/Makefile.am | 2
-rw-r--r--  platform/linux-generic/test/example/classifier/Makefile.am | 1
-rw-r--r--  platform/linux-generic/test/example/classifier/pktio_env | 44
-rw-r--r--  platform/linux-generic/test/example/generator/Makefile.am | 1
-rw-r--r--  platform/linux-generic/test/example/generator/pktio_env | 34
-rw-r--r--  platform/linux-generic/test/example/switch/pktio_env | 1
-rwxr-xr-x  scripts/ci/build_riscv64.sh | 32
-rw-r--r--  test/common/test_packet_parser.h | 13
-rw-r--r--  test/performance/Makefile.am | 3
-rw-r--r--  test/performance/odp_l2fwd.c | 402
-rw-r--r--  test/performance/odp_sched_latency.c | 4
-rw-r--r--  test/performance/odp_sched_perf.c | 2
-rwxr-xr-x  test/performance/odp_scheduling_run.sh | 6
-rw-r--r--  test/performance/odp_timer_perf.c | 506
-rwxr-xr-x  test/performance/odp_timer_perf_run.sh | 33
-rw-r--r--  test/validation/api/atomic/atomic.c | 64
-rw-r--r--  test/validation/api/classification/classification.c | 5
-rw-r--r--  test/validation/api/classification/classification.h | 1
-rw-r--r--  test/validation/api/classification/odp_classification_common.c | 211
-rw-r--r--  test/validation/api/classification/odp_classification_test_pmr.c | 1446
-rw-r--r--  test/validation/api/classification/odp_classification_tests.c | 252
-rw-r--r--  test/validation/api/classification/odp_classification_testsuites.h | 42
-rw-r--r--  test/validation/api/crypto/odp_crypto_test_inp.c | 1
-rw-r--r--  test/validation/api/event/event.c | 12
-rw-r--r--  test/validation/api/ipsec/ipsec.c | 211
-rw-r--r--  test/validation/api/ipsec/ipsec.h | 35
-rw-r--r--  test/validation/api/ipsec/ipsec_test_in.c | 455
-rw-r--r--  test/validation/api/ipsec/ipsec_test_out.c | 572
-rw-r--r--  test/validation/api/ipsec/test_vectors.h | 138
-rw-r--r--  test/validation/api/packet/packet.c | 448
-rw-r--r--  test/validation/api/pktio/pktio.c | 487
-rw-r--r--  test/validation/api/pool/pool.c | 462
-rw-r--r--  test/validation/api/scheduler/scheduler.c | 117
-rw-r--r--  test/validation/api/timer/timer.c | 217
-rw-r--r--  test/validation/api/traffic_mngr/traffic_mngr.c | 4
190 files changed, 8737 insertions, 3667 deletions
diff --git a/.github/workflows/ci-pipeline.yml b/.github/workflows/ci-pipeline.yml
index b47528977..596e469c5 100644
--- a/.github/workflows/ci-pipeline.yml
+++ b/.github/workflows/ci-pipeline.yml
@@ -25,7 +25,7 @@ jobs:
uses: webispy/checkpatch-action@v7
- name: Check push
- if: github.event_name == 'push'
+ if: github.event_name == 'push' && github.ref != 'refs/heads/master'
run: |
AFTER=${{ github.event.after }}
BEFORE=${{ github.event.before }}
diff --git a/configure.ac b/configure.ac
index 403e03c40..123b1431b 100644
--- a/configure.ac
+++ b/configure.ac
@@ -4,7 +4,7 @@ AC_PREREQ([2.5])
##########################################################################
m4_define([odpapi_generation_version], [1])
m4_define([odpapi_major_version], [25])
-m4_define([odpapi_minor_version], [0])
+m4_define([odpapi_minor_version], [2])
m4_define([odpapi_point_version], [0])
m4_define([odpapi_version],
[odpapi_generation_version.odpapi_major_version.odpapi_minor_version.odpapi_point_version])
@@ -22,7 +22,7 @@ AC_SUBST(ODP_VERSION_API_MINOR)
##########################################################################
m4_define([odph_version_generation], [1])
m4_define([odph_version_major], [0])
-m4_define([odph_version_minor], [3])
+m4_define([odph_version_minor], [4])
m4_define([odph_version],
[odph_version_generation.odph_version_major.odph_version_minor])
diff --git a/example/classifier/.gitignore b/example/classifier/.gitignore
index a356d48da..9156628bb 100644
--- a/example/classifier/.gitignore
+++ b/example/classifier/.gitignore
@@ -1 +1,2 @@
odp_classifier
+pktio_env
diff --git a/example/classifier/Makefile.am b/example/classifier/Makefile.am
index a0003e9a4..4be6648b6 100644
--- a/example/classifier/Makefile.am
+++ b/example/classifier/Makefile.am
@@ -3,3 +3,32 @@ include $(top_srcdir)/example/Makefile.inc
bin_PROGRAMS = odp_classifier
odp_classifier_SOURCES = odp_classifier.c
+
+if test_example
+if ODP_PKTIO_PCAP
+TESTS = odp_classifier_run.sh
+endif
+endif
+EXTRA_DIST = odp_classifier_run.sh udp64.pcap
+
+# If building out-of-tree, make check will not copy the scripts and data to the
+# $(builddir) assuming that all commands are run locally. However this prevents
+# running tests on a remote target using LOG_COMPILER.
+# So copy all script and data files explicitly here.
+all-local:
+ if [ "x$(srcdir)" != "x$(builddir)" ]; then \
+ for f in $(EXTRA_DIST); do \
+ if [ -e $(srcdir)/$$f ]; then \
+ mkdir -p $(builddir)/$$(dirname $$f); \
+ cp -f $(srcdir)/$$f $(builddir)/$$f; \
+ fi \
+ done \
+ fi
+ ln -f -s $(top_srcdir)/platform/$(with_platform)/test/example/classifier/pktio_env \
+ pktio_env
+clean-local:
+ if [ "x$(srcdir)" != "x$(builddir)" ]; then \
+ for f in $(EXTRA_DIST); do \
+ rm -f $(builddir)/$$f; \
+ done \
+ fi
diff --git a/example/classifier/odp_classifier.c b/example/classifier/odp_classifier.c
index 3cdd116fc..859802358 100644
--- a/example/classifier/odp_classifier.c
+++ b/example/classifier/odp_classifier.c
@@ -1,5 +1,6 @@
/* Copyright (c) 2015-2018, Linaro Limited
* Copyright (c) 2019-2020, Nokia
+ * Copyright (C) 2020, Marvell
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -74,8 +75,15 @@ typedef struct {
} global_statistics;
typedef struct {
+ char cos_name[ODP_COS_NAME_LEN];
+ uint64_t count;
+} ci_pass_counters;
+
+typedef struct {
global_statistics stats[MAX_PMR_COUNT];
+ ci_pass_counters ci_pass_rules[MAX_PMR_COUNT];
int policy_count; /**< global policy count */
+ int num_ci_pass_rules; /**< ci pass count */
int appl_mode; /**< application mode */
odp_atomic_u64_t total_packets; /**< total received packets */
unsigned int cpu_count; /**< Number of CPUs to use */
@@ -100,6 +108,38 @@ static int parse_args(int argc, char *argv[], appl_args_t *appl_args);
static void print_info(char *progname, appl_args_t *appl_args);
static void usage(void);
+static inline int check_ci_pass_count(appl_args_t *args)
+{
+ int i, j;
+ uint64_t count;
+
+ if (args->num_ci_pass_rules == 0)
+ return 0;
+
+ for (i = 0; i < args->num_ci_pass_rules; i++) {
+ for (j = 0; j < args->policy_count; j++) {
+ if (!strcmp(args->stats[j].cos_name,
+ args->ci_pass_rules[i].cos_name)) {
+ count = odp_atomic_load_u64(&args->stats[j].queue_pkt_count);
+ if (args->ci_pass_rules[i].count > count) {
+ ODPH_ERR("Error: Cos = %s, expected packets = %" PRIu64 ","
+ "received packet = %" PRIu64 "\n",
+ args->stats[j].cos_name,
+ args->ci_pass_rules[i].count, count);
+ return -1;
+ }
+ break;
+ }
+ }
+ if (j == args->policy_count) {
+ ODPH_ERR("Error: invalid Cos:%s specified for CI pass count\n",
+ args->ci_pass_rules[i].cos_name);
+ return -1;
+ }
+ }
+ return 0;
+}
+
static inline void print_cls_statistics(appl_args_t *args)
{
int i;
@@ -670,6 +710,11 @@ int main(int argc, char *argv[])
args->shutdown = 1;
odph_thread_join(thread_tbl, num_workers);
+ if (check_ci_pass_count(args)) {
+ ODPH_ERR("Error: Packet count verification failed\n");
+ exit(EXIT_FAILURE);
+ }
+
for (i = 0; i < args->policy_count; i++) {
if ((i != args->policy_count - 1) &&
odp_cls_pmr_destroy(args->stats[i].pmr))
@@ -1006,6 +1051,42 @@ static int parse_pmr_policy(appl_args_t *appl_args, char *optarg)
return 0;
}
+static int parse_policy_ci_pass_count(appl_args_t *appl_args, char *optarg)
+{
+ int num_ci_pass_rules;
+ char *token, *value;
+ size_t len;
+ ci_pass_counters *ci_pass_rules;
+ char *count_str;
+
+ num_ci_pass_rules = appl_args->num_ci_pass_rules;
+ ci_pass_rules = appl_args->ci_pass_rules;
+
+ /* last array index is needed for default queue */
+ if (num_ci_pass_rules >= MAX_PMR_COUNT) {
+ ODPH_ERR("Too many ci pass counters. Max count is %i.\n",
+ MAX_PMR_COUNT);
+ return -1;
+ }
+
+ len = strlen(optarg);
+ len++;
+ count_str = malloc(len);
+ strcpy(count_str, optarg);
+
+ token = strtok(count_str, ":");
+ value = strtok(NULL, ":");
+ if (!token || !value) {
+ free(count_str);
+ return -1;
+ }
+ strcpy(ci_pass_rules[num_ci_pass_rules].cos_name, token);
+ ci_pass_rules[num_ci_pass_rules].count = atoll(value);
+ appl_args->num_ci_pass_rules++;
+ free(count_str);
+ return 0;
+}
+
/**
* Parse and store the command line arguments
*
@@ -1029,13 +1110,14 @@ static int parse_args(int argc, char *argv[], appl_args_t *appl_args)
{"policy", required_argument, NULL, 'p'},
{"mode", required_argument, NULL, 'm'},
{"time", required_argument, NULL, 't'},
+ {"ci_pass", required_argument, NULL, 'C'},
{"promisc_mode", no_argument, NULL, 'P'},
{"verbose", no_argument, NULL, 'v'},
{"help", no_argument, NULL, 'h'},
{NULL, 0, NULL, 0}
};
- static const char *shortopts = "+c:t:i:p:m:t:Pvh";
+ static const char *shortopts = "+c:t:i:p:m:t:C:Pvh";
appl_args->cpu_count = 1; /* Use one worker by default */
appl_args->verbose = 0;
@@ -1086,6 +1168,12 @@ static int parse_args(int argc, char *argv[], appl_args_t *appl_args)
else
appl_args->appl_mode = APPL_MODE_REPLY;
break;
+ case 'C':
+ if (parse_policy_ci_pass_count(appl_args, optarg)) {
+ ret = -1;
+ break;
+ }
+ break;
case 'P':
appl_args->promisc_mode = 1;
break;
@@ -1176,6 +1264,11 @@ static void usage(void)
" 0: Runs in infinite loop\n"
" default: Runs in infinite loop\n"
"\n"
+ " -C, --ci_pass <dst queue:count>\n"
+ " Minimum acceptable packet count for a CoS destination queue.\n"
+ " If the received packet count is smaller than this value,\n"
+ " the application will exit with an error.\n"
+ " E.g: -C \"queue1:100\" -C \"queue2:200\" -C \"DefaultQueue:100\"\n"
" -P, --promisc_mode Enable promiscuous mode.\n"
" -v, --verbose Verbose output.\n"
" -h, --help Display help and exit.\n"
diff --git a/example/classifier/odp_classifier_run.sh b/example/classifier/odp_classifier_run.sh
new file mode 100755
index 000000000..2b5fe7ebb
--- /dev/null
+++ b/example/classifier/odp_classifier_run.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+#
+# Copyright (c) 2020, Marvell
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+if [ -f ./pktio_env ]; then
+ . ./pktio_env
+else
+ echo "BUG: unable to find pktio_env!"
+ echo "pktio_env has to be in current directory"
+ exit 1
+fi
+
+setup_interfaces
+
+./odp_classifier${EXEEXT} -t $TIME_OUT_VAL -i $IF0 -m 0 -p \
+ "ODP_PMR_SIP_ADDR:10.10.10.0:0xFFFFFF00:queue1" -P -C "queue1:${CPASS_COUNT_ARG1}" \
+ -C "DefaultCos:${CPASS_COUNT_ARG2}"
+
+STATUS=$?
+
+if [ ${STATUS} -ne 0 ]; then
+ echo "Error: status ${STATUS}"
+ exit 1
+fi
+
+validate_result
+
+cleanup_interfaces
+
+exit 0
diff --git a/example/classifier/udp64.pcap b/example/classifier/udp64.pcap
new file mode 100644
index 000000000..fb05ef0f7
--- /dev/null
+++ b/example/classifier/udp64.pcap
Binary files differ
diff --git a/example/generator/.gitignore b/example/generator/.gitignore
index 85aa1d1ec..37364a254 100644
--- a/example/generator/.gitignore
+++ b/example/generator/.gitignore
@@ -1 +1,2 @@
odp_generator
+pktio_env
diff --git a/example/generator/Makefile.am b/example/generator/Makefile.am
index fd32949e2..c3c08a663 100644
--- a/example/generator/Makefile.am
+++ b/example/generator/Makefile.am
@@ -7,10 +7,9 @@ odp_generator_SOURCES = odp_generator.c
TEST_EXTENSIONS = .sh
if test_example
-TESTS = generator_null_test.sh
-TESTS_ENVIRONMENT += ODP_PLATFORM=$(with_platform)
+TESTS = generator_run.sh
endif
-EXTRA_DIST = generator_null_test.sh
+EXTRA_DIST = generator_run.sh
# If building out-of-tree, make check will not copy the scripts and data to the
# $(builddir) assuming that all commands are run locally. However this prevents
@@ -25,6 +24,8 @@ all-local:
fi \
done \
fi
+ ln -f -s $(top_srcdir)/platform/$(with_platform)/test/example/generator/pktio_env \
+ pktio_env
clean-local:
if [ "x$(srcdir)" != "x$(builddir)" ]; then \
for f in $(EXTRA_DIST); do \
diff --git a/example/generator/generator_null_test.sh b/example/generator/generator_null_test.sh
deleted file mode 100755
index 3c37a99d2..000000000
--- a/example/generator/generator_null_test.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/bash
-#
-# Copyright (c) 2018, Linaro Limited
-# All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
-#
-
-if [ -n "${ODP_PLATFORM}" -a "x${ODP_PLATFORM}" != "xlinux-generic" ] &&
- [ -n "${ODP_PLATFORM}" -a "x${ODP_PLATFORM}" != "xlinux-dpdk" ]
-then
- echo "null pktio might be unsupported on this platform, skipping"
- exit 77
-fi
-
-./odp_generator${EXEEXT} -w 1 -n 1 -I null:0 -m u
-STATUS=$?
-
-if [ "$STATUS" -ne 0 ]; then
- echo "Error: status was: $STATUS, expected 0"
- exit 1
-fi
-
-exit 0
diff --git a/example/generator/generator_run.sh b/example/generator/generator_run.sh
new file mode 100755
index 000000000..528c1b595
--- /dev/null
+++ b/example/generator/generator_run.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+#
+# Copyright (c) 2020, Marvell
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+if [ -f ./pktio_env ]; then
+ . ./pktio_env
+else
+ echo "BUG: unable to find pktio_env!"
+ echo "pktio_env has to be in current directory"
+ exit 1
+fi
+
+setup_interfaces
+
+./odp_generator${EXEEXT} -w 1 -n 1 -I $IF0 -m u
+STATUS=$?
+
+if [ "$STATUS" -ne 0 ]; then
+ echo "Error: status was: $STATUS, expected 0"
+ exit 1
+fi
+
+validate_result
+
+cleanup_interfaces
+
+exit 0
diff --git a/example/generator/odp_generator.c b/example/generator/odp_generator.c
index a7043fefa..469314718 100644
--- a/example/generator/odp_generator.c
+++ b/example/generator/odp_generator.c
@@ -186,14 +186,14 @@ static void sig_handler(int signo ODP_UNUSED)
*/
static int scan_ip(char *buf, unsigned int *paddr)
{
- int part1, part2, part3, part4;
+ unsigned int part1, part2, part3, part4;
char tail = 0;
int field;
if (buf == NULL)
return 0;
- field = sscanf(buf, "%d . %d . %d . %d %c",
+ field = sscanf(buf, "%u . %u . %u . %u %c",
&part1, &part2, &part3, &part4, &tail);
if (field < 4 || field > 5) {
@@ -206,15 +206,14 @@ static int scan_ip(char *buf, unsigned int *paddr)
return 0;
}
- if ((part1 >= 0 && part1 <= 255) && (part2 >= 0 && part2 <= 255) &&
- (part3 >= 0 && part3 <= 255) && (part4 >= 0 && part4 <= 255)) {
+ if (part1 <= 255 && part2 <= 255 && part3 <= 255 && part4 <= 255) {
if (paddr)
*paddr = part1 << 24 | part2 << 16 | part3 << 8 | part4;
return 1;
- } else {
- printf("not good ip %d:%d:%d:%d/n", part1, part2, part3, part4);
}
+ printf("not good ip %u:%u:%u:%u/n", part1, part2, part3, part4);
+
return 0;
}
diff --git a/example/packet/odp_packet_dump.c b/example/packet/odp_packet_dump.c
index 04d6576c5..96b8019a6 100644
--- a/example/packet/odp_packet_dump.c
+++ b/example/packet/odp_packet_dump.c
@@ -492,7 +492,10 @@ static int print_packet(test_global_t *global, odp_packet_t pkt,
nsec = nsec - (sec * ODP_TIME_SEC_IN_NS);
pktio = odp_packet_input(pkt);
- odp_pktio_info(pktio, &pktio_info);
+ if (odp_pktio_info(pktio, &pktio_info)) {
+ printf("Error: odp_pktio_info() failed\n");
+ return -1;
+ }
printf("PACKET [%" PRIu64 "]\n", num_packet);
printf(" time: %" PRIu64 ".%09" PRIu64 " sec\n", sec, nsec);
@@ -653,6 +656,9 @@ static int receive_packets(test_global_t *global)
odp_packet_free(pkt);
+ if (odp_unlikely(printed < 0))
+ return -1;
+
if (!printed)
continue;
diff --git a/example/sysinfo/odp_sysinfo.c b/example/sysinfo/odp_sysinfo.c
index 00a32f658..c28fd065c 100644
--- a/example/sysinfo/odp_sysinfo.c
+++ b/example/sysinfo/odp_sysinfo.c
@@ -25,6 +25,90 @@ static const char *support_level(odp_support_t support)
}
}
+static const char *cpu_arch_name(odp_system_info_t *sysinfo)
+{
+ odp_cpu_arch_t cpu_arch = sysinfo->cpu_arch;
+
+ switch (cpu_arch) {
+ case ODP_CPU_ARCH_ARM:
+ return "ARM";
+ case ODP_CPU_ARCH_MIPS:
+ return "MIPS";
+ case ODP_CPU_ARCH_PPC:
+ return "PPC";
+ case ODP_CPU_ARCH_RISCV:
+ return "RISC-V";
+ case ODP_CPU_ARCH_X86:
+ return "x86";
+ default:
+ return "Unknown";
+ }
+}
+
+static const char *arm_isa(odp_cpu_arch_arm_t isa)
+{
+ switch (isa) {
+ case ODP_CPU_ARCH_ARMV6:
+ return "ARMv6";
+ case ODP_CPU_ARCH_ARMV7:
+ return "ARMv7-A";
+ case ODP_CPU_ARCH_ARMV8_0:
+ return "ARMv8.0-A";
+ case ODP_CPU_ARCH_ARMV8_1:
+ return "ARMv8.1-A";
+ case ODP_CPU_ARCH_ARMV8_2:
+ return "ARMv8.2-A";
+ case ODP_CPU_ARCH_ARMV8_3:
+ return "ARMv8.3-A";
+ case ODP_CPU_ARCH_ARMV8_4:
+ return "ARMv8.4-A";
+ case ODP_CPU_ARCH_ARMV8_5:
+ return "ARMv8.5-A";
+ case ODP_CPU_ARCH_ARMV8_6:
+ return "ARMv8.6-A";
+ default:
+ return "Unknown";
+ }
+}
+
+static const char *x86_isa(odp_cpu_arch_x86_t isa)
+{
+ switch (isa) {
+ case ODP_CPU_ARCH_X86_I686:
+ return "x86_i686";
+ case ODP_CPU_ARCH_X86_64:
+ return "x86_64";
+ default:
+ return "Unknown";
+ }
+}
+
+static const char *cpu_arch_isa(odp_system_info_t *sysinfo, int isa_sw)
+{
+ odp_cpu_arch_t cpu_arch = sysinfo->cpu_arch;
+
+ switch (cpu_arch) {
+ case ODP_CPU_ARCH_ARM:
+ if (isa_sw)
+ return arm_isa(sysinfo->cpu_isa_sw.arm);
+ else
+ return arm_isa(sysinfo->cpu_isa_hw.arm);
+ case ODP_CPU_ARCH_MIPS:
+ return "Unknown";
+ case ODP_CPU_ARCH_PPC:
+ return "Unknown";
+ case ODP_CPU_ARCH_RISCV:
+ return "Unknown";
+ case ODP_CPU_ARCH_X86:
+ if (isa_sw)
+ return x86_isa(sysinfo->cpu_isa_sw.x86);
+ else
+ return x86_isa(sysinfo->cpu_isa_hw.x86);
+ default:
+ return "Unknown";
+ }
+}
+
static const char *cipher_alg_name(odp_cipher_alg_t cipher)
{
switch (cipher) {
@@ -233,17 +317,20 @@ int main(void)
int i, num_hp, num_hp_print;
int num_ava, num_work, num_ctrl;
odp_cpumask_t ava_mask, work_mask, ctrl_mask;
+ odp_system_info_t sysinfo;
odp_shm_capability_t shm_capa;
odp_pool_capability_t pool_capa;
odp_queue_capability_t queue_capa;
odp_timer_capability_t timer_capa;
odp_crypto_capability_t crypto_capa;
+ odp_ipsec_capability_t ipsec_capa;
odp_schedule_capability_t schedule_capa;
uint64_t huge_page[MAX_HUGE_PAGES];
char ava_mask_str[ODP_CPUMASK_STR_SIZE];
char work_mask_str[ODP_CPUMASK_STR_SIZE];
char ctrl_mask_str[ODP_CPUMASK_STR_SIZE];
int crypto_ret;
+ int ipsec_ret;
printf("\n");
printf("ODP system info example\n");
@@ -260,8 +347,21 @@ int main(void)
return -1;
}
+ printf("\n");
+ printf("odp_sys_info_print()\n");
+ printf("***********************************************************\n");
odp_sys_info_print();
+ printf("\n");
+ printf("odp_sys_config_print()\n");
+ printf("***********************************************************\n");
+ odp_sys_config_print();
+
+ if (odp_system_info(&sysinfo)) {
+ printf("system info call failed\n");
+ return -1;
+ }
+
memset(ava_mask_str, 0, ODP_CPUMASK_STR_SIZE);
num_ava = odp_cpumask_all_available(&ava_mask);
odp_cpumask_to_str(&ava_mask, ava_mask_str, ODP_CPUMASK_STR_SIZE);
@@ -309,6 +409,10 @@ int main(void)
if (crypto_ret < 0)
printf("crypto capability failed\n");
+ ipsec_ret = odp_ipsec_capability(&ipsec_capa);
+ if (ipsec_ret < 0)
+ printf("IPsec capability failed\n");
+
printf("\n");
printf("S Y S T E M I N F O R M A T I O N\n");
printf("***********************************************************\n");
@@ -317,6 +421,9 @@ int main(void)
printf(" ODP impl name: %s\n", odp_version_impl_name());
printf(" ODP impl details: %s\n", odp_version_impl_str());
printf(" CPU model: %s\n", odp_cpu_model_str());
+ printf(" CPU arch: %s\n", cpu_arch_name(&sysinfo));
+ printf(" CPU ISA version: %s\n", cpu_arch_isa(&sysinfo, 0));
+ printf(" SW ISA version: %s\n", cpu_arch_isa(&sysinfo, 1));
printf(" CPU max freq: %" PRIu64 " hz\n", odp_cpu_hz_max());
printf(" Current CPU: %i\n", odp_cpu_id());
printf(" Current CPU freq: %" PRIu64 " hz\n", odp_cpu_hz());
@@ -437,6 +544,53 @@ int main(void)
printf("\n");
}
+ if (ipsec_ret == 0) {
+ printf(" IPSEC\n");
+ printf(" max SAs: %" PRIu32 "\n",
+ ipsec_capa.max_num_sa);
+ printf(" sync mode support: %s\n",
+ support_level(ipsec_capa.op_mode_sync));
+ printf(" async mode support: %s\n",
+ support_level(ipsec_capa.op_mode_async));
+ printf(" inline inbound mode support: %s\n",
+ support_level(ipsec_capa.op_mode_inline_in));
+ printf(" inline outbound mode support: %s\n",
+ support_level(ipsec_capa.op_mode_inline_out));
+ printf(" AH support: %s\n",
+ support_level(ipsec_capa.proto_ah));
+ printf(" post-IPsec fragmentation: %s\n",
+ support_level(ipsec_capa.frag_after));
+ printf(" pre-IPsec fragmentation: %s\n",
+ support_level(ipsec_capa.frag_before));
+ printf(" post-IPsec classification: %s\n",
+ support_level(ipsec_capa.pipeline_cls));
+ printf(" retaining outer headers: %s\n",
+ support_level(ipsec_capa.retain_header));
+ printf(" inbound checksum offload support:\n");
+ printf(" IPv4 header checksum: %s\n",
+ support_level(ipsec_capa.chksums_in.chksum.ipv4));
+ printf(" UDP checksum: %s\n",
+ support_level(ipsec_capa.chksums_in.chksum.udp));
+ printf(" TCP checksum: %s\n",
+ support_level(ipsec_capa.chksums_in.chksum.tcp));
+ printf(" SCTP checksum: %s\n",
+ support_level(ipsec_capa.chksums_in.chksum.sctp));
+ printf(" max destination CoSes: %" PRIu32 "\n",
+ ipsec_capa.max_cls_cos);
+ printf(" max destination queues: %" PRIu32 "\n",
+ ipsec_capa.max_queues);
+ printf(" max anti-replay window size: %" PRIu32 "\n",
+ ipsec_capa.max_antireplay_ws);
+ printf(" inline TM pipelining: %s\n",
+ support_level(ipsec_capa.inline_ipsec_tm));
+ printf(" cipher algorithms: ");
+ print_cipher_algos(ipsec_capa.ciphers);
+ printf("\n");
+ printf(" auth algorithms: ");
+ print_auth_algos(ipsec_capa.auths);
+ printf("\n\n");
+ }
+
printf(" SHM MEMORY BLOCKS:\n");
odp_shm_print_all();
diff --git a/example/timer/odp_timer_accuracy.c b/example/timer/odp_timer_accuracy.c
index 7208f216c..cd790a181 100644
--- a/example/timer/odp_timer_accuracy.c
+++ b/example/timer/odp_timer_accuracy.c
@@ -49,13 +49,15 @@ typedef struct test_log_t {
typedef struct test_global_t {
struct {
- unsigned long long int period_ns;
- unsigned long long int res_ns;
- unsigned long long int offset_ns;
- unsigned long long int num;
- unsigned long long int burst;
- unsigned long long int burst_gap;
+ unsigned long long period_ns;
+ unsigned long long res_ns;
+ unsigned long long offset_ns;
+ unsigned long long max_tmo_ns;
+ unsigned long long num;
+ unsigned long long burst;
+ unsigned long long burst_gap;
int mode;
+ int clk_src;
int init;
int output;
int early_retry;
@@ -89,6 +91,7 @@ static void print_usage(void)
" -p, --period <nsec> Timeout period in nsec. Default: 200 msec\n"
" -r, --resolution <nsec> Timeout resolution in nsec. Default: period / 10\n"
" -f, --first <nsec> First timer offset in nsec. Default: 300 msec\n"
+ " -x, --max_tmo <nsec> Maximum timeout in nsec. When 0, max tmo is calculated from other options. Default: 0\n"
" -n, --num <number> Number of timeout periods. Default: 50\n"
" -b, --burst <number> Number of timers per a timeout period. Default: 1\n"
" -g, --burst_gap <nsec> Gap (in nsec) between timers within a burst. Default: 0\n"
@@ -99,6 +102,9 @@ static void print_usage(void)
" -o, --output <file> Output file for measurement logs\n"
" -e, --early_retry <num> When timer restart fails due to ODP_TIMER_TOOEARLY, retry this many times\n"
" with expiration time incremented by the period. Default: 0\n"
+ " -s, --clk_src Clock source select (default 0):\n"
+ " 0: ODP_CLOCK_CPU\n"
+ " 1: ODP_CLOCK_EXT\n"
" -i, --init Set global init parameters. Default: init params not set.\n"
" -h, --help Display help and exit.\n\n");
}
@@ -110,26 +116,30 @@ static int parse_options(int argc, char *argv[], test_global_t *test_global)
{"period", required_argument, NULL, 'p'},
{"resolution", required_argument, NULL, 'r'},
{"first", required_argument, NULL, 'f'},
+ {"max_tmo", required_argument, NULL, 'x'},
{"num", required_argument, NULL, 'n'},
{"burst", required_argument, NULL, 'b'},
{"burst_gap", required_argument, NULL, 'g'},
{"mode", required_argument, NULL, 'm'},
{"output", required_argument, NULL, 'o'},
{"early_retry", required_argument, NULL, 'e'},
+ {"clk_src", required_argument, NULL, 's'},
{"init", no_argument, NULL, 'i'},
{"help", no_argument, NULL, 'h'},
{NULL, 0, NULL, 0}
};
- const char *shortopts = "+p:r:f:n:b:g:m:o:e:ih";
+ const char *shortopts = "+p:r:f:x:n:b:g:m:o:e:s:ih";
int ret = 0;
test_global->opt.period_ns = 200 * ODP_TIME_MSEC_IN_NS;
test_global->opt.res_ns = 0;
test_global->opt.offset_ns = 300 * ODP_TIME_MSEC_IN_NS;
+ test_global->opt.max_tmo_ns = 0;
test_global->opt.num = 50;
test_global->opt.burst = 1;
test_global->opt.burst_gap = 0;
test_global->opt.mode = 0;
+ test_global->opt.clk_src = 0;
test_global->opt.init = 0;
test_global->opt.output = 0;
test_global->opt.early_retry = 0;
@@ -150,6 +160,9 @@ static int parse_options(int argc, char *argv[], test_global_t *test_global)
case 'f':
test_global->opt.offset_ns = strtoull(optarg, NULL, 0);
break;
+ case 'x':
+ test_global->opt.max_tmo_ns = strtoull(optarg, NULL, 0);
+ break;
case 'n':
test_global->opt.num = strtoull(optarg, NULL, 0);
break;
@@ -170,6 +183,9 @@ static int parse_options(int argc, char *argv[], test_global_t *test_global)
case 'e':
test_global->opt.early_retry = atoi(optarg);
break;
+ case 's':
+ test_global->opt.clk_src = atoi(optarg);
+ break;
case 'i':
test_global->opt.init = 1;
break;
@@ -216,6 +232,7 @@ static int start_timers(test_global_t *test_global)
uint64_t i, j, idx, num_tmo, burst, burst_gap;
uint64_t tot_timers, alloc_timers;
int mode;
+ odp_timer_clk_src_t clk_src;
mode = test_global->opt.mode;
alloc_timers = test_global->alloc_timers;
@@ -263,7 +280,12 @@ static int start_timers(test_global_t *test_global)
test_global->timeout_pool = pool;
- if (odp_timer_capability(ODP_CLOCK_CPU, &timer_capa)) {
+ if (test_global->opt.clk_src == 0)
+ clk_src = ODP_CLOCK_CPU;
+ else
+ clk_src = ODP_CLOCK_EXT;
+
+ if (odp_timer_capability(clk_src, &timer_capa)) {
printf("Timer capa failed\n");
return -1;
}
@@ -291,17 +313,32 @@ static int start_timers(test_global_t *test_global)
memset(&timer_param, 0, sizeof(odp_timer_pool_param_t));
- timer_param.res_ns = res_ns;
- if (mode)
- timer_param.min_tmo = period_ns / 10;
- else
+ timer_param.res_ns = res_ns;
+
+ if (mode == 0) {
timer_param.min_tmo = offset_ns / 2;
+ timer_param.max_tmo = offset_ns + ((num_tmo + 1) * period_ns);
+ } else {
+ /* periodic mode */
+ timer_param.min_tmo = period_ns / 10;
+ timer_param.max_tmo = offset_ns + (2 * period_ns);
+ }
+
+ if (test_global->opt.max_tmo_ns) {
+ if (test_global->opt.max_tmo_ns < timer_param.max_tmo) {
+ printf("Max tmo is too small. Must be at least %" PRIu64 " nsec.\n",
+ timer_param.max_tmo);
+ return -1;
+ }
+
+ timer_param.max_tmo = test_global->opt.max_tmo_ns;
+ }
- timer_param.max_tmo = offset_ns + ((num_tmo + 1) * period_ns);
timer_param.num_timers = alloc_timers;
- timer_param.clk_src = ODP_CLOCK_CPU;
+ timer_param.clk_src = clk_src;
printf("\nTest parameters:\n");
+ printf(" clock source: %i\n", test_global->opt.clk_src);
printf(" resolution capa: %" PRIu64 " nsec\n", res_capa);
printf(" max timers capa: %" PRIu32 "\n", timer_capa.max_timers);
printf(" mode: %i\n", mode);
@@ -368,10 +405,15 @@ static int start_timers(test_global_t *test_global)
}
idx = 0;
+
+ /* Record test start time and tick. Memory barriers forbid compiler and out-of-order
+ * CPU to move samples apart. */
+ odp_mb_full();
start_tick = odp_timer_current_tick(timer_pool);
time = odp_time_local();
- start_ns = odp_time_to_ns(time);
+ odp_mb_full();
+ start_ns = odp_time_to_ns(time);
test_global->start_tick = start_tick;
test_global->start_ns = start_ns;
test_global->period_tick = odp_timer_ns_to_tick(timer_pool, period_ns);
@@ -537,6 +579,9 @@ static void run_test(test_global_t *test_global)
ctx = odp_timeout_user_ptr(tmo);
tmo_ns = ctx->nsec;
+ if (log)
+ log[i].tmo_ns = tmo_ns;
+
if (time_ns > tmo_ns) {
diff_ns = time_ns - tmo_ns;
stat->num_after++;
@@ -545,10 +590,8 @@ static void run_test(test_global_t *test_global)
stat->nsec_after_min = diff_ns;
if (diff_ns > stat->nsec_after_max)
stat->nsec_after_max = diff_ns;
- if (log) {
- log[i].tmo_ns = tmo_ns;
+ if (log)
log[i].diff_ns = diff_ns;
- }
} else if (time_ns < tmo_ns) {
diff_ns = tmo_ns - time_ns;
@@ -558,10 +601,8 @@ static void run_test(test_global_t *test_global)
stat->nsec_before_min = diff_ns;
if (diff_ns > stat->nsec_before_max)
stat->nsec_before_max = diff_ns;
- if (log) {
- log[i].tmo_ns = tmo_ns;
+ if (log)
log[i].diff_ns = -diff_ns;
- }
} else {
stat->num_exact++;
}
diff --git a/example/timer/odp_timer_simple.c b/example/timer/odp_timer_simple.c
index efbdf7673..683d6ff8d 100644
--- a/example/timer/odp_timer_simple.c
+++ b/example/timer/odp_timer_simple.c
@@ -83,6 +83,7 @@ int main(int argc ODP_UNUSED, char *argv[] ODP_UNUSED)
goto err;
}
+ odp_timer_pool_start();
/* Configure scheduler */
odp_schedule_config(NULL);
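The odp_timer_pool_start() call added above reflects the timer API requirement that timer pools are started after creation and before any timers are allocated from them. A minimal sketch of that sequence (parameter values illustrative; assumes <string.h> and odp_api.h are included, as in the example):

static odp_timer_pool_t create_timer_pool(void)
{
	odp_timer_pool_param_t tp_param;
	odp_timer_pool_t tp;

	memset(&tp_param, 0, sizeof(tp_param));
	tp_param.res_ns     = 10 * ODP_TIME_MSEC_IN_NS; /* illustrative values */
	tp_param.min_tmo    = 10 * ODP_TIME_MSEC_IN_NS;
	tp_param.max_tmo    = 10 * ODP_TIME_SEC_IN_NS;
	tp_param.num_timers = 32;
	tp_param.clk_src    = ODP_CLOCK_CPU;

	tp = odp_timer_pool_create("example_tp", &tp_param);
	if (tp == ODP_TIMER_POOL_INVALID)
		return ODP_TIMER_POOL_INVALID;

	/* Timer pools must be started before timers are allocated from them */
	odp_timer_pool_start();

	return tp;
}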
diff --git a/helper/Makefile.am b/helper/Makefile.am
index fa59d82da..f99fc8d83 100644
--- a/helper/Makefile.am
+++ b/helper/Makefile.am
@@ -19,7 +19,9 @@ helperinclude_HEADERS = \
include/odp/helper/chksum.h\
include/odp/helper/odph_debug.h \
include/odp/helper/eth.h\
+ include/odp/helper/gtp.h\
include/odp/helper/icmp.h\
+ include/odp/helper/igmp.h\
include/odp/helper/ip.h\
include/odp/helper/ipsec.h\
include/odp/helper/odph_api.h\
diff --git a/helper/include/odp/helper/gtp.h b/helper/include/odp/helper/gtp.h
new file mode 100644
index 000000000..d542dc6b3
--- /dev/null
+++ b/helper/include/odp/helper/gtp.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell.
+ */
+/**
+ * @file
+ *
+ * ODP GTP header
+ */
+#ifndef _ODPH_GTP_H_
+#define _ODPH_GTP_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp_api.h>
+
+/**
+ * Simplified GTP protocol header.
+ * Contains 8-bit gtp_hdr_info, 8-bit msg_type,
+ * 16-bit plen, 32-bit teid.
+ * No optional fields and next extension header.
+ */
+typedef struct ODP_PACKED {
+ uint8_t gtp_hdr_info; /**< GTP header info */
+ uint8_t msg_type; /**< GTP message type */
+ odp_u16be_t plen; /**< Total payload length */
+ odp_u32be_t teid; /**< Tunnel endpoint ID */
+} odph_gtphdr_t;
+
+/** GTP header length */
+#define ODP_GTP_HLEN sizeof(odph_gtphdr_t)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ODP_GTP_H_ */
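To illustrate the new helper, a minimal sketch of extracting the GTPv1 TEID from a received packet; the helper function name, the literal GTP-U port 2152 and the contiguity assumptions are illustrative, not part of this patch:

/* Illustrative helper: extract the GTPv1 TEID from a received UDP packet.
 * Assumes L3/L4 offsets have been parsed and that the UDP and GTP headers
 * are contiguous in the first segment. */
static int gtpu_teid(odp_packet_t pkt, uint32_t *teid)
{
	uint32_t len = 0;
	odph_udphdr_t *udp = odp_packet_l4_ptr(pkt, &len);
	odph_gtphdr_t *gtp;

	if (udp == NULL || !odp_packet_has_udp(pkt) ||
	    len < ODPH_UDPHDR_LEN + ODP_GTP_HLEN)
		return -1;

	/* GTP-U is carried over UDP destination port 2152 */
	if (odp_be_to_cpu_16(udp->dst_port) != 2152)
		return -1;

	gtp = (odph_gtphdr_t *)((uint8_t *)udp + ODPH_UDPHDR_LEN);

	/* GTP version is in the top three bits of gtp_hdr_info */
	if ((gtp->gtp_hdr_info >> 5) != 1)
		return -1;

	*teid = odp_be_to_cpu_32(gtp->teid);
	return 0;
}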
diff --git a/helper/include/odp/helper/igmp.h b/helper/include/odp/helper/igmp.h
new file mode 100644
index 000000000..5cff41aab
--- /dev/null
+++ b/helper/include/odp/helper/igmp.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell.
+ */
+
+/**
+ * @file
+ *
+ * ODP IGMP header
+ */
+#ifndef _ODPH_IGMP_H_
+#define _ODPH_IGMP_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp_api.h>
+
+/**
+ * Simplified IGMP protocol header.
+ * Contains 8-bit type, 8-bit code,
+ * 16-bit csum, 32-bit group.
+ * No optional fields and next extension header.
+ */
+typedef struct ODP_PACKED {
+ uint8_t type; /**< Message Type */
+ uint8_t code; /**< Max response code */
+ odp_u16be_t csum; /**< Checksum */
+ odp_u32be_t group; /**< Group address */
+} odph_igmphdr_t;
+
+/** IGMP header length */
+#define ODP_IGMP_HLEN sizeof(odph_igmphdr_t)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ODP_IGMP_H_ */
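Similarly for the IGMP helper, a hedged sketch that reads the group address from an IGMP message (helper name and parsing assumptions are illustrative):

/* Illustrative helper: read the group address from an IGMP message.
 * Assumes the L3 offset has been parsed and that the IPv4 and IGMP headers
 * are contiguous in the first segment. */
static int igmp_group(odp_packet_t pkt, uint32_t *group)
{
	uint32_t len = 0;
	odph_ipv4hdr_t *ip = odp_packet_l3_ptr(pkt, &len);
	odph_igmphdr_t *igmp;
	uint32_t hlen;

	if (ip == NULL || !odp_packet_has_ipv4(pkt) ||
	    ip->proto != ODPH_IPPROTO_IGMP)
		return -1;

	hlen = ODPH_IPV4HDR_IHL(ip->ver_ihl) * 4;
	if (len < hlen + ODP_IGMP_HLEN)
		return -1;

	igmp = (odph_igmphdr_t *)((uint8_t *)ip + hlen);
	*group = odp_be_to_cpu_32(igmp->group);
	return 0;
}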
diff --git a/helper/include/odp/helper/ip.h b/helper/include/odp/helper/ip.h
index b5bfed78c..3fac438f1 100644
--- a/helper/include/odp/helper/ip.h
+++ b/helper/include/odp/helper/ip.h
@@ -252,6 +252,7 @@ typedef struct ODP_PACKED {
* @{*/
#define ODPH_IPPROTO_HOPOPTS 0x00 /**< IPv6 hop-by-hop options */
#define ODPH_IPPROTO_ICMPV4 0x01 /**< Internet Control Message Protocol (1) */
+#define ODPH_IPPROTO_IGMP 0x02 /**< Internet Group Management Protocol (2) */
#define ODPH_IPPROTO_TCP 0x06 /**< Transmission Control Protocol (6) */
#define ODPH_IPPROTO_UDP 0x11 /**< User Datagram Protocol (17) */
#define ODPH_IPPROTO_ROUTE 0x2B /**< IPv6 Routing header (43) */
diff --git a/helper/include/odp/helper/odph_api.h b/helper/include/odp/helper/odph_api.h
index 921914aa2..f3bcde208 100644
--- a/helper/include/odp/helper/odph_api.h
+++ b/helper/include/odp/helper/odph_api.h
@@ -22,8 +22,10 @@ extern "C" {
#include <odp/helper/chksum.h>
#include <odp/helper/odph_cuckootable.h>
#include <odp/helper/eth.h>
+#include <odp/helper/gtp.h>
#include <odp/helper/odph_hashtable.h>
#include <odp/helper/icmp.h>
+#include <odp/helper/igmp.h>
#include <odp/helper/ip.h>
#include <odp/helper/ipsec.h>
#include <odp/helper/odph_lineartable.h>
diff --git a/include/Makefile.am b/include/Makefile.am
index 32d8a6825..911cd92f9 100644
--- a/include/Makefile.am
+++ b/include/Makefile.am
@@ -34,6 +34,7 @@ odpapiinclude_HEADERS = \
odp/api/packet_flags.h \
odp/api/packet_io.h \
odp/api/packet_io_stats.h \
+ odp/api/protocols.h \
odp/api/pool.h \
odp/api/queue.h \
odp/api/random.h \
@@ -84,6 +85,7 @@ odpapispecinclude_HEADERS = \
odp/api/spec/packet_flags.h \
odp/api/spec/packet_io.h \
odp/api/spec/packet_io_stats.h \
+ odp/api/spec/protocols.h \
odp/api/spec/pool.h \
odp/api/spec/queue.h \
odp/api/spec/queue_types.h \
diff --git a/include/odp/api/abi-default/event.h b/include/odp/api/abi-default/event.h
index c9d03735c..a63571ca0 100644
--- a/include/odp/api/abi-default/event.h
+++ b/include/odp/api/abi-default/event.h
@@ -25,15 +25,16 @@ typedef _odp_abi_event_t *odp_event_t;
#define ODP_EVENT_INVALID ((odp_event_t)0)
typedef enum {
- ODP_EVENT_BUFFER = 1,
- ODP_EVENT_PACKET = 2,
- ODP_EVENT_TIMEOUT = 3,
+ ODP_EVENT_BUFFER = 1,
+ ODP_EVENT_PACKET = 2,
+ ODP_EVENT_TIMEOUT = 3,
ODP_EVENT_CRYPTO_COMPL = 4,
- ODP_EVENT_IPSEC_STATUS = 5
+ ODP_EVENT_IPSEC_STATUS = 5,
+ ODP_EVENT_PACKET_VECTOR = 6
} odp_event_type_t;
typedef enum {
- ODP_EVENT_NO_SUBTYPE = 0,
+ ODP_EVENT_NO_SUBTYPE = 0,
ODP_EVENT_PACKET_BASIC = 1,
ODP_EVENT_PACKET_CRYPTO = 2,
ODP_EVENT_PACKET_IPSEC = 3,
diff --git a/include/odp/api/abi-default/packet.h b/include/odp/api/abi-default/packet.h
index 57e255c57..3660cfa29 100644
--- a/include/odp/api/abi-default/packet.h
+++ b/include/odp/api/abi-default/packet.h
@@ -19,16 +19,21 @@ typedef struct { char dummy; /**< @internal Dummy */ } _odp_abi_packet_t;
/** @internal Dummy type for strong typing */
typedef struct { char dummy; /**< @internal Dummy */ } _odp_abi_packet_seg_t;
+/** @internal Dummy type for strong typing */
+typedef struct { char dummy; /**< @internal Dummy */ } _odp_abi_packet_vector_t;
+
/** @ingroup odp_packet
* @{
*/
typedef _odp_abi_packet_t *odp_packet_t;
typedef _odp_abi_packet_seg_t *odp_packet_seg_t;
+typedef _odp_abi_packet_vector_t *odp_packet_vector_t;
#define ODP_PACKET_INVALID ((odp_packet_t)0)
#define ODP_PACKET_SEG_INVALID ((odp_packet_seg_t)0)
#define ODP_PACKET_OFFSET_INVALID 0xffff
+#define ODP_PACKET_VECTOR_INVALID ((odp_packet_vector_t)0)
typedef uint8_t odp_proto_l2_type_t;
diff --git a/include/odp/api/protocols.h b/include/odp/api/protocols.h
new file mode 100644
index 000000000..b5f3742a4
--- /dev/null
+++ b/include/odp/api/protocols.h
@@ -0,0 +1,26 @@
+/* Copyright (c) 2020, Marvell
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP protocols
+ */
+
+#ifndef ODP_API_PROTOCOLS_H_
+#define ODP_API_PROTOCOLS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/spec/protocols.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/include/odp/api/spec/buffer.h b/include/odp/api/spec/buffer.h
index 28eb7669b..82f5e2f72 100644
--- a/include/odp/api/spec/buffer.h
+++ b/include/odp/api/spec/buffer.h
@@ -74,7 +74,11 @@ void *odp_buffer_addr(odp_buffer_t buf);
uint32_t odp_buffer_size(odp_buffer_t buf);
/**
- * Tests if buffer is valid
+ * Check that buffer is valid
+ *
+ * This function can be used for debugging purposes to check if a buffer handle represents
+ * a valid buffer. The level of error checks depends on the implementation. The call should not
+ * crash if the buffer handle is corrupted.
*
* @param buf Buffer handle
*
@@ -95,8 +99,8 @@ odp_pool_t odp_buffer_pool(odp_buffer_t buf);
/**
* Buffer alloc
*
- * The validity of a buffer can be checked at any time with
- * odp_buffer_is_valid().
+ * Allocates a buffer from the pool. Returns ODP_BUFFER_INVALID when a buffer
+ * can not be allocated.
*
* @param pool Pool handle
*
@@ -107,8 +111,8 @@ odp_buffer_t odp_buffer_alloc(odp_pool_t pool);
/**
* Allocate multiple buffers
-
- * Otherwise like odp_buffer_alloc(), but allocates multiple buffers from a pool
+ *
+ * Otherwise like odp_buffer_alloc(), but allocates multiple buffers from a pool.
*
* @param pool Pool handle
* @param[out] buf Array of buffer handles for output
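A minimal sketch of the alloc/validity semantics documented above, assuming 'pool' is an ODP_POOL_BUFFER pool created elsewhere:

static void buffer_sanity(odp_pool_t pool)
{
	odp_buffer_t buf = odp_buffer_alloc(pool);

	if (buf == ODP_BUFFER_INVALID)
		return; /* pool empty: allocation failed */

	/* Debug-time check; the depth of checking is implementation defined */
	if (!odp_buffer_is_valid(buf))
		ODPH_ERR("corrupted buffer handle\n");

	odp_buffer_free(buf);
}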
diff --git a/include/odp/api/spec/classification.h b/include/odp/api/spec/classification.h
index 176c53ff3..c33aca13f 100644
--- a/include/odp/api/spec/classification.h
+++ b/include/odp/api/spec/classification.h
@@ -103,7 +103,20 @@ typedef union odp_cls_pmr_terms_t {
/** Custom layer 3 match rule. PMR offset is counted from
* the start of layer 3 in the packet. */
uint64_t custom_l3:1;
-
+ /** IGMP Group address, implies IPPROTO=2 */
+ uint64_t igmp_grp_addr:1;
+ /** ICMP identifier, implies IPPROTO=1 and ICMP_TYPE=0 or ICMP_TYPE=8 */
+ uint64_t icmp_id:1;
+ /** ICMP type, implies IPPROTO=1 */
+ uint64_t icmp_type:1;
+ /** ICMP code, implies IPPROTO=1 */
+ uint64_t icmp_code:1;
+ /** Source SCTP port, implies IPPROTO=132 */
+ uint64_t sctp_sport:1;
+ /** Destination SCTP port, implies IPPROTO=132 */
+ uint64_t sctp_dport:1;
+ /** GTPv1 tunnel endpoint identifier */
+ uint64_t gtpv1_teid:1;
} bit;
/** All bits of the bit field structure */
uint64_t all_bits;
@@ -285,6 +298,9 @@ typedef struct odp_cls_cos_param {
/** Back Pressure configuration */
odp_bp_param_t bp;
+
+ /** Packet input vector configuration */
+ odp_pktin_vector_config_t vector;
} odp_cls_cos_param_t;
/**
@@ -552,6 +568,32 @@ typedef enum {
*/
ODP_PMR_CUSTOM_L3,
+ /** IGMP Group address (val_sz = 4), implies IPPROTO=2 */
+ ODP_PMR_IGMP_GRP_ADDR,
+
+ /** ICMP identifier (val_sz = 2), implies IPPROTO=1 and ICMP_TYPE=0 or ICMP_TYPE=8 */
+ ODP_PMR_ICMP_ID,
+
+ /** ICMP type (val_sz = 1), implies IPPROTO=1 */
+ ODP_PMR_ICMP_TYPE,
+
+ /** ICMP code (val_sz = 1), implies IPPROTO=1 */
+ ODP_PMR_ICMP_CODE,
+
+ /** Source SCTP port (val_sz = 2), implies IPPROTO=132 */
+ ODP_PMR_SCTP_SPORT,
+
+ /** Destination SCTP port (val_sz = 2), implies IPPROTO=132 */
+ ODP_PMR_SCTP_DPORT,
+
+ /** GTPv1 tunnel endpoint identifier (val_sz = 4)
+ *
+ * Matches if and only if IP protocol is UDP, UDP destination port
+ * is 2152 and the UDP payload interpreted as GTP header has GTP
+ * version 1 and TEID as specified.
+ */
+ ODP_PMR_GTPV1_TEID,
+
/** Inner header may repeat above values with this offset */
ODP_PMR_INNER_HDR_OFF = 32
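To illustrate the new ODP_PMR_GTPV1_TEID term together with the CoS vector configuration added above, a hedged sketch; the odp_pktin_vector_config_t member names and the network-byte-order match value are assumptions, and all pool/queue/CoS handles are created elsewhere:

static int setup_gtpu_cos(odp_pool_t pkt_pool, odp_pool_t vec_pool,
			  odp_queue_t queue, odp_cos_t default_cos)
{
	odp_cls_cos_param_t cos_param;
	odp_pmr_param_t pmr_param;
	odp_cos_t gtp_cos;
	odp_pmr_t pmr;
	/* Match value and mask in network byte order, as with the other
	 * header-field terms */
	uint32_t teid = odp_cpu_to_be_32(0x1234);
	uint32_t mask = odp_cpu_to_be_32(0xffffffff);

	odp_cls_cos_param_init(&cos_param);
	cos_param.pool  = pkt_pool;
	cos_param.queue = queue;
	cos_param.vector.enable = 1;    /* assumed odp_pktin_vector_config_t members */
	cos_param.vector.pool   = vec_pool;

	gtp_cos = odp_cls_cos_create("gtp-u", &cos_param);
	if (gtp_cos == ODP_COS_INVALID)
		return -1;

	odp_cls_pmr_param_init(&pmr_param);
	pmr_param.term        = ODP_PMR_GTPV1_TEID; /* implies UDP dst port 2152, GTP version 1 */
	pmr_param.match.value = &teid;
	pmr_param.match.mask  = &mask;
	pmr_param.val_sz      = sizeof(teid);

	/* Packets arriving at default_cos that match the TEID move to gtp_cos */
	pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, gtp_cos);

	return pmr == ODP_PMR_INVALID ? -1 : 0;
}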
diff --git a/include/odp/api/spec/event.h b/include/odp/api/spec/event.h
index f9fe17dac..dc61b6e31 100644
--- a/include/odp/api/spec/event.h
+++ b/include/odp/api/spec/event.h
@@ -58,6 +58,8 @@ extern "C" {
* - Crypto completion event (odp_crypto_compl_t)
* - ODP_EVENT_IPSEC_STATUS
* - IPSEC status update event (odp_ipsec_status_t)
+ * - ODP_EVENT_PACKET_VECTOR
+ * - Vector of packet events (odp_packet_t) as odp_packet_vector_t
*/
/**
@@ -180,6 +182,20 @@ int odp_event_filter_packet(const odp_event_t event[],
uint64_t odp_event_to_u64(odp_event_t hdl);
/**
+ * Check that event is valid
+ *
+ * This function can be used for debugging purposes to check if an event handle represents
+ * a valid event. The level of error checks depends on the implementation. The call should not
+ * crash if the event handle is corrupted.
+ *
+ * @param event Event handle
+ *
+ * @retval 1 Event handle represents a valid event.
+ * @retval 0 Event handle does not represent a valid event.
+ */
+int odp_event_is_valid(odp_event_t event);
+
+/**
* Free event
*
* Frees the event based on its type. Results are undefined if event
diff --git a/include/odp/api/spec/ipsec.h b/include/odp/api/spec/ipsec.h
index 48b046e43..156d66019 100644
--- a/include/odp/api/spec/ipsec.h
+++ b/include/odp/api/spec/ipsec.h
@@ -21,6 +21,7 @@ extern "C" {
#include <odp/api/crypto.h>
#include <odp/api/support.h>
#include <odp/api/packet_io.h>
+#include <odp/api/protocols.h>
#include <odp/api/classification.h>
#include <odp/api/traffic_mngr.h>
@@ -320,6 +321,14 @@ typedef struct odp_ipsec_config_t {
/** IPSEC outbound processing configuration */
odp_ipsec_outbound_config_t outbound;
+ /** Enable stats collection
+ *
+ * Default value is false (stats collection disabled).
+ *
+ * @see odp_ipsec_stats(), odp_ipsec_stats_multi()
+ */
+ odp_bool_t stats_en;
+
} odp_ipsec_config_t;
/**
@@ -773,6 +782,105 @@ typedef struct odp_ipsec_sa_param_t {
} odp_ipsec_sa_param_t;
/**
+ * IPSEC stats content
+ */
+typedef struct odp_ipsec_stats_t {
+ /** Number of packets processed successfully */
+ uint64_t success;
+
+ /** Number of packets with protocol errors */
+ uint64_t proto_err;
+
+ /** Number of packets with authentication errors */
+ uint64_t auth_err;
+
+ /** Number of packets with antireplay check failures */
+ uint64_t antireplay_err;
+
+ /** Number of packets with algorithm errors */
+ uint64_t alg_err;
+
+ /** Number of packets with MTU errors */
+ uint64_t mtu_err;
+
+ /** Number of packets with hard lifetime (bytes) expired */
+ uint64_t hard_exp_bytes_err;
+
+ /** Number of packets with hard lifetime (packets) expired */
+ uint64_t hard_exp_pkts_err;
+} odp_ipsec_stats_t;
+
+/**
+ * IPSEC SA information
+ */
+typedef struct odp_ipsec_sa_info_t {
+ /** Copy of IPSEC Security Association (SA) parameters */
+ odp_ipsec_sa_param_t param;
+
+ /** IPSEC SA direction dependent parameters */
+ union {
+ /** Inbound specific parameters */
+ struct {
+ /** Additional SA lookup parameters. */
+ struct {
+ /** IP destination address (NETWORK ENDIAN) to
+ * be matched in addition to SPI value. */
+ uint8_t dst_addr[ODP_IPV6_ADDR_SIZE];
+ } lookup_param;
+
+ /** Antireplay window size
+ *
+ * Antireplay window size configured for the SA.
+ * This value can be different from what application
+ * had requested.
+ */
+ uint32_t antireplay_ws;
+
+ /** Antireplay window top
+ *
+ * Sequence number representing a recent top of the
+ * anti-replay window. There may be a delay before the
+ * SA state is reflected in the value. The value will be
+ * zero if no packets have been processed or if the
+ * anti-replay service is not enabled.
+ */
+ uint64_t antireplay_window_top;
+ } inbound;
+
+ /** Outbound specific parameters */
+ struct {
+ /** Sequence number
+ *
+ * Sequence number used for a recently processed packet.
+ * There may be a delay before the SA state is reflected
+ * in the value. When no packets have been processed,
+ * the value will be zero.
+ */
+ uint64_t seq_num;
+
+ /** Tunnel IP address */
+ union {
+ /** IPv4 */
+ struct {
+ /** IPv4 source address */
+ uint8_t src_addr[ODP_IPV4_ADDR_SIZE];
+ /** IPv4 destination address */
+ uint8_t dst_addr[ODP_IPV4_ADDR_SIZE];
+ } ipv4;
+
+ /** IPv6 */
+ struct {
+ /** IPv6 source address */
+ uint8_t src_addr[ODP_IPV6_ADDR_SIZE];
+ /** IPv6 destination address */
+ uint8_t dst_addr[ODP_IPV6_ADDR_SIZE];
+ } ipv6;
+ } tunnel;
+ } outbound;
+ };
+} odp_ipsec_sa_info_t;
+
+/**
* Query IPSEC capabilities
*
* Outputs IPSEC capabilities on success.
@@ -1189,7 +1297,13 @@ typedef struct odp_ipsec_out_inline_param_t {
struct {
/** Points to first byte of outer headers to be copied in
* front of the outgoing IPSEC packet. Implementation copies
- * the headers during odp_ipsec_out_inline() call. */
+ * the headers during odp_ipsec_out_inline() call.
+ *
+ * Null value indicates that the outer headers are in the
+ * packet data, starting at L2 offset and ending at the byte
+ * before L3 offset. In this case, value of 'len' field must
+ * be greater than zero and set to L3 offset minus L2 offset.
+ */
const uint8_t *ptr;
/** Outer header length in bytes */
@@ -1627,6 +1741,65 @@ int odp_ipsec_sa_mtu_update(odp_ipsec_sa_t sa, uint32_t mtu);
void *odp_ipsec_sa_context(odp_ipsec_sa_t sa);
/**
+ * Print global IPSEC configuration info
+ *
+ * Print implementation-defined information about the global IPSEC
+ * configuration.
+ */
+void odp_ipsec_print(void);
+
+/**
+ * Print IPSEC SA info
+ *
+ * @param sa SA handle
+ *
+ * Print implementation-defined IPSEC SA debug information to the ODP log.
+ */
+void odp_ipsec_sa_print(odp_ipsec_sa_t sa);
+
+/**
+ * Get IPSEC stats for the IPSEC SA handle
+ *
+ * @param sa IPSEC SA handle
+ * @param[out] stats Stats output
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
+ */
+int odp_ipsec_stats(odp_ipsec_sa_t sa, odp_ipsec_stats_t *stats);
+
+/**
+ * Get IPSEC stats for multiple IPSEC SA handles
+ *
+ * @param sa Array of IPSEC SA handles
+ * @param[out] stats Stats array for output
+ * @param num Number of SA handles
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
+ */
+int odp_ipsec_stats_multi(odp_ipsec_sa_t sa[], odp_ipsec_stats_t stats[], int num);
+
+/**
+ * Retrieve information about an IPSEC SA
+ *
+ * The cipher and auth key data (including key extra) will not be exposed and
+ * the corresponding pointers will be set to NULL. The IP address pointers
+ * will point to the corresponding buffers available in the SA info structure.
+ *
+ * The user defined SA context pointer is an opaque field and hence the value
+ * provided during the SA creation will be returned.
+ *
+ * @param sa The IPSEC SA for which to retrieve information
+ * @param[out] sa_info Pointer to caller allocated SA info structure to be
+ * filled in
+ *
+ * @retval 0 On success
+ * @retval <0 On failure
+ **/
+int odp_ipsec_sa_info(odp_ipsec_sa_t sa, odp_ipsec_sa_info_t *sa_info);
+
+/**
* @}
*/
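A hedged sketch of the new stats and SA info calls; it assumes <stdio.h> and <inttypes.h>, an SA created elsewhere, and that the remaining config fields are filled in as the application requires:

static int ipsec_enable_stats(void)
{
	odp_ipsec_config_t config;

	odp_ipsec_config_init(&config);
	config.stats_en = 1;    /* new field: enable per-SA stats collection */
	/* ... inbound/outbound settings as required by the application ... */

	return odp_ipsec_config(&config);
}

static void ipsec_report_sa(odp_ipsec_sa_t sa)
{
	odp_ipsec_stats_t stats;
	odp_ipsec_sa_info_t info;

	if (odp_ipsec_stats(sa, &stats) == 0)
		printf("ok %" PRIu64 ", auth errors %" PRIu64 "\n",
		       stats.success, stats.auth_err);

	if (odp_ipsec_sa_info(sa, &info) == 0 &&
	    info.param.dir == ODP_IPSEC_DIR_OUTBOUND)
		printf("outbound seq num %" PRIu64 "\n", info.outbound.seq_num);

	odp_ipsec_sa_print(sa);
}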
diff --git a/include/odp/api/spec/packet.h b/include/odp/api/spec/packet.h
index fa56b5d02..44fc75924 100644
--- a/include/odp/api/spec/packet.h
+++ b/include/odp/api/spec/packet.h
@@ -177,6 +177,16 @@ extern "C" {
*/
/**
+ * @typedef odp_packet_vector_t
+ * ODP packet vector
+ */
+
+/**
+ * @def ODP_PACKET_VECTOR_INVALID
+ * Invalid packet vector
+ */
+
+/**
* Protocol
*/
typedef enum odp_proto_t {
@@ -2027,6 +2037,181 @@ uint64_t odp_packet_cls_mark(odp_packet_t pkt);
/*
*
+ * Packet vector handling routines
+ * ********************************************************
+ *
+ */
+
+/**
+ * Get packet vector handle from event
+ *
+ * Converts an ODP_EVENT_PACKET_VECTOR type event to a packet vector handle
+ *
+ * @param ev Event handle
+ * @return Packet vector handle
+ *
+ * @see odp_event_type()
+ */
+odp_packet_vector_t odp_packet_vector_from_event(odp_event_t ev);
+
+/**
+ * Convert packet vector handle to event
+ *
+ * @param pktv Packet vector handle
+ *
+ * @return Event handle
+ */
+odp_event_t odp_packet_vector_to_event(odp_packet_vector_t pktv);
+
+/**
+ * Allocate a packet vector from a packet vector pool
+ *
+ * Allocates a packet vector from the specified packet vector pool.
+ * The pool must have been created with the ODP_POOL_VECTOR type.
+ *
+ * @param pool Packet vector pool handle
+ *
+ * @return Handle of allocated packet vector
+ * @retval ODP_PACKET_VECTOR_INVALID Packet vector could not be allocated
+ *
+ * @note A newly allocated vector shall not contain any packets; instead, the alloc
+ * operation shall reserve space for odp_pool_param_t::vector::max_size packets.
+ */
+odp_packet_vector_t odp_packet_vector_alloc(odp_pool_t pool);
+
+/**
+ * Free packet vector
+ *
+ * Frees the packet vector into the packet vector pool it was allocated from.
+ *
+ * @param pktv Packet vector handle
+ *
+ * @note This API just frees the vector, not any packets inside the vector.
+ * Application can use odp_event_free() to free the vector and packets inside
+ * the vector.
+ */
+void odp_packet_vector_free(odp_packet_vector_t pktv);
+
+/**
+ * Get packet vector table
+ *
+ * Packet vector table is an array of packets (odp_packet_t) stored in
+ * contiguous memory location. Upon completion of this API, the implementation
+ * returns the packet table pointer in pkt_tbl.
+ *
+ * @param pktv Packet vector handle
+ * @param[out] pkt_tbl Points to packet vector table
+ *
+ * @return Number of packets available in the vector.
+ *
+ * @note When pktin subsystem is producing the packet vectors,
+ * odp_pktin_vector_config_t::pool shall be used to configure the pool to form
+ * the vector table.
+ *
+ * @note The maximum number of packets this vector can hold is defined by
+ * odp_pool_param_t::vector::max_size. The return value of this function will not
+ * be greater than odp_pool_param_t::vector::max_size.
+ *
+ * @note pkt_tbl points to the packet vector table. The application can edit the
+ * packet handles in the table directly (up to odp_pool_param_t::vector::max_size).
+ * The application must update the size of the table using odp_packet_vector_size_set()
+ * whenever the size of the vector changes.
+ *
+ * @note Invalid packet handles (ODP_PACKET_INVALID) must not be stored in the
+ * table, so that consumers of odp_packet_vector_t handles can have an
+ * optimized implementation. Hence, consuming packets from the middle of the
+ * vector requires moving the remaining packets up to form a contiguous array
+ * of packets and updating the size of the vector using
+ * odp_packet_vector_size_set().
+ *
+ * @note The table memory is backed by a vector pool buffer. The ownership of
+ * the table memory is linked to the ownership of the event, i.e. after sending
+ * the event to a queue, the sender loses ownership of the table as well.
+ */
+uint32_t odp_packet_vector_tbl(odp_packet_vector_t pktv, odp_packet_t **pkt_tbl);
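To make the table-compaction rule above concrete, here is a minimal consumer sketch; drop_packet() is a hypothetical application filter:

uint32_t i, num, out = 0;
odp_packet_t *pkt_tbl;

num = odp_packet_vector_tbl(pktv, &pkt_tbl);

for (i = 0; i < num; i++) {
	if (drop_packet(pkt_tbl[i])) {		/* hypothetical filter */
		odp_packet_free(pkt_tbl[i]);
		continue;
	}
	pkt_tbl[out++] = pkt_tbl[i];		/* keep the array contiguous */
}

/* The table was edited in place: record the new number of valid handles */
odp_packet_vector_size_set(pktv, out);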
+
+/**
+ * Number of packets in a vector
+ *
+ * @param pktv Packet vector handle
+ *
+ * @return The number of packets available in the vector
+ */
+uint32_t odp_packet_vector_size(odp_packet_vector_t pktv);
+
+/**
+ * Set the number of packets stored in a vector
+ *
+ * Update the number of packets stored in a vector. When the application
+ * produces a packet vector, it shall use this function to set the number of
+ * packets available in the vector.
+ *
+ * @param pktv Packet vector handle
+ * @param size Number of packets in this vector
+ *
+ * @note The maximum number of packets this vector can hold is defined by
+ * odp_pool_param_t::vector::max_size. The size value must not be greater than
+ * odp_pool_param_t::vector::max_size
+ *
+ * @note All handles in the vector table (0 .. size - 1) need to be valid packet
+ * handles.
+ *
+ * @see odp_packet_vector_tbl()
+ *
+ */
+void odp_packet_vector_size_set(odp_packet_vector_t pktv, uint32_t size);
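A minimal producer sketch using the calls above. It assumes 'vec_pool' is an ODP_POOL_VECTOR pool, 'queue' is an existing destination queue, and 'num_pkts' does not exceed the pool's vector.max_size:

/* Gather already-allocated packets into one vector event and enqueue it */
static int enqueue_as_vector(odp_queue_t queue, odp_pool_t vec_pool,
			     odp_packet_t pkts[], uint32_t num_pkts)
{
	odp_packet_vector_t pktv;
	odp_packet_t *pkt_tbl;
	uint32_t i;

	pktv = odp_packet_vector_alloc(vec_pool);
	if (pktv == ODP_PACKET_VECTOR_INVALID)
		return -1;

	(void)odp_packet_vector_tbl(pktv, &pkt_tbl); /* size is 0 after alloc */

	for (i = 0; i < num_pkts; i++)
		pkt_tbl[i] = pkts[i];

	odp_packet_vector_size_set(pktv, num_pkts);

	/* The vector travels through queues as a single event */
	if (odp_queue_enq(queue, odp_packet_vector_to_event(pktv)) < 0) {
		odp_packet_vector_free(pktv); /* packets stay owned by caller */
		return -1;
	}

	return 0;
}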
+
+/**
+ * Check that packet vector is valid
+ *
+ * This function can be used for debugging purposes to check if a packet vector handle represents
+ * a valid packet vector. The level of error checking depends on the implementation. A considerable
+ * number of CPU cycles may be consumed depending on the level. The call should not crash even if
+ * the packet vector handle is corrupted.
+ *
+ * @param pktv Packet vector handle
+ *
+ * @retval 0 Packet vector is not valid
+ * @retval 1 Packet vector is valid
+ */
+int odp_packet_vector_valid(odp_packet_vector_t pktv);
+
+/**
+ * Packet vector pool
+ *
+ * Returns a handle to the packet vector pool from which the packet vector was
+ * allocated.
+ *
+ * @param pktv Packet vector handle
+ *
+ * @return Packet vector pool handle
+ */
+odp_pool_t odp_packet_vector_pool(odp_packet_vector_t pktv);
+
+/**
+ * Print packet vector debug information
+ *
+ * Print all packet vector debug information to ODP log.
+ *
+ * @param pktv Packet vector handle
+ */
+void odp_packet_vector_print(odp_packet_vector_t pktv);
+
+/**
+ * Get printable value for an odp_packet_vector_t
+ *
+ * @param hdl odp_packet_vector_t handle to be printed
+ *
+ * @return uint64_t value that can be used to print/display this handle
+ *
+ * @note This routine is intended to be used for diagnostic purposes to enable
+ * applications to generate a printable value that represents an
+ * odp_packet_vector_t handle.
+ */
+uint64_t odp_packet_vector_to_u64(odp_packet_vector_t hdl);
+
+/*
+ *
* Debugging
* ********************************************************
*
@@ -2055,10 +2240,12 @@ void odp_packet_print(odp_packet_t pkt);
void odp_packet_print_data(odp_packet_t pkt, uint32_t offset, uint32_t len);
/**
- * Perform full packet validity check
+ * Check that packet is valid
*
- * The operation may consume considerable number of cpu cycles depending on
- * the check level.
+ * This function can be used for debugging purposes to check if a packet handle represents
+ * a valid packet. The level of error checking depends on the implementation. A considerable
+ * number of CPU cycles may be consumed depending on the level. The call should not crash
+ * even if the packet handle is corrupted.
*
* @param pkt Packet handle
*
diff --git a/include/odp/api/spec/packet_flags.h b/include/odp/api/spec/packet_flags.h
index 0e4243968..7c1386cb6 100644
--- a/include/odp/api/spec/packet_flags.h
+++ b/include/odp/api/spec/packet_flags.h
@@ -27,7 +27,7 @@ extern "C" {
*/
/**
- * Check for all errors in packet
+ * Check for all parse errors in packet
*
* Check if packet parsing has found any errors in the packet. The level of
* error checking depends on the parse configuration (e.g. included layers and
@@ -35,6 +35,10 @@ extern "C" {
* which layers have been checked, and layer error functions
* (e.g. odp_packet_has_l3_error()) which layers have errors.
*
+ * If the packet subtype is ODP_EVENT_PACKET_IPSEC, odp_packet_has_error()
+ * indicates parsing errors found after IPSEC processing. IPSEC errors and
+ * warnings need to be checked using odp_ipsec_result().
+ *
* @param pkt Packet handle
*
* @retval non-zero Packet has errors
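A sketch of the check described above, assuming 'pkt' was received as the result of inline or asynchronous IPSEC processing and that the result structure exposes the aggregate error flags as in this release (handle_ipsec_error()/handle_parse_error() are hypothetical error paths):

odp_ipsec_packet_result_t res;

if (odp_packet_subtype(pkt) == ODP_EVENT_PACKET_IPSEC &&
    odp_ipsec_result(&res, pkt) == 0 && res.status.all_error)
	handle_ipsec_error(pkt);	/* IPSEC specific errors */
else if (odp_packet_has_error(pkt))
	handle_parse_error(pkt);	/* plain parse errors */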
diff --git a/include/odp/api/spec/packet_io.h b/include/odp/api/spec/packet_io.h
index d25d29751..940b712fa 100644
--- a/include/odp/api/spec/packet_io.h
+++ b/include/odp/api/spec/packet_io.h
@@ -160,6 +160,50 @@ typedef struct odp_pktin_queue_param_ovr_t {
} odp_pktin_queue_param_ovr_t;
/**
+ * Packet input vector configuration
+ */
+typedef struct odp_pktin_vector_config_t {
+ /** Enable packet input vector
+ *
+ * When true, packet input vector is enabled and configured with vector
+ * config parameters. Otherwise, packet input vector configuration
+ * parameters are ignored.
+ */
+ odp_bool_t enable;
+
+ /** Vector pool
+ *
+ * Vector pool to allocate the vectors to hold packets.
+ * The pool must have been created with the ODP_POOL_VECTOR type.
+ */
+ odp_pool_t pool;
+
+ /** Maximum time to wait for packets
+ *
+ * Maximum timeout in nanoseconds to wait for the producer to form the
+ * vector of packet events (odp_packet_vector_t). This value should be
+ * in the range of odp_pktin_vector_capability_t::min_tmo_ns to
+ * odp_pktin_vector_capability_t::max_tmo_ns.
+ */
+ uint64_t max_tmo_ns;
+
+ /** Maximum number of packets in a vector
+ *
+ * The packet input subsystem forms a packet vector event either when
+ * odp_pktin_vector_config_t::max_tmo_ns is reached or when the producer
+ * has accumulated max_size packets. This value should be in the range of
+ * odp_pktin_vector_capability_t::min_size to
+ * odp_pktin_vector_capability_t::max_size.
+ *
+ * @note The maximum number of packets a vector can hold is defined by
+ * odp_pool_param_t::vector::max_size of the pool given in
+ * odp_pktin_vector_config_t::pool. This max_size should not be greater than
+ * that value.
+ */
+ uint32_t max_size;
+
+} odp_pktin_vector_config_t;
+
+/**
* Packet input queue parameters
*/
typedef struct odp_pktin_queue_param_t {
@@ -226,6 +270,10 @@ typedef struct odp_pktin_queue_param_t {
* NULL.
*/
odp_pktin_queue_param_ovr_t *queue_param_ovr;
+
+ /** Packet input vector configuration */
+ odp_pktin_vector_config_t vector;
+
} odp_pktin_queue_param_t;
/**
@@ -570,6 +618,37 @@ typedef union odp_pktio_set_op_t {
} odp_pktio_set_op_t;
/**
+ * Packet input vector capabilities
+ */
+typedef struct odp_pktin_vector_capability_t {
+ /** Packet input vector availability */
+ odp_support_t supported;
+
+ /** Maximum number of packets that can be accumulated into a packet
+ * vector by a producer
+ *
+ * odp_pktin_vector_config_t::max_size should not be greater than this
+ * value. */
+ uint32_t max_size;
+
+ /** Minimum value allowed to be configured to
+ * odp_pktin_vector_config_t::max_size */
+ uint32_t min_size;
+
+ /** Maximum timeout in nanoseconds for the producer to wait for the
+ * vector of packets
+ *
+ * odp_pktin_vector_config_t::max_tmo_ns should not be greater than this
+ * value. */
+ uint64_t max_tmo_ns;
+
+ /** Minimum value allowed to be configured to
+ * odp_pktin_vector_config_t::max_tmo_ns */
+ uint64_t min_tmo_ns;
+
+} odp_pktin_vector_capability_t;
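A configuration sketch tying the capability and config structures together. The pktio and the ODP_POOL_VECTOR pool are assumed to exist already, and the target vector size of 32 is purely illustrative:

static int enable_pktin_vectors(odp_pktio_t pktio, odp_pool_t vec_pool)
{
	odp_pktio_capability_t capa;
	odp_pktin_queue_param_t pktin_param;
	uint32_t size = 32;	/* illustrative target vector size */

	if (odp_pktio_capability(pktio, &capa) < 0 ||
	    capa.vector.supported == ODP_SUPPORT_NO)
		return -1;	/* packet input vectors not available */

	/* Clamp the requested size into the supported range */
	if (size > capa.vector.max_size)
		size = capa.vector.max_size;
	if (size < capa.vector.min_size)
		size = capa.vector.min_size;

	odp_pktin_queue_param_init(&pktin_param);
	pktin_param.vector.enable = 1;
	pktin_param.vector.pool = vec_pool;	/* ODP_POOL_VECTOR pool */
	pktin_param.vector.max_size = size;
	pktin_param.vector.max_tmo_ns = capa.vector.min_tmo_ns;

	return odp_pktin_queue_config(pktio, &pktin_param);
}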
+
+/**
* Packet IO capabilities
*/
typedef struct odp_pktio_capability_t {
@@ -590,6 +669,10 @@ typedef struct odp_pktio_capability_t {
/** @deprecated Use enable_loop inside odp_pktin_config_t */
odp_bool_t ODP_DEPRECATE(loop_supported);
+
+ /** Packet input vector capability */
+ odp_pktin_vector_capability_t vector;
+
} odp_pktio_capability_t;
/**
diff --git a/include/odp/api/spec/pool.h b/include/odp/api/spec/pool.h
index cdc7f5fef..c30d937e5 100644
--- a/include/odp/api/spec/pool.h
+++ b/include/odp/api/spec/pool.h
@@ -45,6 +45,85 @@ extern "C" {
#define ODP_POOL_MAX_SUBPARAMS 7
/**
+ * Pool statistics counters options
+ *
+ * Pool statistics counters listed in a bit field structure.
+ */
+typedef union odp_pool_stats_opt_t {
+ /** Option flags */
+ struct {
+ /** @see odp_pool_stats_t::available */
+ uint64_t available : 1;
+
+ /** @see odp_pool_stats_t::alloc_ops */
+ uint64_t alloc_ops : 1;
+
+ /** @see odp_pool_stats_t::alloc_fails */
+ uint64_t alloc_fails : 1;
+
+ /** @see odp_pool_stats_t::free_ops */
+ uint64_t free_ops : 1;
+
+ /** @see odp_pool_stats_t::total_ops */
+ uint64_t total_ops : 1;
+
+ /** @see odp_pool_stats_t::cache_available */
+ uint64_t cache_available : 1;
+
+ /** @see odp_pool_stats_t::cache_alloc_ops */
+ uint64_t cache_alloc_ops : 1;
+
+ /** @see odp_pool_stats_t::cache_free_ops */
+ uint64_t cache_free_ops : 1;
+ } bit;
+
+ /** All bits of the bit field structure
+ *
+ * This field can be used to set/clear all flags, or for bitwise
+ * operations over the entire structure. */
+ uint64_t all;
+
+} odp_pool_stats_opt_t;
+
+/**
+ * Pool statistics counters
+ *
+ * In addition to API alloc and free calls, statistics counters may be updated
+ * by alloc/free operations from implementation internal software or hardware
+ * components.
+ */
+typedef struct odp_pool_stats_t {
+ /** The number of available events in the pool */
+ uint64_t available;
+
+ /** The number of alloc operations from the pool. Includes both
+ * successful and failed operations (pool empty). */
+ uint64_t alloc_ops;
+
+ /** The number of failed alloc operations (pool empty) */
+ uint64_t alloc_fails;
+
+ /** The number of free operations to the pool */
+ uint64_t free_ops;
+
+ /** The total number of alloc and free operations. Includes both
+ * successful and failed operations (pool empty). */
+ uint64_t total_ops;
+
+ /** The number of available events in the local caches of all threads
+ * using the pool */
+ uint64_t cache_available;
+
+ /** The number of successful alloc operations from pool caches (returned
+ * at least one event). */
+ uint64_t cache_alloc_ops;
+
+ /** The number of free operations that stored events to pool caches. */
+ uint64_t cache_free_ops;
+
+} odp_pool_stats_t;
+
+/**
* Pool capabilities
*/
typedef struct odp_pool_capability_t {
@@ -76,6 +155,9 @@ typedef struct odp_pool_capability_t {
/** Maximum size of thread local cache */
uint32_t max_cache_size;
+
+ /** Supported statistics counters */
+ odp_pool_stats_opt_t stats;
} buf;
/** Packet pool capabilities */
@@ -161,6 +243,9 @@ typedef struct odp_pool_capability_t {
/** Maximum size of thread local cache */
uint32_t max_cache_size;
+
+ /** Supported statistics counters */
+ odp_pool_stats_opt_t stats;
} pkt;
/** Timeout pool capabilities */
@@ -179,8 +264,35 @@ typedef struct odp_pool_capability_t {
/** Maximum size of thread local cache */
uint32_t max_cache_size;
+
+ /** Supported statistics counters */
+ odp_pool_stats_opt_t stats;
} tmo;
+ /** Vector pool capabilities */
+ struct {
+ /** Maximum number of vector pools */
+ unsigned int max_pools;
+
+ /** Maximum number of vector events in a pool
+ *
+ * The value of zero means that the number is limited only by the
+ * available memory size for the pool. */
+ uint32_t max_num;
+
+ /** Maximum number of handles of a generic type, such as odp_packet_t, in a vector. */
+ uint32_t max_size;
+
+ /** Minimum size of thread local cache */
+ uint32_t min_cache_size;
+
+ /** Maximum size of thread local cache */
+ uint32_t max_cache_size;
+
+ /** Supported statistics counters */
+ odp_pool_stats_opt_t stats;
+ } vector;
+
} odp_pool_capability_t;
/**
@@ -359,6 +471,29 @@ typedef struct odp_pool_param_t {
uint32_t cache_size;
} tmo;
+ /** Parameters for vector pools */
+ struct {
+ /** Number of vectors in the pool */
+ uint32_t num;
+
+ /** Maximum number of handles of a generic type, such as odp_packet_t, in a vector. */
+ uint32_t max_size;
+
+ /** Maximum number of vectors cached locally per thread
+ *
+ * See buf.cache_size documentation for details.
+ */
+ uint32_t cache_size;
+ } vector;
+
+ /**
+ * Configure statistics counters
+ *
+ * An application can read the enabled statistics counters using
+ * odp_pool_stats(). For optimal performance an application should
+ * enable only the required counters.
+ */
+ odp_pool_stats_opt_t stats;
} odp_pool_param_t;
/** Packet pool*/
@@ -367,6 +502,13 @@ typedef struct odp_pool_param_t {
#define ODP_POOL_BUFFER ODP_EVENT_BUFFER
/** Timeout pool */
#define ODP_POOL_TIMEOUT ODP_EVENT_TIMEOUT
+/** Vector pool
+ *
+ * A pool of vector events, where each vector holds an array of handles
+ * of a single generic type, such as odp_packet_t.
+ * @see ODP_EVENT_PACKET_VECTOR
+ */
+#define ODP_POOL_VECTOR (ODP_POOL_TIMEOUT + 1)
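As a sketch of how a vector pool is created with the new parameters (the event count and vector size are illustrative, and a real application would also check odp_pool_capability_t::vector::stats before enabling counters):

static odp_pool_t create_vector_pool(void)
{
	odp_pool_capability_t pool_capa;
	odp_pool_param_t params;

	if (odp_pool_capability(&pool_capa) < 0 ||
	    pool_capa.vector.max_pools == 0)
		return ODP_POOL_INVALID;

	odp_pool_param_init(&params);
	params.type = ODP_POOL_VECTOR;
	params.vector.num = 1024;	/* number of vector events */
	params.vector.max_size = 32;	/* packets per vector */

	/* Enable only the statistics counters that are actually needed */
	params.stats.bit.available = 1;

	return odp_pool_create("pkt_vector_pool", &params);
}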
/**
* Create a pool
@@ -499,6 +641,53 @@ uint64_t odp_pool_to_u64(odp_pool_t hdl);
void odp_pool_param_init(odp_pool_param_t *param);
/**
+ * Maximum pool index
+ *
+ * Return the maximum pool index. Pool indexes (e.g. returned by odp_pool_index())
+ * range from zero to this maximum value.
+ *
+ * @return Maximum pool index
+ */
+unsigned int odp_pool_max_index(void);
+
+/**
+ * Get pool index
+ *
+ * @param pool Pool handle
+ *
+ * @return Pool index (0..odp_pool_max_index())
+ * @retval <0 on failure
+ */
+int odp_pool_index(odp_pool_t pool);
+
+/**
+ * Get statistics for pool handle
+ *
+ * Read the statistics counters enabled using odp_pool_stats_opt_t during pool
+ * creation. The inactive counters are set to zero by the implementation.
+ *
+ * @param pool Pool handle
+ * @param[out] stats Output buffer for counters
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
+ */
+int odp_pool_stats(odp_pool_t pool, odp_pool_stats_t *stats);
+
+/**
+ * Reset statistics for pool handle
+ *
+ * Reset all statistics counters to zero, except odp_pool_stats_t::available
+ * and odp_pool_stats_t::cache_available.
+ *
+ * @param pool Pool handle
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
+ */
+int odp_pool_stats_reset(odp_pool_t pool);
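A minimal read/reset sketch for a pool created with the 'available' counter enabled, as in the pool creation example above (stdio.h and inttypes.h are assumed to be included):

odp_pool_stats_t stats;

if (odp_pool_stats(vec_pool, &stats) == 0)
	printf("pool %d: %" PRIu64 " events available\n",
	       odp_pool_index(vec_pool), stats.available);

/* Counters other than 'available'/'cache_available' restart from zero */
(void)odp_pool_stats_reset(vec_pool);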
+
+/**
* @}
*/
diff --git a/include/odp/api/spec/protocols.h b/include/odp/api/spec/protocols.h
new file mode 100644
index 000000000..71fc59909
--- /dev/null
+++ b/include/odp/api/spec/protocols.h
@@ -0,0 +1,44 @@
+/* Copyright (c) 2020, Marvell
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP protocols
+ */
+
+#ifndef ODP_API_SPEC_PROTOCOLS_H_
+#define ODP_API_SPEC_PROTOCOLS_H_
+#include <odp/visibility_begin.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup odp_protocols
+ * @details
+ * <b> Protocols </b>
+ *
+ * @{
+ */
+
+/** IPv4 address size */
+#define ODP_IPV4_ADDR_SIZE 4
+
+/** IPv6 address size */
+#define ODP_IPV6_ADDR_SIZE 16
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#include <odp/visibility_end.h>
+#endif
diff --git a/include/odp/api/spec/timer.h b/include/odp/api/spec/timer.h
index 1f49e7b40..22c4fbef8 100644
--- a/include/odp/api/spec/timer.h
+++ b/include/odp/api/spec/timer.h
@@ -216,6 +216,27 @@ typedef struct {
*/
odp_timer_res_capability_t max_tmo;
+ /**
+ * Scheduled queue destination support
+ *
+ * This defines whether scheduled queues are supported as timeout
+ * destination queues.
+ * 0: Scheduled queues are not supported as timeout destination queues
+ * 1: Scheduled queues are supported as timeout destination queues
+ * @see odp_timer_alloc()
+ */
+ odp_bool_t queue_type_sched;
+
+ /**
+ * Plain queue destination support
+ *
+ * This defines whether plain queues are supported as timeout
+ * destination queues.
+ * 0: Plain queues are not supported as timeout destination queues
+ * 1: Plain queues are supported as timeout destination queues
+ * @see odp_timer_alloc()
+ */
+ odp_bool_t queue_type_plain;
} odp_timer_capability_t;
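A capability-check sketch for the new destination queue flags; the clock source and the plain queue fallback are illustrative choices:

static int pick_timeout_queue_type(odp_queue_type_t *dst_qtype)
{
	odp_timer_capability_t timer_capa;

	if (odp_timer_capability(ODP_CLOCK_CPU, &timer_capa) < 0)
		return -1;

	/* Prefer a scheduled destination queue, fall back to a plain queue */
	if (timer_capa.queue_type_sched)
		*dst_qtype = ODP_QUEUE_TYPE_SCHED;
	else if (timer_capa.queue_type_plain)
		*dst_qtype = ODP_QUEUE_TYPE_PLAIN;
	else
		return -1;

	return 0;
}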
/**
diff --git a/platform/linux-dpdk/Makefile.am b/platform/linux-dpdk/Makefile.am
index 7c0718d4a..24fad4e39 100644
--- a/platform/linux-dpdk/Makefile.am
+++ b/platform/linux-dpdk/Makefile.am
@@ -31,9 +31,11 @@ odpapiplatinclude_HEADERS = \
include/odp/api/plat/byteorder_inlines.h \
include/odp/api/plat/cpu_inlines.h \
include/odp/api/plat/event_inlines.h \
+ include/odp/api/plat/event_vector_inline_types.h \
include/odp/api/plat/packet_flag_inlines.h \
include/odp/api/plat/packet_inline_types.h \
include/odp/api/plat/packet_inlines.h \
+ include/odp/api/plat/packet_vector_inlines.h \
include/odp/api/plat/pktio_inlines.h \
include/odp/api/plat/pool_inline_types.h \
include/odp/api/plat/queue_inlines.h \
@@ -90,7 +92,6 @@ noinst_HEADERS = \
${top_srcdir}/platform/linux-generic/include/odp_align_internal.h \
${top_srcdir}/platform/linux-generic/include/odp_atomic_internal.h \
include/odp_buffer_internal.h \
- ${top_srcdir}/platform/linux-generic/include/odp_bitmap_internal.h \
${top_srcdir}/platform/linux-generic/include/odp_bitset.h \
${top_srcdir}/platform/linux-generic/include/odp_classification_internal.h \
include/odp_config_internal.h \
@@ -134,6 +135,7 @@ noinst_HEADERS = \
${top_srcdir}/platform/linux-generic/include/odp_timer_internal.h \
${top_srcdir}/platform/linux-generic/include/odp_timer_wheel_internal.h \
${top_srcdir}/platform/linux-generic/include/odp_traffic_mngr_internal.h \
+ ${top_srcdir}/platform/linux-generic/include/odp_event_vector_internal.h \
include/protocols/eth.h \
include/protocols/ip.h \
include/protocols/ipsec.h \
@@ -149,7 +151,6 @@ BUILT_SOURCES = \
__LIB__libodp_dpdk_la_SOURCES = \
../linux-generic/odp_atomic.c \
../linux-generic/odp_barrier.c \
- ../linux-generic/odp_bitmap.c \
odp_buffer.c \
../linux-generic/odp_chksum.c \
../linux-generic/odp_classification.c \
@@ -174,6 +175,7 @@ __LIB__libodp_dpdk_la_SOURCES = \
../linux-generic/odp_libconfig.c \
odp_packet.c \
odp_packet_dpdk.c \
+ ../linux-generic/odp_packet_vector.c \
../linux-generic/pktio/dpdk_parse.c \
odp_packet_flags.c \
../linux-generic/odp_packet_io.c \
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/packet.h b/platform/linux-dpdk/include-abi/odp/api/abi/packet.h
index f482154b7..e99f0d49f 100644
--- a/platform/linux-dpdk/include-abi/odp/api/abi/packet.h
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/packet.h
@@ -34,6 +34,10 @@ typedef ODP_HANDLE_T(odp_packet_seg_t);
#define ODP_PACKET_SEG_INVALID _odp_cast_scalar(odp_packet_seg_t, 0)
+typedef ODP_HANDLE_T(odp_packet_vector_t);
+
+#define ODP_PACKET_VECTOR_INVALID _odp_cast_scalar(odp_packet_vector_t, 0)
+
typedef uint8_t odp_proto_l2_type_t;
#define ODP_PROTO_L2_TYPE_NONE 0
@@ -116,6 +120,7 @@ typedef struct odp_packet_parse_result_flag_t {
} odp_packet_parse_result_flag_t;
#include <odp/api/plat/packet_inlines.h>
+#include <odp/api/plat/packet_vector_inlines.h>
/**
* @}
diff --git a/platform/linux-dpdk/include/event_vector_internal.h b/platform/linux-dpdk/include/event_vector_internal.h
new file mode 120000
index 000000000..8487d0256
--- /dev/null
+++ b/platform/linux-dpdk/include/event_vector_internal.h
@@ -0,0 +1 @@
+../../linux-generic/include/odp_event_vector_internal.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/event_vector_inline_types.h b/platform/linux-dpdk/include/odp/api/plat/event_vector_inline_types.h
new file mode 120000
index 000000000..30b894e27
--- /dev/null
+++ b/platform/linux-dpdk/include/odp/api/plat/event_vector_inline_types.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include/odp/api/plat/event_vector_inline_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/packet_inline_types.h b/platform/linux-dpdk/include/odp/api/plat/packet_inline_types.h
index 891e71ff8..899347839 100644
--- a/platform/linux-dpdk/include/odp/api/plat/packet_inline_types.h
+++ b/platform/linux-dpdk/include/odp/api/plat/packet_inline_types.h
@@ -112,7 +112,7 @@ typedef union {
uint32_t all_flags;
struct {
- uint32_t reserved1: 9;
+ uint32_t reserved1: 10;
/*
* Init flags
@@ -139,15 +139,14 @@ typedef union {
uint32_t udp_err: 1; /* UDP error */
uint32_t sctp_err: 1; /* SCTP error */
uint32_t l4_chksum_err: 1; /* L4 checksum error */
- uint32_t ipsec_err: 1; /* IPsec error */
uint32_t crypto_err: 1; /* Crypto packet operation error */
};
/* Flag groups */
struct {
- uint32_t reserved2: 9;
+ uint32_t reserved2: 10;
uint32_t other: 14; /* All other flags */
- uint32_t error: 9; /* All error flags */
+ uint32_t error: 8; /* All error flags */
} all;
} _odp_packet_flags_t;
diff --git a/platform/linux-dpdk/include/odp/api/plat/packet_inlines.h b/platform/linux-dpdk/include/odp/api/plat/packet_inlines.h
index 149ca1574..9a7e52dd4 100644
--- a/platform/linux-dpdk/include/odp/api/plat/packet_inlines.h
+++ b/platform/linux-dpdk/include/odp/api/plat/packet_inlines.h
@@ -36,12 +36,16 @@ extern "C" {
#include <rte_config.h>
#include <rte_mbuf.h>
-/* ppc64 rte_memcpy.h may overwrite bool with incompatible type */
+/* ppc64 rte_memcpy.h may overwrite bool with an incompatible type and define
+ * vector */
#include <rte_memcpy.h>
#if defined(__PPC64__) && defined(bool)
#undef bool
#define bool _Bool
#endif
+#if defined(__PPC64__) && defined(vector)
+ #undef vector
+#endif
/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
diff --git a/platform/linux-dpdk/include/odp/api/plat/packet_vector_inlines.h b/platform/linux-dpdk/include/odp/api/plat/packet_vector_inlines.h
new file mode 120000
index 000000000..30dd89e0d
--- /dev/null
+++ b/platform/linux-dpdk/include/odp/api/plat/packet_vector_inlines.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include/odp/api/plat/packet_vector_inlines.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/std_clib_inlines.h b/platform/linux-dpdk/include/odp/api/plat/std_clib_inlines.h
index aed77b780..7dbba2be8 100644
--- a/platform/linux-dpdk/include/odp/api/plat/std_clib_inlines.h
+++ b/platform/linux-dpdk/include/odp/api/plat/std_clib_inlines.h
@@ -15,12 +15,16 @@
#if defined(__clang__)
#undef RTE_TOOLCHAIN_GCC
#endif
-/* ppc64 rte_memcpy.h may overwrite bool with incompatible type */
+/* ppc64 rte_memcpy.h may overwrite bool with an incompatible type and define
+ * vector */
#include <rte_memcpy.h>
#if defined(__PPC64__) && defined(bool)
#undef bool
#define bool _Bool
#endif
+#if defined(__PPC64__) && defined(vector)
+ #undef vector
+#endif
#ifndef _ODP_NO_INLINE
/* Inline functions by default */
diff --git a/platform/linux-dpdk/include/odp_buffer_internal.h b/platform/linux-dpdk/include/odp_buffer_internal.h
index f9e36e69b..ed36fd4f3 100644
--- a/platform/linux-dpdk/include/odp_buffer_internal.h
+++ b/platform/linux-dpdk/include/odp_buffer_internal.h
@@ -38,6 +38,10 @@ extern "C" {
#undef RTE_TOOLCHAIN_GCC
#endif
#include <rte_mbuf.h>
+/* ppc64 rte_memcpy.h (included through rte_mbuf.h) may define vector */
+#if defined(__PPC64__) && defined(vector)
+ #undef vector
+#endif
ODP_STATIC_ASSERT(CONFIG_PACKET_SEG_LEN_MIN >= 256,
"ODP Segment size must be a minimum of 256 bytes");
@@ -73,8 +77,6 @@ struct odp_buffer_hdr_t {
void *pool_ptr;
};
-int odp_buffer_snprint(char *str, uint32_t n, odp_buffer_t buf);
-
/*
* Buffer type
*
diff --git a/platform/linux-dpdk/include/odp_config_internal.h b/platform/linux-dpdk/include/odp_config_internal.h
index 0813514e4..9b361f348 100644
--- a/platform/linux-dpdk/include/odp_config_internal.h
+++ b/platform/linux-dpdk/include/odp_config_internal.h
@@ -145,6 +145,9 @@ extern "C" {
*/
#define CONFIG_POOL_MAX_NUM ((1024 * 1024) - 1)
+/* Maximum packet vector size */
+#define CONFIG_PACKET_VECTOR_MAX_SIZE 256
+
#ifdef __cplusplus
}
#endif
diff --git a/platform/linux-dpdk/include/odp_eventdev_internal.h b/platform/linux-dpdk/include/odp_eventdev_internal.h
index db0010883..29e9d7a58 100644
--- a/platform/linux-dpdk/include/odp_eventdev_internal.h
+++ b/platform/linux-dpdk/include/odp_eventdev_internal.h
@@ -151,8 +151,8 @@ typedef struct {
uint8_t started;
} eventdev_local_t;
-extern eventdev_global_t *eventdev_gbl;
-extern __thread eventdev_local_t eventdev_local;
+extern eventdev_global_t *_odp_eventdev_gbl;
+extern __thread eventdev_local_t _odp_eventdev_local;
int service_setup(uint32_t service_id);
@@ -180,7 +180,7 @@ static inline odp_queue_t queue_from_qentry(queue_entry_t *queue)
static inline queue_entry_t *qentry_from_index(uint32_t queue_id)
{
- return &eventdev_gbl->queue[queue_id];
+ return &_odp_eventdev_gbl->queue[queue_id];
}
static inline queue_entry_t *qentry_from_handle(odp_queue_t handle)
diff --git a/platform/linux-dpdk/include/odp_packet_internal.h b/platform/linux-dpdk/include/odp_packet_internal.h
index fb5b88ace..927e609d4 100644
--- a/platform/linux-dpdk/include/odp_packet_internal.h
+++ b/platform/linux-dpdk/include/odp_packet_internal.h
@@ -38,8 +38,12 @@ extern "C" {
#endif
#include <rte_mbuf.h>
#include <rte_memory.h>
+/* ppc64 rte_memcpy.h (included through rte_mbuf.h) may define vector */
+#if defined(__PPC64__) && defined(vector)
+ #undef vector
+#endif
-/** Minimum segment length expected by packet_parse_common() */
+/** Minimum segment length expected by _odp_packet_parse_common() */
#define PACKET_PARSE_SEG_LEN 96
ODP_STATIC_ASSERT(sizeof(_odp_packet_input_flags_t) == sizeof(uint64_t),
@@ -114,6 +118,9 @@ typedef struct {
/* Classifier mark */
uint16_t cls_mark;
+ /* Classifier handle index */
+ uint16_t cos;
+
/* Event subtype */
int8_t subtype;
@@ -202,6 +209,7 @@ static inline void copy_packet_cls_metadata(odp_packet_hdr_t *src_hdr,
{
dst_hdr->p = src_hdr->p;
dst_hdr->dst_queue = src_hdr->dst_queue;
+ dst_hdr->cos = src_hdr->cos;
dst_hdr->timestamp = src_hdr->timestamp;
dst_hdr->cls_mark = src_hdr->cls_mark;
}
@@ -220,9 +228,9 @@ static inline void packet_set_len(odp_packet_hdr_t *pkt_hdr, uint32_t len)
int _odp_packet_copy_md_to_packet(odp_packet_t srcpkt, odp_packet_t dstpkt);
/* Perform packet parse up to a given protocol layer */
-int packet_parse_layer(odp_packet_hdr_t *pkt_hdr,
- odp_proto_layer_t layer,
- odp_proto_chksums_t chksums);
+int _odp_packet_parse_layer(odp_packet_hdr_t *pkt_hdr,
+ odp_proto_layer_t layer,
+ odp_proto_chksums_t chksums);
/* Reset parser metadata for a new parse */
static inline void packet_parse_reset(odp_packet_hdr_t *pkt_hdr, int all)
@@ -268,9 +276,9 @@ static inline void packet_set_ts(odp_packet_hdr_t *pkt_hdr, odp_time_t *ts)
}
}
-int packet_parse_common(packet_parser_t *pkt_hdr, const uint8_t *ptr,
- uint32_t pkt_len, uint32_t seg_len, int layer,
- odp_proto_chksums_t chksums);
+int _odp_packet_parse_common(packet_parser_t *pkt_hdr, const uint8_t *ptr,
+ uint32_t pkt_len, uint32_t seg_len, int layer,
+ odp_proto_chksums_t chksums);
int _odp_packet_set_data(odp_packet_t pkt, uint32_t offset,
uint8_t c, uint32_t len);
diff --git a/platform/linux-dpdk/include/odp_packet_io_internal.h b/platform/linux-dpdk/include/odp_packet_io_internal.h
index 8036e12b4..fbb41377a 100644
--- a/platform/linux-dpdk/include/odp_packet_io_internal.h
+++ b/platform/linux-dpdk/include/odp_packet_io_internal.h
@@ -85,6 +85,7 @@ struct pktio_entry {
/* Statistics counters used outside drivers */
struct {
odp_atomic_u64_t in_discards;
+ odp_atomic_u64_t out_discards;
} stats_extra;
/* Latest Tx timestamp */
odp_atomic_u64_t tx_ts;
@@ -106,6 +107,7 @@ struct pktio_entry {
struct {
odp_queue_t queue;
odp_pktin_queue_t pktin;
+ odp_pktin_vector_config_t vector;
} in_queue[PKTIO_MAX_QUEUES];
struct {
@@ -184,7 +186,7 @@ typedef struct pktio_if_ops {
const odp_pktout_queue_param_t *p);
} pktio_if_ops_t;
-extern void *pktio_entry_ptr[];
+extern void *_odp_pktio_entry_ptr[];
static inline pktio_entry_t *get_pktio_entry(odp_pktio_t pktio)
{
@@ -201,7 +203,7 @@ static inline pktio_entry_t *get_pktio_entry(odp_pktio_t pktio)
idx = odp_pktio_index(pktio);
- return pktio_entry_ptr[idx];
+ return _odp_pktio_entry_ptr[idx];
}
static inline int pktio_cls_enabled(pktio_entry_t *entry)
@@ -230,19 +232,19 @@ static inline void _odp_pktio_tx_ts_set(pktio_entry_t *entry)
odp_atomic_store_u64(&entry->s.tx_ts, ts_val.u64);
}
-extern const pktio_if_ops_t null_pktio_ops;
-extern const pktio_if_ops_t dpdk_pktio_ops;
+extern const pktio_if_ops_t _odp_null_pktio_ops;
+extern const pktio_if_ops_t _odp_dpdk_pktio_ops;
extern const pktio_if_ops_t * const pktio_if_ops[];
/* Dummy function required by odp_pktin_recv_mq_tmo() */
static inline int
-sock_recv_mq_tmo_try_int_driven(const struct odp_pktin_queue_t queues[],
- unsigned num_q ODP_UNUSED,
- unsigned *from ODP_UNUSED,
- odp_packet_t packets[] ODP_UNUSED,
- int num ODP_UNUSED,
- uint64_t usecs ODP_UNUSED,
- int *trial_successful) {
+_odp_sock_recv_mq_tmo_try_int_driven(const struct odp_pktin_queue_t queues[],
+ unsigned int num_q ODP_UNUSED,
+ unsigned int *from ODP_UNUSED,
+ odp_packet_t packets[] ODP_UNUSED,
+ int num ODP_UNUSED,
+ uint64_t usecs ODP_UNUSED,
+ int *trial_successful) {
(void)queues;
*trial_successful = 0;
diff --git a/platform/linux-dpdk/include/odp_pool_internal.h b/platform/linux-dpdk/include/odp_pool_internal.h
index 6e124fd27..d0682ec43 100644
--- a/platform/linux-dpdk/include/odp_pool_internal.h
+++ b/platform/linux-dpdk/include/odp_pool_internal.h
@@ -33,11 +33,15 @@ extern "C" {
#include <rte_config.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>
-/* ppc64 rte_memcpy.h may overwrite bool with incompatible type */
+/* ppc64 rte_memcpy.h may overwrite bool with an incompatible type and define
+ * vector */
#if defined(__PPC64__) && defined(bool)
#undef bool
#define bool _Bool
#endif
+#if defined(__PPC64__) && defined(vector)
+ #undef vector
+#endif
/* Use ticketlock instead of spinlock */
#define POOL_USE_TICKETLOCK
@@ -61,6 +65,7 @@ typedef struct ODP_ALIGNED_CACHE {
odp_pool_param_t params;
odp_pool_t pool_hdl;
struct rte_mempool *rte_mempool;
+ uint32_t pool_idx;
uint32_t seg_len;
} pool_t;
@@ -95,6 +100,8 @@ static inline void buffer_free_multi(odp_buffer_hdr_t *buf_hdr[], int num)
rte_mbuf_raw_free((struct rte_mbuf *)(uintptr_t)buf_hdr[i]);
}
+int _odp_buffer_is_valid(odp_buffer_t buf);
+
#ifdef __cplusplus
}
#endif
diff --git a/platform/linux-dpdk/include/odp_queue_basic_internal.h b/platform/linux-dpdk/include/odp_queue_basic_internal.h
index c9f4d5e8d..c3ddaf334 100644
--- a/platform/linux-dpdk/include/odp_queue_basic_internal.h
+++ b/platform/linux-dpdk/include/odp_queue_basic_internal.h
@@ -84,7 +84,7 @@ typedef struct queue_global_t {
} queue_global_t;
-extern queue_global_t *queue_glb;
+extern queue_global_t *_odp_queue_glb;
static inline uint32_t queue_to_index(odp_queue_t handle)
{
@@ -95,7 +95,7 @@ static inline uint32_t queue_to_index(odp_queue_t handle)
static inline queue_entry_t *qentry_from_index(uint32_t queue_id)
{
- return &queue_glb->queue[queue_id];
+ return &_odp_queue_glb->queue[queue_id];
}
static inline odp_queue_t queue_from_index(uint32_t queue_id)
@@ -108,13 +108,13 @@ static inline queue_entry_t *qentry_from_handle(odp_queue_t handle)
return (queue_entry_t *)(uintptr_t)handle;
}
-void queue_spsc_init(queue_entry_t *queue, uint32_t queue_size);
+void _odp_queue_spsc_init(queue_entry_t *queue, uint32_t queue_size);
/* Functions for schedulers */
-void sched_queue_set_status(uint32_t queue_index, int status);
-int sched_queue_deq(uint32_t queue_index, odp_event_t ev[], int num,
- int update_status);
-int sched_queue_empty(uint32_t queue_index);
+void _odp_sched_queue_set_status(uint32_t queue_index, int status);
+int _odp_sched_queue_deq(uint32_t queue_index, odp_event_t ev[], int num,
+ int update_status);
+int _odp_sched_queue_empty(uint32_t queue_index);
#ifdef __cplusplus
}
diff --git a/platform/linux-dpdk/m4/configure.m4 b/platform/linux-dpdk/m4/configure.m4
index c9bfc25f1..0900c49d6 100644
--- a/platform/linux-dpdk/m4/configure.m4
+++ b/platform/linux-dpdk/m4/configure.m4
@@ -67,6 +67,8 @@ AC_CONFIG_FILES([platform/linux-dpdk/Makefile
platform/linux-dpdk/dumpconfig/Makefile
platform/linux-dpdk/test/Makefile
platform/linux-dpdk/test/example/Makefile
+ platform/linux-dpdk/test/example/classifier/Makefile
+ platform/linux-dpdk/test/example/generator/Makefile
platform/linux-dpdk/test/example/l2fwd_simple/Makefile
platform/linux-dpdk/test/example/l3fwd/Makefile
platform/linux-dpdk/test/example/packet/Makefile
diff --git a/platform/linux-dpdk/odp_buffer.c b/platform/linux-dpdk/odp_buffer.c
index e101cdad4..b8bccd3ba 100644
--- a/platform/linux-dpdk/odp_buffer.c
+++ b/platform/linux-dpdk/odp_buffer.c
@@ -47,52 +47,37 @@ void _odp_buffer_type_set(odp_buffer_t buf, int type)
int odp_buffer_is_valid(odp_buffer_t buf)
{
- /* We could call rte_mbuf_sanity_check, but that panics
- * and aborts the program */
- return buf != ODP_BUFFER_INVALID;
+ if (_odp_buffer_is_valid(buf) == 0)
+ return 0;
+
+ if (odp_event_type(odp_buffer_to_event(buf)) != ODP_EVENT_BUFFER)
+ return 0;
+
+ return 1;
}
-int odp_buffer_snprint(char *str, uint32_t n, odp_buffer_t buf)
+void odp_buffer_print(odp_buffer_t buf)
{
odp_buffer_hdr_t *hdr;
pool_t *pool;
int len = 0;
+ int max_len = 512;
+ int n = max_len - 1;
+ char str[max_len];
if (!odp_buffer_is_valid(buf)) {
- ODP_PRINT("Buffer is not valid.\n");
- return len;
+ ODP_ERR("Buffer is not valid.\n");
+ return;
}
hdr = buf_hdl_to_hdr(buf);
pool = hdr->pool_ptr;
- len += snprintf(&str[len], n - len,
- "Buffer\n");
- len += snprintf(&str[len], n - len,
- " pool %" PRIu64 "\n",
- odp_pool_to_u64(pool->pool_hdl));
- len += snprintf(&str[len], n - len,
- " phy_addr %" PRIu64 "\n", hdr->mb.buf_physaddr);
- len += snprintf(&str[len], n - len,
- " addr %p\n", hdr->mb.buf_addr);
- len += snprintf(&str[len], n - len,
- " size %u\n", hdr->mb.buf_len);
- len += snprintf(&str[len], n - len,
- " ref_count %i\n",
- rte_mbuf_refcnt_read(&hdr->mb));
- len += snprintf(&str[len], n - len,
- " odp type %i\n", hdr->type);
-
- return len;
-}
-
-void odp_buffer_print(odp_buffer_t buf)
-{
- int max_len = 512;
- char str[max_len];
- int len;
-
- len = odp_buffer_snprint(str, max_len - 1, buf);
+ len += snprintf(&str[len], n - len, "Buffer\n------\n");
+ len += snprintf(&str[len], n - len, " pool index %u\n", pool->pool_idx);
+ len += snprintf(&str[len], n - len, " buffer index %u\n", hdr->index);
+ len += snprintf(&str[len], n - len, " addr %p\n", odp_buffer_addr(buf));
+ len += snprintf(&str[len], n - len, " size %u\n", odp_buffer_size(buf));
str[len] = 0;
ODP_PRINT("\n%s\n", str);
diff --git a/platform/linux-dpdk/odp_init.c b/platform/linux-dpdk/odp_init.c
index d869d38c5..98f7d3926 100644
--- a/platform/linux-dpdk/odp_init.c
+++ b/platform/linux-dpdk/odp_init.c
@@ -539,14 +539,14 @@ static int term_local(enum init_stage stage)
case ALL_INIT:
case SCHED_INIT:
- if (sched_fn->term_local()) {
+ if (_odp_sched_fn->term_local()) {
ODP_ERR("ODP schedule local term failed.\n");
rc = -1;
}
/* Fall through */
case QUEUE_INIT:
- if (queue_fn->term_local()) {
+ if (_odp_queue_fn->term_local()) {
ODP_ERR("ODP queue local term failed.\n");
rc = -1;
}
@@ -656,13 +656,13 @@ int odp_init_local(odp_instance_t instance, odp_thread_type_t thr_type)
}
stage = POOL_INIT;
- if (queue_fn->init_local()) {
+ if (_odp_queue_fn->init_local()) {
ODP_ERR("ODP queue local init failed.\n");
goto init_fail;
}
stage = QUEUE_INIT;
- if (sched_fn->init_local()) {
+ if (_odp_sched_fn->init_local()) {
ODP_ERR("ODP schedule local init failed.\n");
goto init_fail;
}
diff --git a/platform/linux-dpdk/odp_packet.c b/platform/linux-dpdk/odp_packet.c
index 9ce585225..a940bb7bf 100644
--- a/platform/linux-dpdk/odp_packet.c
+++ b/platform/linux-dpdk/odp_packet.c
@@ -1036,10 +1036,12 @@ void odp_packet_print(odp_packet_t pkt)
int len = 0;
int n = max_len - 1;
odp_packet_hdr_t *hdr = packet_hdr(pkt);
- odp_buffer_t buf = packet_to_buffer(pkt);
+ pool_t *pool = hdr->buf_hdr.pool_ptr;
- len += snprintf(&str[len], n - len, "Packet ");
- len += odp_buffer_snprint(&str[len], n - len, buf);
+ len += snprintf(&str[len], n - len, "Packet\n------\n");
+ len += snprintf(&str[len], n - len, " pool index %u\n", pool->pool_idx);
+ len += snprintf(&str[len], n - len, " buf index %u\n", hdr->buf_hdr.index);
+ len += snprintf(&str[len], n - len, " ev subtype %i\n", hdr->subtype);
len += snprintf(&str[len], n - len, " input_flags 0x%" PRIx64 "\n",
hdr->p.input_flags.all);
if (hdr->p.input_flags.all) {
@@ -1152,12 +1154,33 @@ void odp_packet_print_data(odp_packet_t pkt, uint32_t offset,
int odp_packet_is_valid(odp_packet_t pkt)
{
- if (odp_buffer_is_valid(packet_to_buffer(pkt)) == 0)
+ odp_event_t ev;
+
+ if (pkt == ODP_PACKET_INVALID)
return 0;
- if (odp_event_type(odp_packet_to_event(pkt)) != ODP_EVENT_PACKET)
+ if (_odp_buffer_is_valid(packet_to_buffer(pkt)) == 0)
+ return 0;
+
+ ev = odp_packet_to_event(pkt);
+
+ if (odp_event_type(ev) != ODP_EVENT_PACKET)
return 0;
+ switch (odp_event_subtype(ev)) {
+ case ODP_EVENT_PACKET_BASIC:
+ /* Fall through */
+ case ODP_EVENT_PACKET_COMP:
+ /* Fall through */
+ case ODP_EVENT_PACKET_CRYPTO:
+ /* Fall through */
+ case ODP_EVENT_PACKET_IPSEC:
+ /* Fall through */
+ break;
+ default:
+ return 0;
+ }
+
return 1;
}
@@ -1177,6 +1200,7 @@ int _odp_packet_copy_md_to_packet(odp_packet_t srcpkt, odp_packet_t dstpkt)
dsthdr->input = srchdr->input;
dsthdr->dst_queue = srchdr->dst_queue;
+ dsthdr->cos = srchdr->cos;
dsthdr->cls_mark = srchdr->cls_mark;
dsthdr->buf_hdr.mb.userdata = srchdr->buf_hdr.mb.userdata;
@@ -1383,7 +1407,7 @@ static inline uint16_t parse_eth(packet_parser_t *prs, const uint8_t **parseptr,
goto error;
}
ethtype = odp_be_to_cpu_16(*((const uint16_t *)(uintptr_t)
- (parseptr + 6)));
+ (*parseptr + 6)));
*offset += 8;
*parseptr += 8;
}
@@ -1754,9 +1778,9 @@ int packet_parse_common_l3_l4(packet_parser_t *prs, const uint8_t *parseptr,
* The function expects at least PACKET_PARSE_SEG_LEN bytes of data to be
* available from the ptr. Also parse metadata must be already initialized.
*/
-int packet_parse_common(packet_parser_t *prs, const uint8_t *ptr,
- uint32_t frame_len, uint32_t seg_len,
- int layer, odp_proto_chksums_t chksums)
+int _odp_packet_parse_common(packet_parser_t *prs, const uint8_t *ptr,
+ uint32_t frame_len, uint32_t seg_len,
+ int layer, odp_proto_chksums_t chksums)
{
uint32_t offset;
uint16_t ethtype;
@@ -2026,9 +2050,9 @@ static int packet_l4_chksum(odp_packet_hdr_t *pkt_hdr,
/**
* Simple packet parser
*/
-int packet_parse_layer(odp_packet_hdr_t *pkt_hdr,
- odp_proto_layer_t layer,
- odp_proto_chksums_t chksums)
+int _odp_packet_parse_layer(odp_packet_hdr_t *pkt_hdr,
+ odp_proto_layer_t layer,
+ odp_proto_chksums_t chksums)
{
odp_packet_t pkt = packet_handle(pkt_hdr);
uint32_t seg_len = odp_packet_seg_len(pkt);
diff --git a/platform/linux-dpdk/odp_packet_dpdk.c b/platform/linux-dpdk/odp_packet_dpdk.c
index 29fb3817b..9d51d9b05 100644
--- a/platform/linux-dpdk/odp_packet_dpdk.c
+++ b/platform/linux-dpdk/odp_packet_dpdk.c
@@ -141,8 +141,8 @@ static inline pkt_dpdk_t *pkt_priv(pktio_entry_t *pktio_entry)
* will be picked.
* Array must be NULL terminated */
const pktio_if_ops_t * const pktio_if_ops[] = {
- &dpdk_pktio_ops,
- &null_pktio_ops,
+ &_odp_dpdk_pktio_ops,
+ &_odp_null_pktio_ops,
NULL
};
@@ -466,8 +466,8 @@ static int dpdk_term_global(void)
int ret = 0;
/* Eventdev takes care of closing pktio devices */
- if (!eventdev_gbl ||
- eventdev_gbl->rx_adapter.status == RX_ADAPTER_INIT) {
+ if (!_odp_eventdev_gbl ||
+ _odp_eventdev_gbl->rx_adapter.status == RX_ADAPTER_INIT) {
uint16_t port_id;
if (dpdk_glb->loopback_ring) {
@@ -750,8 +750,8 @@ static int close_pkt_dpdk(pktio_entry_t *pktio_entry)
{
pkt_dpdk_t * const pkt_dpdk = pkt_priv(pktio_entry);
- if (eventdev_gbl &&
- eventdev_gbl->rx_adapter.status != RX_ADAPTER_INIT)
+ if (_odp_eventdev_gbl &&
+ _odp_eventdev_gbl->rx_adapter.status != RX_ADAPTER_INIT)
rx_adapter_port_stop(pkt_dpdk->port_id);
else
rte_eth_dev_stop(pkt_dpdk->port_id);
@@ -1008,8 +1008,8 @@ int input_pkts(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[], int num)
/* DPDK ring pmd doesn't support packet parsing */
if (pkt_dpdk->flags.loopback) {
- packet_parse_layer(pkt_hdr, parse_layer,
- pktio_entry->s.in_chksums);
+ _odp_packet_parse_layer(pkt_hdr, parse_layer,
+ pktio_entry->s.in_chksums);
} else {
if (_odp_dpdk_packet_parse_layer(pkt_hdr, mbuf,
parse_layer,
@@ -1055,9 +1055,9 @@ int input_pkts(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[], int num)
continue;
}
}
- if (cls_classify_packet(pktio_entry, data, pkt_len,
- pkt_len, &new_pool, &parsed_hdr,
- pkt_dpdk->flags.loopback)) {
+ if (_odp_cls_classify_packet(pktio_entry, data, pkt_len,
+ pkt_len, &new_pool, &parsed_hdr,
+ pkt_dpdk->flags.loopback)) {
failed++;
odp_packet_free(pkt);
continue;
@@ -1572,7 +1572,7 @@ static int stats_reset_pkt_dpdk(pktio_entry_t *pktio_entry)
return 0;
}
-const pktio_if_ops_t dpdk_pktio_ops = {
+const pktio_if_ops_t _odp_dpdk_pktio_ops = {
.name = "odp-dpdk",
.print = NULL,
.init_global = dpdk_init_global,
diff --git a/platform/linux-dpdk/odp_pool.c b/platform/linux-dpdk/odp_pool.c
index 0f9c87edc..c21d453a6 100644
--- a/platform/linux-dpdk/odp_pool.c
+++ b/platform/linux-dpdk/odp_pool.c
@@ -21,6 +21,7 @@
#include <odp_debug_internal.h>
#include <odp/api/cpumask.h>
#include <odp_libconfig_internal.h>
+#include <odp_event_vector_internal.h>
#include <string.h>
#include <stdlib.h>
@@ -32,6 +33,11 @@
#include <rte_config.h>
#include <rte_errno.h>
#include <rte_version.h>
+#include <rte_mempool.h>
+/* ppc64 rte_memcpy.h (included through rte_mempool.h) may define vector */
+#if defined(__PPC64__) && defined(vector)
+ #undef vector
+#endif
#ifdef POOL_USE_TICKETLOCK
#include <odp/api/ticketlock.h>
@@ -69,6 +75,49 @@ static inline odp_pool_t pool_index_to_handle(uint32_t pool_idx)
return _odp_cast_scalar(odp_pool_t, pool_idx + 1);
}
+struct mem_cb_arg_t {
+ uint8_t *addr;
+ odp_bool_t match;
+};
+
+static void ptr_from_mempool(struct rte_mempool *mp ODP_UNUSED, void *opaque,
+ struct rte_mempool_memhdr *memhdr,
+ unsigned int mem_idx ODP_UNUSED)
+{
+ struct mem_cb_arg_t *args = (struct mem_cb_arg_t *)opaque;
+ uint8_t *min_addr = (uint8_t *)memhdr->addr;
+ uint8_t *max_addr = min_addr + memhdr->len;
+
+ /* Match found already */
+ if (args->match)
+ return;
+
+ if (args->addr >= min_addr && args->addr < max_addr)
+ args->match = true;
+}
+
+static pool_t *find_pool(odp_buffer_hdr_t *buf_hdr)
+{
+ int i;
+
+ for (i = 0; i < ODP_CONFIG_POOLS; i++) {
+ pool_t *pool = pool_entry(i);
+ struct mem_cb_arg_t args;
+
+ if (pool->rte_mempool == NULL)
+ continue;
+
+ args.addr = (uint8_t *)buf_hdr;
+ args.match = false;
+ rte_mempool_mem_iter(pool->rte_mempool, ptr_from_mempool, &args);
+
+ if (args.match)
+ return pool;
+ }
+
+ return NULL;
+}
+
static int read_config_file(pool_global_t *pool_gbl)
{
const char *str;
@@ -122,11 +171,15 @@ int _odp_pool_init_global(void)
LOCK_INIT(&pool->lock);
pool->pool_hdl = pool_index_to_handle(i);
+ pool->pool_idx = i;
}
ODP_DBG("\nPool init global\n");
- ODP_DBG(" odp_buffer_hdr_t size %zu\n", sizeof(odp_buffer_hdr_t));
- ODP_DBG(" odp_packet_hdr_t size %zu\n", sizeof(odp_packet_hdr_t));
+ ODP_DBG(" odp_buffer_hdr_t size: %zu\n", sizeof(odp_buffer_hdr_t));
+ ODP_DBG(" odp_packet_hdr_t size: %zu\n", sizeof(odp_packet_hdr_t));
+ ODP_DBG(" odp_timeout_hdr_t size: %zu\n", sizeof(odp_timeout_hdr_t));
+ ODP_DBG(" odp_event_vector_hdr_t size: %zu\n", sizeof(odp_event_vector_hdr_t));
+
ODP_DBG("\n");
return 0;
@@ -156,14 +209,40 @@ int _odp_pool_term_local(void)
return 0;
}
+int _odp_buffer_is_valid(odp_buffer_t buf)
+{
+ pool_t *pool;
+ odp_buffer_hdr_t *buf_hdr = buf_hdl_to_hdr(buf);
+
+ if (buf == ODP_BUFFER_INVALID)
+ return 0;
+
+ /* Check that buffer header is from a known pool */
+ pool = find_pool(buf_hdr);
+ if (pool == NULL)
+ return 0;
+
+ if (pool != buf_hdr->pool_ptr)
+ return 0;
+
+ if (buf_hdr->index >= pool->rte_mempool->size)
+ return 0;
+
+ return 1;
+}
+
int odp_pool_capability(odp_pool_capability_t *capa)
{
- unsigned int max_pools;
+ odp_pool_stats_opt_t supported_stats;
+ /* Reserve one pool for internal usage */
+ unsigned int max_pools = ODP_CONFIG_POOLS - 1;
memset(capa, 0, sizeof(odp_pool_capability_t));
- /* Reserve one pool for internal usage */
- max_pools = ODP_CONFIG_POOLS - 1;
+ capa->max_pools = max_pools;
+
+ supported_stats.all = 0;
+ supported_stats.bit.available = 1;
/* Buffer pools */
capa->buf.max_pools = max_pools;
@@ -172,6 +251,7 @@ int odp_pool_capability(odp_pool_capability_t *capa)
capa->buf.max_num = CONFIG_POOL_MAX_NUM;
capa->buf.min_cache_size = 0;
capa->buf.max_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
+ capa->buf.stats.all = supported_stats.all;
/* Packet pools */
capa->pkt.max_align = ODP_CONFIG_BUFFER_ALIGN_MIN;
@@ -187,12 +267,22 @@ int odp_pool_capability(odp_pool_capability_t *capa)
capa->pkt.max_uarea_size = MAX_SIZE;
capa->pkt.min_cache_size = 0;
capa->pkt.max_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
+ capa->pkt.stats.all = supported_stats.all;
/* Timeout pools */
capa->tmo.max_pools = max_pools;
capa->tmo.max_num = CONFIG_POOL_MAX_NUM;
capa->tmo.min_cache_size = 0;
capa->tmo.max_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
+ capa->tmo.stats.all = supported_stats.all;
+
+ /* Vector pools */
+ capa->vector.max_pools = max_pools;
+ capa->vector.max_num = CONFIG_POOL_MAX_NUM;
+ capa->vector.max_size = CONFIG_PACKET_VECTOR_MAX_SIZE;
+ capa->vector.min_cache_size = 0;
+ capa->vector.max_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
+ capa->vector.stats.all = supported_stats.all;
return 0;
}
@@ -201,7 +291,8 @@ struct mbuf_ctor_arg {
pool_t *pool;
uint16_t seg_buf_offset; /* To skip the ODP buf/pkt/tmo header */
uint16_t seg_buf_size; /* size of user data */
- int type;
+ int type; /* ODP pool type */
+ int event_type; /* ODP event type */
int pkt_uarea_size; /* size of user area in bytes */
};
@@ -261,7 +352,15 @@ odp_dpdk_mbuf_ctor(struct rte_mempool *mp,
buf_hdr->index = i;
buf_hdr->pool_ptr = mb_ctor_arg->pool;
buf_hdr->type = mb_ctor_arg->type;
- buf_hdr->event_type = mb_ctor_arg->type;
+ buf_hdr->event_type = mb_ctor_arg->event_type;
+
+ /* Initialize event vector metadata */
+ if (mb_ctor_arg->type == ODP_POOL_VECTOR) {
+ odp_event_vector_hdr_t *vect_hdr;
+
+ vect_hdr = (odp_event_vector_hdr_t *)raw_mbuf;
+ vect_hdr->size = 0;
+ }
}
#define CHECK_U16_OVERFLOW(X) do { \
@@ -302,6 +401,11 @@ static int check_params(const odp_pool_param_t *params)
return -1;
}
+ if (params->stats.all & ~capa.buf.stats.all) {
+ ODP_ERR("Unsupported pool statistics counter\n");
+ return -1;
+ }
+
break;
case ODP_POOL_PACKET:
@@ -349,6 +453,11 @@ static int check_params(const odp_pool_param_t *params)
return -1;
}
+ if (params->stats.all & ~capa.pkt.stats.all) {
+ ODP_ERR("Unsupported pool statistics counter\n");
+ return -1;
+ }
+
break;
case ODP_POOL_TIMEOUT:
@@ -363,6 +472,44 @@ static int check_params(const odp_pool_param_t *params)
return -1;
}
+ if (params->stats.all & ~capa.tmo.stats.all) {
+ ODP_ERR("Unsupported pool statistics counter\n");
+ return -1;
+ }
+
+ break;
+
+ case ODP_POOL_VECTOR:
+ if (params->vector.num == 0) {
+ ODP_ERR("vector.num zero\n");
+ return -1;
+ }
+
+ if (params->vector.num > capa.vector.max_num) {
+ ODP_ERR("vector.num too large %u\n", params->vector.num);
+ return -1;
+ }
+
+ if (params->vector.max_size == 0) {
+ ODP_ERR("vector.max_size zero\n");
+ return -1;
+ }
+
+ if (params->vector.max_size > capa.vector.max_size) {
+ ODP_ERR("vector.max_size too large %u\n", params->vector.max_size);
+ return -1;
+ }
+
+ if (params->vector.cache_size > capa.vector.max_cache_size) {
+ ODP_ERR("vector.cache_size too large %u\n", params->vector.cache_size);
+ return -1;
+ }
+
+ if (params->stats.all & ~capa.vector.stats.all) {
+ ODP_ERR("Unsupported pool statistics counter\n");
+ return -1;
+ }
+
break;
default:
@@ -436,6 +583,7 @@ odp_pool_t odp_pool_create(const char *name, const odp_pool_param_t *params)
pool_t *pool;
uint32_t buf_align, blk_size, headroom, tailroom, min_seg_len;
uint32_t max_len, min_align;
+ int8_t event_type;
char pool_name[ODP_POOL_NAME_LEN];
char rte_name[RTE_MEMPOOL_NAMESIZE];
@@ -490,9 +638,10 @@ odp_pool_t odp_pool_create(const char *name, const odp_pool_param_t *params)
CHECK_U16_OVERFLOW(blk_size);
mbp_ctor_arg.mbuf_data_room_size = blk_size;
num = params->buf.num;
- ODP_DBG("type: buffer name: %s num: "
- "%u size: %u align: %u\n", pool_name, num,
- params->buf.size, params->buf.align);
+ event_type = ODP_EVENT_BUFFER;
+
+ ODP_DBG("type: buffer, name: %s, num: %u, size: %u, align: %u\n",
+ pool_name, num, params->buf.size, params->buf.align);
break;
case ODP_POOL_PACKET:
headroom = CONFIG_PACKET_HEADROOM;
@@ -531,21 +680,30 @@ odp_pool_t odp_pool_create(const char *name, const odp_pool_param_t *params)
CHECK_U16_OVERFLOW(blk_size);
mbp_ctor_arg.mbuf_data_room_size = blk_size;
num = params->pkt.num;
+ event_type = ODP_EVENT_PACKET;
- ODP_DBG("type: packet, name: %s, "
- "num: %u, len: %u, blk_size: %u, "
- "uarea_size %d, hdr_size %d\n",
- pool_name, num, params->pkt.len, blk_size,
- params->pkt.uarea_size, hdr_size);
+ ODP_DBG("type: packet, name: %s, num: %u, len: %u, blk_size: %u, "
+ "uarea_size: %d, hdr_size: %d\n", pool_name, num, params->pkt.len,
+ blk_size, params->pkt.uarea_size, hdr_size);
break;
case ODP_POOL_TIMEOUT:
hdr_size = sizeof(odp_timeout_hdr_t);
mbp_ctor_arg.mbuf_data_room_size = 0;
num = params->tmo.num;
cache_size = params->tmo.cache_size;
+ event_type = ODP_EVENT_TIMEOUT;
+
+ ODP_DBG("type: tmo, name: %s, num: %u\n", pool_name, num);
+ break;
+ case ODP_POOL_VECTOR:
+ hdr_size = sizeof(odp_event_vector_hdr_t) +
+ (params->vector.max_size * sizeof(odp_packet_t));
+ mbp_ctor_arg.mbuf_data_room_size = 0;
+ num = params->vector.num;
+ cache_size = params->vector.cache_size;
+ event_type = ODP_EVENT_PACKET_VECTOR;
- ODP_DBG("type: tmo name: %s num: %u\n",
- pool_name, num);
+ ODP_DBG("type: vector, name: %s, num: %u\n", pool_name, num);
break;
default:
ODP_ERR("Bad type %i\n",
@@ -558,6 +716,7 @@ odp_pool_t odp_pool_create(const char *name, const odp_pool_param_t *params)
(uint16_t)ROUNDUP_CACHE_LINE(hdr_size);
mb_ctor_arg.seg_buf_size = mbp_ctor_arg.mbuf_data_room_size;
mb_ctor_arg.type = params->type;
+ mb_ctor_arg.event_type = event_type;
mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
mb_ctor_arg.pool = pool;
mbp_ctor_arg.mbuf_priv_size = mb_ctor_arg.seg_buf_offset -
@@ -649,7 +808,8 @@ static inline int buffer_alloc_multi(pool_t *pool, odp_buffer_hdr_t *buf_hdr[],
struct rte_mempool *mp = pool->rte_mempool;
ODP_ASSERT(pool->params.type == ODP_POOL_BUFFER ||
- pool->params.type == ODP_POOL_TIMEOUT);
+ pool->params.type == ODP_POOL_TIMEOUT ||
+ pool->params.type == ODP_POOL_VECTOR);
for (i = 0; i < num; i++) {
struct rte_mbuf *mbuf;
@@ -765,10 +925,54 @@ void odp_pool_param_init(odp_pool_param_t *params)
params->buf.cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
params->pkt.cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
params->tmo.cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
-
+ params->vector.cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
}
uint64_t odp_pool_to_u64(odp_pool_t hdl)
{
return _odp_pri(hdl);
}
+
+unsigned int odp_pool_max_index(void)
+{
+ return ODP_CONFIG_POOLS - 1;
+}
+
+int odp_pool_index(odp_pool_t pool_hdl)
+{
+ pool_t *pool;
+
+ ODP_ASSERT(pool_hdl != ODP_POOL_INVALID);
+
+ pool = pool_entry_from_hdl(pool_hdl);
+
+ return pool->pool_idx;
+}
+
+int odp_pool_stats(odp_pool_t pool_hdl, odp_pool_stats_t *stats)
+{
+ pool_t *pool;
+
+ if (odp_unlikely(pool_hdl == ODP_POOL_INVALID)) {
+ ODP_ERR("Invalid pool handle\n");
+ return -1;
+ }
+ if (odp_unlikely(stats == NULL)) {
+ ODP_ERR("Output buffer NULL\n");
+ return -1;
+ }
+
+ pool = pool_entry_from_hdl(pool_hdl);
+
+ memset(stats, 0, sizeof(odp_pool_stats_t));
+
+ if (pool->params.stats.bit.available)
+ stats->available = rte_mempool_avail_count(pool->rte_mempool);
+
+ return 0;
+}
+
+int odp_pool_stats_reset(odp_pool_t pool_hdl ODP_UNUSED)
+{
+ return 0;
+}
diff --git a/platform/linux-dpdk/odp_queue_basic.c b/platform/linux-dpdk/odp_queue_basic.c
index d5dc9b1b3..a006bcc54 100644
--- a/platform/linux-dpdk/odp_queue_basic.c
+++ b/platform/linux-dpdk/odp_queue_basic.c
@@ -42,7 +42,7 @@
static int queue_init(queue_entry_t *queue, const char *name,
const odp_queue_param_t *param);
-queue_global_t *queue_glb;
+queue_global_t *_odp_queue_glb;
extern _odp_queue_inline_offset_t _odp_queue_inline_offset;
static int queue_capa(odp_queue_capability_t *capa, int sched ODP_UNUSED)
@@ -52,16 +52,16 @@ static int queue_capa(odp_queue_capability_t *capa, int sched ODP_UNUSED)
/* Reserve some queues for internal use */
capa->max_queues = CONFIG_MAX_QUEUES - CONFIG_INTERNAL_QUEUES;
capa->plain.max_num = CONFIG_MAX_PLAIN_QUEUES;
- capa->plain.max_size = queue_glb->config.max_queue_size;
- capa->plain.lockfree.max_num = queue_glb->queue_lf_num;
- capa->plain.lockfree.max_size = queue_glb->queue_lf_size;
+ capa->plain.max_size = _odp_queue_glb->config.max_queue_size;
+ capa->plain.lockfree.max_num = _odp_queue_glb->queue_lf_num;
+ capa->plain.lockfree.max_size = _odp_queue_glb->queue_lf_size;
#if ODP_DEPRECATED_API
capa->sched.max_num = CONFIG_MAX_SCHED_QUEUES;
- capa->sched.max_size = queue_glb->config.max_queue_size;
+ capa->sched.max_size = _odp_queue_glb->config.max_queue_size;
if (sched) {
- capa->max_ordered_locks = sched_fn->max_ordered_locks();
- capa->max_sched_groups = sched_fn->num_grps();
+ capa->max_ordered_locks = _odp_sched_fn->max_ordered_locks();
+ capa->max_sched_groups = _odp_sched_fn->num_grps();
capa->sched_prios = odp_schedule_num_prio();
}
#endif
@@ -69,7 +69,7 @@ static int queue_capa(odp_queue_capability_t *capa, int sched ODP_UNUSED)
return 0;
}
-static int read_config_file(queue_global_t *queue_glb)
+static int read_config_file(queue_global_t *_odp_queue_glb)
{
const char *str;
uint32_t val_u32;
@@ -90,7 +90,7 @@ static int read_config_file(queue_global_t *queue_glb)
return -1;
}
- queue_glb->config.max_queue_size = val_u32;
+ _odp_queue_glb->config.max_queue_size = val_u32;
ODP_PRINT(" %s: %u\n", str, val_u32);
str = "queue_basic.default_queue_size";
@@ -101,13 +101,13 @@ static int read_config_file(queue_global_t *queue_glb)
val_u32 = val;
- if (val_u32 > queue_glb->config.max_queue_size ||
+ if (val_u32 > _odp_queue_glb->config.max_queue_size ||
val_u32 < MIN_QUEUE_SIZE) {
ODP_ERR("Bad value %s = %u\n", str, val_u32);
return -1;
}
- queue_glb->config.default_queue_size = val_u32;
+ _odp_queue_glb->config.default_queue_size = val_u32;
ODP_PRINT(" %s: %u\n\n", str, val_u32);
return 0;
@@ -133,12 +133,12 @@ static int queue_init_global(void)
sizeof(queue_global_t),
sizeof(queue_entry_t), 0);
- queue_glb = odp_shm_addr(shm);
+ _odp_queue_glb = odp_shm_addr(shm);
- if (queue_glb == NULL)
+ if (_odp_queue_glb == NULL)
return -1;
- memset(queue_glb, 0, sizeof(queue_global_t));
+ memset(_odp_queue_glb, 0, sizeof(queue_global_t));
for (i = 0; i < CONFIG_MAX_QUEUES; i++) {
/* init locks */
@@ -148,18 +148,18 @@ static int queue_init_global(void)
queue->s.handle = (odp_queue_t)queue;
}
- if (read_config_file(queue_glb)) {
+ if (read_config_file(_odp_queue_glb)) {
odp_shm_free(shm);
return -1;
}
- queue_glb->queue_gbl_shm = shm;
- queue_glb->queue_ring_shm = ODP_SHM_INVALID;
- queue_glb->ring_data = NULL;
+ _odp_queue_glb->queue_gbl_shm = shm;
+ _odp_queue_glb->queue_ring_shm = ODP_SHM_INVALID;
+ _odp_queue_glb->ring_data = NULL;
- lf_func = &queue_glb->queue_lf_func;
- queue_glb->queue_lf_num = queue_lf_init_global(&lf_size, lf_func);
- queue_glb->queue_lf_size = lf_size;
+ lf_func = &_odp_queue_glb->queue_lf_func;
+ _odp_queue_glb->queue_lf_num = _odp_queue_lf_init_global(&lf_size, lf_func);
+ _odp_queue_glb->queue_lf_size = lf_size;
queue_capa(&capa, 0);
@@ -199,9 +199,9 @@ static int queue_term_global(void)
UNLOCK(queue);
}
- queue_lf_term_global();
+ _odp_queue_lf_term_global();
- if (odp_shm_free(queue_glb->queue_gbl_shm)) {
+ if (odp_shm_free(_odp_queue_glb->queue_gbl_shm)) {
ODP_ERR("shm free failed");
ret = -1;
}
@@ -269,13 +269,13 @@ static odp_queue_t queue_create(const char *name,
}
if (param->nonblocking == ODP_BLOCKING) {
- if (param->size > queue_glb->config.max_queue_size)
+ if (param->size > _odp_queue_glb->config.max_queue_size)
return ODP_QUEUE_INVALID;
} else if (param->nonblocking == ODP_NONBLOCKING_LF) {
/* Only plain type lock-free queues supported */
if (type != ODP_QUEUE_TYPE_PLAIN)
return ODP_QUEUE_INVALID;
- if (param->size > queue_glb->queue_lf_size)
+ if (param->size > _odp_queue_glb->queue_lf_size)
return ODP_QUEUE_INVALID;
} else {
/* Wait-free queues not supported */
@@ -310,9 +310,9 @@ static odp_queue_t queue_create(const char *name,
param->nonblocking == ODP_NONBLOCKING_LF) {
queue_lf_func_t *lf_fn;
- lf_fn = &queue_glb->queue_lf_func;
+ lf_fn = &_odp_queue_glb->queue_lf_func;
- queue_lf = queue_lf_create(queue);
+ queue_lf = _odp_queue_lf_create(queue);
if (queue_lf == NULL) {
UNLOCK(queue);
@@ -343,8 +343,8 @@ static odp_queue_t queue_create(const char *name,
return ODP_QUEUE_INVALID;
if (type == ODP_QUEUE_TYPE_SCHED) {
- if (sched_fn->create_queue(queue->s.index,
- &queue->s.param.sched)) {
+ if (_odp_sched_fn->create_queue(queue->s.index,
+ &queue->s.param.sched)) {
queue->s.status = QUEUE_STATUS_FREE;
ODP_ERR("schedule queue init failed\n");
return ODP_QUEUE_INVALID;
@@ -354,7 +354,7 @@ static odp_queue_t queue_create(const char *name,
return handle;
}
-void sched_queue_set_status(uint32_t queue_index, int status)
+void _odp_sched_queue_set_status(uint32_t queue_index, int status)
{
queue_entry_t *queue = qentry_from_index(queue_index);
@@ -411,7 +411,7 @@ static int queue_destroy(odp_queue_t handle)
break;
case QUEUE_STATUS_NOTSCHED:
queue->s.status = QUEUE_STATUS_FREE;
- sched_fn->destroy_queue(queue->s.index);
+ _odp_sched_fn->destroy_queue(queue->s.index);
break;
case QUEUE_STATUS_SCHED:
/* Queue is still in scheduling */
@@ -422,7 +422,7 @@ static int queue_destroy(odp_queue_t handle)
}
if (queue->s.queue_lf)
- queue_lf_destroy(queue->s.queue_lf);
+ _odp_queue_lf_destroy(queue->s.queue_lf);
UNLOCK(queue);
@@ -471,7 +471,7 @@ static inline int _plain_queue_enq_multi(odp_queue_t handle,
queue = qentry_from_handle(handle);
ring_mpmc = queue->s.ring_mpmc;
- if (sched_fn->ord_enq_multi(handle, (void **)buf_hdr, num, &ret))
+ if (_odp_sched_fn->ord_enq_multi(handle, (void **)buf_hdr, num, &ret))
return ret;
num_enq = ring_mpmc_enq_multi(ring_mpmc, (void **)buf_hdr, num);
@@ -704,7 +704,7 @@ static void queue_print(odp_queue_t handle)
if (queue->s.queue_lf) {
ODP_PRINT(" implementation queue_lf\n");
ODP_PRINT(" length %" PRIu32 "/%" PRIu32 "\n",
- queue_lf_length(queue->s.queue_lf), queue_lf_max_length());
+ _odp_queue_lf_length(queue->s.queue_lf), _odp_queue_lf_max_length());
} else if (queue->s.spsc) {
ODP_PRINT(" implementation ring_spsc\n");
ODP_PRINT(" length %" PRIu32 "/%" PRIu32 "\n",
@@ -738,7 +738,7 @@ static inline int _sched_queue_enq_multi(odp_queue_t handle,
queue = qentry_from_handle(handle);
ring_st = queue->s.ring_st;
- if (sched_fn->ord_enq_multi(handle, (void **)buf_hdr, num, &ret))
+ if (_odp_sched_fn->ord_enq_multi(handle, (void **)buf_hdr, num, &ret))
return ret;
LOCK(queue);
@@ -758,14 +758,14 @@ static inline int _sched_queue_enq_multi(odp_queue_t handle,
UNLOCK(queue);
/* Add queue to scheduling */
- if (sched && sched_fn->sched_queue(queue->s.index))
+ if (sched && _odp_sched_fn->sched_queue(queue->s.index))
ODP_ABORT("schedule_queue failed\n");
return num_enq;
}
-int sched_queue_deq(uint32_t queue_index, odp_event_t ev[], int max_num,
- int update_status)
+int _odp_sched_queue_deq(uint32_t queue_index, odp_event_t ev[], int max_num,
+ int update_status)
{
int num_deq, status;
ring_st_t ring_st;
@@ -782,7 +782,7 @@ int sched_queue_deq(uint32_t queue_index, odp_event_t ev[], int max_num,
* Inform scheduler about a destroyed queue. */
if (queue->s.status == QUEUE_STATUS_DESTROYED) {
queue->s.status = QUEUE_STATUS_FREE;
- sched_fn->destroy_queue(queue_index);
+ _odp_sched_fn->destroy_queue(queue_index);
}
UNLOCK(queue);
@@ -824,7 +824,7 @@ static int sched_queue_enq(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr)
return -1;
}
-int sched_queue_empty(uint32_t queue_index)
+int _odp_sched_queue_empty(uint32_t queue_index)
{
queue_entry_t *queue = qentry_from_index(queue_index);
int ret = 0;
@@ -866,7 +866,7 @@ static int queue_init(queue_entry_t *queue, const char *name,
queue->s.name[ODP_QUEUE_NAME_LEN - 1] = 0;
}
memcpy(&queue->s.param, param, sizeof(odp_queue_param_t));
- if (queue->s.param.sched.lock_count > sched_fn->max_ordered_locks())
+ if (queue->s.param.sched.lock_count > _odp_sched_fn->max_ordered_locks())
return -1;
if (queue_type == ODP_QUEUE_TYPE_SCHED)
@@ -880,12 +880,12 @@ static int queue_init(queue_entry_t *queue, const char *name,
queue_size = param->size;
if (queue_size == 0)
- queue_size = queue_glb->config.default_queue_size;
+ queue_size = _odp_queue_glb->config.default_queue_size;
if (queue_size < MIN_QUEUE_SIZE)
queue_size = MIN_QUEUE_SIZE;
- if (queue_size > queue_glb->config.max_queue_size) {
+ if (queue_size > _odp_queue_glb->config.max_queue_size) {
ODP_ERR("Too large queue size %u\n", queue_size);
return -1;
}
@@ -914,7 +914,7 @@ static int queue_init(queue_entry_t *queue, const char *name,
queue->s.orig_dequeue_multi = error_dequeue_multi;
if (spsc) {
- queue_spsc_init(queue, queue_size);
+ _odp_queue_spsc_init(queue, queue_size);
} else {
if (queue_type == ODP_QUEUE_TYPE_PLAIN) {
queue->s.enqueue = plain_queue_enq;
diff --git a/platform/linux-dpdk/odp_queue_eventdev.c b/platform/linux-dpdk/odp_queue_eventdev.c
index 5fbf5d96b..195a0fc68 100644
--- a/platform/linux-dpdk/odp_queue_eventdev.c
+++ b/platform/linux-dpdk/odp_queue_eventdev.c
@@ -46,10 +46,10 @@
#define NUM_PRIO 8
/* Thread local eventdev context */
-__thread eventdev_local_t eventdev_local;
+__thread eventdev_local_t _odp_eventdev_local;
/* Global eventdev context */
-eventdev_global_t *eventdev_gbl;
+eventdev_global_t *_odp_eventdev_gbl;
extern _odp_queue_inline_offset_t _odp_queue_inline_offset;
@@ -67,15 +67,15 @@ static uint8_t event_queue_ids(odp_schedule_sync_t sync, uint8_t *first_id)
{
*first_id = 0;
if (sync == ODP_SCHED_SYNC_ATOMIC)
- return eventdev_gbl->event_queue.num_atomic;
+ return _odp_eventdev_gbl->event_queue.num_atomic;
- *first_id += eventdev_gbl->event_queue.num_atomic;
+ *first_id += _odp_eventdev_gbl->event_queue.num_atomic;
if (sync == ODP_SCHED_SYNC_PARALLEL)
- return eventdev_gbl->event_queue.num_parallel;
+ return _odp_eventdev_gbl->event_queue.num_parallel;
- *first_id += eventdev_gbl->event_queue.num_parallel;
+ *first_id += _odp_eventdev_gbl->event_queue.num_parallel;
if (sync == ODP_SCHED_SYNC_ORDERED)
- return eventdev_gbl->event_queue.num_ordered;
+ return _odp_eventdev_gbl->event_queue.num_ordered;
ODP_ABORT("Invalid schedule sync type\n");
return 0;
@@ -130,22 +130,22 @@ static int queue_capa(odp_queue_capability_t *capa, int sched ODP_UNUSED)
/* Reserve some queues for internal use */
capa->max_queues = CONFIG_MAX_QUEUES;
capa->plain.max_num = CONFIG_MAX_PLAIN_QUEUES;
- capa->plain.max_size = eventdev_gbl->plain_config.max_queue_size - 1;
+ capa->plain.max_size = _odp_eventdev_gbl->plain_config.max_queue_size - 1;
capa->plain.lockfree.max_num = 0;
capa->plain.lockfree.max_size = 0;
#if ODP_DEPRECATED_API
uint16_t max_sched;
- max_sched = RTE_MAX(RTE_MAX(eventdev_gbl->event_queue.num_atomic,
- eventdev_gbl->event_queue.num_ordered),
- eventdev_gbl->event_queue.num_parallel);
+ max_sched = RTE_MAX(RTE_MAX(_odp_eventdev_gbl->event_queue.num_atomic,
+ _odp_eventdev_gbl->event_queue.num_ordered),
+ _odp_eventdev_gbl->event_queue.num_parallel);
capa->sched.max_num = RTE_MIN(CONFIG_MAX_SCHED_QUEUES, max_sched);
- capa->sched.max_size = eventdev_gbl->config.nb_events_limit;
+ capa->sched.max_size = _odp_eventdev_gbl->config.nb_events_limit;
if (sched) {
- capa->max_ordered_locks = sched_fn->max_ordered_locks();
- capa->max_sched_groups = sched_fn->num_grps();
+ capa->max_ordered_locks = _odp_sched_fn->max_ordered_locks();
+ capa->max_sched_groups = _odp_sched_fn->num_grps();
capa->sched_prios = odp_schedule_num_prio();
}
#endif
@@ -298,7 +298,7 @@ static int queue_is_linked(uint8_t dev_id, uint8_t queue_id)
{
uint8_t i;
- for (i = 0; i < eventdev_gbl->config.nb_event_ports; i++) {
+ for (i = 0; i < _odp_eventdev_gbl->config.nb_event_ports; i++) {
uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
int num_links;
@@ -392,18 +392,18 @@ static int init_event_dev(void)
return -1;
}
- if (read_config_file(eventdev_gbl))
+ if (read_config_file(_odp_eventdev_gbl))
return -1;
- eventdev_gbl->dev_id = dev_id;
- eventdev_gbl->rx_adapter.id = rx_adapter_id;
- eventdev_gbl->rx_adapter.status = RX_ADAPTER_INIT;
- odp_ticketlock_init(&eventdev_gbl->rx_adapter.lock);
- odp_atomic_init_u32(&eventdev_gbl->num_started, 0);
+ _odp_eventdev_gbl->dev_id = dev_id;
+ _odp_eventdev_gbl->rx_adapter.id = rx_adapter_id;
+ _odp_eventdev_gbl->rx_adapter.status = RX_ADAPTER_INIT;
+ odp_ticketlock_init(&_odp_eventdev_gbl->rx_adapter.lock);
+ odp_atomic_init_u32(&_odp_eventdev_gbl->num_started, 0);
- odp_ticketlock_init(&eventdev_gbl->port_lock);
+ odp_ticketlock_init(&_odp_eventdev_gbl->port_lock);
for (i = 0; i < ODP_THREAD_COUNT_MAX; i++)
- eventdev_gbl->port[i].linked = 0;
+ _odp_eventdev_gbl->port[i].linked = 0;
if (rte_event_dev_info_get(dev_id, &info)) {
ODP_ERR("rte_event_dev_info_get failed\n");
@@ -411,26 +411,26 @@ static int init_event_dev(void)
}
print_dev_info(&info);
- eventdev_gbl->num_prio = RTE_MIN(NUM_PRIO,
- info.max_event_queue_priority_levels);
+ _odp_eventdev_gbl->num_prio = RTE_MIN(NUM_PRIO,
+ info.max_event_queue_priority_levels);
if (!(info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)) {
ODP_PRINT(" Only one QoS level supported!\n");
- eventdev_gbl->num_prio = 1;
+ _odp_eventdev_gbl->num_prio = 1;
}
memset(&config, 0, sizeof(struct rte_event_dev_config));
config.dequeue_timeout_ns = 0;
config.nb_events_limit = info.max_num_events;
- config.nb_event_queues = alloc_queues(eventdev_gbl, &info);
+ config.nb_event_queues = alloc_queues(_odp_eventdev_gbl, &info);
config.nb_event_ports = RTE_MIN(ODP_THREAD_COUNT_MAX,
(int)info.max_event_ports);
/* RX adapter requires additional port which is reserved when
* rte_event_eth_rx_adapter_queue_add() is called. */
config.nb_event_ports -= 1;
- if (eventdev_gbl->num_event_ports &&
- eventdev_gbl->num_event_ports < config.nb_event_ports)
- config.nb_event_ports = eventdev_gbl->num_event_ports;
+ if (_odp_eventdev_gbl->num_event_ports &&
+ _odp_eventdev_gbl->num_event_ports < config.nb_event_ports)
+ config.nb_event_ports = _odp_eventdev_gbl->num_event_ports;
num_flows = (EVENT_QUEUE_FLOWS < info.max_event_queue_flows) ?
EVENT_QUEUE_FLOWS : info.max_event_queue_flows;
@@ -450,8 +450,8 @@ static int init_event_dev(void)
ODP_ERR("rte_event_dev_configure failed\n");
return -1;
}
- eventdev_gbl->config = config;
- eventdev_gbl->num_event_ports = config.nb_event_ports;
+ _odp_eventdev_gbl->config = config;
+ _odp_eventdev_gbl->num_event_ports = config.nb_event_ports;
if (configure_ports(dev_id, &config)) {
ODP_ERR("Configuring eventdev ports failed\n");
@@ -496,19 +496,19 @@ static int init_event_dev(void)
}
/* Scheduling groups */
- odp_ticketlock_init(&eventdev_gbl->grp_lock);
+ odp_ticketlock_init(&_odp_eventdev_gbl->grp_lock);
for (i = 0; i < NUM_SCHED_GRPS; i++) {
- memset(eventdev_gbl->grp[i].name, 0,
+ memset(_odp_eventdev_gbl->grp[i].name, 0,
ODP_SCHED_GROUP_NAME_LEN);
- odp_thrmask_zero(&eventdev_gbl->grp[i].mask);
+ odp_thrmask_zero(&_odp_eventdev_gbl->grp[i].mask);
}
- eventdev_gbl->grp[ODP_SCHED_GROUP_ALL].allocated = 1;
- eventdev_gbl->grp[ODP_SCHED_GROUP_WORKER].allocated = 1;
- eventdev_gbl->grp[ODP_SCHED_GROUP_CONTROL].allocated = 1;
+ _odp_eventdev_gbl->grp[ODP_SCHED_GROUP_ALL].allocated = 1;
+ _odp_eventdev_gbl->grp[ODP_SCHED_GROUP_WORKER].allocated = 1;
+ _odp_eventdev_gbl->grp[ODP_SCHED_GROUP_CONTROL].allocated = 1;
- odp_thrmask_setall(&eventdev_gbl->mask_all);
+ odp_thrmask_setall(&_odp_eventdev_gbl->mask_all);
return 0;
}
@@ -532,13 +532,13 @@ static int queue_init_global(void)
sizeof(eventdev_global_t),
ODP_CACHE_LINE_SIZE, 0);
- eventdev_gbl = odp_shm_addr(shm);
+ _odp_eventdev_gbl = odp_shm_addr(shm);
- if (eventdev_gbl == NULL)
+ if (_odp_eventdev_gbl == NULL)
return -1;
- memset(eventdev_gbl, 0, sizeof(eventdev_global_t));
- eventdev_gbl->shm = shm;
+ memset(_odp_eventdev_gbl, 0, sizeof(eventdev_global_t));
+ _odp_eventdev_gbl->shm = shm;
if (init_event_dev())
return -1;
@@ -551,10 +551,10 @@ static int queue_init_global(void)
queue->s.index = i;
}
- max_queue_size = eventdev_gbl->config.nb_events_limit;
- eventdev_gbl->plain_config.default_queue_size = DEFAULT_QUEUE_SIZE;
- eventdev_gbl->plain_config.max_queue_size = MAX_QUEUE_SIZE;
- eventdev_gbl->sched_config.max_queue_size = max_queue_size;
+ max_queue_size = _odp_eventdev_gbl->config.nb_events_limit;
+ _odp_eventdev_gbl->plain_config.default_queue_size = DEFAULT_QUEUE_SIZE;
+ _odp_eventdev_gbl->plain_config.max_queue_size = MAX_QUEUE_SIZE;
+ _odp_eventdev_gbl->sched_config.max_queue_size = max_queue_size;
queue_capa(&capa, 0);
@@ -571,12 +571,12 @@ static int queue_init_local(void)
{
int thread_id = odp_thread_id();
- memset(&eventdev_local, 0, sizeof(eventdev_local_t));
+ memset(&_odp_eventdev_local, 0, sizeof(eventdev_local_t));
ODP_ASSERT(thread_id <= UINT8_MAX);
- eventdev_local.port_id = thread_id;
- eventdev_local.paused = 0;
- eventdev_local.started = 0;
+ _odp_eventdev_local.port_id = thread_id;
+ _odp_eventdev_local.paused = 0;
+ _odp_eventdev_local.started = 0;
return 0;
}
@@ -605,17 +605,17 @@ static int queue_term_global(void)
if (rx_adapter_close())
ret = -1;
- rte_event_dev_stop(eventdev_gbl->dev_id);
+ rte_event_dev_stop(_odp_eventdev_gbl->dev_id);
/* Fix for DPDK 17.11 sync bug */
sleep(1);
- if (rte_event_dev_close(eventdev_gbl->dev_id)) {
+ if (rte_event_dev_close(_odp_eventdev_gbl->dev_id)) {
ODP_ERR("Failed to close event device\n");
ret = -1;
}
- if (odp_shm_free(eventdev_gbl->shm)) {
+ if (odp_shm_free(_odp_eventdev_gbl->shm)) {
ODP_ERR("Shm free failed for evendev\n");
ret = -1;
}
@@ -679,10 +679,10 @@ static odp_queue_t queue_create(const char *name,
ODP_ERR("Bad queue priority: %i\n", param->sched.prio);
return ODP_QUEUE_INVALID;
}
- if (param->size > eventdev_gbl->sched_config.max_queue_size)
+ if (param->size > _odp_eventdev_gbl->sched_config.max_queue_size)
return ODP_QUEUE_INVALID;
} else {
- if (param->size > eventdev_gbl->plain_config.max_queue_size)
+ if (param->size > _odp_eventdev_gbl->plain_config.max_queue_size)
return ODP_QUEUE_INVALID;
}
@@ -738,8 +738,8 @@ static odp_queue_t queue_create(const char *name,
}
if (type == ODP_QUEUE_TYPE_SCHED) {
- if (sched_fn->create_queue(queue->s.index,
- &queue->s.param.sched)) {
+ if (_odp_sched_fn->create_queue(queue->s.index,
+ &queue->s.param.sched)) {
queue->s.status = QUEUE_STATUS_FREE;
ODP_ERR("schedule queue init failed\n");
return ODP_QUEUE_INVALID;
@@ -779,7 +779,7 @@ static int queue_destroy(odp_queue_t handle)
break;
case QUEUE_STATUS_SCHED:
queue->s.status = QUEUE_STATUS_FREE;
- sched_fn->destroy_queue(queue->s.index);
+ _odp_sched_fn->destroy_queue(queue->s.index);
break;
default:
ODP_ABORT("Unexpected queue status\n");
@@ -1073,8 +1073,8 @@ static inline int _sched_queue_enq_multi(odp_queue_t handle,
queue_entry_t *queue;
struct rte_event ev[CONFIG_BURST_SIZE];
uint16_t num_enq = 0;
- uint8_t dev_id = eventdev_gbl->dev_id;
- uint8_t port_id = eventdev_local.port_id;
+ uint8_t dev_id = _odp_eventdev_gbl->dev_id;
+ uint8_t port_id = _odp_eventdev_local.port_id;
uint8_t sched;
uint8_t queue_id;
uint8_t priority;
@@ -1096,9 +1096,9 @@ static inline int _sched_queue_enq_multi(odp_queue_t handle,
UNLOCK(queue);
- if (odp_unlikely(port_id >= eventdev_gbl->num_event_ports)) {
+ if (odp_unlikely(port_id >= _odp_eventdev_gbl->num_event_ports)) {
ODP_ERR("Max %" PRIu8 " scheduled workers supported\n",
- eventdev_gbl->num_event_ports);
+ _odp_eventdev_gbl->num_event_ports);
return 0;
}
@@ -1151,7 +1151,7 @@ static int queue_init(queue_entry_t *queue, const char *name,
queue->s.name[ODP_QUEUE_NAME_LEN - 1] = 0;
}
memcpy(&queue->s.param, param, sizeof(odp_queue_param_t));
- if (queue->s.param.sched.lock_count > sched_fn->max_ordered_locks())
+ if (queue->s.param.sched.lock_count > _odp_sched_fn->max_ordered_locks())
return -1;
/* Convert ODP priority to eventdev priority:
@@ -1169,13 +1169,13 @@ static int queue_init(queue_entry_t *queue, const char *name,
queue_size = param->size;
if (queue_size == 0)
- queue_size = eventdev_gbl->plain_config.default_queue_size;
+ queue_size = _odp_eventdev_gbl->plain_config.default_queue_size;
if (queue_size < MIN_QUEUE_SIZE)
queue_size = MIN_QUEUE_SIZE;
if (queue_type == ODP_QUEUE_TYPE_PLAIN &&
- queue_size > eventdev_gbl->plain_config.max_queue_size) {
+ queue_size > _odp_eventdev_gbl->plain_config.max_queue_size) {
ODP_ERR("Too large queue size %u\n", queue_size);
return -1;
}
diff --git a/platform/linux-dpdk/odp_queue_if.c b/platform/linux-dpdk/odp_queue_if.c
index a520b27ae..f19716d73 100644
--- a/platform/linux-dpdk/odp_queue_if.c
+++ b/platform/linux-dpdk/odp_queue_if.c
@@ -29,7 +29,7 @@ extern const queue_fn_t queue_basic_fn;
extern const _odp_queue_api_fn_t queue_eventdev_api;
extern const queue_fn_t queue_eventdev_fn;
-const queue_fn_t *queue_fn;
+const queue_fn_t *_odp_queue_fn;
odp_queue_t odp_queue_create(const char *name, const odp_queue_param_t *param)
{
@@ -109,20 +109,20 @@ int _odp_queue_init_global(void)
sched = _ODP_SCHEDULE_DEFAULT;
if (!strcmp(sched, "basic") || !strcmp(sched, "sp")) {
- queue_fn = &queue_basic_fn;
+ _odp_queue_fn = &queue_basic_fn;
_odp_queue_api = &queue_basic_api;
} else if (!strcmp(sched, "eventdev")) {
- queue_fn = &queue_eventdev_fn;
+ _odp_queue_fn = &queue_eventdev_fn;
_odp_queue_api = &queue_eventdev_api;
} else {
ODP_ABORT("Unknown scheduler specified via ODP_SCHEDULER\n");
return -1;
}
- return queue_fn->init_global();
+ return _odp_queue_fn->init_global();
}
int _odp_queue_term_global(void)
{
- return queue_fn->term_global();
+ return _odp_queue_fn->term_global();
}
diff --git a/platform/linux-dpdk/odp_queue_spsc.c b/platform/linux-dpdk/odp_queue_spsc.c
index 7373c12f7..d07451042 100644
--- a/platform/linux-dpdk/odp_queue_spsc.c
+++ b/platform/linux-dpdk/odp_queue_spsc.c
@@ -79,7 +79,7 @@ static odp_buffer_hdr_t *queue_spsc_deq(odp_queue_t handle)
return NULL;
}
-void queue_spsc_init(queue_entry_t *queue, uint32_t queue_size)
+void _odp_queue_spsc_init(queue_entry_t *queue, uint32_t queue_size)
{
queue->s.enqueue = queue_spsc_enq;
queue->s.dequeue = queue_spsc_deq;
diff --git a/platform/linux-dpdk/odp_schedule_eventdev.c b/platform/linux-dpdk/odp_schedule_eventdev.c
index ef80d2916..4ab235d70 100644
--- a/platform/linux-dpdk/odp_schedule_eventdev.c
+++ b/platform/linux-dpdk/odp_schedule_eventdev.c
@@ -51,10 +51,10 @@ static int link_port(uint8_t dev_id, uint8_t port_id, uint8_t queue_ids[],
{
int ret;
- odp_ticketlock_lock(&eventdev_gbl->port_lock);
+ odp_ticketlock_lock(&_odp_eventdev_gbl->port_lock);
- if (!eventdev_gbl->port[port_id].linked && !link_now) {
- odp_ticketlock_unlock(&eventdev_gbl->port_lock);
+ if (!_odp_eventdev_gbl->port[port_id].linked && !link_now) {
+ odp_ticketlock_unlock(&_odp_eventdev_gbl->port_lock);
return 0;
}
@@ -62,13 +62,13 @@ static int link_port(uint8_t dev_id, uint8_t port_id, uint8_t queue_ids[],
nb_links);
if (ret < 0 || (queue_ids && ret != nb_links)) {
ODP_ERR("rte_event_port_link failed: %d\n", ret);
- odp_ticketlock_unlock(&eventdev_gbl->port_lock);
+ odp_ticketlock_unlock(&_odp_eventdev_gbl->port_lock);
return ret;
}
- eventdev_gbl->port[port_id].linked = 1;
+ _odp_eventdev_gbl->port[port_id].linked = 1;
- odp_ticketlock_unlock(&eventdev_gbl->port_lock);
+ odp_ticketlock_unlock(&_odp_eventdev_gbl->port_lock);
return ret;
}
@@ -78,17 +78,17 @@ static int unlink_port(uint8_t dev_id, uint8_t port_id, uint8_t queue_ids[],
{
int ret;
- odp_ticketlock_lock(&eventdev_gbl->port_lock);
+ odp_ticketlock_lock(&_odp_eventdev_gbl->port_lock);
- if (!eventdev_gbl->port[port_id].linked) {
- odp_ticketlock_unlock(&eventdev_gbl->port_lock);
+ if (!_odp_eventdev_gbl->port[port_id].linked) {
+ odp_ticketlock_unlock(&_odp_eventdev_gbl->port_lock);
return 0;
}
ret = rte_event_port_unlink(dev_id, port_id, queue_ids, nb_links);
if (ret < 0) {
ODP_ERR("rte_event_port_unlink failed: %d\n", ret);
- odp_ticketlock_unlock(&eventdev_gbl->port_lock);
+ odp_ticketlock_unlock(&_odp_eventdev_gbl->port_lock);
return ret;
}
@@ -104,9 +104,9 @@ static int unlink_port(uint8_t dev_id, uint8_t port_id, uint8_t queue_ids[],
} while (ret > 0);
#endif
if (queue_ids == NULL)
- eventdev_gbl->port[port_id].linked = 0;
+ _odp_eventdev_gbl->port[port_id].linked = 0;
- odp_ticketlock_unlock(&eventdev_gbl->port_lock);
+ odp_ticketlock_unlock(&_odp_eventdev_gbl->port_lock);
return ret;
}
@@ -119,18 +119,18 @@ static int resume_scheduling(uint8_t dev_id, uint8_t port_id)
int ret;
int i;
- odp_ticketlock_lock(&eventdev_gbl->grp_lock);
+ odp_ticketlock_lock(&_odp_eventdev_gbl->grp_lock);
for (i = 0; i < NUM_SCHED_GRPS; i++) {
int j;
- if (!eventdev_gbl->grp[i].allocated ||
- !odp_thrmask_isset(&eventdev_gbl->grp[i].mask,
- eventdev_local.port_id))
+ if (!_odp_eventdev_gbl->grp[i].allocated ||
+ !odp_thrmask_isset(&_odp_eventdev_gbl->grp[i].mask,
+ _odp_eventdev_local.port_id))
continue;
for (j = 0; j < RTE_EVENT_MAX_QUEUES_PER_DEV; j++) {
- queue_entry_t *queue = eventdev_gbl->grp[i].queue[j];
+ queue_entry_t *queue = _odp_eventdev_gbl->grp[i].queue[j];
if (!queue)
continue;
@@ -141,7 +141,7 @@ static int resume_scheduling(uint8_t dev_id, uint8_t port_id)
}
}
- odp_ticketlock_unlock(&eventdev_gbl->grp_lock);
+ odp_ticketlock_unlock(&_odp_eventdev_gbl->grp_lock);
if (!nb_links)
return 0;
@@ -150,9 +150,9 @@ static int resume_scheduling(uint8_t dev_id, uint8_t port_id)
if (ret != nb_links)
return -1;
- if (eventdev_local.started == 0) {
- odp_atomic_inc_u32(&eventdev_gbl->num_started);
- eventdev_local.started = 1;
+ if (_odp_eventdev_local.started == 0) {
+ odp_atomic_inc_u32(&_odp_eventdev_gbl->num_started);
+ _odp_eventdev_local.started = 1;
}
return 0;
@@ -161,7 +161,7 @@ static int resume_scheduling(uint8_t dev_id, uint8_t port_id)
static int link_group(int group, const odp_thrmask_t *mask, odp_bool_t unlink)
{
odp_thrmask_t new_mask;
- uint8_t dev_id = eventdev_gbl->dev_id;
+ uint8_t dev_id = _odp_eventdev_gbl->dev_id;
uint8_t queue_ids[RTE_EVENT_MAX_QUEUES_PER_DEV];
uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
int nb_links = 0;
@@ -170,7 +170,7 @@ static int link_group(int group, const odp_thrmask_t *mask, odp_bool_t unlink)
int i;
for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
- queue_entry_t *queue = eventdev_gbl->grp[group].queue[i];
+ queue_entry_t *queue = _odp_eventdev_gbl->grp[group].queue[i];
if (queue == NULL)
continue;
@@ -214,7 +214,7 @@ static int rx_adapter_create(uint8_t dev_id, uint8_t rx_adapter_id,
return -1;
}
if ((capa & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) == 0)
- eventdev_gbl->rx_adapter.single_queue = 1;
+ _odp_eventdev_gbl->rx_adapter.single_queue = 1;
memset(&port_config, 0, sizeof(struct rte_event_port_conf));
port_config.new_event_threshold = config->nb_events_limit;
@@ -227,7 +227,7 @@ static int rx_adapter_create(uint8_t dev_id, uint8_t rx_adapter_id,
return -1;
}
- eventdev_gbl->rx_adapter.status = RX_ADAPTER_STOPPED;
+ _odp_eventdev_gbl->rx_adapter.status = RX_ADAPTER_STOPPED;
return 0;
}
@@ -236,13 +236,13 @@ static int rx_adapter_add_queues(uint8_t rx_adapter_id, uint8_t port_id,
int num_pktin, int pktin_idx[],
odp_queue_t queues[])
{
- int num_dummy_links = eventdev_gbl->config.nb_event_queues;
+ int num_dummy_links = _odp_eventdev_gbl->config.nb_event_queues;
uint8_t dummy_links[num_dummy_links];
int ret = 0;
int i;
/* SW eventdev requires that all queues have ports linked */
- num_dummy_links = dummy_link_queues(eventdev_gbl->dev_id, dummy_links,
+ num_dummy_links = dummy_link_queues(_odp_eventdev_gbl->dev_id, dummy_links,
num_dummy_links);
for (i = 0; i < num_pktin; i++) {
@@ -263,7 +263,7 @@ static int rx_adapter_add_queues(uint8_t rx_adapter_id, uint8_t port_id,
qconf.rx_queue_flags = 0;
qconf.servicing_weight = 1;
- if (eventdev_gbl->rx_adapter.single_queue)
+ if (_odp_eventdev_gbl->rx_adapter.single_queue)
rx_queue_id = -1;
ret = rte_event_eth_rx_adapter_queue_add(rx_adapter_id, port_id,
@@ -273,11 +273,11 @@ static int rx_adapter_add_queues(uint8_t rx_adapter_id, uint8_t port_id,
return -1;
}
- if (eventdev_gbl->rx_adapter.single_queue)
+ if (_odp_eventdev_gbl->rx_adapter.single_queue)
break;
}
- if (dummy_unlink_queues(eventdev_gbl->dev_id, dummy_links,
+ if (dummy_unlink_queues(_odp_eventdev_gbl->dev_id, dummy_links,
num_dummy_links))
return -1;
@@ -287,13 +287,13 @@ static int rx_adapter_add_queues(uint8_t rx_adapter_id, uint8_t port_id,
int rx_adapter_close(void)
{
uint16_t port_id;
- uint8_t rx_adapter_id = eventdev_gbl->rx_adapter.id;
+ uint8_t rx_adapter_id = _odp_eventdev_gbl->rx_adapter.id;
int ret = 0;
- if (eventdev_gbl->rx_adapter.status == RX_ADAPTER_INIT)
+ if (_odp_eventdev_gbl->rx_adapter.status == RX_ADAPTER_INIT)
return ret;
- if (eventdev_gbl->rx_adapter.status != RX_ADAPTER_STOPPED &&
+ if (_odp_eventdev_gbl->rx_adapter.status != RX_ADAPTER_STOPPED &&
rte_event_eth_rx_adapter_stop(rx_adapter_id)) {
ODP_ERR("Failed to stop RX adapter\n");
ret = -1;
@@ -303,14 +303,14 @@ int rx_adapter_close(void)
rte_eth_dev_close(port_id);
}
- eventdev_gbl->rx_adapter.status = RX_ADAPTER_INIT;
+ _odp_eventdev_gbl->rx_adapter.status = RX_ADAPTER_INIT;
return ret;
}
void rx_adapter_port_stop(uint16_t port_id)
{
- uint8_t rx_adapter_id = eventdev_gbl->rx_adapter.id;
+ uint8_t rx_adapter_id = _odp_eventdev_gbl->rx_adapter.id;
if (rte_event_eth_rx_adapter_queue_del(rx_adapter_id, port_id, -1))
ODP_ERR("Failed to delete RX queue\n");
@@ -351,7 +351,7 @@ static inline int schedule_min_prio(void)
static inline int schedule_max_prio(void)
{
- return eventdev_gbl->num_prio - 1;
+ return _odp_eventdev_gbl->num_prio - 1;
}
static inline int schedule_default_prio(void)
@@ -364,22 +364,27 @@ static int schedule_create_queue(uint32_t qi,
{
queue_entry_t *queue = qentry_from_index(qi);
odp_thrmask_t mask;
- uint8_t dev_id = eventdev_gbl->dev_id;
+ uint8_t dev_id = _odp_eventdev_gbl->dev_id;
uint8_t queue_id = queue->s.index;
uint8_t priority = queue->s.eventdev.prio;
int thr;
- odp_ticketlock_lock(&eventdev_gbl->grp_lock);
+ if (sched_param->group < 0 || sched_param->group >= NUM_SCHED_GRPS) {
+ ODP_ERR("Bad schedule group\n");
+ return -1;
+ }
+
+ odp_ticketlock_lock(&_odp_eventdev_gbl->grp_lock);
- eventdev_gbl->grp[sched_param->group].queue[queue_id] = queue;
+ _odp_eventdev_gbl->grp[sched_param->group].queue[queue_id] = queue;
- mask = eventdev_gbl->grp[sched_param->group].mask;
+ mask = _odp_eventdev_gbl->grp[sched_param->group].mask;
thr = odp_thrmask_first(&mask);
while (0 <= thr) {
link_port(dev_id, thr, &queue_id, &priority, 1, 0);
thr = odp_thrmask_next(&mask, thr);
}
- odp_ticketlock_unlock(&eventdev_gbl->grp_lock);
+ odp_ticketlock_unlock(&_odp_eventdev_gbl->grp_lock);
return 0;
}
@@ -389,21 +394,21 @@ static void schedule_destroy_queue(uint32_t qi)
queue_entry_t *queue = qentry_from_index(qi);
odp_thrmask_t mask;
odp_schedule_group_t group = queue->s.param.sched.group;
- uint8_t dev_id = eventdev_gbl->dev_id;
+ uint8_t dev_id = _odp_eventdev_gbl->dev_id;
uint8_t queue_id = queue->s.index;
int thr;
- odp_ticketlock_lock(&eventdev_gbl->grp_lock);
+ odp_ticketlock_lock(&_odp_eventdev_gbl->grp_lock);
- eventdev_gbl->grp[group].queue[queue_id] = NULL;
+ _odp_eventdev_gbl->grp[group].queue[queue_id] = NULL;
- mask = eventdev_gbl->grp[group].mask;
+ mask = _odp_eventdev_gbl->grp[group].mask;
thr = odp_thrmask_first(&mask);
while (0 <= thr) {
unlink_port(dev_id, thr, &queue_id, 1);
thr = odp_thrmask_next(&mask, thr);
}
- odp_ticketlock_unlock(&eventdev_gbl->grp_lock);
+ odp_ticketlock_unlock(&_odp_eventdev_gbl->grp_lock);
}
static void schedule_pktio_start(int pktio_index, int num_pktin,
@@ -411,31 +416,31 @@ static void schedule_pktio_start(int pktio_index, int num_pktin,
{
pktio_entry_t *entry = get_pktio_entry(index_to_pktio(pktio_index));
uint16_t port_id = dpdk_pktio_port_id(entry);
- uint8_t rx_adapter_id = eventdev_gbl->rx_adapter.id;
+ uint8_t rx_adapter_id = _odp_eventdev_gbl->rx_adapter.id;
/* All eventdev pktio devices should be started before calling
* odp_schedule(). This is due to the SW eventdev requirement that all
* event queues are linked when rte_event_eth_rx_adapter_queue_add() is
* called. */
- if (odp_atomic_load_u32(&eventdev_gbl->num_started))
+ if (odp_atomic_load_u32(&_odp_eventdev_gbl->num_started))
ODP_PRINT("All ODP pktio devices used by the scheduler should "
"be started before calling odp_schedule() for the "
"first time.\n");
- eventdev_gbl->pktio[port_id] = entry;
+ _odp_eventdev_gbl->pktio[port_id] = entry;
- odp_ticketlock_lock(&eventdev_gbl->rx_adapter.lock);
+ odp_ticketlock_lock(&_odp_eventdev_gbl->rx_adapter.lock);
- if (eventdev_gbl->rx_adapter.status == RX_ADAPTER_INIT &&
- rx_adapter_create(eventdev_gbl->dev_id, rx_adapter_id,
- &eventdev_gbl->config))
+ if (_odp_eventdev_gbl->rx_adapter.status == RX_ADAPTER_INIT &&
+ rx_adapter_create(_odp_eventdev_gbl->dev_id, rx_adapter_id,
+ &_odp_eventdev_gbl->config))
ODP_ABORT("Creating eventdev RX adapter failed\n");
if (rx_adapter_add_queues(rx_adapter_id, port_id, num_pktin, pktin_idx,
queue))
ODP_ABORT("Adding RX adapter queues failed\n");
- if (eventdev_gbl->rx_adapter.status == RX_ADAPTER_STOPPED) {
+ if (_odp_eventdev_gbl->rx_adapter.status == RX_ADAPTER_STOPPED) {
uint32_t service_id = 0;
int ret;
@@ -451,10 +456,10 @@ static void schedule_pktio_start(int pktio_index, int num_pktin,
if (rte_event_eth_rx_adapter_start(rx_adapter_id))
ODP_ABORT("Unable to start RX adapter\n");
- eventdev_gbl->rx_adapter.status = RX_ADAPTER_RUNNING;
+ _odp_eventdev_gbl->rx_adapter.status = RX_ADAPTER_RUNNING;
}
- odp_ticketlock_unlock(&eventdev_gbl->rx_adapter.lock);
+ odp_ticketlock_unlock(&_odp_eventdev_gbl->rx_adapter.lock);
}
static inline int classify_pkts(odp_packet_t packets[], int num)
@@ -542,11 +547,11 @@ static inline uint16_t event_input(struct rte_event ev[], odp_event_t out_ev[],
if (odp_unlikely(event->queue_id != first_queue)) {
uint16_t cache_idx, j;
- eventdev_local.cache.idx = 0;
+ _odp_eventdev_local.cache.idx = 0;
for (j = i; j < nb_events; j++) {
- cache_idx = eventdev_local.cache.count;
- eventdev_local.cache.event[cache_idx] = ev[j];
- eventdev_local.cache.count++;
+ cache_idx = _odp_eventdev_local.cache.count;
+ _odp_eventdev_local.cache.event[cache_idx] = ev[j];
+ _odp_eventdev_local.cache.count++;
}
break;
}
@@ -561,7 +566,7 @@ static inline uint16_t event_input(struct rte_event ev[], odp_event_t out_ev[],
}
if (num_pkts) {
- pktio_entry_t *entry = eventdev_gbl->pktio[pkt_table[0]->port];
+ pktio_entry_t *entry = _odp_eventdev_gbl->pktio[pkt_table[0]->port];
num_pkts = input_pkts(entry, (odp_packet_t *)pkt_table,
num_pkts);
@@ -585,19 +590,19 @@ static inline uint16_t input_cached(odp_event_t out_ev[], unsigned int max_num,
odp_queue_t *out_queue)
{
struct rte_event ev[max_num];
- uint16_t idx = eventdev_local.cache.idx;
+ uint16_t idx = _odp_eventdev_local.cache.idx;
uint16_t i;
- uint8_t first_queue = eventdev_local.cache.event[idx].queue_id;
+ uint8_t first_queue = _odp_eventdev_local.cache.event[idx].queue_id;
- for (i = 0; i < max_num && eventdev_local.cache.count; i++) {
- uint16_t idx = eventdev_local.cache.idx;
- struct rte_event *event = &eventdev_local.cache.event[idx];
+ for (i = 0; i < max_num && _odp_eventdev_local.cache.count; i++) {
+ uint16_t idx = _odp_eventdev_local.cache.idx;
+ struct rte_event *event = &_odp_eventdev_local.cache.event[idx];
if (odp_unlikely(event->queue_id != first_queue))
break;
- eventdev_local.cache.idx++;
- eventdev_local.cache.count--;
+ _odp_eventdev_local.cache.idx++;
+ _odp_eventdev_local.cache.count--;
ev[i] = *event;
}
@@ -611,18 +616,18 @@ static inline int schedule_loop(odp_queue_t *out_queue, uint64_t wait,
struct rte_event ev[max_num];
int first = 1;
uint16_t num_deq;
- uint8_t dev_id = eventdev_gbl->dev_id;
- uint8_t port_id = eventdev_local.port_id;
+ uint8_t dev_id = _odp_eventdev_gbl->dev_id;
+ uint8_t port_id = _odp_eventdev_local.port_id;
- if (odp_unlikely(port_id >= eventdev_gbl->num_event_ports)) {
+ if (odp_unlikely(port_id >= _odp_eventdev_gbl->num_event_ports)) {
ODP_ERR("Max %" PRIu8 " scheduled workers supported\n",
- eventdev_gbl->num_event_ports);
+ _odp_eventdev_gbl->num_event_ports);
return 0;
}
/* Check that port is linked */
- if (odp_unlikely(!eventdev_gbl->port[port_id].linked &&
- !eventdev_local.paused)) {
+ if (odp_unlikely(!_odp_eventdev_gbl->port[port_id].linked &&
+ !_odp_eventdev_local.paused)) {
if (resume_scheduling(dev_id, port_id))
return 0;
}
@@ -630,7 +635,7 @@ static inline int schedule_loop(odp_queue_t *out_queue, uint64_t wait,
if (odp_unlikely(max_num > MAX_SCHED_BURST))
max_num = MAX_SCHED_BURST;
- if (odp_unlikely(eventdev_local.cache.count)) {
+ if (odp_unlikely(_odp_eventdev_local.cache.count)) {
num_deq = input_cached(out_ev, max_num, out_queue);
} else {
while (1) {
@@ -700,19 +705,19 @@ static int schedule_multi_no_wait(odp_queue_t *out_queue, odp_event_t events[],
static void schedule_pause(void)
{
- if (unlink_port(eventdev_gbl->dev_id,
- eventdev_local.port_id, NULL, 0) < 0)
+ if (unlink_port(_odp_eventdev_gbl->dev_id,
+ _odp_eventdev_local.port_id, NULL, 0) < 0)
ODP_ERR("Unable to pause scheduling\n");
- eventdev_local.paused = 1;
+ _odp_eventdev_local.paused = 1;
}
static void schedule_resume(void)
{
- if (resume_scheduling(eventdev_gbl->dev_id, eventdev_local.port_id))
+ if (resume_scheduling(_odp_eventdev_gbl->dev_id, _odp_eventdev_local.port_id))
ODP_ERR("Unable to resume scheduling\n");
- eventdev_local.paused = 0;
+ _odp_eventdev_local.paused = 0;
}
static void schedule_release_atomic(void)
@@ -730,7 +735,7 @@ static uint64_t schedule_wait_time(uint64_t ns)
static inline void grp_update_mask(int grp, const odp_thrmask_t *new_mask)
{
- odp_thrmask_copy(&eventdev_gbl->grp[grp].mask, new_mask);
+ odp_thrmask_copy(&_odp_eventdev_gbl->grp[grp].mask, new_mask);
}
static int schedule_thr_add(odp_schedule_group_t group, int thr)
@@ -744,12 +749,12 @@ static int schedule_thr_add(odp_schedule_group_t group, int thr)
odp_thrmask_zero(&mask);
odp_thrmask_set(&mask, thr);
- odp_ticketlock_lock(&eventdev_gbl->grp_lock);
+ odp_ticketlock_lock(&_odp_eventdev_gbl->grp_lock);
- odp_thrmask_or(&new_mask, &eventdev_gbl->grp[group].mask, &mask);
+ odp_thrmask_or(&new_mask, &_odp_eventdev_gbl->grp[group].mask, &mask);
grp_update_mask(group, &new_mask);
- odp_ticketlock_unlock(&eventdev_gbl->grp_lock);
+ odp_ticketlock_unlock(&_odp_eventdev_gbl->grp_lock);
return 0;
}
@@ -764,17 +769,17 @@ static int schedule_thr_rem(odp_schedule_group_t group, int thr)
odp_thrmask_zero(&mask);
odp_thrmask_set(&mask, thr);
- odp_thrmask_xor(&new_mask, &mask, &eventdev_gbl->mask_all);
+ odp_thrmask_xor(&new_mask, &mask, &_odp_eventdev_gbl->mask_all);
- odp_ticketlock_lock(&eventdev_gbl->grp_lock);
+ odp_ticketlock_lock(&_odp_eventdev_gbl->grp_lock);
- odp_thrmask_and(&new_mask, &eventdev_gbl->grp[group].mask,
+ odp_thrmask_and(&new_mask, &_odp_eventdev_gbl->grp[group].mask,
&new_mask);
grp_update_mask(group, &new_mask);
- unlink_port(eventdev_gbl->dev_id, thr, NULL, 0);
+ unlink_port(_odp_eventdev_gbl->dev_id, thr, NULL, 0);
- odp_ticketlock_unlock(&eventdev_gbl->grp_lock);
+ odp_ticketlock_unlock(&_odp_eventdev_gbl->grp_lock);
return 0;
}
@@ -786,12 +791,12 @@ static void schedule_prefetch(int num ODP_UNUSED)
static int schedule_num_prio(void)
{
- return eventdev_gbl->num_prio;
+ return _odp_eventdev_gbl->num_prio;
}
static int schedule_num_grps(void)
{
- return NUM_SCHED_GRPS;
+ return NUM_SCHED_GRPS - SCHED_GROUP_NAMED;
}
static odp_schedule_group_t schedule_group_create(const char *name,
@@ -800,11 +805,11 @@ static odp_schedule_group_t schedule_group_create(const char *name,
odp_schedule_group_t group = ODP_SCHED_GROUP_INVALID;
int i;
- odp_ticketlock_lock(&eventdev_gbl->grp_lock);
+ odp_ticketlock_lock(&_odp_eventdev_gbl->grp_lock);
for (i = SCHED_GROUP_NAMED; i < NUM_SCHED_GRPS; i++) {
- if (!eventdev_gbl->grp[i].allocated) {
- char *grp_name = eventdev_gbl->grp[i].name;
+ if (!_odp_eventdev_gbl->grp[i].allocated) {
+ char *grp_name = _odp_eventdev_gbl->grp[i].name;
if (name == NULL) {
grp_name[0] = 0;
@@ -816,12 +821,12 @@ static odp_schedule_group_t schedule_group_create(const char *name,
grp_update_mask(i, mask);
group = (odp_schedule_group_t)i;
- eventdev_gbl->grp[i].allocated = 1;
+ _odp_eventdev_gbl->grp[i].allocated = 1;
break;
}
}
- odp_ticketlock_unlock(&eventdev_gbl->grp_lock);
+ odp_ticketlock_unlock(&_odp_eventdev_gbl->grp_lock);
return group;
}
@@ -832,20 +837,20 @@ static int schedule_group_destroy(odp_schedule_group_t group)
odp_thrmask_zero(&zero);
- odp_ticketlock_lock(&eventdev_gbl->grp_lock);
+ odp_ticketlock_lock(&_odp_eventdev_gbl->grp_lock);
if (group < NUM_SCHED_GRPS && group >= SCHED_GROUP_NAMED &&
- eventdev_gbl->grp[group].allocated) {
+ _odp_eventdev_gbl->grp[group].allocated) {
grp_update_mask(group, &zero);
- memset(eventdev_gbl->grp[group].name, 0,
+ memset(_odp_eventdev_gbl->grp[group].name, 0,
ODP_SCHED_GROUP_NAME_LEN);
- eventdev_gbl->grp[group].allocated = 0;
+ _odp_eventdev_gbl->grp[group].allocated = 0;
ret = 0;
} else {
ret = -1;
}
- odp_ticketlock_unlock(&eventdev_gbl->grp_lock);
+ odp_ticketlock_unlock(&_odp_eventdev_gbl->grp_lock);
return ret;
}
@@ -854,16 +859,16 @@ static odp_schedule_group_t schedule_group_lookup(const char *name)
odp_schedule_group_t group = ODP_SCHED_GROUP_INVALID;
int i;
- odp_ticketlock_lock(&eventdev_gbl->grp_lock);
+ odp_ticketlock_lock(&_odp_eventdev_gbl->grp_lock);
for (i = SCHED_GROUP_NAMED; i < NUM_SCHED_GRPS; i++) {
- if (strcmp(name, eventdev_gbl->grp[i].name) == 0) {
+ if (strcmp(name, _odp_eventdev_gbl->grp[i].name) == 0) {
group = (odp_schedule_group_t)i;
break;
}
}
- odp_ticketlock_unlock(&eventdev_gbl->grp_lock);
+ odp_ticketlock_unlock(&_odp_eventdev_gbl->grp_lock);
return group;
}
@@ -872,17 +877,17 @@ static int schedule_group_join(odp_schedule_group_t group,
{
int ret = 0;
- odp_ticketlock_lock(&eventdev_gbl->grp_lock);
+ odp_ticketlock_lock(&_odp_eventdev_gbl->grp_lock);
if (group < NUM_SCHED_GRPS && group >= SCHED_GROUP_NAMED &&
- eventdev_gbl->grp[group].allocated) {
+ _odp_eventdev_gbl->grp[group].allocated) {
odp_thrmask_t new_mask;
odp_thrmask_t link_mask;
- odp_thrmask_and(&link_mask, &eventdev_gbl->grp[group].mask,
+ odp_thrmask_and(&link_mask, &_odp_eventdev_gbl->grp[group].mask,
mask);
odp_thrmask_xor(&link_mask, &link_mask, mask);
- odp_thrmask_or(&new_mask, &eventdev_gbl->grp[group].mask,
+ odp_thrmask_or(&new_mask, &_odp_eventdev_gbl->grp[group].mask,
mask);
grp_update_mask(group, &new_mask);
@@ -891,7 +896,7 @@ static int schedule_group_join(odp_schedule_group_t group,
ret = -1;
}
- odp_ticketlock_unlock(&eventdev_gbl->grp_lock);
+ odp_ticketlock_unlock(&_odp_eventdev_gbl->grp_lock);
return ret;
}
@@ -902,16 +907,16 @@ static int schedule_group_leave(odp_schedule_group_t group,
odp_thrmask_t unlink_mask;
int ret = 0;
- odp_thrmask_xor(&new_mask, mask, &eventdev_gbl->mask_all);
- odp_thrmask_and(&unlink_mask, mask, &eventdev_gbl->mask_all);
+ odp_thrmask_xor(&new_mask, mask, &_odp_eventdev_gbl->mask_all);
+ odp_thrmask_and(&unlink_mask, mask, &_odp_eventdev_gbl->mask_all);
- odp_ticketlock_lock(&eventdev_gbl->grp_lock);
+ odp_ticketlock_lock(&_odp_eventdev_gbl->grp_lock);
if (group < NUM_SCHED_GRPS && group >= SCHED_GROUP_NAMED &&
- eventdev_gbl->grp[group].allocated) {
- odp_thrmask_and(&unlink_mask, &eventdev_gbl->grp[group].mask,
+ _odp_eventdev_gbl->grp[group].allocated) {
+ odp_thrmask_and(&unlink_mask, &_odp_eventdev_gbl->grp[group].mask,
&unlink_mask);
- odp_thrmask_and(&new_mask, &eventdev_gbl->grp[group].mask,
+ odp_thrmask_and(&new_mask, &_odp_eventdev_gbl->grp[group].mask,
&new_mask);
grp_update_mask(group, &new_mask);
@@ -920,7 +925,7 @@ static int schedule_group_leave(odp_schedule_group_t group,
ret = -1;
}
- odp_ticketlock_unlock(&eventdev_gbl->grp_lock);
+ odp_ticketlock_unlock(&_odp_eventdev_gbl->grp_lock);
return ret;
}
@@ -929,17 +934,17 @@ static int schedule_group_thrmask(odp_schedule_group_t group,
{
int ret;
- odp_ticketlock_lock(&eventdev_gbl->grp_lock);
+ odp_ticketlock_lock(&_odp_eventdev_gbl->grp_lock);
if (group < NUM_SCHED_GRPS && group >= SCHED_GROUP_NAMED &&
- eventdev_gbl->grp[group].allocated) {
- *thrmask = eventdev_gbl->grp[group].mask;
+ _odp_eventdev_gbl->grp[group].allocated) {
+ *thrmask = _odp_eventdev_gbl->grp[group].mask;
ret = 0;
} else {
ret = -1;
}
- odp_ticketlock_unlock(&eventdev_gbl->grp_lock);
+ odp_ticketlock_unlock(&_odp_eventdev_gbl->grp_lock);
return ret;
}
@@ -948,18 +953,18 @@ static int schedule_group_info(odp_schedule_group_t group,
{
int ret;
- odp_ticketlock_lock(&eventdev_gbl->grp_lock);
+ odp_ticketlock_lock(&_odp_eventdev_gbl->grp_lock);
if (group < NUM_SCHED_GRPS && group >= SCHED_GROUP_NAMED &&
- eventdev_gbl->grp[group].allocated) {
- info->name = eventdev_gbl->grp[group].name;
- info->thrmask = eventdev_gbl->grp[group].mask;
+ _odp_eventdev_gbl->grp[group].allocated) {
+ info->name = _odp_eventdev_gbl->grp[group].name;
+ info->thrmask = _odp_eventdev_gbl->grp[group].mask;
ret = 0;
} else {
ret = -1;
}
- odp_ticketlock_unlock(&eventdev_gbl->grp_lock);
+ odp_ticketlock_unlock(&_odp_eventdev_gbl->grp_lock);
return ret;
}
@@ -998,11 +1003,11 @@ static int schedule_capability(odp_schedule_capability_t *capa)
memset(capa, 0, sizeof(odp_schedule_capability_t));
- max_sched = RTE_MAX(RTE_MAX(eventdev_gbl->event_queue.num_atomic,
- eventdev_gbl->event_queue.num_ordered),
- eventdev_gbl->event_queue.num_parallel);
+ max_sched = RTE_MAX(RTE_MAX(_odp_eventdev_gbl->event_queue.num_atomic,
+ _odp_eventdev_gbl->event_queue.num_ordered),
+ _odp_eventdev_gbl->event_queue.num_parallel);
capa->max_queues = RTE_MIN(CONFIG_MAX_SCHED_QUEUES, max_sched);
- capa->max_queue_size = eventdev_gbl->config.nb_events_limit;
+ capa->max_queue_size = _odp_eventdev_gbl->config.nb_events_limit;
capa->max_ordered_locks = schedule_max_ordered_locks();
capa->max_groups = schedule_num_grps();
capa->max_prios = odp_schedule_num_prio();
@@ -1031,7 +1036,7 @@ static int schedule_config(const odp_schedule_config_t *config)
}
/* Fill in scheduler interface */
-const schedule_fn_t schedule_eventdev_fn = {
+const schedule_fn_t _odp_schedule_eventdev_fn = {
.pktio_start = schedule_pktio_start,
.thr_add = schedule_thr_add,
.thr_rem = schedule_thr_rem,
@@ -1051,7 +1056,7 @@ const schedule_fn_t schedule_eventdev_fn = {
};
/* Fill in scheduler API calls */
-const schedule_api_t schedule_eventdev_api = {
+const schedule_api_t _odp_schedule_eventdev_api = {
.schedule_wait_time = schedule_wait_time,
.schedule_capability = schedule_capability,
.schedule_config_init = schedule_config_init,
diff --git a/platform/linux-dpdk/odp_schedule_if.c b/platform/linux-dpdk/odp_schedule_if.c
index d290a6a85..b8ba33725 100644
--- a/platform/linux-dpdk/odp_schedule_if.c
+++ b/platform/linux-dpdk/odp_schedule_if.c
@@ -13,35 +13,35 @@
#include <stdlib.h>
#include <string.h>
-extern const schedule_fn_t schedule_sp_fn;
-extern const schedule_api_t schedule_sp_api;
+extern const schedule_fn_t _odp_schedule_sp_fn;
+extern const schedule_api_t _odp_schedule_sp_api;
-extern const schedule_fn_t schedule_basic_fn;
-extern const schedule_api_t schedule_basic_api;
+extern const schedule_fn_t _odp_schedule_basic_fn;
+extern const schedule_api_t _odp_schedule_basic_api;
-extern const schedule_fn_t schedule_eventdev_fn;
-extern const schedule_api_t schedule_eventdev_api;
+extern const schedule_fn_t _odp_schedule_eventdev_fn;
+extern const schedule_api_t _odp_schedule_eventdev_api;
-const schedule_fn_t *sched_fn;
-const schedule_api_t *sched_api;
+const schedule_fn_t *_odp_sched_fn;
+const schedule_api_t *_odp_sched_api;
int _odp_schedule_configured;
uint64_t odp_schedule_wait_time(uint64_t ns)
{
- return sched_api->schedule_wait_time(ns);
+ return _odp_sched_api->schedule_wait_time(ns);
}
int odp_schedule_capability(odp_schedule_capability_t *capa)
{
- return sched_api->schedule_capability(capa);
+ return _odp_sched_api->schedule_capability(capa);
}
void odp_schedule_config_init(odp_schedule_config_t *config)
{
memset(config, 0, sizeof(*config));
- sched_api->schedule_config_init(config);
+ _odp_sched_api->schedule_config_init(config);
}
int odp_schedule_config(const odp_schedule_config_t *config)
@@ -59,7 +59,7 @@ int odp_schedule_config(const odp_schedule_config_t *config)
config = &defconfig;
}
- ret = sched_api->schedule_config(config);
+ ret = _odp_sched_api->schedule_config(config);
if (ret >= 0)
_odp_schedule_configured = 1;
@@ -71,7 +71,7 @@ odp_event_t odp_schedule(odp_queue_t *from, uint64_t wait)
{
ODP_ASSERT(_odp_schedule_configured);
- return sched_api->schedule(from, wait);
+ return _odp_sched_api->schedule(from, wait);
}
int odp_schedule_multi(odp_queue_t *from, uint64_t wait, odp_event_t events[],
@@ -79,127 +79,127 @@ int odp_schedule_multi(odp_queue_t *from, uint64_t wait, odp_event_t events[],
{
ODP_ASSERT(_odp_schedule_configured);
- return sched_api->schedule_multi(from, wait, events, num);
+ return _odp_sched_api->schedule_multi(from, wait, events, num);
}
int odp_schedule_multi_wait(odp_queue_t *from, odp_event_t events[], int num)
{
- return sched_api->schedule_multi_wait(from, events, num);
+ return _odp_sched_api->schedule_multi_wait(from, events, num);
}
int odp_schedule_multi_no_wait(odp_queue_t *from, odp_event_t events[], int num)
{
- return sched_api->schedule_multi_no_wait(from, events, num);
+ return _odp_sched_api->schedule_multi_no_wait(from, events, num);
}
void odp_schedule_pause(void)
{
- return sched_api->schedule_pause();
+ return _odp_sched_api->schedule_pause();
}
void odp_schedule_resume(void)
{
- return sched_api->schedule_resume();
+ return _odp_sched_api->schedule_resume();
}
void odp_schedule_release_atomic(void)
{
- return sched_api->schedule_release_atomic();
+ return _odp_sched_api->schedule_release_atomic();
}
void odp_schedule_release_ordered(void)
{
- return sched_api->schedule_release_ordered();
+ return _odp_sched_api->schedule_release_ordered();
}
void odp_schedule_prefetch(int num)
{
- return sched_api->schedule_prefetch(num);
+ return _odp_sched_api->schedule_prefetch(num);
}
int odp_schedule_min_prio(void)
{
- return sched_api->schedule_min_prio();
+ return _odp_sched_api->schedule_min_prio();
}
int odp_schedule_max_prio(void)
{
- return sched_api->schedule_max_prio();
+ return _odp_sched_api->schedule_max_prio();
}
int odp_schedule_default_prio(void)
{
- return sched_api->schedule_default_prio();
+ return _odp_sched_api->schedule_default_prio();
}
int odp_schedule_num_prio(void)
{
- return sched_api->schedule_num_prio();
+ return _odp_sched_api->schedule_num_prio();
}
odp_schedule_group_t odp_schedule_group_create(const char *name,
const odp_thrmask_t *mask)
{
- return sched_api->schedule_group_create(name, mask);
+ return _odp_sched_api->schedule_group_create(name, mask);
}
int odp_schedule_group_destroy(odp_schedule_group_t group)
{
- return sched_api->schedule_group_destroy(group);
+ return _odp_sched_api->schedule_group_destroy(group);
}
odp_schedule_group_t odp_schedule_group_lookup(const char *name)
{
- return sched_api->schedule_group_lookup(name);
+ return _odp_sched_api->schedule_group_lookup(name);
}
int odp_schedule_group_join(odp_schedule_group_t group,
const odp_thrmask_t *mask)
{
- return sched_api->schedule_group_join(group, mask);
+ return _odp_sched_api->schedule_group_join(group, mask);
}
int odp_schedule_group_leave(odp_schedule_group_t group,
const odp_thrmask_t *mask)
{
- return sched_api->schedule_group_leave(group, mask);
+ return _odp_sched_api->schedule_group_leave(group, mask);
}
int odp_schedule_group_thrmask(odp_schedule_group_t group,
odp_thrmask_t *thrmask)
{
- return sched_api->schedule_group_thrmask(group, thrmask);
+ return _odp_sched_api->schedule_group_thrmask(group, thrmask);
}
int odp_schedule_group_info(odp_schedule_group_t group,
odp_schedule_group_info_t *info)
{
- return sched_api->schedule_group_info(group, info);
+ return _odp_sched_api->schedule_group_info(group, info);
}
void odp_schedule_order_lock(uint32_t lock_index)
{
- return sched_api->schedule_order_lock(lock_index);
+ return _odp_sched_api->schedule_order_lock(lock_index);
}
void odp_schedule_order_unlock(uint32_t lock_index)
{
- return sched_api->schedule_order_unlock(lock_index);
+ return _odp_sched_api->schedule_order_unlock(lock_index);
}
void odp_schedule_order_unlock_lock(uint32_t unlock_index, uint32_t lock_index)
{
- sched_api->schedule_order_unlock_lock(unlock_index, lock_index);
+ _odp_sched_api->schedule_order_unlock_lock(unlock_index, lock_index);
}
void odp_schedule_order_lock_start(uint32_t lock_index)
{
- sched_api->schedule_order_lock_start(lock_index);
+ _odp_sched_api->schedule_order_lock_start(lock_index);
}
void odp_schedule_order_lock_wait(uint32_t lock_index)
{
- sched_api->schedule_order_lock_wait(lock_index);
+ _odp_sched_api->schedule_order_lock_wait(lock_index);
}
int _odp_schedule_init_global(void)
@@ -212,23 +212,23 @@ int _odp_schedule_init_global(void)
ODP_PRINT("Using scheduler '%s'\n", sched);
if (!strcmp(sched, "basic")) {
- sched_fn = &schedule_basic_fn;
- sched_api = &schedule_basic_api;
+ _odp_sched_fn = &_odp_schedule_basic_fn;
+ _odp_sched_api = &_odp_schedule_basic_api;
} else if (!strcmp(sched, "sp")) {
- sched_fn = &schedule_sp_fn;
- sched_api = &schedule_sp_api;
+ _odp_sched_fn = &_odp_schedule_sp_fn;
+ _odp_sched_api = &_odp_schedule_sp_api;
} else if (!strcmp(sched, "eventdev")) {
- sched_fn = &schedule_eventdev_fn;
- sched_api = &schedule_eventdev_api;
+ _odp_sched_fn = &_odp_schedule_eventdev_fn;
+ _odp_sched_api = &_odp_schedule_eventdev_api;
} else {
ODP_ABORT("Unknown scheduler specified via ODP_SCHEDULER\n");
return -1;
}
- return sched_fn->init_global();
+ return _odp_sched_fn->init_global();
}
int _odp_schedule_term_global(void)
{
- return sched_fn->term_global();
+ return _odp_sched_fn->term_global();
}
diff --git a/platform/linux-dpdk/odp_system_info.c b/platform/linux-dpdk/odp_system_info.c
index e9c81328f..fce76d15a 100644
--- a/platform/linux-dpdk/odp_system_info.c
+++ b/platform/linux-dpdk/odp_system_info.c
@@ -311,7 +311,7 @@ int _odp_system_info_init(void)
if (file != NULL) {
/* Read CPU model, and set max cpu frequency
* if not set from cpufreq. */
- cpuinfo_parser(file, &odp_global_ro.system_info);
+ _odp_cpuinfo_parser(file, &odp_global_ro.system_info);
fclose(file);
} else {
_odp_dummy_cpuinfo(&odp_global_ro.system_info);
@@ -455,6 +455,7 @@ int odp_system_info(odp_system_info_t *info)
info->cpu_arch = sys_info->cpu_arch;
info->cpu_isa_sw = sys_info->cpu_isa_sw;
+ info->cpu_isa_hw = sys_info->cpu_isa_hw;
return 0;
}
@@ -497,7 +498,7 @@ void odp_sys_info_print(void)
str[len] = '\0';
ODP_PRINT("%s", str);
- sys_info_print_arch();
+ _odp_sys_info_print_arch();
}
void odp_sys_config_print(void)
diff --git a/platform/linux-dpdk/odp_thread.c b/platform/linux-dpdk/odp_thread.c
index 99cb7d8f7..28ea2ab38 100644
--- a/platform/linux-dpdk/odp_thread.c
+++ b/platform/linux-dpdk/odp_thread.c
@@ -147,10 +147,10 @@ int _odp_thread_init_local(odp_thread_type_t type)
group_worker = 1;
group_control = 1;
- if (sched_fn->get_config) {
+ if (_odp_sched_fn->get_config) {
schedule_config_t schedule_config;
- sched_fn->get_config(&schedule_config);
+ _odp_sched_fn->get_config(&schedule_config);
group_all = schedule_config.group_enable.all;
group_worker = schedule_config.group_enable.worker;
group_control = schedule_config.group_enable.control;
@@ -180,13 +180,13 @@ int _odp_thread_init_local(odp_thread_type_t type)
_odp_this_thread = &thread_globals->thr[id];
if (group_all)
- sched_fn->thr_add(ODP_SCHED_GROUP_ALL, id);
+ _odp_sched_fn->thr_add(ODP_SCHED_GROUP_ALL, id);
if (type == ODP_THREAD_WORKER && group_worker)
- sched_fn->thr_add(ODP_SCHED_GROUP_WORKER, id);
+ _odp_sched_fn->thr_add(ODP_SCHED_GROUP_WORKER, id);
if (type == ODP_THREAD_CONTROL && group_control)
- sched_fn->thr_add(ODP_SCHED_GROUP_CONTROL, id);
+ _odp_sched_fn->thr_add(ODP_SCHED_GROUP_CONTROL, id);
return 0;
}
@@ -202,23 +202,23 @@ int _odp_thread_term_local(void)
group_worker = 1;
group_control = 1;
- if (sched_fn->get_config) {
+ if (_odp_sched_fn->get_config) {
schedule_config_t schedule_config;
- sched_fn->get_config(&schedule_config);
+ _odp_sched_fn->get_config(&schedule_config);
group_all = schedule_config.group_enable.all;
group_worker = schedule_config.group_enable.worker;
group_control = schedule_config.group_enable.control;
}
if (group_all)
- sched_fn->thr_rem(ODP_SCHED_GROUP_ALL, id);
+ _odp_sched_fn->thr_rem(ODP_SCHED_GROUP_ALL, id);
if (type == ODP_THREAD_WORKER && group_worker)
- sched_fn->thr_rem(ODP_SCHED_GROUP_WORKER, id);
+ _odp_sched_fn->thr_rem(ODP_SCHED_GROUP_WORKER, id);
if (type == ODP_THREAD_CONTROL && group_control)
- sched_fn->thr_rem(ODP_SCHED_GROUP_CONTROL, id);
+ _odp_sched_fn->thr_rem(ODP_SCHED_GROUP_CONTROL, id);
odp_spinlock_lock(&thread_globals->lock);
num = free_id(id);
diff --git a/platform/linux-dpdk/odp_timer.c b/platform/linux-dpdk/odp_timer.c
index 3205c7d98..46e132bf7 100644
--- a/platform/linux-dpdk/odp_timer.c
+++ b/platform/linux-dpdk/odp_timer.c
@@ -278,6 +278,8 @@ int odp_timer_capability(odp_timer_clk_src_t clk_src,
capa->max_tmo.res_hz = MAX_RES_HZ;
capa->max_tmo.min_tmo = min_tmo;
capa->max_tmo.max_tmo = MAX_TMO_NS;
+ capa->queue_type_sched = true;
+ capa->queue_type_plain = true;
return 0;
}
@@ -534,7 +536,7 @@ odp_timer_t odp_timer_alloc(odp_timer_pool_t tp,
timer->tmo_event = ODP_EVENT_INVALID;
/* Add timer to queue */
- queue_fn->timer_add(queue);
+ _odp_queue_fn->timer_add(queue);
odp_ticketlock_lock(&timer_pool->lock);
@@ -575,7 +577,7 @@ retry:
}
/* Remove timer from queue */
- queue_fn->timer_rem(timer->queue);
+ _odp_queue_fn->timer_rem(timer->queue);
odp_ticketlock_unlock(&timer->lock);
diff --git a/platform/linux-dpdk/test/example/Makefile.am b/platform/linux-dpdk/test/example/Makefile.am
index 2e6a7ce6c..22b254cd7 100644
--- a/platform/linux-dpdk/test/example/Makefile.am
+++ b/platform/linux-dpdk/test/example/Makefile.am
@@ -1,4 +1,6 @@
SUBDIRS = \
+ classifier \
+ generator \
l2fwd_simple \
l3fwd \
packet \
diff --git a/platform/linux-dpdk/test/example/classifier/Makefile.am b/platform/linux-dpdk/test/example/classifier/Makefile.am
new file mode 100644
index 000000000..2ffced539
--- /dev/null
+++ b/platform/linux-dpdk/test/example/classifier/Makefile.am
@@ -0,0 +1 @@
+EXTRA_DIST = pktio_env
diff --git a/platform/linux-dpdk/test/example/classifier/pktio_env b/platform/linux-dpdk/test/example/classifier/pktio_env
new file mode 100644
index 000000000..1bd4b31d8
--- /dev/null
+++ b/platform/linux-dpdk/test/example/classifier/pktio_env
@@ -0,0 +1,47 @@
+#!/bin/sh
+#
+# Copyright (c) 2020, Marvell
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Script to setup interfaces used for running application on linux-generic.
+#
+# For linux-generic the default behavior is to create one pcap interface
+# which uses udp64.pcap to inject traffic.
+#
+# Network set-up
+# +---------+ +-----------+
+# |pcap intf| IF0<---> | Classifier|
+# +--------- +-----------+
+#
+
+PCAP_IN=`find . ${TEST_DIR} $(dirname $0) -name udp64.pcap -print -quit`
+echo "using PCAP in=${PCAP_IN}"
+
+IF0=0
+TIME_OUT_VAL=10
+CPASS_COUNT_ARG1=100
+CPASS_COUNT_ARG2=100
+
+export ODP_PLATFORM_PARAMS="--no-pci \
+--vdev net_pcap0,rx_pcap=${PCAP_IN},tx_pcap=/dev/null"
+
+if [ "$0" = "$BASH_SOURCE" ]; then
+ echo "Error: Platform specific env file has to be sourced."
+fi
+
+validate_result()
+{
+ return 0;
+}
+
+setup_interfaces()
+{
+ return 0
+}
+
+cleanup_interfaces()
+{
+ return 0
+}
diff --git a/platform/linux-dpdk/test/example/generator/Makefile.am b/platform/linux-dpdk/test/example/generator/Makefile.am
new file mode 100644
index 000000000..2ffced539
--- /dev/null
+++ b/platform/linux-dpdk/test/example/generator/Makefile.am
@@ -0,0 +1 @@
+EXTRA_DIST = pktio_env
diff --git a/platform/linux-dpdk/test/example/generator/pktio_env b/platform/linux-dpdk/test/example/generator/pktio_env
new file mode 100644
index 000000000..82c238cc5
--- /dev/null
+++ b/platform/linux-dpdk/test/example/generator/pktio_env
@@ -0,0 +1,34 @@
+#!/bin/sh
+#
+# Copyright (C) 2020, Marvell
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Script to setup interfaces used for running application on odp-dpdk.
+#
+# Generator uses a loop interface to validate udp mode.
+#
+# Network set-up
+# IF0 ---> loop
+
+IF0=loop
+
+if [ "$0" = "$BASH_SOURCE" ]; then
+ echo "Error: Platform specific env file has to be sourced."
+fi
+
+validate_result()
+{
+ return 0
+}
+
+setup_interfaces()
+{
+ return 0
+}
+
+cleanup_interfaces()
+{
+ return 0
+}
diff --git a/platform/linux-dpdk/test/example/switch/pktio_env b/platform/linux-dpdk/test/example/switch/pktio_env
index 413588c1f..8daca3c3e 100644
--- a/platform/linux-dpdk/test/example/switch/pktio_env
+++ b/platform/linux-dpdk/test/example/switch/pktio_env
@@ -43,6 +43,7 @@ validate_result()
do
if [ `stat -c %s pcapout${i}.pcap` -ne `stat -c %s ${PCAP_IN}` ]; then
echo "Error: Output file $i size not matching"
+ exit 1
fi
rm -f pcapout${i}.pcap
done
diff --git a/platform/linux-generic/Makefile.am b/platform/linux-generic/Makefile.am
index c2f8cca00..66fbf3364 100644
--- a/platform/linux-generic/Makefile.am
+++ b/platform/linux-generic/Makefile.am
@@ -33,9 +33,11 @@ odpapiplatinclude_HEADERS = \
include/odp/api/plat/byteorder_inlines.h \
include/odp/api/plat/cpu_inlines.h \
include/odp/api/plat/event_inlines.h \
+ include/odp/api/plat/event_vector_inline_types.h \
include/odp/api/plat/packet_flag_inlines.h \
include/odp/api/plat/packet_inline_types.h \
include/odp/api/plat/packet_inlines.h \
+ include/odp/api/plat/packet_vector_inlines.h \
include/odp/api/plat/pktio_inlines.h \
include/odp/api/plat/pool_inline_types.h \
include/odp/api/plat/queue_inlines.h \
@@ -91,7 +93,6 @@ endif
noinst_HEADERS = \
include/odp_align_internal.h \
include/odp_atomic_internal.h \
- include/odp_bitmap_internal.h \
include/odp_bitset.h \
include/odp_buffer_internal.h \
include/odp_classification_datamodel.h \
@@ -146,6 +147,7 @@ noinst_HEADERS = \
include/odp_timer_internal.h \
include/odp_timer_wheel_internal.h \
include/odp_traffic_mngr_internal.h \
+ include/odp_event_vector_internal.h \
include/protocols/eth.h \
include/protocols/ip.h \
include/protocols/ipsec.h \
@@ -159,7 +161,6 @@ BUILT_SOURCES = \
__LIB__libodp_linux_la_SOURCES = \
odp_atomic.c \
odp_barrier.c \
- odp_bitmap.c \
odp_buffer.c \
odp_chksum.c \
odp_classification.c \
@@ -186,6 +187,7 @@ __LIB__libodp_linux_la_SOURCES = \
odp_libconfig.c \
odp_name_table.c \
odp_packet.c \
+ odp_packet_vector.c \
odp_packet_flags.c \
odp_packet_io.c \
odp_pkt_queue.c \
diff --git a/platform/linux-generic/arch/aarch64/odp_sysinfo_parse.c b/platform/linux-generic/arch/aarch64/odp_sysinfo_parse.c
index 2b397b9c4..77e115756 100644
--- a/platform/linux-generic/arch/aarch64/odp_sysinfo_parse.c
+++ b/platform/linux-generic/arch/aarch64/odp_sysinfo_parse.c
@@ -58,49 +58,64 @@ static void aarch64_impl_str(char *str, int maxlen, int implementer)
snprintf(str, maxlen, "UNKNOWN (0x%x)", implementer);
}
-static void aarch64_part_str(char *str, int maxlen, int implementer,
- int part, int variant, int revision)
+static void aarch64_part_info(char *str, int maxlen, odp_cpu_arch_arm_t *cpu_isa, int implementer,
+ int part, int variant, int revision)
{
+ *cpu_isa = ODP_CPU_ARCH_ARM_UNKNOWN;
+
if (implementer == 0x41) {
switch (part) {
case 0xd02:
snprintf(str, maxlen, "Cortex-A34");
+ *cpu_isa = ODP_CPU_ARCH_ARMV8_0;
return;
case 0xd04:
snprintf(str, maxlen, "Cortex-A35");
+ *cpu_isa = ODP_CPU_ARCH_ARMV8_0;
return;
case 0xd03:
snprintf(str, maxlen, "Cortex-A53");
+ *cpu_isa = ODP_CPU_ARCH_ARMV8_0;
return;
case 0xd05:
snprintf(str, maxlen, "Cortex-A55");
+ *cpu_isa = ODP_CPU_ARCH_ARMV8_2;
return;
case 0xd07:
snprintf(str, maxlen, "Cortex-A57");
+ *cpu_isa = ODP_CPU_ARCH_ARMV8_0;
return;
case 0xd06:
snprintf(str, maxlen, "Cortex-A65");
+ *cpu_isa = ODP_CPU_ARCH_ARMV8_2;
return;
case 0xd08:
snprintf(str, maxlen, "Cortex-A72");
+ *cpu_isa = ODP_CPU_ARCH_ARMV8_0;
return;
case 0xd09:
snprintf(str, maxlen, "Cortex-A73");
+ *cpu_isa = ODP_CPU_ARCH_ARMV8_0;
return;
case 0xd0a:
snprintf(str, maxlen, "Cortex-A75");
+ *cpu_isa = ODP_CPU_ARCH_ARMV8_2;
return;
case 0xd0b:
snprintf(str, maxlen, "Cortex-A76");
+ *cpu_isa = ODP_CPU_ARCH_ARMV8_2;
return;
case 0xd0e:
snprintf(str, maxlen, "Cortex-A76AE");
+ *cpu_isa = ODP_CPU_ARCH_ARMV8_2;
return;
case 0xd0d:
snprintf(str, maxlen, "Cortex-A77");
+ *cpu_isa = ODP_CPU_ARCH_ARMV8_2;
return;
case 0xd41:
snprintf(str, maxlen, "Cortex-A78");
+ *cpu_isa = ODP_CPU_ARCH_ARMV8_2;
return;
default:
break;
@@ -108,22 +123,24 @@ static void aarch64_part_str(char *str, int maxlen, int implementer,
} else if (implementer == 0x43) {
switch (part) {
case 0xa1:
- snprintf(str, maxlen, "CN88XX, Pass %i.%i",
- variant + 1, revision);
+ snprintf(str, maxlen, "CN88XX, Pass %i.%i", variant + 1, revision);
+ *cpu_isa = ODP_CPU_ARCH_ARMV8_1;
return;
case 0xa2:
- snprintf(str, maxlen, "CN81XX, Pass %i.%i",
- variant + 1, revision);
+ snprintf(str, maxlen, "CN81XX, Pass %i.%i", variant + 1, revision);
+ *cpu_isa = ODP_CPU_ARCH_ARMV8_1;
return;
case 0xa3:
- snprintf(str, maxlen, "CN83XX, Pass %i.%i",
- variant + 1, revision);
+ snprintf(str, maxlen, "CN83XX, Pass %i.%i", variant + 1, revision);
+ *cpu_isa = ODP_CPU_ARCH_ARMV8_1;
return;
case 0xaf:
snprintf(str, maxlen, "CN99XX, Rev %c%i", 'A' + variant, revision);
+ *cpu_isa = ODP_CPU_ARCH_ARMV8_1;
return;
case 0xb1:
snprintf(str, maxlen, "CN98XX, Rev %c%i", 'A' + variant, revision);
+ *cpu_isa = ODP_CPU_ARCH_ARMV8_2;
return;
case 0xb2:
/* Handle B0 errata: variant and revision numbers show up as A1 */
@@ -131,6 +148,8 @@ static void aarch64_part_str(char *str, int maxlen, int implementer,
snprintf(str, maxlen, "CN96XX, Rev B0");
else
snprintf(str, maxlen, "CN96XX, Rev %c%i", 'A' + variant, revision);
+
+ *cpu_isa = ODP_CPU_ARCH_ARMV8_2;
return;
default:
break;
@@ -185,7 +204,7 @@ static odp_cpu_arch_arm_t arm_isa_version(void)
return ODP_CPU_ARCH_ARM_UNKNOWN;
}
-int cpuinfo_parser(FILE *file, system_info_t *sysinfo)
+int _odp_cpuinfo_parser(FILE *file, system_info_t *sysinfo)
{
char str[1024];
char impl_str[TMP_STR_LEN];
@@ -249,11 +268,13 @@ int cpuinfo_parser(FILE *file, system_info_t *sysinfo)
cur = strstr(str, "CPU revision");
if (cur) {
+ odp_cpu_arch_arm_t cpu_isa;
+
cur = strchr(cur, ':');
rev = strtol(cur + 1, NULL, 10);
- aarch64_part_str(part_str, TMP_STR_LEN,
- impl, part, var, rev);
+ aarch64_part_info(part_str, TMP_STR_LEN, &cpu_isa, impl, part, var, rev);
+ sysinfo->cpu_isa_hw.arm = cpu_isa;
/* This is the last line about this cpu, update
* model string. */
@@ -280,7 +301,7 @@ int cpuinfo_parser(FILE *file, system_info_t *sysinfo)
return 0;
}
-void sys_info_print_arch(void)
+void _odp_sys_info_print_arch(void)
{
const char *ndef = "n/a";
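The new aarch64_part_info() maps the implementer, part, variant and revision values parsed from /proc/cpuinfo to a CPU model string plus an ODP ISA level. For reference, a minimal sketch of how those same fields could be decoded from a raw MIDR_EL1 value, assuming the standard Arm bit layout (the helper and the example value below are illustrative, not part of this patch):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical helper: split a raw MIDR_EL1 value into the fields that
     * aarch64_part_info() consumes. Assumed layout: implementer [31:24],
     * variant [23:20], part number [15:4], revision [3:0]. */
    static void midr_decode(uint32_t midr, int *impl, int *part, int *var, int *rev)
    {
        *impl = (midr >> 24) & 0xff;
        *var  = (midr >> 20) & 0xf;
        *part = (midr >> 4) & 0xfff;
        *rev  = midr & 0xf;
    }

    int main(void)
    {
        int impl, part, var, rev;

        /* Example value: Arm (0x41), Cortex-A72 (0xd08), r0p3 */
        midr_decode(0x410fd083, &impl, &part, &var, &rev);
        printf("impl 0x%x part 0x%x variant %d revision %d\n", impl, part, var, rev);
        return 0;
    }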
diff --git a/platform/linux-generic/arch/arm/odp_sysinfo_parse.c b/platform/linux-generic/arch/arm/odp_sysinfo_parse.c
index f39173d9e..4cbe46d7c 100644
--- a/platform/linux-generic/arch/arm/odp_sysinfo_parse.c
+++ b/platform/linux-generic/arch/arm/odp_sysinfo_parse.c
@@ -7,7 +7,7 @@
#include <odp_global_data.h>
#include <odp_sysinfo_internal.h>
-int cpuinfo_parser(FILE * file ODP_UNUSED, system_info_t *sysinfo)
+int _odp_cpuinfo_parser(FILE *file ODP_UNUSED, system_info_t *sysinfo)
{
sysinfo->cpu_arch = ODP_CPU_ARCH_ARM;
sysinfo->cpu_isa_sw.arm = ODP_CPU_ARCH_ARM_UNKNOWN;
@@ -23,7 +23,7 @@ int cpuinfo_parser(FILE * file ODP_UNUSED, system_info_t *sysinfo)
return _odp_dummy_cpuinfo(sysinfo);
}
-void sys_info_print_arch(void)
+void _odp_sys_info_print_arch(void)
{
}
diff --git a/platform/linux-generic/arch/default/odp_sysinfo_parse.c b/platform/linux-generic/arch/default/odp_sysinfo_parse.c
index 4c3f1aec1..11d33d576 100644
--- a/platform/linux-generic/arch/default/odp_sysinfo_parse.c
+++ b/platform/linux-generic/arch/default/odp_sysinfo_parse.c
@@ -7,12 +7,12 @@
#include <odp_global_data.h>
#include <odp_sysinfo_internal.h>
-int cpuinfo_parser(FILE *file ODP_UNUSED, system_info_t *sysinfo)
+int _odp_cpuinfo_parser(FILE *file ODP_UNUSED, system_info_t *sysinfo)
{
return _odp_dummy_cpuinfo(sysinfo);
}
-void sys_info_print_arch(void)
+void _odp_sys_info_print_arch(void)
{
}
diff --git a/platform/linux-generic/arch/mips64/odp_sysinfo_parse.c b/platform/linux-generic/arch/mips64/odp_sysinfo_parse.c
index 15b2ccc86..897637516 100644
--- a/platform/linux-generic/arch/mips64/odp_sysinfo_parse.c
+++ b/platform/linux-generic/arch/mips64/odp_sysinfo_parse.c
@@ -8,7 +8,7 @@
#include <odp_sysinfo_internal.h>
#include <string.h>
-int cpuinfo_parser(FILE *file, system_info_t *sysinfo)
+int _odp_cpuinfo_parser(FILE *file, system_info_t *sysinfo)
{
char str[1024];
char *pos;
@@ -63,7 +63,7 @@ int cpuinfo_parser(FILE *file, system_info_t *sysinfo)
return 0;
}
-void sys_info_print_arch(void)
+void _odp_sys_info_print_arch(void)
{
}
diff --git a/platform/linux-generic/arch/powerpc/odp_sysinfo_parse.c b/platform/linux-generic/arch/powerpc/odp_sysinfo_parse.c
index 77b813f99..2049cc42f 100644
--- a/platform/linux-generic/arch/powerpc/odp_sysinfo_parse.c
+++ b/platform/linux-generic/arch/powerpc/odp_sysinfo_parse.c
@@ -8,7 +8,7 @@
#include <odp_sysinfo_internal.h>
#include <string.h>
-int cpuinfo_parser(FILE *file, system_info_t *sysinfo)
+int _odp_cpuinfo_parser(FILE *file, system_info_t *sysinfo)
{
char str[1024];
char *pos;
@@ -62,7 +62,7 @@ int cpuinfo_parser(FILE *file, system_info_t *sysinfo)
return 0;
}
-void sys_info_print_arch(void)
+void _odp_sys_info_print_arch(void)
{
}
diff --git a/platform/linux-generic/arch/x86/cpu_flags.c b/platform/linux-generic/arch/x86/cpu_flags.c
index f5c9c0d45..c75a87233 100644
--- a/platform/linux-generic/arch/x86/cpu_flags.c
+++ b/platform/linux-generic/arch/x86/cpu_flags.c
@@ -329,7 +329,7 @@ static const char *cpu_get_flag_name(enum rte_cpu_flag_t feature)
return cpu_feature_table[feature].name;
}
-void cpu_flags_print_all(void)
+void _odp_cpu_flags_print_all(void)
{
int len, i;
int max_str = 1024;
@@ -367,7 +367,7 @@ int _odp_cpu_has_global_time(void)
return 0;
}
-int cpu_flags_has_rdtsc(void)
+int _odp_cpu_flags_has_rdtsc(void)
{
if (cpu_get_flag_enabled(RTE_CPUFLAG_TSC) > 0)
return 1;
diff --git a/platform/linux-generic/arch/x86/cpu_flags.h b/platform/linux-generic/arch/x86/cpu_flags.h
index 879c9aadc..8d485dbfa 100644
--- a/platform/linux-generic/arch/x86/cpu_flags.h
+++ b/platform/linux-generic/arch/x86/cpu_flags.h
@@ -11,8 +11,8 @@
extern "C" {
#endif
-void cpu_flags_print_all(void);
-int cpu_flags_has_rdtsc(void);
+void _odp_cpu_flags_print_all(void);
+int _odp_cpu_flags_has_rdtsc(void);
#ifdef __cplusplus
}
diff --git a/platform/linux-generic/arch/x86/odp_cpu_cycles.c b/platform/linux-generic/arch/x86/odp_cpu_cycles.c
index c7d97a764..406668a79 100644
--- a/platform/linux-generic/arch/x86/odp_cpu_cycles.c
+++ b/platform/linux-generic/arch/x86/odp_cpu_cycles.c
@@ -12,7 +12,7 @@
int _odp_cpu_cycles_init_global(void)
{
- if (cpu_flags_has_rdtsc() == 0) {
+ if (_odp_cpu_flags_has_rdtsc() == 0) {
ODP_ERR("RDTSC instruction not supported\n");
return -1;
}
diff --git a/platform/linux-generic/arch/x86/odp_sysinfo_parse.c b/platform/linux-generic/arch/x86/odp_sysinfo_parse.c
index d21ccf30c..c74c52045 100644
--- a/platform/linux-generic/arch/x86/odp_sysinfo_parse.c
+++ b/platform/linux-generic/arch/x86/odp_sysinfo_parse.c
@@ -8,7 +8,7 @@
#include "cpu_flags.h"
#include <string.h>
-int cpuinfo_parser(FILE *file, system_info_t *sysinfo)
+int _odp_cpuinfo_parser(FILE *file, system_info_t *sysinfo)
{
char str[1024];
char *pos, *pos_end;
@@ -77,9 +77,9 @@ int cpuinfo_parser(FILE *file, system_info_t *sysinfo)
return 0;
}
-void sys_info_print_arch(void)
+void _odp_sys_info_print_arch(void)
{
- cpu_flags_print_all();
+ _odp_cpu_flags_print_all();
}
uint64_t odp_cpu_arch_hz_current(int id)
diff --git a/platform/linux-generic/include-abi/odp/api/abi/event.h b/platform/linux-generic/include-abi/odp/api/abi/event.h
index a8024654c..27d750d16 100644
--- a/platform/linux-generic/include-abi/odp/api/abi/event.h
+++ b/platform/linux-generic/include-abi/odp/api/abi/event.h
@@ -29,11 +29,12 @@ typedef ODP_HANDLE_T(odp_event_t);
#define ODP_EVENT_INVALID _odp_cast_scalar(odp_event_t, 0)
typedef enum odp_event_type_t {
- ODP_EVENT_BUFFER = 1,
- ODP_EVENT_PACKET = 2,
- ODP_EVENT_TIMEOUT = 3,
+ ODP_EVENT_BUFFER = 1,
+ ODP_EVENT_PACKET = 2,
+ ODP_EVENT_TIMEOUT = 3,
ODP_EVENT_CRYPTO_COMPL = 4,
- ODP_EVENT_IPSEC_STATUS = 5
+ ODP_EVENT_IPSEC_STATUS = 5,
+ ODP_EVENT_PACKET_VECTOR = 6
} odp_event_type_t;
typedef enum odp_event_subtype_t {
diff --git a/platform/linux-generic/include-abi/odp/api/abi/packet.h b/platform/linux-generic/include-abi/odp/api/abi/packet.h
index de4d822b4..76ec97dc7 100644
--- a/platform/linux-generic/include-abi/odp/api/abi/packet.h
+++ b/platform/linux-generic/include-abi/odp/api/abi/packet.h
@@ -35,6 +35,10 @@ typedef ODP_HANDLE_T(odp_packet_seg_t);
#define ODP_PACKET_SEG_INVALID _odp_cast_scalar(odp_packet_seg_t, 0)
+typedef ODP_HANDLE_T(odp_packet_vector_t);
+
+#define ODP_PACKET_VECTOR_INVALID _odp_cast_scalar(odp_packet_vector_t, 0)
+
#define ODP_PACKET_OFFSET_INVALID 0xffff
typedef uint8_t odp_proto_l2_type_t;
@@ -119,6 +123,7 @@ typedef struct odp_packet_parse_result_flag_t {
} odp_packet_parse_result_flag_t;
#include <odp/api/plat/packet_inlines.h>
+#include <odp/api/plat/packet_vector_inlines.h>
/**
* @}
diff --git a/platform/linux-generic/include/odp/api/plat/event_vector_inline_types.h b/platform/linux-generic/include/odp/api/plat/event_vector_inline_types.h
new file mode 100644
index 000000000..547620df6
--- /dev/null
+++ b/platform/linux-generic/include/odp/api/plat/event_vector_inline_types.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2020, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_EVENT_VECTOR_INLINE_TYPES_H_
+#define ODP_PLAT_EVENT_VECTOR_INLINE_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+/* Event vector field accessors */
+#define _odp_event_vect_get(vect, cast, field) \
+ (*(cast *)(uintptr_t)((uint8_t *)vect + _odp_event_vector_inline.field))
+#define _odp_event_vect_get_ptr(vect, cast, field) \
+ ((cast *)(uintptr_t)((uint8_t *)vect + _odp_event_vector_inline.field))
+
+/* Event vector header field offsets for inline functions */
+typedef struct _odp_event_vector_inline_offset_t {
+ uint16_t packet;
+ uint16_t pool;
+ uint16_t size;
+} _odp_event_vector_inline_offset_t;
+
+/** @endcond */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ODP_PLAT_EVENT_VECTOR_INLINE_TYPES_H_ */
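These accessor macros compute field addresses from a runtime table of byte offsets (_odp_event_vector_inline), so public inline functions never need the internal header definition and the ABI stays stable. A self-contained sketch of the same pattern, with made-up type names standing in for the real odp_event_vector_hdr_t:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the internal header type (hypothetical) */
    typedef struct {
        uint64_t reserved;
        uint32_t size;
    } demo_vector_hdr_t;

    /* Offset table, analogous to _odp_event_vector_inline_offset_t */
    typedef struct {
        uint16_t size;
    } demo_offsets_t;

    static const demo_offsets_t demo_offsets = {
        .size = offsetof(demo_vector_hdr_t, size)
    };

    /* Same idea as _odp_event_vect_get(): add the recorded offset to the
     * base pointer and cast, without knowing the full struct layout. */
    #define demo_get(ptr, cast, field) \
        (*(cast *)(uintptr_t)((uint8_t *)(ptr) + demo_offsets.field))

    int main(void)
    {
        demo_vector_hdr_t hdr = { .reserved = 0, .size = 7 };

        printf("size = %u\n", demo_get(&hdr, uint32_t, size));
        return 0;
    }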
diff --git a/platform/linux-generic/include/odp/api/plat/packet_inline_types.h b/platform/linux-generic/include/odp/api/plat/packet_inline_types.h
index e8764228d..ec7b1900e 100644
--- a/platform/linux-generic/include/odp/api/plat/packet_inline_types.h
+++ b/platform/linux-generic/include/odp/api/plat/packet_inline_types.h
@@ -115,7 +115,7 @@ typedef union {
uint32_t all_flags;
struct {
- uint32_t reserved1: 9;
+ uint32_t reserved1: 10;
/*
* Init flags
@@ -142,15 +142,14 @@ typedef union {
uint32_t udp_err: 1; /* UDP error */
uint32_t sctp_err: 1; /* SCTP error */
uint32_t l4_chksum_err: 1; /* L4 checksum error */
- uint32_t ipsec_err: 1; /* IPsec error */
uint32_t crypto_err: 1; /* Crypto packet operation error */
};
/* Flag groups */
struct {
- uint32_t reserved2: 9;
+ uint32_t reserved2: 10;
uint32_t other: 14; /* All other flags */
- uint32_t error: 9; /* All error flags */
+ uint32_t error: 8; /* All error flags */
} all;
} _odp_packet_flags_t;
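The union above overlays individual one-bit flags with grouped views, which is why dropping ipsec_err widens reserved from 9 to 10 bits and shrinks the error group from 9 to 8: the group widths must always cover the same bits as the per-flag fields. A stripped-down illustration of that overlay (bit-field layout is compiler-dependent, as in the original; all names here are made up):

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified flag union: single-bit flags and a grouped view share one
     * 32-bit word, so the widths of the two structs must stay in sync. */
    typedef union {
        uint32_t all_flags;

        struct {
            uint32_t reserved1: 30;
            uint32_t err_a: 1;
            uint32_t err_b: 1;
        };

        struct {
            uint32_t reserved2: 30;
            uint32_t error: 2; /* covers err_a and err_b */
        } all;
    } demo_flags_t;

    int main(void)
    {
        demo_flags_t f = { .all_flags = 0 };

        f.err_a = 1;
        printf("any error set: %u\n", f.all.error != 0);
        return 0;
    }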
diff --git a/platform/linux-generic/include/odp/api/plat/packet_vector_inlines.h b/platform/linux-generic/include/odp/api/plat/packet_vector_inlines.h
new file mode 100644
index 000000000..b63f13909
--- /dev/null
+++ b/platform/linux-generic/include/odp/api/plat/packet_vector_inlines.h
@@ -0,0 +1,83 @@
+/* Copyright (c) 2020, Nokia
+ *
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * Packet vector inline functions
+ */
+
+#ifndef _ODP_PLAT_PACKET_VECTOR_INLINES_H_
+#define _ODP_PLAT_PACKET_VECTOR_INLINES_H_
+
+#include <odp/api/abi/event.h>
+#include <odp/api/abi/packet.h>
+#include <odp/api/abi/pool.h>
+
+#include <odp/api/plat/event_vector_inline_types.h>
+#include <odp/api/plat/pool_inline_types.h>
+
+#include <stdint.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+#ifndef _ODP_NO_INLINE
+ /* Inline functions by default */
+ #define _ODP_INLINE static inline
+ #define odp_packet_vector_from_event __odp_packet_vector_from_event
+ #define odp_packet_vector_to_event __odp_packet_vector_to_event
+ #define odp_packet_vector_tbl __odp_packet_vector_tbl
+ #define odp_packet_vector_pool __odp_packet_vector_pool
+ #define odp_packet_vector_size __odp_packet_vector_size
+ #define odp_packet_vector_size_set __odp_packet_vector_size_set
+#else
+ #undef _ODP_INLINE
+ #define _ODP_INLINE
+#endif
+
+extern const _odp_event_vector_inline_offset_t _odp_event_vector_inline;
+extern const _odp_pool_inline_offset_t _odp_pool_inline;
+
+_ODP_INLINE odp_packet_vector_t odp_packet_vector_from_event(odp_event_t ev)
+{
+ return (odp_packet_vector_t)ev;
+}
+
+_ODP_INLINE odp_event_t odp_packet_vector_to_event(odp_packet_vector_t pktv)
+{
+ return (odp_event_t)pktv;
+}
+
+_ODP_INLINE uint32_t odp_packet_vector_tbl(odp_packet_vector_t pktv, odp_packet_t **pkt_tbl)
+{
+ *pkt_tbl = _odp_event_vect_get_ptr(pktv, odp_packet_t, packet);
+
+ return _odp_event_vect_get(pktv, uint32_t, size);
+}
+
+_ODP_INLINE odp_pool_t odp_packet_vector_pool(odp_packet_vector_t pktv)
+{
+ void *pool = _odp_event_vect_get(pktv, void *, pool);
+
+ return _odp_pool_get(pool, odp_pool_t, pool_hdl);
+}
+
+_ODP_INLINE uint32_t odp_packet_vector_size(odp_packet_vector_t pktv)
+{
+ return _odp_event_vect_get(pktv, uint32_t, size);
+}
+
+_ODP_INLINE void odp_packet_vector_size_set(odp_packet_vector_t pktv, uint32_t size)
+{
+ uint32_t *vector_size = _odp_event_vect_get_ptr(pktv, uint32_t, size);
+
+ *vector_size = size;
+}
+
+/** @endcond */
+
+#endif
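Taken together with the new ODP_EVENT_PACKET_VECTOR type, these inlines let an application unpack a vector into its packet table on the receive path. A rough usage sketch (hypothetical handler, error handling omitted; not code from this patch):

    #include <odp_api.h>

    /* Hypothetical consumer of a scheduled event that may be a packet vector */
    static void handle_event(odp_event_t ev)
    {
        if (odp_event_type(ev) == ODP_EVENT_PACKET_VECTOR) {
            odp_packet_vector_t pktv = odp_packet_vector_from_event(ev);
            odp_packet_t *pkt_tbl;
            uint32_t num = odp_packet_vector_tbl(pktv, &pkt_tbl);

            /* ... application would process pkt_tbl[0..num - 1] here ... */
            odp_packet_free_multi(pkt_tbl, (int)num);

            /* Free only the vector; its packets were released above */
            odp_packet_vector_free(pktv);
        } else if (odp_event_type(ev) == ODP_EVENT_PACKET) {
            odp_packet_free(odp_packet_from_event(ev));
        } else {
            odp_event_free(ev);
        }
    }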
diff --git a/platform/linux-generic/include/odp_bitmap_internal.h b/platform/linux-generic/include/odp_bitmap_internal.h
deleted file mode 100644
index 9ba6c29e3..000000000
--- a/platform/linux-generic/include/odp_bitmap_internal.h
+++ /dev/null
@@ -1,315 +0,0 @@
-/* Copyright (c) 2016-2018, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP generic bitmap types and operations.
- */
-
-#ifndef ODP_BITMAP_INTERNAL_H_
-#define ODP_BITMAP_INTERNAL_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <stdint.h>
-#include <stdbool.h>
-#include <string.h>
-#include <odp/api/hints.h>
-#include <odp_macros_internal.h>
-
-/* Generate unique identifier for instantiated class */
-#define TOKENIZE(template, line) \
- template ## _ ## line ## _ ## __COUNTER__
-
-#define BITS_PER_BYTE (8)
-#define BITS_PER_LONG __WORDSIZE
-#define BYTES_PER_LONG (BITS_PER_LONG / BITS_PER_BYTE)
-
-#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
-#define BITS_TO_LONGS(nr) BIT_WORD(nr + BITS_PER_LONG - 1)
-
-#define BITMAP_FIRST_WORD_MASK(start) \
- (~0UL << ((start) & (BITS_PER_LONG - 1)))
-#define BITMAP_LAST_WORD_MASK(nbits) \
- (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
-
-/* WAPL bitmap base class */
-typedef struct {
- unsigned int nwords;
- unsigned int *pl;
- unsigned long *ul;
-} wapl_bitmap_t;
-
-/*
- * Word-Aligned Position List (WAPL) bitmap, which actually
- * is not a compression, but with an extra list of non-empty
- * word positions.
- *
- * WAPL accelerates bitwise operations and iterations by
- * applying only to non-empty positions instead of walking
- * through the whole bitmap.
- *
- * WAPL uses [1 ~ N] instead of [0 ~ N - 1] as position
- * values and an extra 0 as end indicator for position list.
- * This is the reason to allocate one extra room below.
- */
-#define instantiate_wapl_bitmap(line, nbits) \
- struct TOKENIZE(wapl_bitmap, line) { \
- unsigned int pl[BITS_TO_LONGS(nbits) + 1]; \
- unsigned long ul[BITS_TO_LONGS(nbits) + 1]; \
- }
-
-#define WAPL_BITMAP(nbits) instantiate_wapl_bitmap(__LINE__, nbits)
-
-/*
- * Upcast any derived WAPL bitmap class to its base class
- */
-#define __wapl_upcast(base, derived) \
- do { \
- __typeof__(derived) p = derived; \
- base.pl = p->pl; \
- base.ul = p->ul; \
- base.nwords = ARRAY_SIZE(p->ul) - 1; \
- } while (0)
-
-/*
- * WAPL base class bitmap operations
- */
-void __wapl_bitmap_and(wapl_bitmap_t *dst,
- wapl_bitmap_t *src, wapl_bitmap_t *and);
-
-void __wapl_bitmap_or(wapl_bitmap_t *dst, wapl_bitmap_t *or);
-
-void __wapl_bitmap_set(wapl_bitmap_t *map, unsigned int bit);
-
-void __wapl_bitmap_clear(wapl_bitmap_t *map, unsigned int bit);
-
-/*
- * Generic WAPL bitmap operations
- */
-#define wapl_bitmap_zero(map) \
- ({ \
- __typeof__(map) p = map; \
- memset((void *)p, 0, sizeof(__typeof__(*p))); \
- })
-
-#define wapl_bitmap_copy(dst, src) \
- ({ \
- __typeof__(dst) d = dst; \
- __typeof__(src) s = src; \
- if (d != s) \
- memcpy((void *)d, (void *)s, \
- sizeof(__typeof__(*d))); \
- })
-
-#define wapl_bitmap_and(dst, src, and) \
- ({ \
- wapl_bitmap_t d, s, a; \
- __wapl_upcast(d, dst); \
- __wapl_upcast(s, src); \
- __wapl_upcast(a, and); \
- __wapl_bitmap_and(&d, &s, &a); \
- })
-
-#define wapl_bitmap_or(dst, src, or) \
- ({ \
- wapl_bitmap_t d, o; \
- wapl_bitmap_copy(dst, src); \
- __wapl_upcast(d, dst); \
- __wapl_upcast(o, or); \
- __wapl_bitmap_or(&d, &o); \
- })
-
-#define wapl_bitmap_set(map, bit) \
- ({ \
- wapl_bitmap_t b; \
- __wapl_upcast(b, map); \
- __wapl_bitmap_set(&b, bit); \
- })
-
-#define wapl_bitmap_clear(map, bit) \
- ({ \
- wapl_bitmap_t b; \
- __wapl_upcast(b, map); \
- __wapl_bitmap_clear(&b, bit); \
- })
-
-/*
- * Round robin iterator runs upon a WAPL bitmap:
- *
- * wapl_bitmap_iterator(iterator, WAPL bitmap);
- * for (iterator->start(); iterator->has_next(); ) {
- * unsigned int bit_index = iterator->next();
- * ...operations on this bit index...
- * }
- */
-typedef struct wapl_bitmap_iterator {
- int _start, _next, _nbits;
- wapl_bitmap_t _base;
-
- void (*start)(struct wapl_bitmap_iterator *this);
- bool (*has_next)(struct wapl_bitmap_iterator *this);
- unsigned int (*next)(struct wapl_bitmap_iterator *this);
-} wapl_bitmap_iterator_t;
-
-/*
- * WAPL bitmap iterator constructor
- */
-void __wapl_bitmap_iterator(wapl_bitmap_iterator_t *this);
-
-/*
- * Generic constructor accepts any derived WAPL bitmap class
- */
-#define wapl_bitmap_iterator(iterator, map) \
- ({ \
- __typeof__(iterator) __it = iterator; \
- __wapl_upcast(__it->_base, map); \
- __wapl_bitmap_iterator(__it); \
- })
-
-/* Sparse bitmap base class */
-typedef struct {
- unsigned int nbits;
- unsigned int *last, *pl, *il;
-} sparse_bitmap_t;
-
-/*
- * Sparse bitmap, lists all bit indexes directly as an array.
- * Expected to be significantly straightforward iteration.
- */
-#define instantiate_sparse_bitmap(line, nbits) \
- struct TOKENIZE(sparse_bitmap, line) { \
- unsigned int last; \
- unsigned int pl[nbits]; \
- unsigned int il[nbits]; \
- }
-
-#define SPARSE_BITMAP(nbits) instantiate_sparse_bitmap(__LINE__, nbits)
-
-/*
- * Upcast any derived sparse bitmap class to its base class
- */
-#define __sparse_upcast(base, derived) \
- do { \
- __typeof__(derived) p = derived; \
- base.pl = p->pl; \
- base.il = p->il; \
- base.last = &p->last; \
- base.nbits = ARRAY_SIZE(p->il); \
- } while (0)
-
-/*
- * Sparse base class bitmap operations
- */
-void __sparse_bitmap_set(sparse_bitmap_t *map, unsigned int bit);
-
-void __sparse_bitmap_clear(sparse_bitmap_t *map, unsigned int bit);
-
-/*
- * Generic sparse bitmap operations
- */
-#define sparse_bitmap_zero(map) \
- ({ \
- __typeof__(map) p = map; \
- memset((void *)p, 0, sizeof(__typeof__(*p))); \
- })
-
-#define sparse_bitmap_set(map, bit) \
- ({ \
- sparse_bitmap_t b; \
- __sparse_upcast(b, map); \
- __sparse_bitmap_set(&b, bit); \
- })
-
-#define sparse_bitmap_clear(map, bit) \
- ({ \
- sparse_bitmap_t b; \
- __sparse_upcast(b, map); \
- __sparse_bitmap_clear(&b, bit); \
- })
-
-/*
- * Round robin iterator runs upon a sparse bitmap:
- *
- * sparse_bitmap_iterator(iterator, SPARSE bitmap);
- * for (iterator->start(); iterator->has_next(); ) {
- * unsigned int bit_index = iterator->next();
- * ...operations on this bit index...
- * }
- */
-typedef struct sparse_bitmap_iterator {
- int _start, _next, _nbits;
- sparse_bitmap_t _base;
-
- void (*start)(struct sparse_bitmap_iterator *this);
- bool (*has_next)(struct sparse_bitmap_iterator *this);
- unsigned int (*next)(struct sparse_bitmap_iterator *this);
-} sparse_bitmap_iterator_t;
-
-/*
- * Sparse bitmap iterator constructor
- */
-void __sparse_bitmap_iterator(sparse_bitmap_iterator_t *this);
-
-/*
- * Generic constructor accepts any derived sparse bitmap class.
- */
-#define sparse_bitmap_iterator(iterator, map) \
- ({ \
- __typeof__(iterator) __it = iterator; \
- __sparse_upcast(__it->_base, map); \
- __sparse_bitmap_iterator(__it); \
- })
-
-/*
- * Raw bitmap atomic set and clear.
- */
-void raw_bitmap_set(unsigned long *map, unsigned int bit);
-
-void raw_bitmap_clear(unsigned long *map, unsigned int bit);
-
-/*
- * It will enter infinite loop incase that all bits are zero,
- * so please make sure the bitmap at least has one set.
- */
-static inline int __bitmap_wraparound_next(unsigned long *addr,
- unsigned int nbits, int start)
-{
- unsigned long tmp;
-
- if (start >= (int)nbits)
- start = 0;
-
- tmp = addr[BIT_WORD(start)];
-
- /* Handle 1st word. */
- tmp &= BITMAP_FIRST_WORD_MASK(start);
- start = start & ~(BITS_PER_LONG - 1);
-
- while (!tmp) {
- start += BITS_PER_LONG;
- if (start >= (int)nbits)
- start = 0;
-
- tmp = addr[BIT_WORD(start)];
- }
-
- start += __builtin_ffsl(tmp) - 1;
- return start;
-}
-
-/**
- * @}
- */
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp_buffer_internal.h b/platform/linux-generic/include/odp_buffer_internal.h
index 6c6ec970d..62caa776e 100644
--- a/platform/linux-generic/include/odp_buffer_internal.h
+++ b/platform/linux-generic/include/odp_buffer_internal.h
@@ -89,7 +89,6 @@ struct ODP_ALIGNED_CACHE odp_buffer_hdr_t {
odp_event_type_t _odp_buffer_event_type(odp_buffer_t buf);
void _odp_buffer_event_type_set(odp_buffer_t buf, int ev);
-int odp_buffer_snprint(char *str, uint32_t n, odp_buffer_t buf);
static inline odp_buffer_t buf_from_buf_hdr(odp_buffer_hdr_t *hdr)
{
diff --git a/platform/linux-generic/include/odp_classification_datamodel.h b/platform/linux-generic/include/odp_classification_datamodel.h
index 83063a853..ebd0107f9 100644
--- a/platform/linux-generic/include/odp_classification_datamodel.h
+++ b/platform/linux-generic/include/odp_classification_datamodel.h
@@ -20,6 +20,7 @@ extern "C" {
#include <odp/api/spinlock.h>
#include <odp/api/classification.h>
+#include <odp/api/debug.h>
#include <odp_pool_internal.h>
#include <odp_packet_internal.h>
#include <odp_packet_io_internal.h>
@@ -28,6 +29,8 @@ extern "C" {
/* Maximum Class Of Service Entry */
#define CLS_COS_MAX_ENTRY 64
+/* Invalid CoS index */
+#define CLS_COS_IDX_NONE CLS_COS_MAX_ENTRY
/* Maximum PMR Entry */
#define CLS_PMR_MAX_ENTRY 256
/* Maximum PMR Terms in a PMR Set */
@@ -49,6 +52,9 @@ extern "C" {
/* Max number of implementation created queues */
#define CLS_QUEUE_GROUP_MAX (CLS_COS_MAX_ENTRY * CLS_COS_QUEUE_MAX)
+/* CoS index is stored in odp_packet_hdr_t */
+ODP_STATIC_ASSERT(CLS_COS_MAX_ENTRY <= UINT16_MAX, "CoS_does_not_fit_16_bits");
+
typedef union {
/* All proto fields */
uint32_t all;
@@ -123,8 +129,9 @@ typedef struct pmr_term_value {
Class Of Service
*/
struct cos_s {
- odp_queue_t queue; /* Associated Queue */
+ odp_queue_t queue; /* Associated Queue */
odp_pool_t pool; /* Associated Buffer pool */
+ odp_pktin_vector_config_t vector; /* Packet vector config */
union pmr_u *pmr[CLS_PMR_PER_COS_MAX]; /* Chained PMR */
union cos_u *linked_cos[CLS_PMR_PER_COS_MAX]; /* Chained CoS with PMR*/
uint32_t valid; /* validity Flag */
diff --git a/platform/linux-generic/include/odp_classification_internal.h b/platform/linux-generic/include/odp_classification_internal.h
index 71ad7abf2..48ee0526e 100644
--- a/platform/linux-generic/include/odp_classification_internal.h
+++ b/platform/linux-generic/include/odp_classification_internal.h
@@ -25,6 +25,8 @@ extern "C" {
#include <odp_packet_io_internal.h>
#include <odp_classification_datamodel.h>
+cos_t *_odp_cos_entry_from_idx(uint32_t ndx);
+
/** Classification Internal function **/
/**
@@ -36,9 +38,9 @@ Start function for Packet Classifier
This function calls Classifier module internal functions for a given packet and
selects destination queue and packet pool based on selected PMR and CoS.
**/
-int cls_classify_packet(pktio_entry_t *entry, const uint8_t *base,
- uint16_t pkt_len, uint32_t seg_len, odp_pool_t *pool,
- odp_packet_hdr_t *pkt_hdr, odp_bool_t parse);
+int _odp_cls_classify_packet(pktio_entry_t *entry, const uint8_t *base,
+ uint16_t pkt_len, uint32_t seg_len, odp_pool_t *pool,
+ odp_packet_hdr_t *pkt_hdr, odp_bool_t parse);
/**
Packet IO classifier init
@@ -46,7 +48,7 @@ Packet IO classifier init
This function does initialization of classifier object associated with pktio.
This function should be called during pktio initialization.
**/
-int pktio_classifier_init(pktio_entry_t *pktio);
+int _odp_pktio_classifier_init(pktio_entry_t *pktio);
#ifdef __cplusplus
}
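_odp_cos_entry_from_idx() is the lookup half of the new 16-bit CoS index that classification records in the packet header (see the odp_packet_internal.h change later in this patch). A hedged sketch of how a completion path might resolve that index back to a CoS entry; the helper name and the exact semantics of an unmatched packet are assumptions:

    #include <stdint.h>
    #include <odp_classification_internal.h>

    /* Hypothetical helper: resolve the CoS entry recorded during
     * classification. CLS_COS_IDX_NONE is assumed to mark packets that
     * matched no CoS. */
    static inline cos_t *demo_packet_cos(uint16_t cos_idx)
    {
        if (cos_idx == CLS_COS_IDX_NONE)
            return NULL;

        return _odp_cos_entry_from_idx(cos_idx);
    }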
diff --git a/platform/linux-generic/include/odp_config_internal.h b/platform/linux-generic/include/odp_config_internal.h
index ac5114a9f..bd7f710d2 100644
--- a/platform/linux-generic/include/odp_config_internal.h
+++ b/platform/linux-generic/include/odp_config_internal.h
@@ -146,6 +146,12 @@ extern "C" {
*/
#define CONFIG_POOL_CACHE_MAX_SIZE 256
+/* Maximum packet vector size */
+#define CONFIG_PACKET_VECTOR_MAX_SIZE 256
+
+/* Enable pool statistics collection */
+#define CONFIG_POOL_STATISTICS 1
+
#ifdef __cplusplus
}
#endif
diff --git a/platform/linux-generic/include/odp_ethtool_rss.h b/platform/linux-generic/include/odp_ethtool_rss.h
index e942e3fd1..66221aa51 100644
--- a/platform/linux-generic/include/odp_ethtool_rss.h
+++ b/platform/linux-generic/include/odp_ethtool_rss.h
@@ -22,8 +22,8 @@ extern "C" {
*
* @returns Number of enabled hash protocols
*/
-int rss_conf_get_fd(int fd, const char *name,
- odp_pktin_hash_proto_t *hash_proto);
+int _odp_rss_conf_get_fd(int fd, const char *name,
+ odp_pktin_hash_proto_t *hash_proto);
/**
* Get supported RSS hash protocols of a packet socket
@@ -36,8 +36,8 @@ int rss_conf_get_fd(int fd, const char *name,
*
* @returns Number of supported hash protocols
*/
-int rss_conf_get_supported_fd(int fd, const char *name,
- odp_pktin_hash_proto_t *hash_proto);
+int _odp_rss_conf_get_supported_fd(int fd, const char *name,
+ odp_pktin_hash_proto_t *hash_proto);
/**
* Set RSS hash protocols of a packet socket
@@ -49,15 +49,15 @@ int rss_conf_get_supported_fd(int fd, const char *name,
* @retval 0 on success
* @retval <0 on failure
*/
-int rss_conf_set_fd(int fd, const char *name,
- const odp_pktin_hash_proto_t *proto);
+int _odp_rss_conf_set_fd(int fd, const char *name,
+ const odp_pktin_hash_proto_t *proto);
/**
* Print enabled RSS hash protocols
*
* @param hash_proto Hash protocols
*/
-void rss_conf_print(const odp_pktin_hash_proto_t *hash_proto);
+void _odp_rss_conf_print(const odp_pktin_hash_proto_t *hash_proto);
#ifdef __cplusplus
}
diff --git a/platform/linux-generic/include/odp_ethtool_stats.h b/platform/linux-generic/include/odp_ethtool_stats.h
index 04d4698ec..a8783149d 100644
--- a/platform/linux-generic/include/odp_ethtool_stats.h
+++ b/platform/linux-generic/include/odp_ethtool_stats.h
@@ -17,7 +17,7 @@ extern "C" {
/**
* Get ethtool statistics of a packet socket
*/
-int ethtool_stats_get_fd(int fd, const char *name, odp_pktio_stats_t *stats);
+int _odp_ethtool_stats_get_fd(int fd, const char *name, odp_pktio_stats_t *stats);
#ifdef __cplusplus
}
diff --git a/platform/linux-generic/include/odp_event_vector_internal.h b/platform/linux-generic/include/odp_event_vector_internal.h
new file mode 100644
index 000000000..d3322eff5
--- /dev/null
+++ b/platform/linux-generic/include/odp_event_vector_internal.h
@@ -0,0 +1,56 @@
+/* Copyright (c) 2020, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP event vector descriptor - implementation internal
+ */
+
+#ifndef ODP_EVENT_VECTOR_INTERNAL_H_
+#define ODP_EVENT_VECTOR_INTERNAL_H_
+
+#include <stdint.h>
+#include <odp/api/packet.h>
+#include <odp_buffer_internal.h>
+
+/**
+ * Internal event vector header
+ */
+typedef struct {
+ /* Common buffer header */
+ odp_buffer_hdr_t buf_hdr;
+
+ /* Event vector size */
+ uint32_t size;
+
+ /* Vector of packet handles */
+ odp_packet_t packet[0];
+
+} odp_event_vector_hdr_t;
+
+/**
+ * Return the vector header
+ */
+static inline odp_event_vector_hdr_t *_odp_packet_vector_hdr(odp_packet_vector_t pktv)
+{
+ return (odp_event_vector_hdr_t *)(uintptr_t)pktv;
+}
+
+/**
+ * Free packet vector and contained packets
+ */
+static inline void _odp_packet_vector_free_full(odp_packet_vector_t pktv)
+{
+ odp_event_vector_hdr_t *pktv_hdr = _odp_packet_vector_hdr(pktv);
+
+ if (pktv_hdr->size)
+ odp_packet_free_multi(pktv_hdr->packet, pktv_hdr->size);
+
+ odp_packet_vector_free(pktv);
+}
+
+#endif /* ODP_EVENT_VECTOR_INTERNAL_H_ */
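Since packet[0] is a flexible array member, the number of handles a vector can carry is set by how much room follows the header in the pool block, not by the struct itself. A rough sketch of the sizing arithmetic under that assumption (the pool allocation code is not part of this hunk; the types below are stand-ins):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the real internal types, for illustration only */
    typedef void *demo_packet_t;
    typedef struct { char data[64]; } demo_buf_hdr_t;

    typedef struct {
        demo_buf_hdr_t buf_hdr;   /* common buffer header */
        uint32_t size;            /* number of valid handles */
        demo_packet_t packet[];   /* flexible array of packet handles */
    } demo_event_vector_hdr_t;

    int main(void)
    {
        uint32_t max_size = 256;  /* e.g. CONFIG_PACKET_VECTOR_MAX_SIZE */

        /* The block must hold the header plus max_size packet handles */
        size_t block = sizeof(demo_event_vector_hdr_t) +
                       max_size * sizeof(demo_packet_t);

        printf("vector block size: %zu bytes\n", block);
        return 0;
    }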
diff --git a/platform/linux-generic/include/odp_ipsec_internal.h b/platform/linux-generic/include/odp_ipsec_internal.h
index 7cc672aca..311c0e50c 100644
--- a/platform/linux-generic/include/odp_ipsec_internal.h
+++ b/platform/linux-generic/include/odp_ipsec_internal.h
@@ -190,6 +190,25 @@ struct ipsec_sa_s {
};
} out;
};
+
+ struct {
+ odp_atomic_u64_t proto_err;
+ odp_atomic_u64_t auth_err;
+ odp_atomic_u64_t antireplay_err;
+ odp_atomic_u64_t alg_err;
+ odp_atomic_u64_t mtu_err;
+ odp_atomic_u64_t hard_exp_bytes_err;
+ odp_atomic_u64_t hard_exp_pkts_err;
+
+ /*
+ * Track error packets after the lifetime check is done.
+ * Required because the lifetime-tracking stats are also
+ * used for the SA's successful packet count.
+ */
+ odp_atomic_u64_t post_lifetime_err_pkts;
+ } stats;
+
+ odp_ipsec_sa_param_t param;
};
/**
@@ -250,12 +269,12 @@ int _odp_ipsec_sa_stats_precheck(ipsec_sa_t *ipsec_sa,
odp_ipsec_op_status_t *status);
/**
- * Update SA usage statistics, filling respective status for the packet.
+ * Update SA lifetime counters, filling respective status for the packet.
*
* @retval <0 if hard limits were breached
*/
-int _odp_ipsec_sa_stats_update(ipsec_sa_t *ipsec_sa, uint32_t len,
- odp_ipsec_op_status_t *status);
+int _odp_ipsec_sa_lifetime_update(ipsec_sa_t *ipsec_sa, uint32_t len,
+ odp_ipsec_op_status_t *status);
/* Run pre-check on sequence number of the packet.
*
@@ -285,6 +304,12 @@ uint16_t _odp_ipsec_sa_alloc_ipv4_id(ipsec_sa_t *ipsec_sa);
int _odp_ipsec_try_inline(odp_packet_t *pkt);
/**
+ * Get number of packets successfully processed by the SA
+ *
+ */
+uint64_t _odp_ipsec_sa_stats_pkts(ipsec_sa_t *sa);
+
+/**
* @}
*/
diff --git a/platform/linux-generic/include/odp_packet_internal.h b/platform/linux-generic/include/odp_packet_internal.h
index 83a6fdfed..2a961893b 100644
--- a/platform/linux-generic/include/odp_packet_internal.h
+++ b/platform/linux-generic/include/odp_packet_internal.h
@@ -33,7 +33,7 @@ extern "C" {
#include <stdint.h>
-/** Minimum segment length expected by packet_parse_common() */
+/** Minimum segment length expected by _odp_packet_parse_common() */
#define PACKET_PARSE_SEG_LEN 96
ODP_STATIC_ASSERT(sizeof(_odp_packet_input_flags_t) == sizeof(uint64_t),
@@ -126,6 +126,9 @@ typedef struct odp_packet_hdr_t {
/* Classifier mark */
uint16_t cls_mark;
+ /* Classifier handle index */
+ uint16_t cos;
+
union {
struct {
/* Result for crypto packet op */
@@ -235,6 +238,7 @@ static inline void copy_packet_cls_metadata(odp_packet_hdr_t *src_hdr,
{
dst_hdr->p = src_hdr->p;
dst_hdr->dst_queue = src_hdr->dst_queue;
+ dst_hdr->cos = src_hdr->cos;
dst_hdr->flow_hash = src_hdr->flow_hash;
dst_hdr->timestamp = src_hdr->timestamp;
dst_hdr->cls_mark = src_hdr->cls_mark;
@@ -288,9 +292,9 @@ int packet_alloc_multi(odp_pool_t pool_hdl, uint32_t len,
odp_packet_t pkt[], int max_num);
/* Perform packet parse up to a given protocol layer */
-int packet_parse_layer(odp_packet_hdr_t *pkt_hdr,
- odp_proto_layer_t layer,
- odp_proto_chksums_t chksums);
+int _odp_packet_parse_layer(odp_packet_hdr_t *pkt_hdr,
+ odp_proto_layer_t layer,
+ odp_proto_chksums_t chksums);
/* Reset parser metadata for a new parse */
static inline void packet_parse_reset(odp_packet_hdr_t *pkt_hdr, int all)
@@ -341,9 +345,9 @@ static inline void packet_set_ts(odp_packet_hdr_t *pkt_hdr, odp_time_t *ts)
}
}
-int packet_parse_common(packet_parser_t *pkt_hdr, const uint8_t *ptr,
- uint32_t pkt_len, uint32_t seg_len, int layer,
- odp_proto_chksums_t chksums);
+int _odp_packet_parse_common(packet_parser_t *pkt_hdr, const uint8_t *ptr,
+ uint32_t pkt_len, uint32_t seg_len, int layer,
+ odp_proto_chksums_t chksums);
int _odp_cls_parse(odp_packet_hdr_t *pkt_hdr, const uint8_t *parseptr);
diff --git a/platform/linux-generic/include/odp_packet_io_internal.h b/platform/linux-generic/include/odp_packet_io_internal.h
index 4f563de66..9254fad3c 100644
--- a/platform/linux-generic/include/odp_packet_io_internal.h
+++ b/platform/linux-generic/include/odp_packet_io_internal.h
@@ -104,6 +104,7 @@ struct pktio_entry {
/* Statistics counters used outside drivers */
struct {
odp_atomic_u64_t in_discards;
+ odp_atomic_u64_t out_discards;
} stats_extra;
/* Latest Tx timestamp */
odp_atomic_u64_t tx_ts;
@@ -126,6 +127,7 @@ struct pktio_entry {
struct {
odp_queue_t queue;
odp_pktin_queue_t pktin;
+ odp_pktin_vector_config_t vector;
} in_queue[PKTIO_MAX_QUEUES];
struct {
@@ -205,7 +207,7 @@ typedef struct pktio_if_ops {
const odp_pktout_queue_param_t *p);
} pktio_if_ops_t;
-extern void *pktio_entry_ptr[];
+extern void *_odp_pktio_entry_ptr[];
static inline pktio_entry_t *get_pktio_entry(odp_pktio_t pktio)
{
@@ -222,7 +224,7 @@ static inline pktio_entry_t *get_pktio_entry(odp_pktio_t pktio)
idx = odp_pktio_index(pktio);
- return pktio_entry_ptr[idx];
+ return _odp_pktio_entry_ptr[idx];
}
static inline int pktio_cls_enabled(pktio_entry_t *entry)
@@ -249,15 +251,15 @@ static inline void _odp_pktio_tx_ts_set(pktio_entry_t *entry)
extern const pktio_if_ops_t netmap_pktio_ops;
extern const pktio_if_ops_t dpdk_pktio_ops;
-extern const pktio_if_ops_t sock_mmsg_pktio_ops;
-extern const pktio_if_ops_t sock_mmap_pktio_ops;
-extern const pktio_if_ops_t loopback_pktio_ops;
+extern const pktio_if_ops_t _odp_sock_mmsg_pktio_ops;
+extern const pktio_if_ops_t _odp_sock_mmap_pktio_ops;
+extern const pktio_if_ops_t _odp_loopback_pktio_ops;
#ifdef _ODP_PKTIO_PCAP
-extern const pktio_if_ops_t pcap_pktio_ops;
+extern const pktio_if_ops_t _odp_pcap_pktio_ops;
#endif
-extern const pktio_if_ops_t tap_pktio_ops;
-extern const pktio_if_ops_t null_pktio_ops;
-extern const pktio_if_ops_t ipc_pktio_ops;
+extern const pktio_if_ops_t _odp_tap_pktio_ops;
+extern const pktio_if_ops_t _odp_null_pktio_ops;
+extern const pktio_if_ops_t _odp_ipc_pktio_ops;
extern const pktio_if_ops_t * const pktio_if_ops[];
/**
@@ -274,11 +276,11 @@ extern const pktio_if_ops_t * const pktio_if_ops[];
* @return >=0 on success, number of packets received
* @return <0 on failure
*/
-int sock_recv_mq_tmo_try_int_driven(const struct odp_pktin_queue_t queues[],
- unsigned num_q, unsigned *from,
- odp_packet_t packets[], int num,
- uint64_t usecs,
- int *trial_successful);
+int _odp_sock_recv_mq_tmo_try_int_driven(const struct odp_pktin_queue_t queues[],
+ unsigned int num_q, unsigned int *from,
+ odp_packet_t packets[], int num,
+ uint64_t usecs,
+ int *trial_successful);
#ifdef __cplusplus
}
diff --git a/platform/linux-generic/include/odp_packet_io_stats.h b/platform/linux-generic/include/odp_packet_io_stats.h
index 4ed46e0aa..22e3b5041 100644
--- a/platform/linux-generic/include/odp_packet_io_stats.h
+++ b/platform/linux-generic/include/odp_packet_io_stats.h
@@ -17,12 +17,12 @@ extern "C" {
#include <odp_packet_io_internal.h>
#include <odp_packet_io_stats_common.h>
-int sock_stats_fd(pktio_entry_t *pktio_entry,
- odp_pktio_stats_t *stats,
- int fd);
-int sock_stats_reset_fd(pktio_entry_t *pktio_entry, int fd);
+int _odp_sock_stats_fd(pktio_entry_t *pktio_entry,
+ odp_pktio_stats_t *stats,
+ int fd);
+int _odp_sock_stats_reset_fd(pktio_entry_t *pktio_entry, int fd);
-pktio_stats_type_t sock_stats_type_fd(pktio_entry_t *pktio_entry, int fd);
+pktio_stats_type_t _odp_sock_stats_type_fd(pktio_entry_t *pktio_entry, int fd);
#ifdef __cplusplus
}
diff --git a/platform/linux-generic/include/odp_pool_internal.h b/platform/linux-generic/include/odp_pool_internal.h
index 007cb7b3f..a0b4591e3 100644
--- a/platform/linux-generic/include/odp_pool_internal.h
+++ b/platform/linux-generic/include/odp_pool_internal.h
@@ -73,6 +73,7 @@ typedef struct pool_t {
uint32_t block_size;
uint32_t block_offset;
uint8_t *base_addr;
+ uint8_t *max_addr;
uint8_t *uarea_base_addr;
/* Used by DPDK zero-copy pktio */
@@ -83,6 +84,14 @@ typedef struct pool_t {
pool_destroy_cb_fn ext_destroy;
void *ext_desc;
+ struct ODP_CACHE_ALIGNED {
+ odp_atomic_u64_t alloc_ops;
+ odp_atomic_u64_t alloc_fails;
+ odp_atomic_u64_t free_ops;
+ odp_atomic_u64_t cache_alloc_ops;
+ odp_atomic_u64_t cache_free_ops;
+ } stats;
+
pool_cache_t local_cache[ODP_THREAD_COUNT_MAX];
odp_shm_t ring_shm;
@@ -152,6 +161,7 @@ static inline odp_buffer_hdr_t *buf_hdr_from_index_u32(uint32_t u32)
int buffer_alloc_multi(pool_t *pool, odp_buffer_hdr_t *buf_hdr[], int num);
void buffer_free_multi(odp_buffer_hdr_t *buf_hdr[], int num_free);
+int _odp_buffer_is_valid(odp_buffer_t buf);
#ifdef __cplusplus
}
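The new per-pool stats block is a set of cache-aligned 64-bit atomics; with CONFIG_POOL_STATISTICS enabled, the alloc and free fast paths would bump them with ODP's atomic helpers. A minimal sketch of the accounting for one allocation attempt (the function and its placement are assumptions; only the counter roles come from the patch):

    #include <odp_api.h>

    /* Hypothetical accounting for one allocation attempt: count the op and
     * record a failure when fewer buffers were returned than requested. */
    static inline void demo_pool_stats_alloc(odp_atomic_u64_t *alloc_ops,
                                             odp_atomic_u64_t *alloc_fails,
                                             int requested, int allocated)
    {
        odp_atomic_inc_u64(alloc_ops);

        if (allocated < requested)
            odp_atomic_inc_u64(alloc_fails);
    }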
diff --git a/platform/linux-generic/include/odp_queue_basic_internal.h b/platform/linux-generic/include/odp_queue_basic_internal.h
index 4cfc6770b..25e35b22c 100644
--- a/platform/linux-generic/include/odp_queue_basic_internal.h
+++ b/platform/linux-generic/include/odp_queue_basic_internal.h
@@ -87,7 +87,7 @@ typedef struct queue_global_t {
} queue_global_t;
-extern queue_global_t *queue_glb;
+extern queue_global_t *_odp_queue_glb;
static inline uint32_t queue_to_index(odp_queue_t handle)
{
@@ -98,7 +98,7 @@ static inline uint32_t queue_to_index(odp_queue_t handle)
static inline queue_entry_t *qentry_from_index(uint32_t queue_id)
{
- return &queue_glb->queue[queue_id];
+ return &_odp_queue_glb->queue[queue_id];
}
static inline odp_queue_t queue_from_index(uint32_t queue_id)
@@ -111,13 +111,13 @@ static inline queue_entry_t *qentry_from_handle(odp_queue_t handle)
return (queue_entry_t *)(uintptr_t)handle;
}
-void queue_spsc_init(queue_entry_t *queue, uint32_t queue_size);
+void _odp_queue_spsc_init(queue_entry_t *queue, uint32_t queue_size);
/* Functions for schedulers */
-void sched_queue_set_status(uint32_t queue_index, int status);
-int sched_queue_deq(uint32_t queue_index, odp_event_t ev[], int num,
- int update_status);
-int sched_queue_empty(uint32_t queue_index);
+void _odp_sched_queue_set_status(uint32_t queue_index, int status);
+int _odp_sched_queue_deq(uint32_t queue_index, odp_event_t ev[], int num,
+ int update_status);
+int _odp_sched_queue_empty(uint32_t queue_index);
#ifdef __cplusplus
}
diff --git a/platform/linux-generic/include/odp_queue_if.h b/platform/linux-generic/include/odp_queue_if.h
index 5fe28dac1..fa92a4171 100644
--- a/platform/linux-generic/include/odp_queue_if.h
+++ b/platform/linux-generic/include/odp_queue_if.h
@@ -61,7 +61,7 @@ typedef struct {
queue_deq_multi_fn_t orig_deq_multi;
} queue_fn_t;
-extern const queue_fn_t *queue_fn;
+extern const queue_fn_t *_odp_queue_fn;
#ifdef __cplusplus
}
diff --git a/platform/linux-generic/include/odp_queue_lf.h b/platform/linux-generic/include/odp_queue_lf.h
index 28ada8e6d..9419812cf 100644
--- a/platform/linux-generic/include/odp_queue_lf.h
+++ b/platform/linux-generic/include/odp_queue_lf.h
@@ -22,13 +22,13 @@ typedef struct {
} queue_lf_func_t;
-uint32_t queue_lf_init_global(uint32_t *queue_lf_size,
- queue_lf_func_t *lf_func);
-void queue_lf_term_global(void);
-void *queue_lf_create(queue_entry_t *queue);
-void queue_lf_destroy(void *queue_lf);
-uint32_t queue_lf_length(void *queue_lf);
-uint32_t queue_lf_max_length(void);
+uint32_t _odp_queue_lf_init_global(uint32_t *queue_lf_size,
+ queue_lf_func_t *lf_func);
+void _odp_queue_lf_term_global(void);
+void *_odp_queue_lf_create(queue_entry_t *queue);
+void _odp_queue_lf_destroy(void *queue_lf);
+uint32_t _odp_queue_lf_length(void *queue_lf);
+uint32_t _odp_queue_lf_max_length(void);
#ifdef __cplusplus
}
diff --git a/platform/linux-generic/include/odp_queue_scalable_internal.h b/platform/linux-generic/include/odp_queue_scalable_internal.h
index 8b9eac435..9f326a9ee 100644
--- a/platform/linux-generic/include/odp_queue_scalable_internal.h
+++ b/platform/linux-generic/include/odp_queue_scalable_internal.h
@@ -62,7 +62,7 @@ int _odp_queue_deq(sched_elem_t *q, odp_buffer_hdr_t *buf_hdr[], int num);
int _odp_queue_deq_sc(sched_elem_t *q, odp_event_t *evp, int num);
int _odp_queue_deq_mc(sched_elem_t *q, odp_event_t *evp, int num);
int _odp_queue_enq_sp(sched_elem_t *q, odp_buffer_hdr_t *buf_hdr[], int num);
-queue_entry_t *qentry_from_ext(odp_queue_t handle);
+queue_entry_t *_odp_qentry_from_ext(odp_queue_t handle);
/* Round up memory size to next cache line size to
* align all memory addresses on cache line boundary.
@@ -79,7 +79,7 @@ static inline void *shm_pool_alloc_align(_odp_ishm_pool_t *pool, uint32_t size)
static inline uint32_t queue_to_id(odp_queue_t handle)
{
- return qentry_from_ext(handle)->s.index;
+ return _odp_qentry_from_ext(handle)->s.index;
}
static inline queue_entry_t *qentry_from_int(odp_queue_t handle)
diff --git a/platform/linux-generic/include/odp_ring_internal.h b/platform/linux-generic/include/odp_ring_internal.h
index af6b3294e..6ac6d0ee3 100644
--- a/platform/linux-generic/include/odp_ring_internal.h
+++ b/platform/linux-generic/include/odp_ring_internal.h
@@ -72,6 +72,7 @@ static inline int cas_mo_u32(odp_atomic_u32_t *atom, uint32_t *old_val,
#undef _RING_DEQ_MULTI
#undef _RING_ENQ
#undef _RING_ENQ_MULTI
+#undef _RING_LEN
/* Remap generic types and function names to ring data type specific ones. One
* should never use the generic names (e.g. _RING_INIT) directly. */
@@ -85,6 +86,7 @@ static inline int cas_mo_u32(odp_atomic_u32_t *atom, uint32_t *old_val,
#define _RING_DEQ_MULTI ring_u32_deq_multi
#define _RING_ENQ ring_u32_enq
#define _RING_ENQ_MULTI ring_u32_enq_multi
+ #define _RING_LEN ring_u32_len
#elif _ODP_RING_TYPE == _ODP_RING_TYPE_PTR
#define _ring_gen_t ring_ptr_t
#define _ring_data_t void *
@@ -94,6 +96,7 @@ static inline int cas_mo_u32(odp_atomic_u32_t *atom, uint32_t *old_val,
#define _RING_DEQ_MULTI ring_ptr_deq_multi
#define _RING_ENQ ring_ptr_enq
#define _RING_ENQ_MULTI ring_ptr_enq_multi
+ #define _RING_LEN ring_ptr_len
#endif
/* Initialize ring */
@@ -244,6 +247,14 @@ static inline void _RING_ENQ_MULTI(_ring_gen_t *ring, uint32_t mask,
odp_atomic_store_rel_u32(&ring->r.w_tail, old_head + num);
}
+static inline uint32_t _RING_LEN(_ring_gen_t *ring)
+{
+ uint32_t head = odp_atomic_load_u32(&ring->r.r_head);
+ uint32_t tail = odp_atomic_load_u32(&ring->r.w_tail);
+
+ return tail - head;
+}
+
#ifdef __cplusplus
}
#endif
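_RING_LEN() works because the reader head and writer tail are free-running 32-bit counters: unsigned subtraction is exact modulo 2^32, so tail - head gives the element count even after either counter wraps. A small standalone check of that property:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Free-running counters just below and just above the wrap point */
        uint32_t r_head = 0xfffffffdu; /* reads completed */
        uint32_t w_tail = 0x00000003u; /* writes completed (wrapped) */

        /* Same arithmetic as _RING_LEN() */
        uint32_t len = w_tail - r_head;

        printf("ring length = %u\n", len); /* prints 6 */
        return 0;
    }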
diff --git a/platform/linux-generic/include/odp_schedule_if.h b/platform/linux-generic/include/odp_schedule_if.h
index ef1bbf33b..da240dca3 100644
--- a/platform/linux-generic/include/odp_schedule_if.h
+++ b/platform/linux-generic/include/odp_schedule_if.h
@@ -78,13 +78,13 @@ typedef struct schedule_fn_t {
} schedule_fn_t;
/* Interface towards the scheduler */
-extern const schedule_fn_t *sched_fn;
+extern const schedule_fn_t *_odp_sched_fn;
/* Interface for the scheduler */
-int sched_cb_pktin_poll(int pktio_index, int pktin_index,
- odp_buffer_hdr_t *hdr_tbl[], int num);
-int sched_cb_pktin_poll_one(int pktio_index, int rx_queue, odp_event_t evts[]);
-void sched_cb_pktio_stop_finalize(int pktio_index);
+int _odp_sched_cb_pktin_poll(int pktio_index, int pktin_index,
+ odp_buffer_hdr_t *hdr_tbl[], int num);
+int _odp_sched_cb_pktin_poll_one(int pktio_index, int rx_queue, odp_event_t evts[]);
+void _odp_sched_cb_pktio_stop_finalize(int pktio_index);
/* For debugging */
extern int _odp_schedule_configured;
diff --git a/platform/linux-generic/include/odp_schedule_scalable.h b/platform/linux-generic/include/odp_schedule_scalable.h
index 591b04471..c5e6a2880 100644
--- a/platform/linux-generic/include/odp_schedule_scalable.h
+++ b/platform/linux-generic/include/odp_schedule_scalable.h
@@ -142,9 +142,9 @@ typedef struct ODP_ALIGNED_CACHE {
uint32_t loop_cnt; /*Counter to check pktio ingress queue dead loop */
} sched_scalable_thread_state_t;
-void sched_update_enq(sched_elem_t *q, uint32_t actual);
-void sched_update_enq_sp(sched_elem_t *q, uint32_t actual);
-sched_queue_t *sched_queue_add(odp_schedule_group_t grp, uint32_t prio);
-void sched_queue_rem(odp_schedule_group_t grp, uint32_t prio);
+void _odp_sched_update_enq(sched_elem_t *q, uint32_t actual);
+void _odp_sched_update_enq_sp(sched_elem_t *q, uint32_t actual);
+sched_queue_t *_odp_sched_queue_add(odp_schedule_group_t grp, uint32_t prio);
+void _odp_sched_queue_rem(odp_schedule_group_t grp, uint32_t prio);
#endif /* ODP_SCHEDULE_SCALABLE_H */
diff --git a/platform/linux-generic/include/odp_schedule_scalable_ordered.h b/platform/linux-generic/include/odp_schedule_scalable_ordered.h
index 7e1984f6f..3fa81f750 100644
--- a/platform/linux-generic/include/odp_schedule_scalable_ordered.h
+++ b/platform/linux-generic/include/odp_schedule_scalable_ordered.h
@@ -110,15 +110,15 @@ struct ODP_ALIGNED_CACHE reorder_context {
queue_entry_t *destq[RC_EVT_SIZE];
};
-reorder_window_t *rwin_alloc(_odp_ishm_pool_t *pool,
- unsigned lock_count);
-int rwin_free(_odp_ishm_pool_t *pool, reorder_window_t *rwin);
-bool rwin_reserve(reorder_window_t *rwin, uint32_t *sn);
-bool rwin_reserve_sc(reorder_window_t *rwin, uint32_t *sn);
-void rwin_unreserve_sc(reorder_window_t *rwin, uint32_t sn);
-void rctx_init(reorder_context_t *rctx, uint16_t idx,
- reorder_window_t *rwin, uint32_t sn);
-void rctx_release(reorder_context_t *rctx);
-int rctx_save(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num);
+reorder_window_t *_odp_rwin_alloc(_odp_ishm_pool_t *pool,
+ unsigned int lock_count);
+int _odp_rwin_free(_odp_ishm_pool_t *pool, reorder_window_t *rwin);
+bool _odp_rwin_reserve(reorder_window_t *rwin, uint32_t *sn);
+bool _odp_rwin_reserve_sc(reorder_window_t *rwin, uint32_t *sn);
+void _odp_rwin_unreserve_sc(reorder_window_t *rwin, uint32_t sn);
+void _odp_rctx_init(reorder_context_t *rctx, uint16_t idx,
+ reorder_window_t *rwin, uint32_t sn);
+void _odp_rctx_release(reorder_context_t *rctx);
+int _odp_rctx_save(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num);
#endif /* ODP_SCHEDULE_SCALABLE_ORDERED_H */
diff --git a/platform/linux-generic/include/odp_socket_common.h b/platform/linux-generic/include/odp_socket_common.h
index 0a9704076..02cebdf85 100644
--- a/platform/linux-generic/include/odp_socket_common.h
+++ b/platform/linux-generic/include/odp_socket_common.h
@@ -32,37 +32,37 @@ ethaddrs_equal(unsigned char mac_a[], unsigned char mac_b[])
/**
* Read the MAC address from a packet socket
*/
-int mac_addr_get_fd(int fd, const char *name, unsigned char mac_dst[]);
+int _odp_mac_addr_get_fd(int fd, const char *name, unsigned char mac_dst[]);
/**
* Read the MTU from a packet socket
*/
-uint32_t mtu_get_fd(int fd, const char *name);
+uint32_t _odp_mtu_get_fd(int fd, const char *name);
/**
* Set a packet socket MTU
*/
-int mtu_set_fd(int fd, const char *name, int mtu);
+int _odp_mtu_set_fd(int fd, const char *name, int mtu);
/**
* Enable/Disable promisc mode for a packet socket
*/
-int promisc_mode_set_fd(int fd, const char *name, int enable);
+int _odp_promisc_mode_set_fd(int fd, const char *name, int enable);
/**
* Return promisc mode of a packet socket
*/
-int promisc_mode_get_fd(int fd, const char *name);
+int _odp_promisc_mode_get_fd(int fd, const char *name);
/**
* Return link status of a packet socket (up/down)
*/
-int link_status_fd(int fd, const char *name);
+int _odp_link_status_fd(int fd, const char *name);
/**
* Read link information from a packet socket
*/
-int link_info_fd(int fd, const char *name, odp_pktio_link_info_t *info);
+int _odp_link_info_fd(int fd, const char *name, odp_pktio_link_info_t *info);
#ifdef __cplusplus
}
diff --git a/platform/linux-generic/include/odp_sysfs_stats.h b/platform/linux-generic/include/odp_sysfs_stats.h
index bf9f4f79f..4bcd2b7ff 100644
--- a/platform/linux-generic/include/odp_sysfs_stats.h
+++ b/platform/linux-generic/include/odp_sysfs_stats.h
@@ -14,8 +14,8 @@ extern "C" {
#include <odp/api/packet_io_stats.h>
#include <odp_packet_io_internal.h>
-int sysfs_stats(pktio_entry_t *pktio_entry,
- odp_pktio_stats_t *stats);
+int _odp_sysfs_stats(pktio_entry_t *pktio_entry,
+ odp_pktio_stats_t *stats);
#ifdef __cplusplus
}
diff --git a/platform/linux-generic/include/odp_sysinfo_internal.h b/platform/linux-generic/include/odp_sysinfo_internal.h
index 2b1f04ca6..81bfd045f 100644
--- a/platform/linux-generic/include/odp_sysinfo_internal.h
+++ b/platform/linux-generic/include/odp_sysinfo_internal.h
@@ -16,10 +16,10 @@ extern "C" {
#include <inttypes.h>
#include <string.h>
-int cpuinfo_parser(FILE *file, system_info_t *sysinfo);
+int _odp_cpuinfo_parser(FILE *file, system_info_t *sysinfo);
uint64_t odp_cpu_hz_current(int id);
uint64_t odp_cpu_arch_hz_current(int id);
-void sys_info_print_arch(void);
+void _odp_sys_info_print_arch(void);
static inline int _odp_dummy_cpuinfo(system_info_t *sysinfo)
{
diff --git a/platform/linux-generic/m4/configure.m4 b/platform/linux-generic/m4/configure.m4
index c084d844b..d4aeb455f 100644
--- a/platform/linux-generic/m4/configure.m4
+++ b/platform/linux-generic/m4/configure.m4
@@ -42,6 +42,8 @@ AC_CONFIG_FILES([platform/linux-generic/Makefile
platform/linux-generic/dumpconfig/Makefile
platform/linux-generic/test/Makefile
platform/linux-generic/test/example/Makefile
+ platform/linux-generic/test/example/classifier/Makefile
+ platform/linux-generic/test/example/generator/Makefile
platform/linux-generic/test/example/l2fwd_simple/Makefile
platform/linux-generic/test/example/l3fwd/Makefile
platform/linux-generic/test/example/packet/Makefile
diff --git a/platform/linux-generic/odp_bitmap.c b/platform/linux-generic/odp_bitmap.c
deleted file mode 100644
index aa8734686..000000000
--- a/platform/linux-generic/odp_bitmap.c
+++ /dev/null
@@ -1,315 +0,0 @@
-/* Copyright (c) 2016-2018, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <string.h>
-#include <unistd.h>
-#include <odp/api/std_types.h>
-#include <odp/api/byteorder.h>
-#include <odp_bitmap_internal.h>
-
-/*
- * WAPL base class bitmap operations
- */
-static inline void __wapl_add_pos(
- wapl_bitmap_t *map, unsigned int p)
-{
- unsigned int s, k = 0;
- unsigned int *pl = map->pl;
-
- while (pl[k] && p > pl[k])
- k++;
-
- if (p == pl[k])
- return;
-
- /* sorted insertion */
- for (; pl[k] && p < pl[k]; k++) {
- s = pl[k];
- pl[k] = p;
- p = s;
- }
-
- if (k < map->nwords)
- pl[k++] = p;
-
- pl[k] = 0;
-}
-
-static inline void __wapl_remove_pos(
- wapl_bitmap_t *map, unsigned int p)
-{
- unsigned int k = 0;
- unsigned int *pl = map->pl;
-
- while (pl[k] && p != pl[k])
- k++;
-
- for (; pl[k]; k++)
- pl[k] = pl[k + 1];
-}
-
-void __wapl_bitmap_and(wapl_bitmap_t *dst,
- wapl_bitmap_t *src, wapl_bitmap_t *and)
-{
- unsigned int k = 0, p;
- unsigned int *pl = src->pl;
-
- while ((p = *pl++) != 0) {
- dst->ul[p] = src->ul[p] & and->ul[p];
- if (dst->ul[p])
- dst->pl[k++] = p;
- }
-
- dst->pl[k] = 0;
-}
-
-void __wapl_bitmap_or(wapl_bitmap_t *dst, wapl_bitmap_t *or)
-{
- unsigned int p;
- unsigned int *pl = or->pl;
-
- while ((p = *pl++) != 0) {
- if (dst->ul[p] == 0)
- __wapl_add_pos(dst, p);
-
- dst->ul[p] |= or->ul[p];
- }
-}
-
-void __wapl_bitmap_set(wapl_bitmap_t *map, unsigned int bit)
-{
- unsigned int p = BIT_WORD(bit) + 1;
- unsigned long set = 1UL << (bit & (BITS_PER_LONG - 1));
-
- if (p > map->nwords)
- return;
-
- if (map->ul[p] == 0)
- __wapl_add_pos(map, p);
-
- map->ul[p] |= set;
-}
-
-void __wapl_bitmap_clear(wapl_bitmap_t *map, unsigned int bit)
-{
- unsigned int p = BIT_WORD(bit) + 1;
- unsigned long clear = 1UL << (bit & (BITS_PER_LONG - 1));
-
- if (p > map->nwords)
- return;
-
- map->ul[p] &= ~clear;
-
- if (map->ul[p] == 0)
- __wapl_remove_pos(map, p);
-}
-
-/*
- * WAPL bitmap iterator implementation
- */
-static void __wapl_iterator_start(wapl_bitmap_iterator_t *this)
-{
- this->_nbits = this->_base.nwords * BITS_PER_LONG;
-
- /* Advance to next queue index to start this
- * new round iteration.
- */
- if (this->_base.pl[0] == 0)
- this->_start = -1;
- else
- this->_start = __bitmap_wraparound_next(
- &this->_base.ul[1], this->_nbits, this->_start + 1);
-
- this->_next = this->_start;
-}
-
-static bool __wapl_iterator_has_next(wapl_bitmap_iterator_t *this)
-{
- return (this->_next != -1);
-}
-
-static unsigned int __wapl_iterator_next(wapl_bitmap_iterator_t *this)
-{
- int next = this->_next;
-
- this->_next = __bitmap_wraparound_next(
- &this->_base.ul[1], this->_nbits, this->_next + 1);
-
- if (this->_next == this->_start)
- this->_next = -1;
-
- return next;
-}
-
-void __wapl_bitmap_iterator(wapl_bitmap_iterator_t *this)
-{
- this->start = __wapl_iterator_start;
- this->has_next = __wapl_iterator_has_next;
- this->next = __wapl_iterator_next;
-
- this->_start = -1;
- this->_next = this->_start;
-}
-
-/*
- * Sparse base class bitmap operations
- */
-void __sparse_bitmap_set(sparse_bitmap_t *map, unsigned int bit)
-{
- unsigned int last = *map->last;
-
- /* Index exceeds */
- if (bit >= map->nbits)
- return;
-
- /* Full bitmap */
- if (last >= map->nbits)
- return;
-
- /* Bit was not set previously,
- * also record where we set the bit
- */
- if (!map->pl[bit]) {
- map->il[last++] = bit;
- map->pl[bit] = last;
-
- *map->last = last;
- }
-}
-
-void __sparse_bitmap_clear(sparse_bitmap_t *map, unsigned int bit)
-{
- unsigned int p, i;
- unsigned int last = *map->last;
-
- /* Index exceeds */
- if (bit >= map->nbits)
- return;
-
- /* Empty bitmap */
- if (last == 0)
- return;
-
- /* Bit was set previously */
- if (map->pl[bit]) {
- p = map->pl[bit] - 1;
- map->pl[bit] = 0;
-
- last--;
- *map->last = last;
-
- /* Fill the hole with the latest index */
- if (p < last) {
- i = map->il[last];
- map->pl[i] = p + 1;
- map->il[p] = i;
- }
- }
-}
-
-/*
- * Sparse bitmap iterator implementation
- */
-static void __sparse_iterator_start(sparse_bitmap_iterator_t *this)
-{
- this->_nbits = (int)*this->_base.last;
-
- /* Advance to next queue index to start this
- * new round iteration.
- */
- if (this->_nbits == 0)
- this->_start = -1;
- else
- this->_start = (this->_start + 1) & (this->_nbits - 1);
-
- this->_next = this->_start;
-}
-
-static bool __sparse_iterator_has_next(sparse_bitmap_iterator_t *this)
-{
- return (this->_next != -1);
-}
-
-static unsigned int __sparse_iterator_next(sparse_bitmap_iterator_t *this)
-{
- int next = this->_next;
-
- this->_next = (this->_next + 1) & (this->_nbits - 1);
- if (this->_next == this->_start)
- this->_next = -1;
-
- return this->_base.il[next];
-}
-
-void __sparse_bitmap_iterator(sparse_bitmap_iterator_t *this)
-{
- this->start = __sparse_iterator_start;
- this->has_next = __sparse_iterator_has_next;
- this->next = __sparse_iterator_next;
-
- this->_start = -1;
- this->_next = this->_start;
-}
-
-/*
- * Generic byte-width atomic set/clear
- */
-static inline void atomic_byte_set(
- unsigned char *addr, unsigned int bit)
-{
- unsigned char load, store;
- unsigned char set = 1 << (bit & (BITS_PER_BYTE - 1));
-
- do {
- load = *addr;
- store = load | set;
- } while (!__atomic_compare_exchange_n(addr, &load, store,
- 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED));
-}
-
-static inline void atomic_byte_clear(
- unsigned char *addr, unsigned int bit)
-{
- unsigned char load, store;
- unsigned char clear = 1 << (bit & (BITS_PER_BYTE - 1));
-
- do {
- load = *addr;
- store = load & ~clear;
- } while (!__atomic_compare_exchange_n(addr, &load, store,
- 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED));
-}
-
-static inline unsigned char *__bit_byte(
- unsigned long *word, unsigned int bit)
-{
- unsigned int i;
- unsigned char *b;
-
- b = (unsigned char *)word;
-
- i = bit & (BITS_PER_LONG - 1);
- i = i / BITS_PER_BYTE;
-
-#if (ODP_BYTE_ORDER == ODP_BIG_ENDIAN)
- i = BYTES_PER_LONG - 1 - i;
-#endif
- return &b[i];
-}
-
-void raw_bitmap_set(unsigned long *map, unsigned int bit)
-{
- unsigned long *p = map + BIT_WORD(bit);
-
- atomic_byte_set(__bit_byte(p, bit), bit);
-}
-
-void raw_bitmap_clear(unsigned long *map, unsigned int bit)
-{
- unsigned long *p = map + BIT_WORD(bit);
-
- atomic_byte_clear(__bit_byte(p, bit), bit);
-}
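
The file removed above implemented two bitmap flavours: a WAPL bitmap that keeps a sorted position list of non-empty words, and a sparse bitmap that pairs a packed index list (il[]) with a reverse position lookup (pl[]) so set bits can be iterated and cleared without scanning words. A minimal standalone sketch of that sparse index-list technique (illustrative names and sizes, not the removed ODP code itself):

#include <stdio.h>

#define NBITS 16

/* pl[bit] holds (index into il) + 1, or 0 when the bit is clear */
static unsigned pl[NBITS];
/* il[] is a packed list of the currently set bit numbers */
static unsigned il[NBITS];
static unsigned last; /* number of set bits */

static void sparse_set(unsigned bit)
{
    if (bit >= NBITS || last >= NBITS || pl[bit])
        return;

    il[last] = bit;
    pl[bit] = ++last;
}

static void sparse_clear(unsigned bit)
{
    unsigned p, i;

    if (bit >= NBITS || last == 0 || !pl[bit])
        return;

    p = pl[bit] - 1;
    pl[bit] = 0;
    last--;

    /* Fill the hole left in the packed list with the last entry */
    if (p < last) {
        i = il[last];
        il[p] = i;
        pl[i] = p + 1;
    }
}

int main(void)
{
    unsigned n;

    sparse_set(3);
    sparse_set(9);
    sparse_set(12);
    sparse_clear(9);

    /* Iterate only the set bits, no word scanning needed */
    for (n = 0; n < last; n++)
        printf("bit %u is set\n", il[n]);

    return 0;
}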
diff --git a/platform/linux-generic/odp_buffer.c b/platform/linux-generic/odp_buffer.c
index 54b025779..4f06ce2f0 100644
--- a/platform/linux-generic/odp_buffer.c
+++ b/platform/linux-generic/odp_buffer.c
@@ -34,42 +34,26 @@ uint32_t odp_buffer_size(odp_buffer_t buf)
return pool->seg_len;
}
-int odp_buffer_snprint(char *str, uint32_t n, odp_buffer_t buf)
+void odp_buffer_print(odp_buffer_t buf)
{
odp_buffer_hdr_t *hdr;
- pool_t *pool;
int len = 0;
+ int max_len = 512;
+ int n = max_len - 1;
+ char str[max_len];
if (!odp_buffer_is_valid(buf)) {
- ODP_PRINT("Buffer is not valid.\n");
- return len;
+ ODP_ERR("Buffer is not valid.\n");
+ return;
}
hdr = buf_hdl_to_hdr(buf);
- pool = hdr->pool_ptr;
-
- len += snprintf(&str[len], n - len,
- "Buffer\n");
- len += snprintf(&str[len], n - len,
- " pool %" PRIu64 "\n",
- odp_pool_to_u64(pool->pool_hdl));
- len += snprintf(&str[len], n - len,
- " addr %p\n", hdr->base_data);
- len += snprintf(&str[len], n - len,
- " size %" PRIu32 "\n", odp_buffer_size(buf));
- len += snprintf(&str[len], n - len,
- " type %i\n", hdr->type);
-
- return len;
-}
-
-void odp_buffer_print(odp_buffer_t buf)
-{
- int max_len = 512;
- char str[max_len];
- int len;
- len = odp_buffer_snprint(str, max_len - 1, buf);
+ len += snprintf(&str[len], n - len, "Buffer\n------\n");
+ len += snprintf(&str[len], n - len, " pool index %u\n", hdr->index.pool);
+ len += snprintf(&str[len], n - len, " buffer index %u\n", hdr->index.buffer);
+ len += snprintf(&str[len], n - len, " addr %p\n", hdr->base_data);
+ len += snprintf(&str[len], n - len, " size %u\n", odp_buffer_size(buf));
str[len] = 0;
ODP_PRINT("\n%s\n", str);
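
odp_buffer_print() now formats into a fixed 512-byte stack buffer with chained snprintf() calls instead of going through the removed odp_buffer_snprint(). When accumulating output this way, snprintf() returns the length that would have been written, so the running length needs clamping if the text could ever exceed the buffer; a generic, hedged helper sketch of that guard (illustrative code, not part of the patch):

#include <stdarg.h>
#include <stdio.h>

/* Append formatted text to buf of total size 'size', never writing past
 * the end. 'len' tracks the used length and is clamped on truncation. */
static int buf_append(char *buf, int size, int len, const char *fmt, ...)
{
    va_list ap;
    int ret;

    if (len >= size - 1)
        return len;

    va_start(ap, fmt);
    ret = vsnprintf(&buf[len], size - len, fmt, ap);
    va_end(ap);

    if (ret < 0)
        return len;
    if (ret > size - 1 - len)
        return size - 1; /* output was truncated */
    return len + ret;
}

int main(void)
{
    char str[64];
    int len = 0;

    len = buf_append(str, sizeof(str), len, "Buffer\n------\n");
    len = buf_append(str, sizeof(str), len, "  size %u\n", 1024u);
    str[len] = 0;

    printf("%s", str);
    return 0;
}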
diff --git a/platform/linux-generic/odp_classification.c b/platform/linux-generic/odp_classification.c
index b84f53e1c..5f452c598 100644
--- a/platform/linux-generic/odp_classification.c
+++ b/platform/linux-generic/odp_classification.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2014-2018, Linaro Limited
- * Copyright (c) 2019, Nokia
+ * Copyright (c) 2019-2020, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -9,6 +9,7 @@
#include <odp/api/align.h>
#include <odp/api/queue.h>
#include <odp/api/debug.h>
+#include <odp/api/pool.h>
#include <odp_init_internal.h>
#include <odp_debug_internal.h>
#include <odp_packet_internal.h>
@@ -61,6 +62,11 @@ static const rss_key default_rss = {
}
};
+cos_t *_odp_cos_entry_from_idx(uint32_t ndx)
+{
+ return &cos_tbl->cos_entry[ndx];
+}
+
static inline uint32_t _odp_cos_to_ndx(odp_cos_t cos)
{
return _odp_typeval(cos) - 1;
@@ -138,10 +144,13 @@ int _odp_classification_term_global(void)
void odp_cls_cos_param_init(odp_cls_cos_param_t *param)
{
+ memset(param, 0, sizeof(odp_cls_cos_param_t));
+
param->queue = ODP_QUEUE_INVALID;
param->pool = ODP_POOL_INVALID;
param->drop_policy = ODP_COS_DROP_NEVER;
param->num_queue = 1;
+ param->vector.enable = false;
odp_queue_param_init(&param->queue_param);
}
@@ -229,6 +238,29 @@ odp_cos_t odp_cls_cos_create(const char *name, const odp_cls_cos_param_t *param)
if (param->num_queue > CLS_COS_QUEUE_MAX || param->num_queue < 1)
return ODP_COS_INVALID;
+ /* Validate packet vector parameters */
+ if (param->vector.enable) {
+ odp_pool_t pool = param->vector.pool;
+ odp_pool_info_t pool_info;
+
+ if (pool == ODP_POOL_INVALID || odp_pool_info(pool, &pool_info)) {
+ ODP_ERR("invalid packet vector pool\n");
+ return ODP_COS_INVALID;
+ }
+ if (pool_info.params.type != ODP_POOL_VECTOR) {
+ ODP_ERR("wrong pool type\n");
+ return ODP_COS_INVALID;
+ }
+ if (param->vector.max_size == 0) {
+ ODP_ERR("vector.max_size is zero\n");
+ return ODP_COS_INVALID;
+ }
+ if (param->vector.max_size > pool_info.params.vector.max_size) {
+ ODP_ERR("vector.max_size larger than pool max vector size\n");
+ return ODP_COS_INVALID;
+ }
+ }
+
drop_policy = param->drop_policy;
for (i = 0; i < CLS_COS_MAX_ENTRY; i++) {
@@ -280,6 +312,7 @@ odp_cos_t odp_cls_cos_create(const char *name, const odp_cls_cos_param_t *param)
cos->s.drop_policy = drop_policy;
odp_atomic_init_u32(&cos->s.num_rule, 0);
cos->s.index = i;
+ cos->s.vector = param->vector;
UNLOCK(&cos->s.lock);
return _odp_cos_from_ndx(i);
}
@@ -1462,7 +1495,7 @@ static cos_t *match_pmr_cos(cos_t *cos, const uint8_t *pkt_addr, pmr_t *pmr,
return NULL;
}
-int pktio_classifier_init(pktio_entry_t *entry)
+int _odp_pktio_classifier_init(pktio_entry_t *entry)
{
classifier_t *cls;
@@ -1548,9 +1581,9 @@ static uint32_t packet_rss_hash(odp_packet_hdr_t *pkt_hdr,
*
* @note *base is not released
*/
-int cls_classify_packet(pktio_entry_t *entry, const uint8_t *base,
- uint16_t pkt_len, uint32_t seg_len, odp_pool_t *pool,
- odp_packet_hdr_t *pkt_hdr, odp_bool_t parse)
+int _odp_cls_classify_packet(pktio_entry_t *entry, const uint8_t *base,
+ uint16_t pkt_len, uint32_t seg_len, odp_pool_t *pool,
+ odp_packet_hdr_t *pkt_hdr, odp_bool_t parse)
{
cos_t *cos;
uint32_t tbl_index;
@@ -1562,9 +1595,9 @@ int cls_classify_packet(pktio_entry_t *entry, const uint8_t *base,
packet_parse_reset(pkt_hdr, 1);
packet_set_len(pkt_hdr, pkt_len);
- packet_parse_common(&pkt_hdr->p, base, pkt_len, seg_len,
- ODP_PROTO_LAYER_ALL,
- entry->s.in_chksums);
+ _odp_packet_parse_common(&pkt_hdr->p, base, pkt_len, seg_len,
+ ODP_PROTO_LAYER_ALL,
+ entry->s.in_chksums);
}
cos = cls_select_cos(entry, base, pkt_hdr);
@@ -1579,6 +1612,7 @@ int cls_classify_packet(pktio_entry_t *entry, const uint8_t *base,
*pool = cos->s.pool;
pkt_hdr->p.input_flags.dst_queue = 1;
+ pkt_hdr->cos = cos->s.index;
if (!cos->s.queue_group) {
pkt_hdr->dst_queue = cos->s.queue;
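
odp_cls_cos_create() above now validates the new per-CoS packet vector parameters: the vector pool must exist, be of type ODP_POOL_VECTOR, and vector.max_size must be non-zero and no larger than the pool's maximum vector size. A hedged application-side sketch of creating such a CoS (pool sizes, names and the destination queue are illustrative assumptions):

#include <stdbool.h>
#include <odp_api.h>

/* Minimal sketch: create a vector pool and a CoS that aggregates
 * classified packets into packet vectors of at most 32 packets. */
static odp_cos_t create_vector_cos(odp_queue_t dst_queue, odp_pool_t pkt_pool)
{
    odp_pool_param_t pool_param;
    odp_cls_cos_param_t cos_param;
    odp_pool_t vec_pool;

    odp_pool_param_init(&pool_param);
    pool_param.type = ODP_POOL_VECTOR;
    pool_param.vector.num = 1024;      /* illustrative */
    pool_param.vector.max_size = 32;   /* illustrative */

    vec_pool = odp_pool_create("cos_vec_pool", &pool_param);
    if (vec_pool == ODP_POOL_INVALID)
        return ODP_COS_INVALID;

    odp_cls_cos_param_init(&cos_param);
    cos_param.queue = dst_queue;
    cos_param.pool = pkt_pool;
    cos_param.vector.enable = true;
    cos_param.vector.pool = vec_pool;
    cos_param.vector.max_size = 32;    /* must not exceed the pool max_size */

    return odp_cls_cos_create("vec_cos", &cos_param);
}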
diff --git a/platform/linux-generic/odp_event.c b/platform/linux-generic/odp_event.c
index 677cd0dde..5398442d6 100644
--- a/platform/linux-generic/odp_event.c
+++ b/platform/linux-generic/odp_event.c
@@ -1,4 +1,5 @@
/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2020, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -14,10 +15,12 @@
#include <odp_ipsec_internal.h>
#include <odp_debug_internal.h>
#include <odp_packet_internal.h>
+#include <odp_event_vector_internal.h>
/* Inlined API functions */
#include <odp/api/plat/event_inlines.h>
#include <odp/api/plat/packet_inlines.h>
+#include <odp/api/plat/packet_vector_inlines.h>
odp_event_subtype_t odp_event_subtype(odp_event_t event)
{
@@ -60,6 +63,9 @@ void odp_event_free(odp_event_t event)
case ODP_EVENT_PACKET:
odp_packet_free(odp_packet_from_event(event));
break;
+ case ODP_EVENT_PACKET_VECTOR:
+ _odp_packet_vector_free_full(odp_packet_vector_from_event(event));
+ break;
case ODP_EVENT_TIMEOUT:
odp_timeout_free(odp_timeout_from_event(event));
break;
@@ -91,3 +97,34 @@ uint64_t odp_event_to_u64(odp_event_t hdl)
{
return _odp_pri(hdl);
}
+
+int odp_event_is_valid(odp_event_t event)
+{
+ odp_buffer_t buf;
+
+ if (event == ODP_EVENT_INVALID)
+ return 0;
+
+ buf = odp_buffer_from_event(event);
+ if (_odp_buffer_is_valid(buf) == 0)
+ return 0;
+
+ switch (odp_event_type(event)) {
+ case ODP_EVENT_BUFFER:
+ /* Fall through */
+ case ODP_EVENT_PACKET:
+ /* Fall through */
+ case ODP_EVENT_TIMEOUT:
+ /* Fall through */
+ case ODP_EVENT_CRYPTO_COMPL:
+ /* Fall through */
+ case ODP_EVENT_IPSEC_STATUS:
+ /* Fall through */
+ case ODP_EVENT_PACKET_VECTOR:
+ break;
+ default:
+ return 0;
+ }
+
+ return 1;
+}
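
odp_event_free() above learns to free ODP_EVENT_PACKET_VECTOR events (via _odp_packet_vector_free_full(), which also frees the packets carried inside the vector), and the new odp_event_is_valid() accepts the vector type. A short, hedged usage sketch from the application side (the queue source is illustrative):

#include <stdio.h>
#include <odp_api.h>

/* Drain one event from a queue and release it, whatever its type.
 * odp_event_free() in this version also frees packet vector events,
 * including the packets carried inside the vector. */
static void drain_one(odp_queue_t queue)
{
    odp_event_t ev = odp_queue_deq(queue);

    if (ev == ODP_EVENT_INVALID)
        return;

    /* New in this release: cheap validity check for debugging */
    if (!odp_event_is_valid(ev)) {
        fprintf(stderr, "corrupt event handle\n");
        return;
    }

    odp_event_free(ev);
}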
diff --git a/platform/linux-generic/odp_init.c b/platform/linux-generic/odp_init.c
index 0341eb318..27390c13b 100644
--- a/platform/linux-generic/odp_init.c
+++ b/platform/linux-generic/odp_init.c
@@ -489,14 +489,14 @@ static int term_local(enum init_stage stage)
case ALL_INIT:
case SCHED_INIT:
- if (sched_fn->term_local()) {
+ if (_odp_sched_fn->term_local()) {
ODP_ERR("ODP schedule local term failed.\n");
rc = -1;
}
/* Fall through */
case QUEUE_INIT:
- if (queue_fn->term_local()) {
+ if (_odp_queue_fn->term_local()) {
ODP_ERR("ODP queue local term failed.\n");
rc = -1;
}
@@ -606,13 +606,13 @@ int odp_init_local(odp_instance_t instance, odp_thread_type_t thr_type)
}
stage = POOL_INIT;
- if (queue_fn->init_local()) {
+ if (_odp_queue_fn->init_local()) {
ODP_ERR("ODP queue local init failed.\n");
goto init_fail;
}
stage = QUEUE_INIT;
- if (sched_fn->init_local()) {
+ if (_odp_sched_fn->init_local()) {
ODP_ERR("ODP schedule local init failed.\n");
goto init_fail;
}
diff --git a/platform/linux-generic/odp_ipsec.c b/platform/linux-generic/odp_ipsec.c
index 048a40572..37763d030 100644
--- a/platform/linux-generic/odp_ipsec.c
+++ b/platform/linux-generic/odp_ipsec.c
@@ -17,20 +17,94 @@
#include <odp_packet_internal.h>
#include <odp_ipsec_internal.h>
#include <odp/api/plat/queue_inlines.h>
+#include <odp_classification_internal.h>
#include <protocols/eth.h>
#include <protocols/ip.h>
#include <protocols/ipsec.h>
#include <protocols/udp.h>
+#include <errno.h>
#include <string.h>
static odp_ipsec_config_t *ipsec_config;
-int odp_ipsec_capability(odp_ipsec_capability_t *capa)
+/*
+ * Set capability bits for algorithms that are defined for use with IPsec
+ * and for which the IPsec crypto or auth capability function returns
+ * at least one supported instance.
+ */
+static int set_ipsec_crypto_capa(odp_ipsec_capability_t *capa)
{
int rc;
odp_crypto_capability_t crypto_capa;
+
+ rc = odp_crypto_capability(&crypto_capa);
+ if (rc < 0)
+ return rc;
+
+#define CHECK_CIPHER(field, alg) do { \
+ if (crypto_capa.ciphers.bit.field && \
+ odp_ipsec_cipher_capability(alg, NULL, 0) > 0) \
+ capa->ciphers.bit.field = 1; \
+} while (0)
+
+ CHECK_CIPHER(null, ODP_CIPHER_ALG_NULL);
+ CHECK_CIPHER(des, ODP_CIPHER_ALG_DES);
+ CHECK_CIPHER(trides_cbc, ODP_CIPHER_ALG_3DES_CBC);
+ CHECK_CIPHER(aes_cbc, ODP_CIPHER_ALG_AES_CBC);
+ CHECK_CIPHER(aes_ctr, ODP_CIPHER_ALG_AES_CTR);
+ CHECK_CIPHER(aes_gcm, ODP_CIPHER_ALG_AES_GCM);
+ CHECK_CIPHER(aes_ccm, ODP_CIPHER_ALG_AES_CCM);
+ CHECK_CIPHER(chacha20_poly1305, ODP_CIPHER_ALG_CHACHA20_POLY1305);
+
+#define CHECK_AUTH(field, alg) do { \
+ if (crypto_capa.auths.bit.field && \
+ odp_ipsec_auth_capability(alg, NULL, 0) > 0) \
+ capa->auths.bit.field = 1; \
+} while (0)
+
+ CHECK_AUTH(null, ODP_AUTH_ALG_NULL);
+ CHECK_AUTH(md5_hmac, ODP_AUTH_ALG_MD5_HMAC);
+ CHECK_AUTH(sha1_hmac, ODP_AUTH_ALG_SHA1_HMAC);
+ CHECK_AUTH(sha256_hmac, ODP_AUTH_ALG_SHA256_HMAC);
+ CHECK_AUTH(sha384_hmac, ODP_AUTH_ALG_SHA384_HMAC);
+ CHECK_AUTH(sha512_hmac, ODP_AUTH_ALG_SHA512_HMAC);
+ CHECK_AUTH(aes_gcm, ODP_AUTH_ALG_AES_GCM);
+ CHECK_AUTH(aes_gmac, ODP_AUTH_ALG_AES_GMAC);
+ CHECK_AUTH(aes_ccm, ODP_AUTH_ALG_AES_CCM);
+ CHECK_AUTH(aes_cmac, ODP_AUTH_ALG_AES_CMAC);
+ CHECK_AUTH(aes_xcbc_mac, ODP_AUTH_ALG_AES_XCBC_MAC);
+ CHECK_AUTH(chacha20_poly1305, ODP_AUTH_ALG_CHACHA20_POLY1305);
+
+ /*
+ * Certain combined mode algorithms are configured by setting
+ * both cipher and auth to the corresponding algorithm when
+ * creating an SA. Since such algorithms cannot be combined
+ * with anything else, clear both capability fields if the
+ * cipher and auth check did not both succeed.
+ *
+ * Although AES-GMAC is a combined mode algorithm, it does
+ * not appear here because it is configured by setting cipher
+ * to null.
+ */
+#define REQUIRE_BOTH(field) do { \
+ if (!capa->ciphers.bit.field) \
+ capa->auths.bit.field = 0; \
+ if (!capa->auths.bit.field) \
+ capa->ciphers.bit.field = 0; \
+ } while (0)
+
+ REQUIRE_BOTH(aes_gcm);
+ REQUIRE_BOTH(aes_ccm);
+ REQUIRE_BOTH(chacha20_poly1305);
+
+ return 0;
+}
+
+int odp_ipsec_capability(odp_ipsec_capability_t *capa)
+{
+ int rc;
odp_queue_capability_t queue_capa;
if (odp_global_ro.disable.ipsec) {
@@ -51,13 +125,10 @@ int odp_ipsec_capability(odp_ipsec_capability_t *capa)
capa->max_antireplay_ws = IPSEC_ANTIREPLAY_WS;
- rc = odp_crypto_capability(&crypto_capa);
+ rc = set_ipsec_crypto_capa(capa);
if (rc < 0)
return rc;
- capa->ciphers = crypto_capa.ciphers;
- capa->auths = crypto_capa.auths;
-
rc = odp_queue_capability(&queue_capa);
if (rc < 0)
return rc;
@@ -147,6 +218,7 @@ void odp_ipsec_config_init(odp_ipsec_config_t *config)
config->inbound.default_queue = ODP_QUEUE_INVALID;
config->inbound.lookup.min_spi = 0;
config->inbound.lookup.max_spi = UINT32_MAX;
+ config->stats_en = false;
}
int odp_ipsec_config(const odp_ipsec_config_t *config)
@@ -595,6 +667,37 @@ static int ipsec_in_ah_post(odp_packet_t pkt,
return 0;
}
+static void
+ipsec_sa_err_stats_update(ipsec_sa_t *sa, odp_ipsec_op_status_t *status)
+{
+ if (odp_likely(ODP_IPSEC_OK == status->error.all))
+ return;
+
+ if (NULL == sa)
+ return;
+
+ if (status->error.proto)
+ odp_atomic_inc_u64(&sa->stats.proto_err);
+
+ if (status->error.auth)
+ odp_atomic_inc_u64(&sa->stats.auth_err);
+
+ if (status->error.antireplay)
+ odp_atomic_inc_u64(&sa->stats.antireplay_err);
+
+ if (status->error.alg)
+ odp_atomic_inc_u64(&sa->stats.alg_err);
+
+ if (status->error.mtu)
+ odp_atomic_inc_u64(&sa->stats.mtu_err);
+
+ if (status->error.hard_exp_bytes)
+ odp_atomic_inc_u64(&sa->stats.hard_exp_bytes_err);
+
+ if (status->error.hard_exp_packets)
+ odp_atomic_inc_u64(&sa->stats.hard_exp_pkts_err);
+}
+
static ipsec_sa_t *ipsec_in_single(odp_packet_t pkt,
odp_ipsec_sa_t sa,
odp_packet_t *pkt_out,
@@ -605,7 +708,6 @@ static ipsec_sa_t *ipsec_in_single(odp_packet_t pkt,
odp_crypto_packet_op_param_t param;
int rc;
odp_crypto_packet_result_t crypto; /**< Crypto operation result */
- odp_packet_hdr_t *pkt_hdr;
state.ip_offset = odp_packet_l3_offset(pkt);
ODP_ASSERT(ODP_PACKET_OFFSET_INVALID != state.ip_offset);
@@ -633,7 +735,7 @@ static ipsec_sa_t *ipsec_in_single(odp_packet_t pkt,
if (rc < 0 ||
state.ip_tot_len + state.ip_offset > odp_packet_len(pkt)) {
status->error.alg = 1;
- goto err;
+ goto exit;
}
/* Check IP header for IPSec protocols and look it up */
@@ -644,18 +746,18 @@ static ipsec_sa_t *ipsec_in_single(odp_packet_t pkt,
rc = ipsec_in_ah(&pkt, &state, &ipsec_sa, sa, &param, status);
} else {
status->error.proto = 1;
- goto err;
+ goto exit;
}
if (rc < 0)
- goto err;
+ goto exit;
if (_odp_ipsec_sa_replay_precheck(ipsec_sa,
state.in.seq_no,
status) < 0)
- goto err;
+ goto exit;
if (_odp_ipsec_sa_stats_precheck(ipsec_sa, status) < 0)
- goto err;
+ goto exit;
param.session = ipsec_sa->session;
@@ -663,41 +765,37 @@ static ipsec_sa_t *ipsec_in_single(odp_packet_t pkt,
if (rc < 0) {
ODP_DBG("Crypto failed\n");
status->error.alg = 1;
- goto err;
+ goto exit;
}
rc = odp_crypto_result(&crypto, pkt);
if (rc < 0) {
ODP_DBG("Crypto failed\n");
status->error.alg = 1;
- goto err;
+ goto exit;
}
if (!crypto.ok) {
- if ((crypto.cipher_status.alg_err !=
- ODP_CRYPTO_ALG_ERR_NONE) ||
- (crypto.cipher_status.hw_err !=
- ODP_CRYPTO_HW_ERR_NONE))
- status->error.alg = 1;
-
- if ((crypto.auth_status.alg_err !=
- ODP_CRYPTO_ALG_ERR_NONE) ||
- (crypto.auth_status.hw_err !=
- ODP_CRYPTO_HW_ERR_NONE))
+ if ((crypto.cipher_status.alg_err ==
+ ODP_CRYPTO_ALG_ERR_ICV_CHECK) ||
+ (crypto.auth_status.alg_err ==
+ ODP_CRYPTO_ALG_ERR_ICV_CHECK))
status->error.auth = 1;
+ else
+ status->error.alg = 1;
- goto err;
+ goto exit;
}
- if (_odp_ipsec_sa_stats_update(ipsec_sa,
- state.stats_length,
- status) < 0)
- goto err;
-
if (_odp_ipsec_sa_replay_update(ipsec_sa,
state.in.seq_no,
status) < 0)
- goto err;
+ goto exit;
+
+ if (_odp_ipsec_sa_lifetime_update(ipsec_sa,
+ state.stats_length,
+ status) < 0)
+ goto post_lifetime_err_cnt_update;
state.ip = odp_packet_l3_ptr(pkt, NULL);
@@ -709,12 +807,12 @@ static ipsec_sa_t *ipsec_in_single(odp_packet_t pkt,
rc = -1;
if (rc < 0) {
status->error.proto = 1;
- goto err;
+ goto post_lifetime_err_cnt_update;
}
if (odp_packet_trunc_tail(&pkt, state.in.trl_len, NULL, NULL) < 0) {
status->error.alg = 1;
- goto err;
+ goto post_lifetime_err_cnt_update;
}
state.ip_tot_len -= state.in.trl_len;
@@ -728,7 +826,7 @@ static ipsec_sa_t *ipsec_in_single(odp_packet_t pkt,
state.in.hdr_len,
NULL, NULL) < 0) {
status->error.alg = 1;
- goto err;
+ goto post_lifetime_err_cnt_update;
}
state.ip_tot_len -= state.ip_hdr_len + state.in.hdr_len;
if (_ODP_IPPROTO_IPIP == state.ip_next_hdr) {
@@ -742,7 +840,7 @@ static ipsec_sa_t *ipsec_in_single(odp_packet_t pkt,
state.is_ipv6 = 0;
} else {
status->error.proto = 1;
- goto err;
+ goto post_lifetime_err_cnt_update;
}
} else {
odp_packet_move_data(pkt, state.in.hdr_len, 0,
@@ -750,7 +848,7 @@ static ipsec_sa_t *ipsec_in_single(odp_packet_t pkt,
if (odp_packet_trunc_head(&pkt, state.in.hdr_len,
NULL, NULL) < 0) {
status->error.alg = 1;
- goto err;
+ goto post_lifetime_err_cnt_update;
}
state.ip_tot_len -= state.in.hdr_len;
}
@@ -775,7 +873,7 @@ static ipsec_sa_t *ipsec_in_single(odp_packet_t pkt,
ipv6hdr->hop_limit -= ipsec_sa->dec_ttl;
} else if (state.ip_next_hdr != _ODP_IPPROTO_NO_NEXT) {
status->error.proto = 1;
- goto err;
+ goto post_lifetime_err_cnt_update;
}
if (_ODP_IPPROTO_NO_NEXT == state.ip_next_hdr &&
@@ -798,16 +896,18 @@ static ipsec_sa_t *ipsec_in_single(odp_packet_t pkt,
odp_packet_parse(pkt, state.ip_offset, &parse_param);
}
- *pkt_out = pkt;
+ goto exit;
- return ipsec_sa;
-
-err:
- pkt_hdr = packet_hdr(pkt);
- pkt_hdr->p.flags.ipsec_err = 1;
+post_lifetime_err_cnt_update:
+ if (ipsec_config->stats_en)
+ odp_atomic_inc_u64(&ipsec_sa->stats.post_lifetime_err_pkts);
+exit:
*pkt_out = pkt;
+ if (ipsec_config->stats_en)
+ ipsec_sa_err_stats_update(ipsec_sa, status);
+
return ipsec_sa;
}
@@ -1391,7 +1491,6 @@ static ipsec_sa_t *ipsec_out_single(odp_packet_t pkt,
odp_crypto_packet_op_param_t param;
int rc;
odp_crypto_packet_result_t crypto; /**< Crypto operation result */
- odp_packet_hdr_t *pkt_hdr;
odp_ipsec_frag_mode_t frag_mode;
uint32_t mtu;
@@ -1465,7 +1564,7 @@ static ipsec_sa_t *ipsec_out_single(odp_packet_t pkt,
rc = -1;
if (rc < 0) {
status->error.alg = 1;
- goto err;
+ goto exit;
}
ipsec_out_checksums(pkt, &state);
@@ -1483,7 +1582,7 @@ static ipsec_sa_t *ipsec_out_single(odp_packet_t pkt,
}
if (rc < 0) {
status->error.alg = 1;
- goto err;
+ goto exit;
}
if (ODP_IPSEC_ESP == ipsec_sa->proto) {
@@ -1493,16 +1592,16 @@ static ipsec_sa_t *ipsec_out_single(odp_packet_t pkt,
rc = ipsec_out_ah(&pkt, &state, ipsec_sa, &param, status, mtu);
} else {
status->error.alg = 1;
- goto err;
+ goto exit;
}
if (rc < 0)
- goto err;
+ goto exit;
/* No need to run precheck here, we know that packet is authentic */
- if (_odp_ipsec_sa_stats_update(ipsec_sa,
- state.stats_length,
- status) < 0)
- goto err;
+ if (_odp_ipsec_sa_lifetime_update(ipsec_sa,
+ state.stats_length,
+ status) < 0)
+ goto post_lifetime_err_cnt_update;
param.session = ipsec_sa->session;
@@ -1522,30 +1621,19 @@ static ipsec_sa_t *ipsec_out_single(odp_packet_t pkt,
if (rc < 0) {
ODP_DBG("Crypto failed\n");
status->error.alg = 1;
- goto err;
+ goto post_lifetime_err_cnt_update;
}
rc = odp_crypto_result(&crypto, pkt);
if (rc < 0) {
ODP_DBG("Crypto failed\n");
status->error.alg = 1;
- goto err;
+ goto post_lifetime_err_cnt_update;
}
if (!crypto.ok) {
- if ((crypto.cipher_status.alg_err !=
- ODP_CRYPTO_ALG_ERR_NONE) ||
- (crypto.cipher_status.hw_err !=
- ODP_CRYPTO_HW_ERR_NONE))
- status->error.alg = 1;
-
- if ((crypto.auth_status.alg_err !=
- ODP_CRYPTO_ALG_ERR_NONE) ||
- (crypto.auth_status.hw_err !=
- ODP_CRYPTO_HW_ERR_NONE))
- status->error.auth = 1;
-
- goto err;
+ status->error.alg = 1;
+ goto post_lifetime_err_cnt_update;
}
/* Finalize the IP header */
@@ -1554,14 +1642,13 @@ static ipsec_sa_t *ipsec_out_single(odp_packet_t pkt,
else if (ODP_IPSEC_AH == ipsec_sa->proto)
ipsec_out_ah_post(&state, pkt);
- *pkt_out = pkt;
- return ipsec_sa;
-
-err:
- pkt_hdr = packet_hdr(pkt);
+ goto exit;
- pkt_hdr->p.flags.ipsec_err = 1;
+post_lifetime_err_cnt_update:
+ if (ipsec_config->stats_en)
+ odp_atomic_inc_u64(&ipsec_sa->stats.post_lifetime_err_pkts);
+exit:
*pkt_out = pkt;
return ipsec_sa;
}
@@ -1661,6 +1748,9 @@ int odp_ipsec_out(const odp_packet_t pkt_in[], int num_in,
result->status = status;
result->sa = ipsec_sa->ipsec_sa_hdl;
+ if (ipsec_config->stats_en)
+ ipsec_sa_err_stats_update(ipsec_sa, &status);
+
pkt_out[out_pkt] = pkt;
in_pkt++;
out_pkt++;
@@ -1766,6 +1856,9 @@ int odp_ipsec_out_enq(const odp_packet_t pkt_in[], int num_in,
result->sa = ipsec_sa->ipsec_sa_hdl;
queue = ipsec_sa->queue;
+ if (ipsec_config->stats_en)
+ ipsec_sa_err_stats_update(ipsec_sa, &status);
+
if (odp_queue_enq(queue, odp_ipsec_packet_to_event(pkt))) {
odp_packet_free(pkt);
break;
@@ -1808,6 +1901,8 @@ int _odp_ipsec_try_inline(odp_packet_t *pkt)
pkt_hdr = packet_hdr(*pkt);
pkt_hdr->p.input_flags.dst_queue = 1;
pkt_hdr->dst_queue = ipsec_sa->queue;
+ /* Distinguish inline IPsec packets from classifier packets */
+ pkt_hdr->cos = CLS_COS_IDX_NONE;
/* Last thing */
_odp_ipsec_sa_unuse(ipsec_sa);
@@ -1815,6 +1910,8 @@ int _odp_ipsec_try_inline(odp_packet_t *pkt)
return 0;
}
+#define MAX_HDR_LEN 100 /* Enough for VxLAN over IPv6 */
+
int odp_ipsec_out_inline(const odp_packet_t pkt_in[], int num_in,
const odp_ipsec_out_param_t *param,
const odp_ipsec_out_inline_param_t *inline_param)
@@ -1824,9 +1921,9 @@ int odp_ipsec_out_inline(const odp_packet_t pkt_in[], int num_in,
unsigned opt_idx = 0;
unsigned sa_inc = (param->num_sa > 1) ? 1 : 0;
unsigned opt_inc = (param->num_opt > 1) ? 1 : 0;
+ uint8_t hdr_buf[MAX_HDR_LEN];
ODP_ASSERT(param->num_sa != 0);
- ODP_ASSERT(inline_param->pktio != ODP_PKTIO_INVALID);
while (in_pkt < num_in) {
odp_packet_t pkt = pkt_in[in_pkt];
@@ -1838,6 +1935,8 @@ int odp_ipsec_out_inline(const odp_packet_t pkt_in[], int num_in,
uint32_t hdr_len, offset;
const void *ptr;
+ ODP_ASSERT(inline_param[in_pkt].pktio != ODP_PKTIO_INVALID);
+
memset(&status, 0, sizeof(status));
if (0 == param->num_sa) {
@@ -1847,6 +1946,25 @@ int odp_ipsec_out_inline(const odp_packet_t pkt_in[], int num_in,
ODP_ASSERT(ODP_IPSEC_SA_INVALID != sa);
}
+ hdr_len = inline_param[in_pkt].outer_hdr.len;
+ ptr = inline_param[in_pkt].outer_hdr.ptr;
+
+ if (!ptr) {
+ uint32_t l2_offset = odp_packet_l2_offset(pkt);
+
+ ODP_ASSERT(hdr_len == odp_packet_l3_offset(pkt) - l2_offset);
+
+ if (odp_likely(hdr_len <= MAX_HDR_LEN) &&
+ odp_likely(odp_packet_copy_to_mem(pkt, l2_offset,
+ hdr_len, hdr_buf) == 0)) {
+ ptr = hdr_buf;
+ } else {
+ status.error.proto = 1;
+ ipsec_sa = _odp_ipsec_sa_entry_from_hdl(sa);
+ goto err;
+ }
+ }
+
if (0 == param->num_opt)
opt = &default_out_opt;
else
@@ -1855,8 +1973,6 @@ int odp_ipsec_out_inline(const odp_packet_t pkt_in[], int num_in,
ipsec_sa = ipsec_out_single(pkt, sa, &pkt, opt, &status);
ODP_ASSERT(NULL != ipsec_sa);
- hdr_len = inline_param[in_pkt].outer_hdr.len;
- ptr = inline_param[in_pkt].outer_hdr.ptr;
offset = odp_packet_l3_offset(pkt);
if (odp_unlikely(offset == ODP_PACKET_OFFSET_INVALID))
offset = 0;
@@ -1900,6 +2016,9 @@ int odp_ipsec_out_inline(const odp_packet_t pkt_in[], int num_in,
} else {
odp_queue_t queue;
err:
+ if (ipsec_config->stats_en)
+ ipsec_sa_err_stats_update(ipsec_sa, &status);
+
packet_subtype_set(pkt, ODP_EVENT_PACKET_IPSEC);
result = ipsec_pkt_result(pkt);
memset(result, 0, sizeof(*result));
@@ -1947,6 +2066,33 @@ odp_event_t odp_ipsec_packet_to_event(odp_packet_t pkt)
return odp_packet_to_event(pkt);
}
+int odp_ipsec_stats(odp_ipsec_sa_t sa, odp_ipsec_stats_t *stats)
+{
+ ipsec_sa_t *ipsec_sa;
+
+ if (ODP_IPSEC_SA_INVALID == sa)
+ return -EINVAL;
+
+ if (!ipsec_config->stats_en)
+ return -ENOTSUP;
+
+ ODP_ASSERT(NULL != stats);
+
+ ipsec_sa = _odp_ipsec_sa_entry_from_hdl(sa);
+ ODP_ASSERT(NULL != ipsec_sa);
+
+ stats->success = _odp_ipsec_sa_stats_pkts(ipsec_sa);
+ stats->proto_err = odp_atomic_load_u64(&ipsec_sa->stats.proto_err);
+ stats->auth_err = odp_atomic_load_u64(&ipsec_sa->stats.auth_err);
+ stats->antireplay_err = odp_atomic_load_u64(&ipsec_sa->stats.antireplay_err);
+ stats->alg_err = odp_atomic_load_u64(&ipsec_sa->stats.alg_err);
+ stats->mtu_err = odp_atomic_load_u64(&ipsec_sa->stats.mtu_err);
+ stats->hard_exp_bytes_err = odp_atomic_load_u64(&ipsec_sa->stats.hard_exp_bytes_err);
+ stats->hard_exp_pkts_err = odp_atomic_load_u64(&ipsec_sa->stats.hard_exp_pkts_err);
+
+ return 0;
+}
+
int _odp_ipsec_init_global(void)
{
odp_shm_t shm;
@@ -1991,3 +2137,19 @@ int _odp_ipsec_term_global(void)
return 0;
}
+
+void odp_ipsec_print(void)
+{
+ ODP_PRINT("\nIPSEC print\n");
+ ODP_PRINT("-----------\n");
+ ODP_PRINT(" max number of SA %u\n", ipsec_config->max_num_sa);
+}
+
+void odp_ipsec_sa_print(odp_ipsec_sa_t sa)
+{
+ ipsec_sa_t *ipsec_sa = _odp_ipsec_sa_entry_from_hdl(sa);
+
+ ODP_PRINT("\nIPSEC SA print\n");
+ ODP_PRINT("--------------\n");
+ ODP_PRINT(" SPI %u\n", ipsec_sa->spi);
+}
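
The IPsec changes above separate lifetime accounting (_odp_ipsec_sa_lifetime_update()) from error statistics and add odp_ipsec_stats(), which returns -ENOTSUP unless statistics collection was enabled in the global configuration. A hedged sketch of the application flow (most configuration and error handling is omitted):

#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>
#include <odp_api.h>

/* Enable SA statistics collection before any SA is created. */
static int ipsec_setup_with_stats(void)
{
    odp_ipsec_config_t config;

    odp_ipsec_config_init(&config);
    config.stats_en = true;          /* off by default */
    /* inbound/outbound mode setup omitted for brevity */

    return odp_ipsec_config(&config);
}

/* Later, per SA: read the error counters added in this version. */
static void print_sa_stats(odp_ipsec_sa_t sa)
{
    odp_ipsec_stats_t stats;

    if (odp_ipsec_stats(sa, &stats))
        return;                      /* fails when stats_en is false */

    printf("success        %" PRIu64 "\n", stats.success);
    printf("auth errors    %" PRIu64 "\n", stats.auth_err);
    printf("replay errors  %" PRIu64 "\n", stats.antireplay_err);
}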
diff --git a/platform/linux-generic/odp_ipsec_sad.c b/platform/linux-generic/odp_ipsec_sad.c
index b104b9a6a..0e021361c 100644
--- a/platform/linux-generic/odp_ipsec_sad.c
+++ b/platform/linux-generic/odp_ipsec_sad.c
@@ -61,7 +61,7 @@ typedef struct sa_thread_local_s {
* Packets that can be processed in this thread before looking at
* the SA-global packet counter and checking hard and soft limits.
*/
- uint32_t packet_quota;
+ odp_atomic_u32_t packet_quota;
/*
* Bytes that can be processed in this thread before looking at
* at the SA-global byte counter and checking hard and soft limits.
@@ -125,7 +125,7 @@ static void init_sa_thread_local(ipsec_sa_t *sa)
for (n = 0; n < ODP_THREAD_COUNT_MAX; n++) {
sa_tl = &ipsec_sa_tbl->per_thread[n].sa[sa->ipsec_sa_idx];
- sa_tl->packet_quota = 0;
+ odp_atomic_init_u32(&sa_tl->packet_quota, 0);
sa_tl->byte_quota = 0;
sa_tl->lifetime_status.all = 0;
}
@@ -458,6 +458,29 @@ odp_ipsec_sa_t odp_ipsec_sa_create(const odp_ipsec_sa_param_t *param)
ipsec_sa->hard_limit_bytes = param->lifetime.hard_limit.bytes;
ipsec_sa->hard_limit_packets = param->lifetime.hard_limit.packets;
+ odp_atomic_init_u64(&ipsec_sa->stats.proto_err, 0);
+ odp_atomic_init_u64(&ipsec_sa->stats.auth_err, 0);
+ odp_atomic_init_u64(&ipsec_sa->stats.antireplay_err, 0);
+ odp_atomic_init_u64(&ipsec_sa->stats.alg_err, 0);
+ odp_atomic_init_u64(&ipsec_sa->stats.mtu_err, 0);
+ odp_atomic_init_u64(&ipsec_sa->stats.hard_exp_bytes_err, 0);
+ odp_atomic_init_u64(&ipsec_sa->stats.hard_exp_pkts_err, 0);
+ odp_atomic_init_u64(&ipsec_sa->stats.post_lifetime_err_pkts, 0);
+
+ /* Copy application provided parameter values. */
+ ipsec_sa->param = *param;
+
+ /* Set all the key related pointers and ip address pointers to null. */
+ ipsec_sa->param.crypto.cipher_key.data = NULL;
+ ipsec_sa->param.crypto.cipher_key_extra.data = NULL;
+ ipsec_sa->param.crypto.auth_key.data = NULL;
+ ipsec_sa->param.crypto.auth_key_extra.data = NULL;
+ ipsec_sa->param.inbound.lookup_param.dst_addr = NULL;
+ ipsec_sa->param.outbound.tunnel.ipv4.src_addr = NULL;
+ ipsec_sa->param.outbound.tunnel.ipv4.dst_addr = NULL;
+ ipsec_sa->param.outbound.tunnel.ipv6.src_addr = NULL;
+ ipsec_sa->param.outbound.tunnel.ipv6.dst_addr = NULL;
+
if (ODP_IPSEC_MODE_TUNNEL == ipsec_sa->mode &&
ODP_IPSEC_DIR_OUTBOUND == param->dir) {
if (ODP_IPSEC_TUNNEL_IPV4 == param->outbound.tunnel.type) {
@@ -560,7 +583,7 @@ odp_ipsec_sa_t odp_ipsec_sa_create(const odp_ipsec_sa_param_t *param)
case ODP_CIPHER_ALG_AES_GCM:
ipsec_sa->use_counter_iv = 1;
ipsec_sa->esp_iv_len = 8;
- ipsec_sa->esp_pad_mask = esp_block_len_to_mask(16);
+ ipsec_sa->esp_pad_mask = esp_block_len_to_mask(1);
ipsec_sa->salt_length = 4;
salt_param = &param->crypto.cipher_key_extra;
break;
@@ -594,7 +617,7 @@ odp_ipsec_sa_t odp_ipsec_sa_create(const odp_ipsec_sa_param_t *param)
goto error;
ipsec_sa->use_counter_iv = 1;
ipsec_sa->esp_iv_len = 8;
- ipsec_sa->esp_pad_mask = esp_block_len_to_mask(16);
+ ipsec_sa->esp_pad_mask = esp_block_len_to_mask(1);
crypto_param.auth_iv.length = 12;
ipsec_sa->salt_length = 4;
salt_param = &param->crypto.auth_key_extra;
@@ -781,17 +804,19 @@ int _odp_ipsec_sa_stats_precheck(ipsec_sa_t *ipsec_sa,
return rc;
}
-int _odp_ipsec_sa_stats_update(ipsec_sa_t *ipsec_sa, uint32_t len,
- odp_ipsec_op_status_t *status)
+int _odp_ipsec_sa_lifetime_update(ipsec_sa_t *ipsec_sa, uint32_t len,
+ odp_ipsec_op_status_t *status)
{
sa_thread_local_t *sa_tl = ipsec_sa_thread_local(ipsec_sa);
uint64_t packets, bytes;
+ uint32_t tl_pkt_quota;
- if (odp_unlikely(sa_tl->packet_quota == 0)) {
+ tl_pkt_quota = odp_atomic_load_u32(&sa_tl->packet_quota);
+ if (odp_unlikely(tl_pkt_quota == 0)) {
packets = odp_atomic_fetch_add_u64(&ipsec_sa->hot.packets,
SA_LIFE_PACKETS_PREALLOC);
packets += SA_LIFE_PACKETS_PREALLOC;
- sa_tl->packet_quota += SA_LIFE_PACKETS_PREALLOC;
+ tl_pkt_quota += SA_LIFE_PACKETS_PREALLOC;
if (ipsec_sa->soft_limit_packets > 0 &&
packets >= ipsec_sa->soft_limit_packets)
@@ -801,7 +826,8 @@ int _odp_ipsec_sa_stats_update(ipsec_sa_t *ipsec_sa, uint32_t len,
packets >= ipsec_sa->hard_limit_packets)
sa_tl->lifetime_status.error.hard_exp_packets = 1;
}
- sa_tl->packet_quota--;
+ tl_pkt_quota--;
+ odp_atomic_store_u32(&sa_tl->packet_quota, tl_pkt_quota);
if (odp_unlikely(sa_tl->byte_quota < len)) {
bytes = odp_atomic_fetch_add_u64(&ipsec_sa->hot.bytes,
@@ -827,6 +853,16 @@ int _odp_ipsec_sa_stats_update(ipsec_sa_t *ipsec_sa, uint32_t len,
return 0;
}
+static uint64_t ipsec_sa_antireplay_max_seq(ipsec_sa_t *ipsec_sa)
+{
+ uint64_t state, max_seq;
+
+ state = odp_atomic_load_u64(&ipsec_sa->hot.in.antireplay);
+ max_seq = state & 0xffffffff;
+
+ return max_seq;
+}
+
int _odp_ipsec_sa_replay_precheck(ipsec_sa_t *ipsec_sa, uint32_t seq,
odp_ipsec_op_status_t *status)
{
@@ -910,3 +946,105 @@ uint16_t _odp_ipsec_sa_alloc_ipv4_id(ipsec_sa_t *ipsec_sa)
/* No need to convert to BE: ID just should not be duplicated */
return tl->next_ipv4_id++;
}
+
+uint64_t _odp_ipsec_sa_stats_pkts(ipsec_sa_t *sa)
+{
+ uint64_t tl_pkt_quota = 0;
+ sa_thread_local_t *sa_tl;
+ int n;
+
+ /*
+ * Field 'hot.packets' tracks SA lifetime. The same field is being used
+ * to track the number of success packets.
+ *
+ * SA lifetime tracking implements a per thread packet quota to allow
+ * less frequent updates to the hot field. The per thread quota need
+ * to be decremented. In addition, SA lifetime gets consumed for any
+ * errors occurring after lifetime check is done. Those packets also
+ * need to be accounted for.
+ */
+
+ for (n = 0; n < ODP_THREAD_COUNT_MAX; n++) {
+ sa_tl = &ipsec_sa_tbl->per_thread[n].sa[sa->ipsec_sa_idx];
+ tl_pkt_quota += odp_atomic_load_u32(&sa_tl->packet_quota);
+ }
+
+ return odp_atomic_load_u64(&sa->hot.packets)
+ - odp_atomic_load_u64(&sa->stats.post_lifetime_err_pkts)
+ - tl_pkt_quota;
+}
+
+static void ipsec_out_sa_info(ipsec_sa_t *ipsec_sa, odp_ipsec_sa_info_t *sa_info)
+{
+ sa_info->outbound.seq_num =
+ (uint64_t)odp_atomic_load_u64(&ipsec_sa->hot.out.seq) - 1;
+
+ if (ipsec_sa->param.mode == ODP_IPSEC_MODE_TUNNEL) {
+ uint8_t *src, *dst;
+
+ if (ipsec_sa->param.outbound.tunnel.type ==
+ ODP_IPSEC_TUNNEL_IPV4) {
+ src = sa_info->outbound.tunnel.ipv4.src_addr;
+ dst = sa_info->outbound.tunnel.ipv4.dst_addr;
+ memcpy(src, &ipsec_sa->out.tun_ipv4.src_ip,
+ ODP_IPV4_ADDR_SIZE);
+ memcpy(dst, &ipsec_sa->out.tun_ipv4.dst_ip,
+ ODP_IPV4_ADDR_SIZE);
+ sa_info->param.outbound.tunnel.ipv4.src_addr = src;
+ sa_info->param.outbound.tunnel.ipv4.dst_addr = dst;
+ } else {
+ src = sa_info->outbound.tunnel.ipv6.src_addr;
+ dst = sa_info->outbound.tunnel.ipv6.dst_addr;
+ memcpy(src, &ipsec_sa->out.tun_ipv6.src_ip,
+ ODP_IPV6_ADDR_SIZE);
+ memcpy(dst, &ipsec_sa->out.tun_ipv6.dst_ip,
+ ODP_IPV6_ADDR_SIZE);
+ sa_info->param.outbound.tunnel.ipv6.src_addr = src;
+ sa_info->param.outbound.tunnel.ipv6.dst_addr = dst;
+ }
+ }
+}
+
+static void ipsec_in_sa_info(ipsec_sa_t *ipsec_sa, odp_ipsec_sa_info_t *sa_info)
+{
+ if (ipsec_sa->param.mode == ODP_IPSEC_MODE_TUNNEL) {
+ uint8_t *dst = sa_info->inbound.lookup_param.dst_addr;
+
+ if (ipsec_sa->param.inbound.lookup_param.ip_version ==
+ ODP_IPSEC_IPV4)
+ memcpy(dst, &ipsec_sa->in.lookup_dst_ipv4,
+ ODP_IPV4_ADDR_SIZE);
+ else
+ memcpy(dst, &ipsec_sa->in.lookup_dst_ipv6,
+ ODP_IPV6_ADDR_SIZE);
+
+ sa_info->param.inbound.lookup_param.dst_addr = dst;
+ }
+
+ if (ipsec_sa->antireplay) {
+ sa_info->inbound.antireplay_ws = IPSEC_ANTIREPLAY_WS;
+ sa_info->inbound.antireplay_window_top =
+ ipsec_sa_antireplay_max_seq(ipsec_sa);
+ }
+}
+
+int odp_ipsec_sa_info(odp_ipsec_sa_t sa, odp_ipsec_sa_info_t *sa_info)
+{
+ ipsec_sa_t *ipsec_sa;
+
+ ipsec_sa = _odp_ipsec_sa_entry_from_hdl(sa);
+
+ ODP_ASSERT(ipsec_sa != NULL);
+ ODP_ASSERT(sa_info != NULL);
+
+ memset(sa_info, 0, sizeof(*sa_info));
+
+ sa_info->param = ipsec_sa->param;
+
+ if (ipsec_sa->param.dir == ODP_IPSEC_DIR_OUTBOUND)
+ ipsec_out_sa_info(ipsec_sa, sa_info);
+ else
+ ipsec_in_sa_info(ipsec_sa, sa_info);
+
+ return 0;
+}
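
_odp_ipsec_sa_stats_pkts() above derives the success count from the lifetime counter rather than a dedicated statistic: hot.packets is handed out to threads in pre-allocated quota chunks, so quota still unconsumed in thread-local state and packets that failed after the lifetime check must be subtracted back out. A small worked example with purely illustrative numbers (the real chunk size is SA_LIFE_PACKETS_PREALLOC, assumed to be 64 here):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PREALLOC    64   /* assumed per-thread packet quota chunk   */
#define NUM_THREADS 2    /* assumed number of worker threads        */

int main(void)
{
    /* hot.packets: lifetime counter, bumped one chunk at a time */
    uint64_t hot_packets = 3 * PREALLOC;          /* 192 reserved    */
    /* unused quota still sitting in thread-local state */
    uint32_t quota[NUM_THREADS] = { 10, 25 };     /* 35 not consumed */
    /* packets that failed after the lifetime check already ran */
    uint64_t post_lifetime_errs = 7;
    uint64_t success = hot_packets - post_lifetime_errs;
    int i;

    for (i = 0; i < NUM_THREADS; i++)
        success -= quota[i];

    /* 192 - 7 - 35 = 150 successfully processed packets */
    printf("success = %" PRIu64 "\n", success);
    return 0;
}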
diff --git a/platform/linux-generic/odp_packet.c b/platform/linux-generic/odp_packet.c
index 1a778ff73..dac2100ee 100644
--- a/platform/linux-generic/odp_packet.c
+++ b/platform/linux-generic/odp_packet.c
@@ -169,8 +169,9 @@ static inline void packet_seg_copy_md(odp_packet_hdr_t *dst,
* .tailroom
*/
- dst->input = src->input;
+ dst->input = src->input;
dst->dst_queue = src->dst_queue;
+ dst->cos = src->cos;
dst->flow_hash = src->flow_hash;
dst->timestamp = src->timestamp;
@@ -1566,10 +1567,11 @@ void odp_packet_print(odp_packet_t pkt)
int len = 0;
int n = max_len - 1;
odp_packet_hdr_t *hdr = packet_hdr(pkt);
- odp_buffer_t buf = packet_to_buffer(pkt);
- len += snprintf(&str[len], n - len, "Packet ");
- len += odp_buffer_snprint(&str[len], n - len, buf);
+ len += snprintf(&str[len], n - len, "Packet\n------\n");
+ len += snprintf(&str[len], n - len, " pool index %u\n", hdr->buf_hdr.index.pool);
+ len += snprintf(&str[len], n - len, " buf index %u\n", hdr->buf_hdr.index.buffer);
+ len += snprintf(&str[len], n - len, " ev subtype %i\n", hdr->subtype);
len += snprintf(&str[len], n - len, " input_flags 0x%" PRIx64 "\n",
hdr->p.input_flags.all);
if (hdr->p.input_flags.all) {
@@ -1649,7 +1651,7 @@ void odp_packet_print_data(odp_packet_t pkt, uint32_t offset,
" buf index %" PRIu32 "\n",
hdr->buf_hdr.index.buffer);
len += snprintf(&str[len], n - len,
- " seg_count %" PRIu16 "\n", hdr->seg_count);
+ " seg_count %" PRIu16 "\n", hdr->seg_count);
len += snprintf(&str[len], n - len,
" data len %" PRIu32 "\n", data_len);
len += snprintf(&str[len], n - len,
@@ -1693,11 +1695,32 @@ void odp_packet_print_data(odp_packet_t pkt, uint32_t offset,
int odp_packet_is_valid(odp_packet_t pkt)
{
- if (odp_buffer_is_valid(packet_to_buffer(pkt)) == 0)
+ odp_event_t ev;
+
+ if (pkt == ODP_PACKET_INVALID)
+ return 0;
+
+ if (_odp_buffer_is_valid(packet_to_buffer(pkt)) == 0)
+ return 0;
+
+ ev = odp_packet_to_event(pkt);
+
+ if (odp_event_type(ev) != ODP_EVENT_PACKET)
return 0;
- if (odp_event_type(odp_packet_to_event(pkt)) != ODP_EVENT_PACKET)
+ switch (odp_event_subtype(ev)) {
+ case ODP_EVENT_PACKET_BASIC:
+ /* Fall through */
+ case ODP_EVENT_PACKET_COMP:
+ /* Fall through */
+ case ODP_EVENT_PACKET_CRYPTO:
+ /* Fall through */
+ case ODP_EVENT_PACKET_IPSEC:
+ /* Fall through */
+ break;
+ default:
return 0;
+ }
return 1;
}
@@ -1720,6 +1743,7 @@ int _odp_packet_copy_md_to_packet(odp_packet_t srcpkt, odp_packet_t dstpkt)
dsthdr->input = srchdr->input;
dsthdr->dst_queue = srchdr->dst_queue;
+ dsthdr->cos = srchdr->cos;
dsthdr->cls_mark = srchdr->cls_mark;
dsthdr->buf_hdr.user_ptr = srchdr->buf_hdr.user_ptr;
if (dsthdr->buf_hdr.uarea_addr != NULL &&
@@ -1916,7 +1940,7 @@ static inline uint16_t parse_eth(packet_parser_t *prs, const uint8_t **parseptr,
goto error;
}
ethtype = odp_be_to_cpu_16(*((const uint16_t *)(uintptr_t)
- (parseptr + 6)));
+ (*parseptr + 6)));
*offset += 8;
*parseptr += 8;
}
@@ -2287,9 +2311,9 @@ int packet_parse_common_l3_l4(packet_parser_t *prs, const uint8_t *parseptr,
* The function expects at least PACKET_PARSE_SEG_LEN bytes of data to be
* available from the ptr. Also parse metadata must be already initialized.
*/
-int packet_parse_common(packet_parser_t *prs, const uint8_t *ptr,
- uint32_t frame_len, uint32_t seg_len,
- int layer, odp_proto_chksums_t chksums)
+int _odp_packet_parse_common(packet_parser_t *prs, const uint8_t *ptr,
+ uint32_t frame_len, uint32_t seg_len,
+ int layer, odp_proto_chksums_t chksums)
{
uint32_t offset;
uint16_t ethtype;
@@ -2558,9 +2582,9 @@ static int packet_l4_chksum(odp_packet_hdr_t *pkt_hdr,
/**
* Simple packet parser
*/
-int packet_parse_layer(odp_packet_hdr_t *pkt_hdr,
- odp_proto_layer_t layer,
- odp_proto_chksums_t chksums)
+int _odp_packet_parse_layer(odp_packet_hdr_t *pkt_hdr,
+ odp_proto_layer_t layer,
+ odp_proto_chksums_t chksums)
{
uint32_t seg_len = packet_first_seg_len(pkt_hdr);
const uint8_t *base = packet_data(pkt_hdr);
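
Besides the renames, the odp_packet.c hunk above fixes parse_eth(): the EtherType after an 8-byte SNAP header was read via (parseptr + 6), i.e. pointer arithmetic on the uint8_t ** cursor itself, while the corrected (*parseptr + 6) indexes six bytes into the frame data. A tiny illustrative sketch of the distinction (not ODP code):

#include <stdint.h>
#include <stdio.h>

/* With a uint8_t ** cursor, 'cursor + 6' steps over six pointers,
 * while '*cursor + 6' steps six bytes into the data being parsed. */
int main(void)
{
    uint8_t frame[16] = { 0 };

    frame[6] = 0x08;    /* pretend EtherType high byte */
    frame[7] = 0x00;    /* pretend EtherType low byte  */

    const uint8_t *pos = frame;
    const uint8_t **cursor = &pos;
    const uint8_t *right = *cursor + 6;    /* &frame[6]: intended */
    /* (const uint8_t *)(cursor + 6) would instead point
     * 6 * sizeof(void *) bytes past 'pos' on the stack, not into frame */

    printf("bytes at offset 6: %02x %02x\n", right[0], right[1]);
    return 0;
}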
diff --git a/platform/linux-generic/odp_packet_api.c b/platform/linux-generic/odp_packet_api.c
index 6f81f4ce4..e9be4ca4e 100644
--- a/platform/linux-generic/odp_packet_api.c
+++ b/platform/linux-generic/odp_packet_api.c
@@ -26,3 +26,4 @@
/* Include non-inlined versions of API functions */
#define _ODP_NO_INLINE
#include <odp/api/plat/packet_inlines.h>
+#include <odp/api/plat/packet_vector_inlines.h>
diff --git a/platform/linux-generic/odp_packet_io.c b/platform/linux-generic/odp_packet_io.c
index 4ae03de36..e46da443b 100644
--- a/platform/linux-generic/odp_packet_io.c
+++ b/platform/linux-generic/odp_packet_io.c
@@ -29,6 +29,7 @@
#include <odp_pcapng.h>
#include <odp/api/plat/queue_inlines.h>
#include <odp_libconfig_internal.h>
+#include <odp_event_vector_internal.h>
#include <string.h>
#include <inttypes.h>
@@ -52,11 +53,16 @@
static pktio_global_t *pktio_global;
/* pktio pointer entries ( for inlines) */
-void *pktio_entry_ptr[ODP_CONFIG_PKTIO_ENTRIES];
+void *_odp_pktio_entry_ptr[ODP_CONFIG_PKTIO_ENTRIES];
static inline pktio_entry_t *pktio_entry_by_index(int index)
{
- return pktio_entry_ptr[index];
+ return _odp_pktio_entry_ptr[index];
+}
+
+static inline odp_buffer_hdr_t *packet_vector_to_buf_hdr(odp_packet_vector_t pktv)
+{
+ return &_odp_packet_vector_hdr(pktv)->buf_hdr;
}
static int read_config_file(pktio_global_t *pktio_glb)
@@ -117,7 +123,7 @@ int _odp_pktio_init_global(void)
odp_spinlock_init(&pktio_entry->s.cls.l2_cos_table.lock);
odp_spinlock_init(&pktio_entry->s.cls.l3_cos_table.lock);
- pktio_entry_ptr[i] = pktio_entry;
+ _odp_pktio_entry_ptr[i] = pktio_entry;
}
for (pktio_if = 0; pktio_if_ops[pktio_if]; ++pktio_if) {
@@ -199,7 +205,7 @@ static void init_pktio_entry(pktio_entry_t *entry)
init_in_queues(entry);
init_out_queues(entry);
- pktio_classifier_init(entry);
+ _odp_pktio_classifier_init(entry);
}
static odp_pktio_t alloc_lock_pktio_entry(void)
@@ -314,6 +320,7 @@ static odp_pktio_t setup_pktio_entry(const char *name, odp_pool_t pool,
pktio_entry->s.handle = hdl;
pktio_entry->s.pktin_frame_offset = pktin_frame_offset;
odp_atomic_init_u64(&pktio_entry->s.stats_extra.in_discards, 0);
+ odp_atomic_init_u64(&pktio_entry->s.stats_extra.out_discards, 0);
/* Tx timestamping is disabled by default */
pktio_entry->s.enabled.tx_ts = 0;
@@ -368,12 +375,13 @@ static const char *driver_name(odp_pktio_t hdl)
{
pktio_entry_t *entry;
- if (hdl != ODP_PKTIO_INVALID) {
- entry = get_pktio_entry(hdl);
- return entry->s.ops->name;
+ entry = get_pktio_entry(hdl);
+ if (entry == NULL) {
+ ODP_ERR("pktio entry %d does not exist\n", hdl);
+ return "bad handle";
}
- return "bad handle";
+ return entry->s.ops->name;
}
odp_pktio_t odp_pktio_open(const char *name, odp_pool_t pool,
@@ -624,7 +632,7 @@ int odp_pktio_start(odp_pktio_t hdl)
}
}
- sched_fn->pktio_start(odp_pktio_index(hdl), num, index, odpq);
+ _odp_sched_fn->pktio_start(odp_pktio_index(hdl), num, index, odpq);
}
ODP_DBG("interface: %s, input queues: %u, output queues: %u\n",
@@ -718,18 +726,119 @@ odp_pktio_t odp_pktio_lookup(const char *name)
return hdl;
}
+static void packet_vector_enq_cos(odp_queue_t queue, odp_event_t events[],
+ uint32_t num, const cos_t *cos_hdr)
+{
+ odp_packet_vector_t pktv;
+ odp_pool_t pool = cos_hdr->s.vector.pool;
+ uint32_t max_size = cos_hdr->s.vector.max_size;
+ uint32_t num_enq;
+ int num_pktv = (num + max_size - 1) / max_size;
+ int ret;
+ int i;
+ odp_packet_vector_t pktv_tbl[num_pktv];
+ odp_event_t event_tbl[num_pktv];
+
+ for (i = 0; i < num_pktv; i++) {
+ pktv = odp_packet_vector_alloc(pool);
+ if (odp_unlikely(pktv == ODP_PACKET_VECTOR_INVALID))
+ break;
+ pktv_tbl[i] = pktv;
+ event_tbl[i] = odp_packet_vector_to_event(pktv);
+ }
+ if (odp_unlikely(i == 0)) {
+ odp_event_free_multi(events, num);
+ return;
+ }
+ num_pktv = i;
+ num_enq = 0;
+ for (i = 0; i < num_pktv; i++) {
+ odp_packet_t *pkt_tbl;
+ int pktv_size = max_size;
+
+ pktv = pktv_tbl[i];
+
+ if (num_enq + max_size > num)
+ pktv_size = num - num_enq;
+
+ odp_packet_vector_tbl(pktv, &pkt_tbl);
+ odp_packet_from_event_multi(pkt_tbl, &events[num_enq], pktv_size);
+ odp_packet_vector_size_set(pktv, pktv_size);
+ num_enq += pktv_size;
+ }
+
+ ret = odp_queue_enq_multi(queue, event_tbl, num_pktv);
+ if (odp_unlikely(ret != num_pktv)) {
+ if (ret < 0)
+ ret = 0;
+ odp_event_free_multi(&event_tbl[ret], num_pktv - ret);
+ }
+}
+
+static void packet_vector_enq(odp_queue_t queue, odp_event_t events[],
+ uint32_t num, odp_pool_t pool)
+{
+ odp_packet_vector_t pktv;
+ odp_packet_t *pkt_tbl;
+
+ pktv = odp_packet_vector_alloc(pool);
+ if (odp_unlikely(pktv == ODP_PACKET_VECTOR_INVALID)) {
+ odp_event_free_multi(events, num);
+ return;
+ }
+
+ odp_packet_vector_tbl(pktv, &pkt_tbl);
+ odp_packet_from_event_multi(pkt_tbl, events, num);
+ odp_packet_vector_size_set(pktv, num);
+
+ if (odp_unlikely(odp_queue_enq(queue, odp_packet_vector_to_event(pktv))))
+ odp_event_free(odp_packet_vector_to_event(pktv));
+}
+
+static inline odp_packet_vector_t packet_vector_create(odp_packet_t packets[], uint32_t num,
+ odp_pool_t pool)
+{
+ odp_packet_vector_t pktv;
+ odp_packet_t *pkt_tbl;
+ uint32_t i;
+
+ pktv = odp_packet_vector_alloc(pool);
+ if (odp_unlikely(pktv == ODP_PACKET_VECTOR_INVALID)) {
+ odp_packet_free_multi(packets, num);
+ return ODP_PACKET_VECTOR_INVALID;
+ }
+
+ odp_packet_vector_tbl(pktv, &pkt_tbl);
+ for (i = 0; i < num; i++)
+ pkt_tbl[i] = packets[i];
+ odp_packet_vector_size_set(pktv, num);
+
+ return pktv;
+}
+
static inline int pktin_recv_buf(pktio_entry_t *entry, int pktin_index,
odp_buffer_hdr_t *buffer_hdrs[], int num)
{
odp_packet_t pkt;
odp_packet_t packets[num];
odp_packet_hdr_t *pkt_hdr;
+ odp_pool_t pool = ODP_POOL_INVALID;
odp_buffer_hdr_t *buf_hdr;
int i, pkts, num_rx, num_ev, num_dst;
odp_queue_t cur_queue;
odp_event_t ev[num];
odp_queue_t dst[num];
+ uint16_t cos[num];
+ uint16_t cur_cos = 0;
int dst_idx[num];
+ odp_bool_t vector_enabled = entry->s.in_queue[pktin_index].vector.enable;
+
+ if (vector_enabled) {
+ /* Make sure all packets will fit into a single packet vector */
+ if ((int)entry->s.in_queue[pktin_index].vector.max_size < num)
+ num = entry->s.in_queue[pktin_index].vector.max_size;
+ pool = entry->s.in_queue[pktin_index].vector.pool;
+ }
num_rx = 0;
num_dst = 0;
@@ -746,19 +855,24 @@ static inline int pktin_recv_buf(pktio_entry_t *entry, int pktin_index,
buf_hdr = packet_to_buf_hdr(pkt);
if (odp_unlikely(pkt_hdr->p.input_flags.dst_queue)) {
- /* Sort events for enqueue multi operation(s) */
+ /* Sort events for enqueue multi operation(s) based on CoS
+ * and destination queue. */
if (odp_unlikely(num_dst == 0)) {
num_dst = 1;
cur_queue = pkt_hdr->dst_queue;
+ cur_cos = pkt_hdr->cos;
dst[0] = cur_queue;
+ cos[0] = cur_cos;
dst_idx[0] = 0;
}
ev[num_ev] = odp_packet_to_event(pkt);
- if (cur_queue != pkt_hdr->dst_queue) {
+ if (cur_queue != pkt_hdr->dst_queue || cur_cos != pkt_hdr->cos) {
cur_queue = pkt_hdr->dst_queue;
+ cur_cos = pkt_hdr->cos;
dst[num_dst] = cur_queue;
+ cos[num_dst] = cur_cos;
dst_idx[num_dst] = num_ev;
num_dst++;
}
@@ -770,8 +884,20 @@ static inline int pktin_recv_buf(pktio_entry_t *entry, int pktin_index,
}
/* Optimization for the common case */
- if (odp_likely(num_dst == 0))
- return num_rx;
+ if (odp_likely(num_dst == 0)) {
+ if (!vector_enabled || num_rx < 1)
+ return num_rx;
+
+ /* Create packet vector */
+ odp_packet_vector_t pktv = packet_vector_create((odp_packet_t *)buffer_hdrs,
+ num_rx, pool);
+
+ if (odp_unlikely(pktv == ODP_PACKET_VECTOR_INVALID))
+ return 0;
+
+ buffer_hdrs[0] = packet_vector_to_buf_hdr(pktv);
+ return 1;
+ }
for (i = 0; i < num_dst; i++) {
int num_enq, ret;
@@ -782,6 +908,20 @@ static inline int pktin_recv_buf(pktio_entry_t *entry, int pktin_index,
else
num_enq = dst_idx[i + 1] - idx;
+ if (cos[i] != CLS_COS_IDX_NONE) {
+ /* Packets from classifier */
+ cos_t *cos_hdr = _odp_cos_entry_from_idx(cos[i]);
+
+ if (cos_hdr->s.vector.enable) {
+ packet_vector_enq_cos(dst[i], &ev[idx], num_enq, cos_hdr);
+ continue;
+ }
+ } else if (vector_enabled) {
+ /* Packets from inline IPsec */
+ packet_vector_enq(dst[i], &ev[idx], num_enq, pool);
+ continue;
+ }
+
ret = odp_queue_enq_multi(dst[i], &ev[idx], num_enq);
if (ret < 0)
@@ -794,34 +934,99 @@ static inline int pktin_recv_buf(pktio_entry_t *entry, int pktin_index,
return num_rx;
}
+static inline int packet_vector_send(odp_pktout_queue_t pktout_queue, odp_event_t event)
+{
+ odp_packet_vector_t pktv = odp_packet_vector_from_event(event);
+ odp_packet_t *pkt_tbl;
+ int num, sent;
+
+ num = odp_packet_vector_tbl(pktv, &pkt_tbl);
+ ODP_ASSERT(num > 0);
+ sent = odp_pktout_send(pktout_queue, pkt_tbl, num);
+
+ /* Return success if any packets were sent. Free the possible remaining
+ packets in the vector and increase out_discards count accordingly. */
+ if (odp_unlikely(sent <= 0)) {
+ return -1;
+ } else if (odp_unlikely(sent != num)) {
+ pktio_entry_t *entry = get_pktio_entry(pktout_queue.pktio);
+ int discards = num - sent;
+
+ ODP_ASSERT(entry != NULL);
+
+ odp_atomic_add_u64(&entry->s.stats_extra.out_discards, discards);
+ odp_packet_free_multi(&pkt_tbl[sent], discards);
+ }
+
+ odp_packet_vector_free(pktv);
+
+ return 0;
+}
+
static int pktout_enqueue(odp_queue_t queue, odp_buffer_hdr_t *buf_hdr)
{
+ odp_event_t event = odp_buffer_to_event(buf_from_buf_hdr(buf_hdr));
odp_packet_t pkt = packet_from_buf_hdr(buf_hdr);
+ odp_pktout_queue_t pktout_queue;
int len = 1;
int nbr;
- if (sched_fn->ord_enq_multi(queue, (void **)buf_hdr, len, &nbr))
+ if (_odp_sched_fn->ord_enq_multi(queue, (void **)buf_hdr, len, &nbr))
return (nbr == len ? 0 : -1);
- nbr = odp_pktout_send(queue_fn->get_pktout(queue), &pkt, len);
+ pktout_queue = _odp_queue_fn->get_pktout(queue);
+
+ if (odp_event_type(event) == ODP_EVENT_PACKET_VECTOR)
+ return packet_vector_send(pktout_queue, event);
+
+ nbr = odp_pktout_send(pktout_queue, &pkt, len);
return (nbr == len ? 0 : -1);
}
static int pktout_enq_multi(odp_queue_t queue, odp_buffer_hdr_t *buf_hdr[],
int num)
{
+ odp_event_t event;
odp_packet_t pkt_tbl[QUEUE_MULTI_MAX];
+ odp_pktout_queue_t pktout_queue;
+ int have_pktv = 0;
int nbr;
int i;
- if (sched_fn->ord_enq_multi(queue, (void **)buf_hdr, num, &nbr))
+ if (_odp_sched_fn->ord_enq_multi(queue, (void **)buf_hdr, num, &nbr))
return nbr;
- for (i = 0; i < num; ++i)
+ for (i = 0; i < num; ++i) {
+ event = odp_buffer_to_event(buf_from_buf_hdr(buf_hdr[i]));
+
+ if (odp_event_type(event) == ODP_EVENT_PACKET_VECTOR) {
+ have_pktv = 1;
+ break;
+ }
+
pkt_tbl[i] = packet_from_buf_hdr(buf_hdr[i]);
+ }
- nbr = odp_pktout_send(queue_fn->get_pktout(queue), pkt_tbl, num);
- return nbr;
+ pktout_queue = _odp_queue_fn->get_pktout(queue);
+
+ if (!have_pktv)
+ return odp_pktout_send(pktout_queue, pkt_tbl, num);
+
+ for (i = 0; i < num; ++i) {
+ event = odp_buffer_to_event(buf_from_buf_hdr(buf_hdr[i]));
+
+ if (odp_event_type(event) == ODP_EVENT_PACKET_VECTOR) {
+ if (odp_unlikely(packet_vector_send(pktout_queue, event)))
+ break;
+ } else {
+ odp_packet_t pkt = packet_from_buf_hdr(buf_hdr[i]);
+
+ nbr = odp_pktout_send(pktout_queue, &pkt, 1);
+ if (odp_unlikely(nbr != 1))
+ break;
+ }
+ }
+ return i;
}
static odp_buffer_hdr_t *pktin_dequeue(odp_queue_t queue)
@@ -829,12 +1034,14 @@ static odp_buffer_hdr_t *pktin_dequeue(odp_queue_t queue)
odp_buffer_hdr_t *buf_hdr;
odp_buffer_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
int pkts;
- odp_pktin_queue_t pktin_queue = queue_fn->get_pktin(queue);
+ odp_pktin_queue_t pktin_queue = _odp_queue_fn->get_pktin(queue);
odp_pktio_t pktio = pktin_queue.pktio;
int pktin_index = pktin_queue.index;
pktio_entry_t *entry = get_pktio_entry(pktio);
- if (queue_fn->orig_deq_multi(queue, &buf_hdr, 1) == 1)
+ ODP_ASSERT(entry != NULL);
+
+ if (_odp_queue_fn->orig_deq_multi(queue, &buf_hdr, 1) == 1)
return buf_hdr;
pkts = pktin_recv_buf(entry, pktin_index, hdr_tbl, QUEUE_MULTI_MAX);
@@ -869,12 +1076,14 @@ static int pktin_deq_multi(odp_queue_t queue, odp_buffer_hdr_t *buf_hdr[],
int nbr;
odp_buffer_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
int pkts, i, j;
- odp_pktin_queue_t pktin_queue = queue_fn->get_pktin(queue);
+ odp_pktin_queue_t pktin_queue = _odp_queue_fn->get_pktin(queue);
odp_pktio_t pktio = pktin_queue.pktio;
int pktin_index = pktin_queue.index;
pktio_entry_t *entry = get_pktio_entry(pktio);
- nbr = queue_fn->orig_deq_multi(queue, buf_hdr, num);
+ ODP_ASSERT(entry != NULL);
+
+ nbr = _odp_queue_fn->orig_deq_multi(queue, buf_hdr, num);
if (odp_unlikely(nbr > num))
ODP_ABORT("queue_deq_multi req: %d, returned %d\n", num, nbr);
@@ -914,17 +1123,19 @@ static int pktin_deq_multi(odp_queue_t queue, odp_buffer_hdr_t *buf_hdr[],
return nbr;
}
-int sched_cb_pktin_poll_one(int pktio_index,
- int rx_queue,
- odp_event_t evt_tbl[QUEUE_MULTI_MAX])
+int _odp_sched_cb_pktin_poll_one(int pktio_index,
+ int rx_queue,
+ odp_event_t evt_tbl[QUEUE_MULTI_MAX])
{
int num_rx, num_pkts, i;
pktio_entry_t *entry = pktio_entry_by_index(pktio_index);
odp_packet_t pkt;
odp_packet_hdr_t *pkt_hdr;
- odp_buffer_hdr_t *buf_hdr;
+ odp_pool_t pool = ODP_POOL_INVALID;
odp_packet_t packets[QUEUE_MULTI_MAX];
odp_queue_t queue;
+ odp_bool_t vector_enabled = entry->s.in_queue[rx_queue].vector.enable;
+ uint32_t num = QUEUE_MULTI_MAX;
if (odp_unlikely(entry->s.state != PKTIO_STATE_STARTED)) {
if (entry->s.state < PKTIO_STATE_ACTIVE ||
@@ -935,23 +1146,40 @@ int sched_cb_pktin_poll_one(int pktio_index,
return 0;
}
+ if (vector_enabled) {
+ /* Make sure all packets will fit into a single packet vector */
+ if (entry->s.in_queue[rx_queue].vector.max_size < num)
+ num = entry->s.in_queue[rx_queue].vector.max_size;
+ pool = entry->s.in_queue[rx_queue].vector.pool;
+ }
+
ODP_ASSERT((unsigned int)rx_queue < entry->s.num_in_queue);
- num_pkts = entry->s.ops->recv(entry, rx_queue,
- packets, QUEUE_MULTI_MAX);
+ num_pkts = entry->s.ops->recv(entry, rx_queue, packets, num);
num_rx = 0;
for (i = 0; i < num_pkts; i++) {
pkt = packets[i];
pkt_hdr = packet_hdr(pkt);
if (odp_unlikely(pkt_hdr->p.input_flags.dst_queue)) {
- int num_enq;
+ odp_event_t event = odp_packet_to_event(pkt);
queue = pkt_hdr->dst_queue;
- buf_hdr = packet_to_buf_hdr(pkt);
- num_enq = odp_queue_enq_multi(queue,
- (odp_event_t *)&buf_hdr,
- 1);
- if (num_enq < 0) {
+
+ if (pkt_hdr->cos != CLS_COS_IDX_NONE) {
+ /* Packets from classifier */
+ cos_t *cos_hdr = _odp_cos_entry_from_idx(pkt_hdr->cos);
+
+ if (cos_hdr->s.vector.enable) {
+ packet_vector_enq_cos(queue, &event, 1, cos_hdr);
+ continue;
+ }
+ } else if (vector_enabled) {
+ /* Packets from inline IPsec */
+ packet_vector_enq(queue, &event, 1, pool);
+ continue;
+ }
+
+ if (odp_unlikely(odp_queue_enq(queue, event))) {
/* Queue full? */
odp_packet_free(pkt);
odp_atomic_inc_u64(&entry->s.stats_extra.in_discards);
@@ -960,11 +1188,24 @@ int sched_cb_pktin_poll_one(int pktio_index,
evt_tbl[num_rx++] = odp_packet_to_event(pkt);
}
}
+
+ /* Create packet vector */
+ if (vector_enabled && num_rx > 0) {
+ odp_packet_vector_t pktv = packet_vector_create((odp_packet_t *)evt_tbl,
+ num_rx, pool);
+
+ if (odp_unlikely(pktv == ODP_PACKET_VECTOR_INVALID))
+ return 0;
+
+ evt_tbl[0] = odp_packet_vector_to_event(pktv);
+ return 1;
+ }
+
return num_rx;
}
-int sched_cb_pktin_poll(int pktio_index, int pktin_index,
- odp_buffer_hdr_t *hdr_tbl[], int num)
+int _odp_sched_cb_pktin_poll(int pktio_index, int pktin_index,
+ odp_buffer_hdr_t *hdr_tbl[], int num)
{
pktio_entry_t *entry = pktio_entry_by_index(pktio_index);
int state = entry->s.state;
@@ -981,7 +1222,7 @@ int sched_cb_pktin_poll(int pktio_index, int pktin_index,
return pktin_recv_buf(entry, pktin_index, hdr_tbl, num);
}
-void sched_cb_pktio_stop_finalize(int pktio_index)
+void _odp_sched_cb_pktio_stop_finalize(int pktio_index)
{
int state;
pktio_entry_t *entry = pktio_entry_by_index(pktio_index);
@@ -1495,6 +1736,16 @@ int odp_pktio_capability(odp_pktio_t pktio, odp_pktio_capability_t *capa)
capa->config.pktout.bit.no_packet_refs = 1;
}
+ /* Packet vector generation is common for all pktio types */
+ if (ret == 0 && (entry->s.param.in_mode == ODP_PKTIN_MODE_QUEUE ||
+ entry->s.param.in_mode == ODP_PKTIN_MODE_SCHED)) {
+ capa->vector.supported = ODP_SUPPORT_YES;
+ capa->vector.max_size = CONFIG_PACKET_VECTOR_MAX_SIZE;
+ capa->vector.min_size = 1;
+ capa->vector.max_tmo_ns = 0;
+ capa->vector.min_tmo_ns = 0;
+ }
+
return ret;
}
@@ -1525,8 +1776,10 @@ int odp_pktio_stats(odp_pktio_t pktio,
if (entry->s.ops->stats)
ret = entry->s.ops->stats(entry, stats);
- if (odp_likely(ret == 0))
+ if (odp_likely(ret == 0)) {
stats->in_discards += odp_atomic_load_u64(&entry->s.stats_extra.in_discards);
+ stats->out_discards += odp_atomic_load_u64(&entry->s.stats_extra.out_discards);
+ }
unlock_entry(entry);
return ret;
@@ -1552,6 +1805,7 @@ int odp_pktio_stats_reset(odp_pktio_t pktio)
}
odp_atomic_store_u64(&entry->s.stats_extra.in_discards, 0);
+ odp_atomic_store_u64(&entry->s.stats_extra.out_discards, 0);
if (entry->s.ops->stats)
ret = entry->s.ops->stats_reset(entry);
unlock_entry(entry);
@@ -1614,6 +1868,45 @@ int odp_pktin_queue_config(odp_pktio_t pktio,
return -1;
}
+ /* Validate packet vector parameters */
+ if (param->vector.enable) {
+ odp_pool_t pool = param->vector.pool;
+ odp_pool_info_t pool_info;
+
+ if (mode == ODP_PKTIN_MODE_DIRECT) {
+ ODP_ERR("packet vectors not supported with ODP_PKTIN_MODE_DIRECT\n");
+ return -1;
+ }
+ if (param->vector.max_size < capa.vector.min_size) {
+ ODP_ERR("vector.max_size too small %" PRIu32 "\n",
+ param->vector.max_size);
+ return -1;
+ }
+ if (param->vector.max_size > capa.vector.max_size) {
+ ODP_ERR("vector.max_size too large %" PRIu32 "\n",
+ param->vector.max_size);
+ return -1;
+ }
+ if (param->vector.max_tmo_ns > capa.vector.max_tmo_ns) {
+ ODP_ERR("vector.max_tmo_ns too large %" PRIu64 "\n",
+ param->vector.max_tmo_ns);
+ return -1;
+ }
+
+ if (pool == ODP_POOL_INVALID || odp_pool_info(pool, &pool_info)) {
+ ODP_ERR("invalid packet vector pool\n");
+ return -1;
+ }
+ if (pool_info.params.type != ODP_POOL_VECTOR) {
+ ODP_ERR("wrong pool type\n");
+ return -1;
+ }
+ if (param->vector.max_size > pool_info.params.vector.max_size) {
+ ODP_ERR("vector.max_size larger than pool max vector size\n");
+ return -1;
+ }
+ }
+
/* If re-configuring, destroy old queues */
if (entry->s.num_in_queue)
destroy_in_queues(entry, entry->s.num_in_queue);
@@ -1657,12 +1950,12 @@ int odp_pktin_queue_config(odp_pktio_t pktio,
}
if (mode == ODP_PKTIN_MODE_QUEUE) {
- queue_fn->set_pktin(queue, pktio, i);
- queue_fn->set_enq_deq_fn(queue,
- NULL,
- NULL,
- pktin_dequeue,
- pktin_deq_multi);
+ _odp_queue_fn->set_pktin(queue, pktio, i);
+ _odp_queue_fn->set_enq_deq_fn(queue,
+ NULL,
+ NULL,
+ pktin_dequeue,
+ pktin_deq_multi);
}
entry->s.in_queue[i].queue = queue;
@@ -1673,6 +1966,7 @@ int odp_pktin_queue_config(odp_pktio_t pktio,
entry->s.in_queue[i].pktin.index = i;
entry->s.in_queue[i].pktin.pktio = entry->s.handle;
+ entry->s.in_queue[i].vector = param->vector;
}
entry->s.num_in_queue = num_queues;
@@ -1779,14 +2073,14 @@ int odp_pktout_queue_config(odp_pktio_t pktio,
return -1;
}
- queue_fn->set_pktout(queue, pktio, i);
+ _odp_queue_fn->set_pktout(queue, pktio, i);
/* Override default enqueue / dequeue functions */
- queue_fn->set_enq_deq_fn(queue,
- pktout_enqueue,
- pktout_enq_multi,
- NULL,
- NULL);
+ _odp_queue_fn->set_enq_deq_fn(queue,
+ pktout_enqueue,
+ pktout_enq_multi,
+ NULL,
+ NULL);
entry->s.out_queue[i].queue = queue;
}
@@ -2060,9 +2354,9 @@ int odp_pktin_recv_mq_tmo(const odp_pktin_queue_t queues[], unsigned int num_q,
if (wait == 0)
return 0;
- ret = sock_recv_mq_tmo_try_int_driven(queues, num_q, &lfrom,
- packets, num, wait,
- &trial_successful);
+ ret = _odp_sock_recv_mq_tmo_try_int_driven(queues, num_q, &lfrom,
+ packets, num, wait,
+ &trial_successful);
if (ret > 0 && from)
*from = lfrom;
if (trial_successful) {
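/*
 * Illustrative sketch, not part of the patch above: how an application could
 * enable the packet vector reception that odp_pktin_queue_config() now
 * validates. The helper name and parameter values are hypothetical; the ODP
 * calls and struct fields are the ones referenced in the diff, plus
 * odp_pktin_queue_param_init(), assumed from the public ODP API. Assumes a
 * vector pool (ODP_POOL_VECTOR) created beforehand.
 */
static int enable_pktin_vectors(odp_pktio_t pktio, odp_pool_t vec_pool)
{
	odp_pktio_capability_t capa;
	odp_pktin_queue_param_t qparam;

	if (odp_pktio_capability(pktio, &capa))
		return -1;

	/* Vector generation is only reported for QUEUE/SCHED input modes */
	if (capa.vector.supported != ODP_SUPPORT_YES)
		return -1;

	odp_pktin_queue_param_init(&qparam);
	qparam.vector.enable     = 1;
	qparam.vector.pool       = vec_pool;
	qparam.vector.max_size   = capa.vector.max_size; /* pool vector.max_size must be at least this */
	qparam.vector.max_tmo_ns = capa.vector.max_tmo_ns;

	return odp_pktin_queue_config(pktio, &qparam);
}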
diff --git a/platform/linux-generic/odp_packet_vector.c b/platform/linux-generic/odp_packet_vector.c
new file mode 100644
index 000000000..98f373814
--- /dev/null
+++ b/platform/linux-generic/odp_packet_vector.c
@@ -0,0 +1,133 @@
+/* Copyright (c) 2020, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/align.h>
+#include <odp/api/buffer.h>
+#include <odp/api/hints.h>
+#include <odp/api/packet.h>
+#include <odp/api/pool.h>
+#include <odp/api/plat/packet_vector_inlines.h>
+#include <odp/api/plat/strong_types.h>
+
+#include <odp_debug_internal.h>
+#include <odp_event_vector_internal.h>
+#include <odp_pool_internal.h>
+
+#include <inttypes.h>
+#include <stdint.h>
+
+#include <odp/visibility_begin.h>
+
+/* Packet vector header field offsets for inline functions */
+const _odp_event_vector_inline_offset_t _odp_event_vector_inline ODP_ALIGNED_CACHE = {
+ .packet = offsetof(odp_event_vector_hdr_t, packet),
+ .pool = offsetof(odp_event_vector_hdr_t, buf_hdr.pool_ptr),
+ .size = offsetof(odp_event_vector_hdr_t, size)
+};
+
+#include <odp/visibility_end.h>
+
+static inline odp_event_vector_hdr_t *event_vector_hdr_from_buffer(odp_buffer_t buf)
+{
+ return (odp_event_vector_hdr_t *)(uintptr_t)buf;
+}
+
+odp_packet_vector_t odp_packet_vector_alloc(odp_pool_t pool)
+{
+ odp_buffer_t buf;
+
+ ODP_ASSERT(pool_entry_from_hdl(pool)->params.type == ODP_POOL_VECTOR);
+
+ buf = odp_buffer_alloc(pool);
+ if (odp_unlikely(buf == ODP_BUFFER_INVALID))
+ return ODP_PACKET_VECTOR_INVALID;
+
+ ODP_ASSERT(event_vector_hdr_from_buffer(buf)->size == 0);
+
+ return odp_packet_vector_from_event(odp_buffer_to_event(buf));
+}
+
+void odp_packet_vector_free(odp_packet_vector_t pktv)
+{
+ odp_event_vector_hdr_t *pktv_hdr = _odp_packet_vector_hdr(pktv);
+ odp_event_t ev = odp_packet_vector_to_event(pktv);
+
+ pktv_hdr->size = 0;
+
+ odp_buffer_free(odp_buffer_from_event(ev));
+}
+
+int odp_packet_vector_valid(odp_packet_vector_t pktv)
+{
+ odp_event_vector_hdr_t *pktv_hdr;
+ odp_event_t ev;
+ pool_t *pool;
+ uint32_t i;
+
+ if (odp_unlikely(pktv == ODP_PACKET_VECTOR_INVALID))
+ return 0;
+
+ if (_odp_buffer_is_valid((odp_buffer_t)pktv) == 0)
+ return 0;
+
+ ev = odp_packet_vector_to_event(pktv);
+
+ if (odp_event_type(ev) != ODP_EVENT_PACKET_VECTOR)
+ return 0;
+
+ pktv_hdr = _odp_packet_vector_hdr(pktv);
+ pool = pktv_hdr->buf_hdr.pool_ptr;
+
+ if (odp_unlikely(pktv_hdr->size > pool->params.vector.max_size))
+ return 0;
+
+ for (i = 0; i < pktv_hdr->size; i++) {
+ if (pktv_hdr->packet[i] == ODP_PACKET_INVALID)
+ return 0;
+ }
+
+ return 1;
+}
+
+void odp_packet_vector_print(odp_packet_vector_t pktv)
+{
+ int max_len = 4096;
+ char str[max_len];
+ int len = 0;
+ int n = max_len - 1;
+ uint32_t i;
+ odp_event_vector_hdr_t *pktv_hdr = _odp_packet_vector_hdr(pktv);
+
+ len += snprintf(&str[len], n - len, "Packet Vector\n");
+ len += snprintf(&str[len], n - len,
+ " handle %p\n", pktv);
+ len += snprintf(&str[len], n - len,
+ " size %" PRIu32 "\n", pktv_hdr->size);
+
+ for (i = 0; i < pktv_hdr->size; i++) {
+ odp_packet_t pkt = pktv_hdr->packet[i];
+ char seg_str[max_len];
+ int str_len;
+
+ str_len = snprintf(seg_str, max_len,
+ " packet %p len %" PRIu32 "\n",
+ pkt, odp_packet_len(pkt));
+
+ /* Prevent print buffer overflow */
+ if (n - len - str_len < 10) {
+ len += snprintf(&str[len], n - len, " ...\n");
+ break;
+ }
+ len += snprintf(&str[len], n - len, "%s", seg_str);
+ }
+
+ ODP_PRINT("%s\n", str);
+}
+
+uint64_t odp_packet_vector_to_u64(odp_packet_vector_t pktv)
+{
+ return _odp_pri(pktv);
+}
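/*
 * Illustrative usage sketch, not part of the new file above: creating an
 * ODP_POOL_VECTOR pool and allocating a packet vector with the functions
 * defined in odp_packet_vector.c. Pool name and sizing values are
 * hypothetical examples.
 */
static odp_packet_vector_t vector_alloc_example(void)
{
	odp_pool_param_t params;
	odp_pool_t pool;
	odp_packet_vector_t pktv;

	odp_pool_param_init(&params);
	params.type            = ODP_POOL_VECTOR;
	params.vector.num      = 1024; /* vector events in the pool */
	params.vector.max_size = 32;   /* packet handles per vector */

	pool = odp_pool_create("example_vector_pool", &params);
	if (pool == ODP_POOL_INVALID)
		return ODP_PACKET_VECTOR_INVALID;

	pktv = odp_packet_vector_alloc(pool);
	if (pktv != ODP_PACKET_VECTOR_INVALID)
		odp_packet_vector_print(pktv); /* freshly allocated vector has size 0 */

	return pktv;
}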
diff --git a/platform/linux-generic/odp_pool.c b/platform/linux-generic/odp_pool.c
index 236a57f4e..d1fb7c933 100644
--- a/platform/linux-generic/odp_pool.c
+++ b/platform/linux-generic/odp_pool.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2013-2018, Linaro Limited
- * Copyright (c) 2019, Nokia
+ * Copyright (c) 2019-2020, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -23,6 +23,7 @@
#include <odp_libconfig_internal.h>
#include <odp_shm_internal.h>
#include <odp_timer_internal.h>
+#include <odp_event_vector_internal.h>
#include <string.h>
#include <stdio.h>
@@ -55,13 +56,6 @@ typedef struct pool_local_t {
pool_cache_t *cache[ODP_CONFIG_POOLS];
int thr_id;
- /* Number of event allocs and frees by this thread. */
- struct {
- uint64_t num_alloc;
- uint64_t num_free;
-
- } stat[ODP_CONFIG_POOLS];
-
} pool_local_t;
pool_global_t *_odp_pool_glb;
@@ -141,6 +135,17 @@ static void cache_flush(pool_cache_t *cache, pool_t *pool)
ring_ptr_enq(ring, mask, buf_hdr);
}
+static inline uint64_t cache_total_available(pool_t *pool)
+{
+ uint64_t cached = 0;
+ int i;
+
+ for (i = 0; i < ODP_THREAD_COUNT_MAX; i++)
+ cached += pool->local_cache[i].cache_num;
+
+ return cached;
+}
+
static int read_config_file(pool_global_t *pool_glb)
{
uint32_t local_cache_size, burst_size, align;
@@ -281,6 +286,9 @@ int _odp_pool_init_global(void)
ODP_DBG("\nPool init global\n");
ODP_DBG(" odp_buffer_hdr_t size %zu\n", sizeof(odp_buffer_hdr_t));
ODP_DBG(" odp_packet_hdr_t size %zu\n", sizeof(odp_packet_hdr_t));
+ ODP_DBG(" odp_timeout_hdr_t size %zu\n", sizeof(odp_timeout_hdr_t));
+ ODP_DBG(" odp_event_vector_hdr_t size %zu\n", sizeof(odp_event_vector_hdr_t));
+
ODP_DBG("\n");
return 0;
}
@@ -341,18 +349,6 @@ int _odp_pool_term_local(void)
pool_t *pool = pool_entry(i);
cache_flush(local.cache[i], pool);
-
- if (ODP_DEBUG == 1) {
- uint64_t num_alloc = local.stat[i].num_alloc;
- uint64_t num_free = local.stat[i].num_free;
-
- if (num_alloc || num_free) {
- ODP_DBG("Pool[%i] stats: thr %i, "
- "allocs % " PRIu64 ", "
- "frees % " PRIu64 "\n",
- i, local.thr_id, num_alloc, num_free);
- }
- }
}
return 0;
@@ -397,6 +393,7 @@ static void init_buffers(pool_t *pool)
uint64_t i;
odp_buffer_hdr_t *buf_hdr;
odp_packet_hdr_t *pkt_hdr;
+ odp_event_vector_hdr_t *vect_hdr;
odp_shm_info_t shm_info;
void *addr;
void *uarea = NULL;
@@ -423,6 +420,7 @@ static void init_buffers(pool_t *pool)
pool->block_offset];
buf_hdr = addr;
pkt_hdr = addr;
+ vect_hdr = addr;
/* Skip packet buffers which cross huge page boundaries. Some
* NICs cannot handle buffers which cross page boundaries. */
if (pool->params.type == ODP_POOL_PACKET &&
@@ -462,6 +460,8 @@ static void init_buffers(pool_t *pool)
buf_hdr->index.buffer = i;
buf_hdr->type = type;
buf_hdr->event_type = type;
+ if (type == ODP_POOL_VECTOR)
+ buf_hdr->event_type = ODP_EVENT_PACKET_VECTOR;
buf_hdr->pool_ptr = pool;
buf_hdr->uarea_addr = uarea;
@@ -475,6 +475,10 @@ static void init_buffers(pool_t *pool)
odp_atomic_init_u32(&buf_hdr->ref_cnt, 0);
+ /* Initialize event vector metadata */
+ if (type == ODP_POOL_VECTOR)
+ vect_hdr->size = 0;
+
/* Store base values for fast init */
buf_hdr->base_data = &data[offset];
buf_hdr->buf_end = &data[offset + pool->seg_len +
@@ -600,6 +604,12 @@ static odp_pool_t pool_create(const char *name, const odp_pool_param_t *params,
cache_size = params->tmo.cache_size;
break;
+ case ODP_POOL_VECTOR:
+ num = params->vector.num;
+ cache_size = params->vector.cache_size;
+ seg_len = params->vector.max_size * sizeof(odp_packet_t);
+ break;
+
default:
ODP_ERR("Bad pool type\n");
return ODP_POOL_INVALID;
@@ -656,9 +666,12 @@ static odp_pool_t pool_create(const char *name, const odp_pool_param_t *params,
uint32_t align_pad = (align > ODP_CACHE_LINE_SIZE) ?
align - ODP_CACHE_LINE_SIZE : 0;
- hdr_size = (params->type == ODP_POOL_BUFFER) ?
- ROUNDUP_CACHE_LINE(sizeof(odp_buffer_hdr_t)) :
- ROUNDUP_CACHE_LINE(sizeof(odp_timeout_hdr_t));
+ if (params->type == ODP_POOL_BUFFER)
+ hdr_size = ROUNDUP_CACHE_LINE(sizeof(odp_buffer_hdr_t));
+ else if (params->type == ODP_POOL_TIMEOUT)
+ hdr_size = ROUNDUP_CACHE_LINE(sizeof(odp_timeout_hdr_t));
+ else
+ hdr_size = ROUNDUP_CACHE_LINE(sizeof(odp_event_vector_hdr_t));
block_size = ROUNDUP_CACHE_LINE(hdr_size + align_pad + seg_len);
}
@@ -720,6 +733,7 @@ static odp_pool_t pool_create(const char *name, const odp_pool_param_t *params,
pool->mem_from_huge_pages = shm_is_from_huge_pages(pool->shm);
pool->base_addr = odp_shm_addr(pool->shm);
+ pool->max_addr = pool->base_addr + pool->shm_size - 1;
pool->uarea_shm = ODP_SHM_INVALID;
if (uarea_size) {
@@ -745,6 +759,13 @@ static odp_pool_t pool_create(const char *name, const odp_pool_param_t *params,
goto error;
}
+ /* Reset pool stats */
+ odp_atomic_init_u64(&pool->stats.alloc_ops, 0);
+ odp_atomic_init_u64(&pool->stats.alloc_fails, 0);
+ odp_atomic_init_u64(&pool->stats.free_ops, 0);
+ odp_atomic_init_u64(&pool->stats.cache_alloc_ops, 0);
+ odp_atomic_init_u64(&pool->stats.cache_free_ops, 0);
+
return pool->pool_hdl;
error:
@@ -793,6 +814,11 @@ static int check_params(const odp_pool_param_t *params)
return -1;
}
+ if (params->stats.all & ~capa.buf.stats.all) {
+ ODP_ERR("Unsupported pool statistics counter\n");
+ return -1;
+ }
+
break;
case ODP_POOL_PACKET:
@@ -839,6 +865,11 @@ static int check_params(const odp_pool_param_t *params)
return -1;
}
+ if (params->stats.all & ~capa.pkt.stats.all) {
+ ODP_ERR("Unsupported pool statistics counter\n");
+ return -1;
+ }
+
break;
case ODP_POOL_TIMEOUT:
@@ -849,6 +880,43 @@ static int check_params(const odp_pool_param_t *params)
ODP_ERR("tmo.num too large %u\n", params->tmo.num);
return -1;
}
+
+ if (params->stats.all & ~capa.tmo.stats.all) {
+ ODP_ERR("Unsupported pool statistics counter\n");
+ return -1;
+ }
+
+ break;
+
+ case ODP_POOL_VECTOR:
+ num = params->vector.num;
+ cache_size = params->vector.cache_size;
+
+ if (params->vector.num == 0) {
+ ODP_ERR("vector.num zero\n");
+ return -1;
+ }
+
+ if (params->vector.num > capa.vector.max_num) {
+ ODP_ERR("vector.num too large %u\n", params->vector.num);
+ return -1;
+ }
+
+ if (params->vector.max_size == 0) {
+ ODP_ERR("vector.max_size zero\n");
+ return -1;
+ }
+
+ if (params->vector.max_size > capa.vector.max_size) {
+ ODP_ERR("vector.max_size too large %u\n", params->vector.max_size);
+ return -1;
+ }
+
+ if (params->stats.all & ~capa.vector.stats.all) {
+ ODP_ERR("Unsupported pool statistics counter\n");
+ return -1;
+ }
+
break;
default:
@@ -968,7 +1036,7 @@ int odp_pool_info(odp_pool_t pool_hdl, odp_pool_info_t *info)
info->pkt.max_num = pool->num;
info->min_data_addr = (uintptr_t)pool->base_addr;
- info->max_data_addr = (uintptr_t)pool->base_addr + pool->shm_size - 1;
+ info->max_data_addr = (uintptr_t)pool->max_addr;
return 0;
}
@@ -986,6 +1054,9 @@ int buffer_alloc_multi(pool_t *pool, odp_buffer_hdr_t *buf_hdr[], int max_num)
/* First pull packets from local cache */
num_ch = cache_pop(cache, buf_hdr, max_num);
+ if (CONFIG_POOL_STATISTICS && pool->params.stats.bit.cache_alloc_ops && num_ch)
+ odp_atomic_inc_u64(&pool->stats.cache_alloc_ops);
+
/* If needed, get more from the global pool */
if (odp_unlikely(num_ch != (uint32_t)max_num)) {
uint32_t burst = burst_size;
@@ -1003,6 +1074,13 @@ int buffer_alloc_multi(pool_t *pool, odp_buffer_hdr_t *buf_hdr[], int max_num)
burst);
cache_num = burst - num_deq;
+ if (CONFIG_POOL_STATISTICS) {
+ if (pool->params.stats.bit.alloc_ops)
+ odp_atomic_inc_u64(&pool->stats.alloc_ops);
+ if (odp_unlikely(pool->params.stats.bit.alloc_fails && burst == 0))
+ odp_atomic_inc_u64(&pool->stats.alloc_fails);
+ }
+
if (odp_unlikely(burst < num_deq)) {
num_deq = burst;
cache_num = 0;
@@ -1023,9 +1101,6 @@ int buffer_alloc_multi(pool_t *pool, odp_buffer_hdr_t *buf_hdr[], int max_num)
num_alloc = num_ch + num_deq;
- if (ODP_DEBUG == 1)
- local.stat[pool_idx].num_alloc += num_alloc;
-
return num_alloc;
}
@@ -1038,9 +1113,6 @@ static inline void buffer_free_to_pool(pool_t *pool,
uint32_t cache_num, mask;
uint32_t cache_size = pool->cache_size;
- if (ODP_DEBUG == 1)
- local.stat[pool_idx].num_free += num;
-
/* Special case of a very large free. Move directly to
* the global pool. */
if (odp_unlikely(num > (int)cache_size)) {
@@ -1049,6 +1121,9 @@ static inline void buffer_free_to_pool(pool_t *pool,
ring_ptr_enq_multi(ring, mask, (void **)buf_hdr, num);
+ if (CONFIG_POOL_STATISTICS && pool->params.stats.bit.free_ops)
+ odp_atomic_inc_u64(&pool->stats.free_ops);
+
return;
}
@@ -1072,9 +1147,13 @@ static inline void buffer_free_to_pool(pool_t *pool,
cache_pop(cache, buf_hdr, burst);
ring_ptr_enq_multi(ring, mask, (void **)buf_hdr, burst);
+ if (CONFIG_POOL_STATISTICS && pool->params.stats.bit.free_ops)
+ odp_atomic_inc_u64(&pool->stats.free_ops);
}
cache_push(cache, buf_hdr, num);
+ if (CONFIG_POOL_STATISTICS && pool->params.stats.bit.cache_free_ops)
+ odp_atomic_inc_u64(&pool->stats.cache_free_ops);
}
void buffer_free_multi(odp_buffer_hdr_t *buf_hdr[], int num_total)
@@ -1147,6 +1226,7 @@ void odp_buffer_free_multi(const odp_buffer_t buf[], int num)
int odp_pool_capability(odp_pool_capability_t *capa)
{
+ odp_pool_stats_opt_t supported_stats;
uint32_t max_seg_len = CONFIG_PACKET_MAX_SEG_LEN;
/* Reserve one for internal usage */
int max_pools = ODP_CONFIG_POOLS - 1;
@@ -1155,6 +1235,16 @@ int odp_pool_capability(odp_pool_capability_t *capa)
capa->max_pools = max_pools;
+ supported_stats.all = 0;
+ supported_stats.bit.available = 1;
+ supported_stats.bit.alloc_ops = CONFIG_POOL_STATISTICS;
+ supported_stats.bit.alloc_fails = CONFIG_POOL_STATISTICS;
+ supported_stats.bit.free_ops = CONFIG_POOL_STATISTICS;
+ supported_stats.bit.total_ops = 0;
+ supported_stats.bit.cache_available = 1;
+ supported_stats.bit.cache_alloc_ops = CONFIG_POOL_STATISTICS;
+ supported_stats.bit.cache_free_ops = CONFIG_POOL_STATISTICS;
+
/* Buffer pools */
capa->buf.max_pools = max_pools;
capa->buf.max_align = ODP_CONFIG_BUFFER_ALIGN_MAX;
@@ -1162,6 +1252,7 @@ int odp_pool_capability(odp_pool_capability_t *capa)
capa->buf.max_num = CONFIG_POOL_MAX_NUM;
capa->buf.min_cache_size = 0;
capa->buf.max_cache_size = CONFIG_POOL_CACHE_MAX_SIZE;
+ capa->buf.stats.all = supported_stats.all;
/* Packet pools */
capa->pkt.max_pools = max_pools;
@@ -1177,13 +1268,22 @@ int odp_pool_capability(odp_pool_capability_t *capa)
capa->pkt.max_uarea_size = MAX_SIZE;
capa->pkt.min_cache_size = 0;
capa->pkt.max_cache_size = CONFIG_POOL_CACHE_MAX_SIZE;
+ capa->pkt.stats.all = supported_stats.all;
/* Timeout pools */
capa->tmo.max_pools = max_pools;
capa->tmo.max_num = CONFIG_POOL_MAX_NUM;
capa->tmo.min_cache_size = 0;
capa->tmo.max_cache_size = CONFIG_POOL_CACHE_MAX_SIZE;
-
+ capa->tmo.stats.all = supported_stats.all;
+
+ /* Vector pools */
+ capa->vector.max_pools = max_pools;
+ capa->vector.max_num = CONFIG_POOL_MAX_NUM;
+ capa->vector.max_size = CONFIG_PACKET_VECTOR_MAX_SIZE;
+ capa->vector.min_cache_size = 0;
+ capa->vector.max_cache_size = CONFIG_POOL_CACHE_MAX_SIZE;
+ capa->vector.stats.all = supported_stats.all;
return 0;
}
@@ -1202,7 +1302,8 @@ void odp_pool_print(odp_pool_t pool_hdl)
pool->params.type == ODP_POOL_BUFFER ? "buffer" :
(pool->params.type == ODP_POOL_PACKET ? "packet" :
(pool->params.type == ODP_POOL_TIMEOUT ? "timeout" :
- "unknown")));
+ (pool->params.type == ODP_POOL_VECTOR ? "vector" :
+ "unknown"))));
ODP_PRINT(" pool shm %" PRIu64 "\n",
odp_shm_to_u64(pool->shm));
ODP_PRINT(" user area shm %" PRIu64 "\n",
@@ -1217,6 +1318,7 @@ void odp_pool_print(odp_pool_t pool_hdl)
ODP_PRINT(" uarea size %u\n", pool->uarea_size);
ODP_PRINT(" shm size %" PRIu64 "\n", pool->shm_size);
ODP_PRINT(" base addr %p\n", pool->base_addr);
+ ODP_PRINT(" max addr %p\n", pool->max_addr);
ODP_PRINT(" uarea shm size %" PRIu64 "\n", pool->uarea_shm_size);
ODP_PRINT(" uarea base addr %p\n", pool->uarea_base_addr);
ODP_PRINT(" cache size %u\n", pool->cache_size);
@@ -1240,6 +1342,7 @@ void odp_pool_param_init(odp_pool_param_t *params)
params->buf.cache_size = default_cache_size;
params->pkt.cache_size = default_cache_size;
params->tmo.cache_size = default_cache_size;
+ params->vector.cache_size = default_cache_size;
}
uint64_t odp_pool_to_u64(odp_pool_t hdl)
@@ -1247,19 +1350,129 @@ uint64_t odp_pool_to_u64(odp_pool_t hdl)
return _odp_pri(hdl);
}
-int odp_buffer_is_valid(odp_buffer_t buf)
+unsigned int odp_pool_max_index(void)
+{
+ return ODP_CONFIG_POOLS - 1;
+}
+
+int odp_pool_index(odp_pool_t pool_hdl)
{
pool_t *pool;
+ ODP_ASSERT(pool_hdl != ODP_POOL_INVALID);
+
+ pool = pool_entry_from_hdl(pool_hdl);
+
+ return pool->pool_idx;
+}
+
+int odp_pool_stats(odp_pool_t pool_hdl, odp_pool_stats_t *stats)
+{
+ pool_t *pool;
+
+ if (odp_unlikely(pool_hdl == ODP_POOL_INVALID)) {
+ ODP_ERR("Invalid pool handle\n");
+ return -1;
+ }
+ if (odp_unlikely(stats == NULL)) {
+ ODP_ERR("Output buffer NULL\n");
+ return -1;
+ }
+
+ pool = pool_entry_from_hdl(pool_hdl);
+
+ memset(stats, 0, sizeof(odp_pool_stats_t));
+
+ if (pool->params.stats.bit.available)
+ stats->available = ring_ptr_len(&pool->ring->hdr);
+
+ if (pool->params.stats.bit.alloc_ops)
+ stats->alloc_ops = odp_atomic_load_u64(&pool->stats.alloc_ops);
+
+ if (pool->params.stats.bit.alloc_fails)
+ stats->alloc_fails = odp_atomic_load_u64(&pool->stats.alloc_fails);
+
+ if (pool->params.stats.bit.free_ops)
+ stats->free_ops = odp_atomic_load_u64(&pool->stats.free_ops);
+
+ if (pool->params.stats.bit.cache_available)
+ stats->cache_available = cache_total_available(pool);
+
+ if (pool->params.stats.bit.cache_alloc_ops)
+ stats->cache_alloc_ops = odp_atomic_load_u64(&pool->stats.cache_alloc_ops);
+
+ if (pool->params.stats.bit.cache_free_ops)
+ stats->cache_free_ops = odp_atomic_load_u64(&pool->stats.cache_free_ops);
+
+ return 0;
+}
+
+int odp_pool_stats_reset(odp_pool_t pool_hdl)
+{
+ pool_t *pool;
+
+ if (odp_unlikely(pool_hdl == ODP_POOL_INVALID)) {
+ ODP_ERR("Invalid pool handle\n");
+ return -1;
+ }
+
+ pool = pool_entry_from_hdl(pool_hdl);
+
+ odp_atomic_store_u64(&pool->stats.alloc_ops, 0);
+ odp_atomic_store_u64(&pool->stats.alloc_fails, 0);
+ odp_atomic_store_u64(&pool->stats.free_ops, 0);
+ odp_atomic_store_u64(&pool->stats.cache_alloc_ops, 0);
+ odp_atomic_store_u64(&pool->stats.cache_free_ops, 0);
+
+ return 0;
+}
+
+static pool_t *find_pool(odp_buffer_hdr_t *buf_hdr)
+{
+ int i;
+ uint8_t *ptr = (uint8_t *)buf_hdr;
+
+ for (i = 0; i < ODP_CONFIG_POOLS; i++) {
+ pool_t *pool = pool_entry(i);
+
+ if (pool->reserved == 0)
+ continue;
+
+ if (ptr >= pool->base_addr && ptr < pool->max_addr)
+ return pool;
+ }
+
+ return NULL;
+}
+
+int _odp_buffer_is_valid(odp_buffer_t buf)
+{
+ pool_t *pool;
+ odp_buffer_hdr_t *buf_hdr = buf_hdl_to_hdr(buf);
+
if (buf == ODP_BUFFER_INVALID)
return 0;
- pool = pool_from_buf(buf);
+ /* Check that buffer header is from a known pool */
+ pool = find_pool(buf_hdr);
+ if (pool == NULL)
+ return 0;
+
+ if (pool != buf_hdr->pool_ptr)
+ return 0;
- if (pool->pool_idx >= ODP_CONFIG_POOLS)
+ if (buf_hdr->index.buffer >= (pool->num + pool->skipped_blocks))
+ return 0;
+
+ return 1;
+}
+
+int odp_buffer_is_valid(odp_buffer_t buf)
+{
+ if (_odp_buffer_is_valid(buf) == 0)
return 0;
- if (pool->reserved == 0)
+ if (odp_event_type(odp_buffer_to_event(buf)) != ODP_EVENT_BUFFER)
return 0;
return 1;
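/*
 * Illustrative sketch, not part of the patch above: requesting the new pool
 * statistics counters at pool creation and reading them back with
 * odp_pool_stats(). Pool sizing is a hypothetical example; printf()/PRIu64
 * assume <stdio.h> and <inttypes.h>.
 */
static odp_pool_t buf_pool_with_stats(void)
{
	odp_pool_capability_t capa;
	odp_pool_param_t params;

	if (odp_pool_capability(&capa))
		return ODP_POOL_INVALID;

	odp_pool_param_init(&params);
	params.type     = ODP_POOL_BUFFER;
	params.buf.num  = 1024;
	params.buf.size = 256;

	/* Request only counters reported as supported */
	params.stats.bit.alloc_ops   = capa.buf.stats.bit.alloc_ops;
	params.stats.bit.alloc_fails = capa.buf.stats.bit.alloc_fails;
	params.stats.bit.free_ops    = capa.buf.stats.bit.free_ops;

	return odp_pool_create("example_buf_pool", &params);
}

static void print_buf_pool_stats(odp_pool_t pool)
{
	odp_pool_stats_t stats;

	if (odp_pool_stats(pool, &stats) == 0)
		printf("allocs %" PRIu64 " fails %" PRIu64 " frees %" PRIu64 "\n",
		       stats.alloc_ops, stats.alloc_fails, stats.free_ops);
}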
diff --git a/platform/linux-generic/odp_queue_basic.c b/platform/linux-generic/odp_queue_basic.c
index 537e2654d..0ffad0807 100644
--- a/platform/linux-generic/odp_queue_basic.c
+++ b/platform/linux-generic/odp_queue_basic.c
@@ -42,7 +42,7 @@
static int queue_init(queue_entry_t *queue, const char *name,
const odp_queue_param_t *param);
-queue_global_t *queue_glb;
+queue_global_t *_odp_queue_glb;
extern _odp_queue_inline_offset_t _odp_queue_inline_offset;
static int queue_capa(odp_queue_capability_t *capa, int sched ODP_UNUSED)
@@ -52,16 +52,16 @@ static int queue_capa(odp_queue_capability_t *capa, int sched ODP_UNUSED)
/* Reserve some queues for internal use */
capa->max_queues = CONFIG_MAX_QUEUES - CONFIG_INTERNAL_QUEUES;
capa->plain.max_num = CONFIG_MAX_PLAIN_QUEUES;
- capa->plain.max_size = queue_glb->config.max_queue_size;
- capa->plain.lockfree.max_num = queue_glb->queue_lf_num;
- capa->plain.lockfree.max_size = queue_glb->queue_lf_size;
+ capa->plain.max_size = _odp_queue_glb->config.max_queue_size;
+ capa->plain.lockfree.max_num = _odp_queue_glb->queue_lf_num;
+ capa->plain.lockfree.max_size = _odp_queue_glb->queue_lf_size;
#if ODP_DEPRECATED_API
capa->sched.max_num = CONFIG_MAX_SCHED_QUEUES;
- capa->sched.max_size = queue_glb->config.max_queue_size;
+ capa->sched.max_size = _odp_queue_glb->config.max_queue_size;
if (sched) {
- capa->max_ordered_locks = sched_fn->max_ordered_locks();
- capa->max_sched_groups = sched_fn->num_grps();
+ capa->max_ordered_locks = _odp_sched_fn->max_ordered_locks();
+ capa->max_sched_groups = _odp_sched_fn->num_grps();
capa->sched_prios = odp_schedule_num_prio();
}
#endif
@@ -69,7 +69,7 @@ static int queue_capa(odp_queue_capability_t *capa, int sched ODP_UNUSED)
return 0;
}
-static int read_config_file(queue_global_t *queue_glb)
+static int read_config_file(queue_global_t *_odp_queue_glb)
{
const char *str;
uint32_t val_u32;
@@ -91,7 +91,7 @@ static int read_config_file(queue_global_t *queue_glb)
return -1;
}
- queue_glb->config.max_queue_size = val_u32;
+ _odp_queue_glb->config.max_queue_size = val_u32;
ODP_PRINT(" %s: %u\n", str, val_u32);
str = "queue_basic.default_queue_size";
@@ -102,14 +102,14 @@ static int read_config_file(queue_global_t *queue_glb)
val_u32 = val;
- if (val_u32 > queue_glb->config.max_queue_size ||
+ if (val_u32 > _odp_queue_glb->config.max_queue_size ||
val_u32 < MIN_QUEUE_SIZE ||
!CHECK_IS_POWER2(val_u32)) {
ODP_ERR("Bad value %s = %u\n", str, val_u32);
return -1;
}
- queue_glb->config.default_queue_size = val_u32;
+ _odp_queue_glb->config.default_queue_size = val_u32;
ODP_PRINT(" %s: %u\n\n", str, val_u32);
return 0;
@@ -139,9 +139,9 @@ static int queue_init_global(void)
if (shm == ODP_SHM_INVALID)
return -1;
- queue_glb = odp_shm_addr(shm);
+ _odp_queue_glb = odp_shm_addr(shm);
- memset(queue_glb, 0, sizeof(queue_global_t));
+ memset(_odp_queue_glb, 0, sizeof(queue_global_t));
for (i = 0; i < CONFIG_MAX_QUEUES; i++) {
/* init locks */
@@ -151,30 +151,30 @@ static int queue_init_global(void)
queue->s.handle = (odp_queue_t)queue;
}
- if (read_config_file(queue_glb)) {
+ if (read_config_file(_odp_queue_glb)) {
odp_shm_free(shm);
return -1;
}
- queue_glb->queue_gbl_shm = shm;
+ _odp_queue_glb->queue_gbl_shm = shm;
mem_size = sizeof(uint32_t) * CONFIG_MAX_QUEUES *
- (uint64_t)queue_glb->config.max_queue_size;
+ (uint64_t)_odp_queue_glb->config.max_queue_size;
shm = odp_shm_reserve("_odp_queue_rings", mem_size,
ODP_CACHE_LINE_SIZE,
0);
if (shm == ODP_SHM_INVALID) {
- odp_shm_free(queue_glb->queue_gbl_shm);
+ odp_shm_free(_odp_queue_glb->queue_gbl_shm);
return -1;
}
- queue_glb->queue_ring_shm = shm;
- queue_glb->ring_data = odp_shm_addr(shm);
+ _odp_queue_glb->queue_ring_shm = shm;
+ _odp_queue_glb->ring_data = odp_shm_addr(shm);
- lf_func = &queue_glb->queue_lf_func;
- queue_glb->queue_lf_num = queue_lf_init_global(&lf_size, lf_func);
- queue_glb->queue_lf_size = lf_size;
+ lf_func = &_odp_queue_glb->queue_lf_func;
+ _odp_queue_glb->queue_lf_num = _odp_queue_lf_init_global(&lf_size, lf_func);
+ _odp_queue_glb->queue_lf_size = lf_size;
queue_capa(&capa, 0);
@@ -214,14 +214,14 @@ static int queue_term_global(void)
UNLOCK(queue);
}
- queue_lf_term_global();
+ _odp_queue_lf_term_global();
- if (odp_shm_free(queue_glb->queue_ring_shm)) {
+ if (odp_shm_free(_odp_queue_glb->queue_ring_shm)) {
ODP_ERR("shm free failed");
ret = -1;
}
- if (odp_shm_free(queue_glb->queue_gbl_shm)) {
+ if (odp_shm_free(_odp_queue_glb->queue_gbl_shm)) {
ODP_ERR("shm free failed");
ret = -1;
}
@@ -289,13 +289,13 @@ static odp_queue_t queue_create(const char *name,
}
if (param->nonblocking == ODP_BLOCKING) {
- if (param->size > queue_glb->config.max_queue_size)
+ if (param->size > _odp_queue_glb->config.max_queue_size)
return ODP_QUEUE_INVALID;
} else if (param->nonblocking == ODP_NONBLOCKING_LF) {
/* Only plain type lock-free queues supported */
if (type != ODP_QUEUE_TYPE_PLAIN)
return ODP_QUEUE_INVALID;
- if (param->size > queue_glb->queue_lf_size)
+ if (param->size > _odp_queue_glb->queue_lf_size)
return ODP_QUEUE_INVALID;
} else {
/* Wait-free queues not supported */
@@ -330,9 +330,9 @@ static odp_queue_t queue_create(const char *name,
param->nonblocking == ODP_NONBLOCKING_LF) {
queue_lf_func_t *lf_fn;
- lf_fn = &queue_glb->queue_lf_func;
+ lf_fn = &_odp_queue_glb->queue_lf_func;
- queue_lf = queue_lf_create(queue);
+ queue_lf = _odp_queue_lf_create(queue);
if (queue_lf == NULL) {
UNLOCK(queue);
@@ -363,8 +363,8 @@ static odp_queue_t queue_create(const char *name,
return ODP_QUEUE_INVALID;
if (type == ODP_QUEUE_TYPE_SCHED) {
- if (sched_fn->create_queue(queue->s.index,
- &queue->s.param.sched)) {
+ if (_odp_sched_fn->create_queue(queue->s.index,
+ &queue->s.param.sched)) {
queue->s.status = QUEUE_STATUS_FREE;
ODP_ERR("schedule queue init failed\n");
return ODP_QUEUE_INVALID;
@@ -374,7 +374,7 @@ static odp_queue_t queue_create(const char *name,
return handle;
}
-void sched_queue_set_status(uint32_t queue_index, int status)
+void _odp_sched_queue_set_status(uint32_t queue_index, int status)
{
queue_entry_t *queue = qentry_from_index(queue_index);
@@ -425,7 +425,7 @@ static int queue_destroy(odp_queue_t handle)
break;
case QUEUE_STATUS_NOTSCHED:
queue->s.status = QUEUE_STATUS_FREE;
- sched_fn->destroy_queue(queue->s.index);
+ _odp_sched_fn->destroy_queue(queue->s.index);
break;
case QUEUE_STATUS_SCHED:
/* Queue is still in scheduling */
@@ -436,7 +436,7 @@ static int queue_destroy(odp_queue_t handle)
}
if (queue->s.queue_lf)
- queue_lf_destroy(queue->s.queue_lf);
+ _odp_queue_lf_destroy(queue->s.queue_lf);
UNLOCK(queue);
@@ -506,7 +506,7 @@ static inline int _plain_queue_enq_multi(odp_queue_t handle,
queue = qentry_from_handle(handle);
ring_mpmc = &queue->s.ring_mpmc;
- if (sched_fn->ord_enq_multi(handle, (void **)buf_hdr, num, &ret))
+ if (_odp_sched_fn->ord_enq_multi(handle, (void **)buf_hdr, num, &ret))
return ret;
buffer_index_from_buf(buf_idx, buf_hdr, num);
@@ -748,7 +748,7 @@ static void queue_print(odp_queue_t handle)
if (queue->s.queue_lf) {
ODP_PRINT(" implementation queue_lf\n");
ODP_PRINT(" length %" PRIu32 "/%" PRIu32 "\n",
- queue_lf_length(queue->s.queue_lf), queue_lf_max_length());
+ _odp_queue_lf_length(queue->s.queue_lf), _odp_queue_lf_max_length());
} else if (queue->s.spsc) {
ODP_PRINT(" implementation ring_spsc\n");
ODP_PRINT(" length %" PRIu32 "/%" PRIu32 "\n",
@@ -780,7 +780,7 @@ static inline int _sched_queue_enq_multi(odp_queue_t handle,
queue = qentry_from_handle(handle);
ring_st = &queue->s.ring_st;
- if (sched_fn->ord_enq_multi(handle, (void **)buf_hdr, num, &ret))
+ if (_odp_sched_fn->ord_enq_multi(handle, (void **)buf_hdr, num, &ret))
return ret;
buffer_index_from_buf(buf_idx, buf_hdr, num);
@@ -803,14 +803,14 @@ static inline int _sched_queue_enq_multi(odp_queue_t handle,
UNLOCK(queue);
/* Add queue to scheduling */
- if (sched && sched_fn->sched_queue(queue->s.index))
+ if (sched && _odp_sched_fn->sched_queue(queue->s.index))
ODP_ABORT("schedule_queue failed\n");
return num_enq;
}
-int sched_queue_deq(uint32_t queue_index, odp_event_t ev[], int max_num,
- int update_status)
+int _odp_sched_queue_deq(uint32_t queue_index, odp_event_t ev[], int max_num,
+ int update_status)
{
int num_deq, status;
ring_st_t *ring_st;
@@ -828,7 +828,7 @@ int sched_queue_deq(uint32_t queue_index, odp_event_t ev[], int max_num,
* Inform scheduler about a destroyed queue. */
if (queue->s.status == QUEUE_STATUS_DESTROYED) {
queue->s.status = QUEUE_STATUS_FREE;
- sched_fn->destroy_queue(queue_index);
+ _odp_sched_fn->destroy_queue(queue_index);
}
UNLOCK(queue);
@@ -873,7 +873,7 @@ static int sched_queue_enq(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr)
return -1;
}
-int sched_queue_empty(uint32_t queue_index)
+int _odp_sched_queue_empty(uint32_t queue_index)
{
queue_entry_t *queue = qentry_from_index(queue_index);
int ret = 0;
@@ -916,7 +916,7 @@ static int queue_init(queue_entry_t *queue, const char *name,
queue->s.name[ODP_QUEUE_NAME_LEN - 1] = 0;
}
memcpy(&queue->s.param, param, sizeof(odp_queue_param_t));
- if (queue->s.param.sched.lock_count > sched_fn->max_ordered_locks())
+ if (queue->s.param.sched.lock_count > _odp_sched_fn->max_ordered_locks())
return -1;
if (queue_type == ODP_QUEUE_TYPE_SCHED)
@@ -930,7 +930,7 @@ static int queue_init(queue_entry_t *queue, const char *name,
queue_size = param->size;
if (queue_size == 0)
- queue_size = queue_glb->config.default_queue_size;
+ queue_size = _odp_queue_glb->config.default_queue_size;
if (queue_size < MIN_QUEUE_SIZE)
queue_size = MIN_QUEUE_SIZE;
@@ -938,12 +938,12 @@ static int queue_init(queue_entry_t *queue, const char *name,
/* Round up if not already a power of two */
queue_size = ROUNDUP_POWER2_U32(queue_size);
- if (queue_size > queue_glb->config.max_queue_size) {
+ if (queue_size > _odp_queue_glb->config.max_queue_size) {
ODP_ERR("Too large queue size %u\n", queue_size);
return -1;
}
- offset = queue->s.index * (uint64_t)queue_glb->config.max_queue_size;
+ offset = queue->s.index * (uint64_t)_odp_queue_glb->config.max_queue_size;
/* Single-producer / single-consumer plain queue has simple and
* lock-free implementation */
@@ -962,7 +962,7 @@ static int queue_init(queue_entry_t *queue, const char *name,
queue->s.orig_dequeue_multi = error_dequeue_multi;
if (spsc) {
- queue_spsc_init(queue, queue_size);
+ _odp_queue_spsc_init(queue, queue_size);
} else {
if (queue_type == ODP_QUEUE_TYPE_PLAIN) {
queue->s.enqueue = plain_queue_enq;
@@ -971,7 +971,7 @@ static int queue_init(queue_entry_t *queue, const char *name,
queue->s.dequeue_multi = plain_queue_deq_multi;
queue->s.orig_dequeue_multi = plain_queue_deq_multi;
- queue->s.ring_data = &queue_glb->ring_data[offset];
+ queue->s.ring_data = &_odp_queue_glb->ring_data[offset];
queue->s.ring_mask = queue_size - 1;
ring_mpmc_init(&queue->s.ring_mpmc);
@@ -979,7 +979,7 @@ static int queue_init(queue_entry_t *queue, const char *name,
queue->s.enqueue = sched_queue_enq;
queue->s.enqueue_multi = sched_queue_enq_multi;
- queue->s.ring_data = &queue_glb->ring_data[offset];
+ queue->s.ring_data = &_odp_queue_glb->ring_data[offset];
queue->s.ring_mask = queue_size - 1;
ring_st_init(&queue->s.ring_st);
}
@@ -1119,7 +1119,7 @@ static odp_event_t queue_api_deq(odp_queue_t handle)
}
/* API functions */
-_odp_queue_api_fn_t queue_basic_api = {
+_odp_queue_api_fn_t _odp_queue_basic_api = {
.queue_create = queue_create,
.queue_destroy = queue_destroy,
.queue_lookup = queue_lookup,
@@ -1141,7 +1141,7 @@ _odp_queue_api_fn_t queue_basic_api = {
};
/* Functions towards internal components */
-queue_fn_t queue_basic_fn = {
+queue_fn_t _odp_queue_basic_fn = {
.init_global = queue_init_global,
.term_global = queue_term_global,
.init_local = queue_init_local,
diff --git a/platform/linux-generic/odp_queue_if.c b/platform/linux-generic/odp_queue_if.c
index 7de06faa3..d4b1c550c 100644
--- a/platform/linux-generic/odp_queue_if.c
+++ b/platform/linux-generic/odp_queue_if.c
@@ -23,13 +23,13 @@ const _odp_queue_api_fn_t *_odp_queue_api;
#include <odp/visibility_end.h>
-extern const _odp_queue_api_fn_t queue_scalable_api;
-extern const queue_fn_t queue_scalable_fn;
+extern const _odp_queue_api_fn_t _odp_queue_scalable_api;
+extern const queue_fn_t _odp_queue_scalable_fn;
-extern const _odp_queue_api_fn_t queue_basic_api;
-extern const queue_fn_t queue_basic_fn;
+extern const _odp_queue_api_fn_t _odp_queue_basic_api;
+extern const queue_fn_t _odp_queue_basic_fn;
-const queue_fn_t *queue_fn;
+const queue_fn_t *_odp_queue_fn;
odp_queue_t odp_queue_create(const char *name, const odp_queue_param_t *param)
{
@@ -109,20 +109,20 @@ int _odp_queue_init_global(void)
sched = _ODP_SCHEDULE_DEFAULT;
if (!strcmp(sched, "basic") || !strcmp(sched, "sp")) {
- queue_fn = &queue_basic_fn;
- _odp_queue_api = &queue_basic_api;
+ _odp_queue_fn = &_odp_queue_basic_fn;
+ _odp_queue_api = &_odp_queue_basic_api;
} else if (!strcmp(sched, "scalable")) {
- queue_fn = &queue_scalable_fn;
- _odp_queue_api = &queue_scalable_api;
+ _odp_queue_fn = &_odp_queue_scalable_fn;
+ _odp_queue_api = &_odp_queue_scalable_api;
} else {
ODP_ABORT("Unknown scheduler specified via ODP_SCHEDULER\n");
return -1;
}
- return queue_fn->init_global();
+ return _odp_queue_fn->init_global();
}
int _odp_queue_term_global(void)
{
- return queue_fn->term_global();
+ return _odp_queue_fn->term_global();
}
diff --git a/platform/linux-generic/odp_queue_lf.c b/platform/linux-generic/odp_queue_lf.c
index 7d8893c86..82b95c34d 100644
--- a/platform/linux-generic/odp_queue_lf.c
+++ b/platform/linux-generic/odp_queue_lf.c
@@ -302,8 +302,8 @@ static int queue_lf_deq_multi(odp_queue_t handle, odp_buffer_hdr_t **buf_hdr,
return 1;
}
-uint32_t queue_lf_init_global(uint32_t *queue_lf_size,
- queue_lf_func_t *lf_func)
+uint32_t _odp_queue_lf_init_global(uint32_t *queue_lf_size,
+ queue_lf_func_t *lf_func)
{
odp_shm_t shm;
int lockfree;
@@ -339,7 +339,7 @@ uint32_t queue_lf_init_global(uint32_t *queue_lf_size,
return QUEUE_LF_NUM;
}
-void queue_lf_term_global(void)
+void _odp_queue_lf_term_global(void)
{
odp_shm_t shm;
@@ -362,7 +362,7 @@ static void init_queue(queue_lf_t *queue_lf)
atomic_zero_u128(&queue_lf->node[i].u128);
}
-void *queue_lf_create(queue_entry_t *queue)
+void *_odp_queue_lf_create(queue_entry_t *queue)
{
int i;
queue_lf_t *queue_lf = NULL;
@@ -388,14 +388,14 @@ void *queue_lf_create(queue_entry_t *queue)
return queue_lf;
}
-void queue_lf_destroy(void *queue_lf_ptr)
+void _odp_queue_lf_destroy(void *queue_lf_ptr)
{
queue_lf_t *queue_lf = queue_lf_ptr;
queue_lf->used = 0;
}
-uint32_t queue_lf_length(void *queue_lf_ptr)
+uint32_t _odp_queue_lf_length(void *queue_lf_ptr)
{
queue_lf_t *queue_lf = queue_lf_ptr;
ring_lf_node_t node_val;
@@ -410,7 +410,7 @@ uint32_t queue_lf_length(void *queue_lf_ptr)
return num;
}
-uint32_t queue_lf_max_length(void)
+uint32_t _odp_queue_lf_max_length(void)
{
return RING_LF_SIZE;
}
diff --git a/platform/linux-generic/odp_queue_scalable.c b/platform/linux-generic/odp_queue_scalable.c
index d601fb477..02bf9fbc2 100644
--- a/platform/linux-generic/odp_queue_scalable.c
+++ b/platform/linux-generic/odp_queue_scalable.c
@@ -43,7 +43,7 @@
#define UNLOCK(a) odp_ticketlock_unlock(a)
#define LOCK_INIT(a) odp_ticketlock_init(a)
-extern __thread sched_scalable_thread_state_t *sched_ts;
+extern __thread sched_scalable_thread_state_t *_odp_sched_ts;
extern _odp_queue_inline_offset_t _odp_queue_inline_offset;
typedef struct queue_table_t {
@@ -65,7 +65,7 @@ static queue_entry_t *get_qentry(uint32_t queue_id)
return &queue_tbl->queue[queue_id];
}
-queue_entry_t *qentry_from_ext(odp_queue_t handle)
+queue_entry_t *_odp_qentry_from_ext(odp_queue_t handle)
{
return (queue_entry_t *)(uintptr_t)handle;
}
@@ -168,8 +168,8 @@ static int queue_init(queue_entry_t *queue, const char *name,
if (queue->s.param.sched.sync == ODP_SCHED_SYNC_ORDERED) {
sched_elem->rwin =
- rwin_alloc(queue_shm_pool,
- queue->s.param.sched.lock_count);
+ _odp_rwin_alloc(queue_shm_pool,
+ queue->s.param.sched.lock_count);
if (sched_elem->rwin == NULL) {
ODP_ERR("Reorder window not created\n");
goto rwin_create_failed;
@@ -178,7 +178,7 @@ static int queue_init(queue_entry_t *queue, const char *name,
sched_elem->sched_grp = param->sched.group;
sched_elem->sched_prio = prio;
sched_elem->schedq =
- sched_queue_add(param->sched.group, prio);
+ _odp_sched_queue_add(param->sched.group, prio);
ODP_ASSERT(sched_elem->schedq != NULL);
}
@@ -313,8 +313,8 @@ static int queue_capability(odp_queue_capability_t *capa)
/* Reserve some queues for internal use */
capa->max_queues = CONFIG_MAX_QUEUES - CONFIG_INTERNAL_QUEUES;
#if ODP_DEPRECATED_API
- capa->max_ordered_locks = sched_fn->max_ordered_locks();
- capa->max_sched_groups = sched_fn->num_grps();
+ capa->max_ordered_locks = _odp_sched_fn->max_ordered_locks();
+ capa->max_sched_groups = _odp_sched_fn->num_grps();
capa->sched_prios = odp_schedule_num_prio();
capa->sched.max_num = CONFIG_MAX_SCHED_QUEUES;
capa->sched.max_size = 0;
@@ -327,27 +327,27 @@ static int queue_capability(odp_queue_capability_t *capa)
static odp_queue_type_t queue_type(odp_queue_t handle)
{
- return qentry_from_ext(handle)->s.type;
+ return _odp_qentry_from_ext(handle)->s.type;
}
static odp_schedule_sync_t queue_sched_type(odp_queue_t handle)
{
- return qentry_from_ext(handle)->s.param.sched.sync;
+ return _odp_qentry_from_ext(handle)->s.param.sched.sync;
}
static odp_schedule_prio_t queue_sched_prio(odp_queue_t handle)
{
- return qentry_from_ext(handle)->s.param.sched.prio;
+ return _odp_qentry_from_ext(handle)->s.param.sched.prio;
}
static odp_schedule_group_t queue_sched_group(odp_queue_t handle)
{
- return qentry_from_ext(handle)->s.param.sched.group;
+ return _odp_qentry_from_ext(handle)->s.param.sched.group;
}
static uint32_t queue_lock_count(odp_queue_t handle)
{
- queue_entry_t *queue = qentry_from_ext(handle);
+ queue_entry_t *queue = _odp_qentry_from_ext(handle);
return queue->s.param.sched.sync == ODP_SCHED_SYNC_ORDERED ?
queue->s.param.sched.lock_count : 0;
@@ -419,7 +419,7 @@ static int queue_destroy(odp_queue_t handle)
if (handle == ODP_QUEUE_INVALID)
return -1;
- queue = qentry_from_ext(handle);
+ queue = _odp_qentry_from_ext(handle);
LOCK(&queue->s.lock);
if (queue->s.status != QUEUE_STATUS_READY) {
UNLOCK(&queue->s.lock);
@@ -469,14 +469,14 @@ static int queue_destroy(odp_queue_t handle)
}
if (q->schedq != NULL) {
- sched_queue_rem(q->sched_grp, q->sched_prio);
+ _odp_sched_queue_rem(q->sched_grp, q->sched_prio);
q->schedq = NULL;
}
_odp_ishm_pool_free(queue_shm_pool, q->prod_ring);
if (q->rwin != NULL) {
- if (rwin_free(queue_shm_pool, q->rwin) < 0) {
+ if (_odp_rwin_free(queue_shm_pool, q->rwin) < 0) {
ODP_ERR("Failed to free reorder window\n");
UNLOCK(&queue->s.lock);
return -1;
@@ -492,7 +492,7 @@ static int queue_context_set(odp_queue_t handle, void *context,
uint32_t len ODP_UNUSED)
{
odp_mb_full();
- qentry_from_ext(handle)->s.param.context = context;
+ _odp_qentry_from_ext(handle)->s.param.context = context;
odp_mb_full();
return 0;
}
@@ -642,10 +642,10 @@ static int _queue_enq_multi(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr[],
sched_scalable_thread_state_t *ts;
queue = qentry_from_int(handle);
- ts = sched_ts;
+ ts = _odp_sched_ts;
if (ts && odp_unlikely(ts->out_of_order) &&
(queue->s.param.order == ODP_QUEUE_ORDER_KEEP)) {
- actual = rctx_save(queue, buf_hdr, num);
+ actual = _odp_rctx_save(queue, buf_hdr, num);
return actual;
}
@@ -659,9 +659,9 @@ static int _queue_enq_multi(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr[],
if (odp_likely(queue->s.sched_elem.schedq != NULL && actual != 0)) {
/* Perform scheduler related updates. */
#ifdef CONFIG_QSCHST_LOCK
- sched_update_enq_sp(&queue->s.sched_elem, actual);
+ _odp_sched_update_enq_sp(&queue->s.sched_elem, actual);
#else
- sched_update_enq(&queue->s.sched_elem, actual);
+ _odp_sched_update_enq(&queue->s.sched_elem, actual);
#endif
}
@@ -686,7 +686,7 @@ static int queue_enq_multi(odp_queue_t handle, const odp_event_t ev[], int num)
if (num > QUEUE_MULTI_MAX)
num = QUEUE_MULTI_MAX;
- queue = qentry_from_ext(handle);
+ queue = _odp_qentry_from_ext(handle);
for (i = 0; i < num; i++)
buf_hdr[i] = buf_hdl_to_hdr(odp_buffer_from_event(ev[i]));
@@ -699,7 +699,7 @@ static int queue_enq(odp_queue_t handle, odp_event_t ev)
odp_buffer_hdr_t *buf_hdr;
queue_entry_t *queue;
- queue = qentry_from_ext(handle);
+ queue = _odp_qentry_from_ext(handle);
buf_hdr = buf_hdl_to_hdr(odp_buffer_from_event(ev));
return queue->s.enqueue(handle, buf_hdr);
@@ -867,7 +867,7 @@ static int queue_deq_multi(odp_queue_t handle, odp_event_t ev[], int num)
if (num > QUEUE_MULTI_MAX)
num = QUEUE_MULTI_MAX;
- queue = qentry_from_ext(handle);
+ queue = _odp_qentry_from_ext(handle);
ret = queue->s.dequeue_multi(handle, (odp_buffer_hdr_t **)ev, num);
@@ -880,7 +880,7 @@ static int queue_deq_multi(odp_queue_t handle, odp_event_t ev[], int num)
static odp_event_t queue_deq(odp_queue_t handle)
{
- queue_entry_t *queue = qentry_from_ext(handle);
+ queue_entry_t *queue = _odp_qentry_from_ext(handle);
odp_event_t ev = (odp_event_t)queue->s.dequeue(handle);
if (odp_global_rw->inline_timers &&
@@ -1068,20 +1068,20 @@ static int queue_orig_multi(odp_queue_t handle,
static void queue_timer_add(odp_queue_t handle)
{
- queue_entry_t *queue = qentry_from_ext(handle);
+ queue_entry_t *queue = _odp_qentry_from_ext(handle);
odp_atomic_inc_u64(&queue->s.num_timers);
}
static void queue_timer_rem(odp_queue_t handle)
{
- queue_entry_t *queue = qentry_from_ext(handle);
+ queue_entry_t *queue = _odp_qentry_from_ext(handle);
odp_atomic_dec_u64(&queue->s.num_timers);
}
/* API functions */
-_odp_queue_api_fn_t queue_scalable_api = {
+_odp_queue_api_fn_t _odp_queue_scalable_api = {
.queue_create = queue_create,
.queue_destroy = queue_destroy,
.queue_lookup = queue_lookup,
@@ -1103,7 +1103,7 @@ _odp_queue_api_fn_t queue_scalable_api = {
};
/* Functions towards internal components */
-queue_fn_t queue_scalable_fn = {
+queue_fn_t _odp_queue_scalable_fn = {
.init_global = queue_init_global,
.term_global = queue_term_global,
.init_local = queue_init_local,
diff --git a/platform/linux-generic/odp_queue_spsc.c b/platform/linux-generic/odp_queue_spsc.c
index 8bf2c3ddc..92f16e657 100644
--- a/platform/linux-generic/odp_queue_spsc.c
+++ b/platform/linux-generic/odp_queue_spsc.c
@@ -116,7 +116,7 @@ static odp_buffer_hdr_t *queue_spsc_deq(odp_queue_t handle)
return NULL;
}
-void queue_spsc_init(queue_entry_t *queue, uint32_t queue_size)
+void _odp_queue_spsc_init(queue_entry_t *queue, uint32_t queue_size)
{
uint64_t offset;
@@ -126,9 +126,9 @@ void queue_spsc_init(queue_entry_t *queue, uint32_t queue_size)
queue->s.dequeue_multi = queue_spsc_deq_multi;
queue->s.orig_dequeue_multi = queue_spsc_deq_multi;
- offset = queue->s.index * (uint64_t)queue_glb->config.max_queue_size;
+ offset = queue->s.index * (uint64_t)_odp_queue_glb->config.max_queue_size;
- queue->s.ring_data = &queue_glb->ring_data[offset];
+ queue->s.ring_data = &_odp_queue_glb->ring_data[offset];
queue->s.ring_mask = queue_size - 1;
ring_spsc_init(&queue->s.ring_spsc);
}
diff --git a/platform/linux-generic/odp_schedule_basic.c b/platform/linux-generic/odp_schedule_basic.c
index 423b50d95..67a3f50cc 100644
--- a/platform/linux-generic/odp_schedule_basic.c
+++ b/platform/linux-generic/odp_schedule_basic.c
@@ -479,7 +479,7 @@ static int schedule_term_global(void)
odp_event_t events[1];
int num;
- num = sched_queue_deq(qi, events, 1, 1);
+ num = _odp_sched_queue_deq(qi, events, 1, 1);
if (num > 0)
ODP_ERR("Queue not empty\n");
@@ -580,6 +580,11 @@ static int schedule_create_queue(uint32_t queue_index,
return -1;
}
+ if (sched_param->group < 0 || sched_param->group >= NUM_SCHED_GRPS) {
+ ODP_ERR("Bad schedule group\n");
+ return -1;
+ }
+
odp_spinlock_lock(&sched->mask_lock);
/* update scheduler prio queue usage status */
@@ -669,7 +674,7 @@ static void schedule_pktio_start(int pktio_index, int num_pktin,
ODP_ASSERT(pktin_idx[i] <= MAX_PKTIN_INDEX);
/* Start polling */
- sched_queue_set_status(qi, QUEUE_STATUS_SCHED);
+ _odp_sched_queue_set_status(qi, QUEUE_STATUS_SCHED);
schedule_sched_queue(qi);
}
}
@@ -800,7 +805,7 @@ static int schedule_term_local(void)
static void schedule_config_init(odp_schedule_config_t *config)
{
config->num_queues = CONFIG_MAX_SCHED_QUEUES;
- config->queue_size = queue_glb->config.max_queue_size;
+ config->queue_size = _odp_queue_glb->config.max_queue_size;
}
static int schedule_config(const odp_schedule_config_t *config)
@@ -907,7 +912,7 @@ static inline int poll_pktin(uint32_t qi, int direct_recv,
pktio_index = sched->queue[qi].pktio_index;
pktin_index = sched->queue[qi].pktin_index;
- num = sched_cb_pktin_poll(pktio_index, pktin_index, hdr_tbl, max_num);
+ num = _odp_sched_cb_pktin_poll(pktio_index, pktin_index, hdr_tbl, max_num);
if (num == 0)
return 0;
@@ -920,10 +925,10 @@ static inline int poll_pktin(uint32_t qi, int direct_recv,
num_pktin = sched->pktio[pktio_index].num_pktin;
odp_spinlock_unlock(&sched->pktio_lock);
- sched_queue_set_status(qi, QUEUE_STATUS_NOTSCHED);
+ _odp_sched_queue_set_status(qi, QUEUE_STATUS_NOTSCHED);
if (num_pktin == 0)
- sched_cb_pktio_stop_finalize(pktio_index);
+ _odp_sched_cb_pktio_stop_finalize(pktio_index);
return num;
}
@@ -1023,7 +1028,7 @@ static inline int do_schedule_grp(odp_queue_t *out_queue, odp_event_t out_ev[],
pktin = queue_is_pktin(qi);
- num = sched_queue_deq(qi, ev_tbl, max_deq, !pktin);
+ num = _odp_sched_queue_deq(qi, ev_tbl, max_deq, !pktin);
if (odp_unlikely(num < 0)) {
/* Destroyed queue. Continue scheduling the same
@@ -1562,7 +1567,7 @@ static void schedule_prefetch(int num ODP_UNUSED)
static int schedule_num_grps(void)
{
- return NUM_SCHED_GRPS;
+ return NUM_SCHED_GRPS - SCHED_GROUP_NAMED;
}
static void schedule_get_config(schedule_config_t *config)
@@ -1578,14 +1583,14 @@ static int schedule_capability(odp_schedule_capability_t *capa)
capa->max_groups = schedule_num_grps();
capa->max_prios = schedule_num_prio();
capa->max_queues = CONFIG_MAX_SCHED_QUEUES;
- capa->max_queue_size = queue_glb->config.max_queue_size;
+ capa->max_queue_size = _odp_queue_glb->config.max_queue_size;
capa->max_flow_id = BUF_HDR_MAX_FLOW_ID;
return 0;
}
/* Fill in scheduler interface */
-const schedule_fn_t schedule_basic_fn = {
+const schedule_fn_t _odp_schedule_basic_fn = {
.pktio_start = schedule_pktio_start,
.thr_add = schedule_thr_add,
.thr_rem = schedule_thr_rem,
@@ -1605,7 +1610,7 @@ const schedule_fn_t schedule_basic_fn = {
};
/* Fill in scheduler API calls */
-const schedule_api_t schedule_basic_api = {
+const schedule_api_t _odp_schedule_basic_api = {
.schedule_wait_time = schedule_wait_time,
.schedule_capability = schedule_capability,
.schedule_config_init = schedule_config_init,
diff --git a/platform/linux-generic/odp_schedule_if.c b/platform/linux-generic/odp_schedule_if.c
index 5ffa0a417..92d70d0f8 100644
--- a/platform/linux-generic/odp_schedule_if.c
+++ b/platform/linux-generic/odp_schedule_if.c
@@ -13,34 +13,34 @@
#include <stdlib.h>
#include <string.h>
-extern const schedule_fn_t schedule_sp_fn;
-extern const schedule_api_t schedule_sp_api;
+extern const schedule_fn_t _odp_schedule_sp_fn;
+extern const schedule_api_t _odp_schedule_sp_api;
-extern const schedule_fn_t schedule_basic_fn;
-extern const schedule_api_t schedule_basic_api;
+extern const schedule_fn_t _odp_schedule_basic_fn;
+extern const schedule_api_t _odp_schedule_basic_api;
-extern const schedule_fn_t schedule_scalable_fn;
-extern const schedule_api_t schedule_scalable_api;
+extern const schedule_fn_t _odp_schedule_scalable_fn;
+extern const schedule_api_t _odp_schedule_scalable_api;
-const schedule_fn_t *sched_fn;
-const schedule_api_t *sched_api;
+const schedule_fn_t *_odp_sched_fn;
+const schedule_api_t *_odp_sched_api;
int _odp_schedule_configured;
uint64_t odp_schedule_wait_time(uint64_t ns)
{
- return sched_api->schedule_wait_time(ns);
+ return _odp_sched_api->schedule_wait_time(ns);
}
int odp_schedule_capability(odp_schedule_capability_t *capa)
{
- return sched_api->schedule_capability(capa);
+ return _odp_sched_api->schedule_capability(capa);
}
void odp_schedule_config_init(odp_schedule_config_t *config)
{
memset(config, 0, sizeof(*config));
- sched_api->schedule_config_init(config);
+ _odp_sched_api->schedule_config_init(config);
}
int odp_schedule_config(const odp_schedule_config_t *config)
@@ -58,7 +58,7 @@ int odp_schedule_config(const odp_schedule_config_t *config)
config = &defconfig;
}
- ret = sched_api->schedule_config(config);
+ ret = _odp_sched_api->schedule_config(config);
if (ret >= 0)
_odp_schedule_configured = 1;
@@ -70,7 +70,7 @@ odp_event_t odp_schedule(odp_queue_t *from, uint64_t wait)
{
ODP_ASSERT(_odp_schedule_configured);
- return sched_api->schedule(from, wait);
+ return _odp_sched_api->schedule(from, wait);
}
int odp_schedule_multi(odp_queue_t *from, uint64_t wait, odp_event_t events[],
@@ -78,127 +78,127 @@ int odp_schedule_multi(odp_queue_t *from, uint64_t wait, odp_event_t events[],
{
ODP_ASSERT(_odp_schedule_configured);
- return sched_api->schedule_multi(from, wait, events, num);
+ return _odp_sched_api->schedule_multi(from, wait, events, num);
}
int odp_schedule_multi_wait(odp_queue_t *from, odp_event_t events[], int num)
{
- return sched_api->schedule_multi_wait(from, events, num);
+ return _odp_sched_api->schedule_multi_wait(from, events, num);
}
int odp_schedule_multi_no_wait(odp_queue_t *from, odp_event_t events[], int num)
{
- return sched_api->schedule_multi_no_wait(from, events, num);
+ return _odp_sched_api->schedule_multi_no_wait(from, events, num);
}
void odp_schedule_pause(void)
{
- return sched_api->schedule_pause();
+ return _odp_sched_api->schedule_pause();
}
void odp_schedule_resume(void)
{
- return sched_api->schedule_resume();
+ return _odp_sched_api->schedule_resume();
}
void odp_schedule_release_atomic(void)
{
- return sched_api->schedule_release_atomic();
+ return _odp_sched_api->schedule_release_atomic();
}
void odp_schedule_release_ordered(void)
{
- return sched_api->schedule_release_ordered();
+ return _odp_sched_api->schedule_release_ordered();
}
void odp_schedule_prefetch(int num)
{
- return sched_api->schedule_prefetch(num);
+ return _odp_sched_api->schedule_prefetch(num);
}
int odp_schedule_min_prio(void)
{
- return sched_api->schedule_min_prio();
+ return _odp_sched_api->schedule_min_prio();
}
int odp_schedule_max_prio(void)
{
- return sched_api->schedule_max_prio();
+ return _odp_sched_api->schedule_max_prio();
}
int odp_schedule_default_prio(void)
{
- return sched_api->schedule_default_prio();
+ return _odp_sched_api->schedule_default_prio();
}
int odp_schedule_num_prio(void)
{
- return sched_api->schedule_num_prio();
+ return _odp_sched_api->schedule_num_prio();
}
odp_schedule_group_t odp_schedule_group_create(const char *name,
const odp_thrmask_t *mask)
{
- return sched_api->schedule_group_create(name, mask);
+ return _odp_sched_api->schedule_group_create(name, mask);
}
int odp_schedule_group_destroy(odp_schedule_group_t group)
{
- return sched_api->schedule_group_destroy(group);
+ return _odp_sched_api->schedule_group_destroy(group);
}
odp_schedule_group_t odp_schedule_group_lookup(const char *name)
{
- return sched_api->schedule_group_lookup(name);
+ return _odp_sched_api->schedule_group_lookup(name);
}
int odp_schedule_group_join(odp_schedule_group_t group,
const odp_thrmask_t *mask)
{
- return sched_api->schedule_group_join(group, mask);
+ return _odp_sched_api->schedule_group_join(group, mask);
}
int odp_schedule_group_leave(odp_schedule_group_t group,
const odp_thrmask_t *mask)
{
- return sched_api->schedule_group_leave(group, mask);
+ return _odp_sched_api->schedule_group_leave(group, mask);
}
int odp_schedule_group_thrmask(odp_schedule_group_t group,
odp_thrmask_t *thrmask)
{
- return sched_api->schedule_group_thrmask(group, thrmask);
+ return _odp_sched_api->schedule_group_thrmask(group, thrmask);
}
int odp_schedule_group_info(odp_schedule_group_t group,
odp_schedule_group_info_t *info)
{
- return sched_api->schedule_group_info(group, info);
+ return _odp_sched_api->schedule_group_info(group, info);
}
void odp_schedule_order_lock(uint32_t lock_index)
{
- return sched_api->schedule_order_lock(lock_index);
+ return _odp_sched_api->schedule_order_lock(lock_index);
}
void odp_schedule_order_unlock(uint32_t lock_index)
{
- return sched_api->schedule_order_unlock(lock_index);
+ return _odp_sched_api->schedule_order_unlock(lock_index);
}
void odp_schedule_order_unlock_lock(uint32_t unlock_index, uint32_t lock_index)
{
- sched_api->schedule_order_unlock_lock(unlock_index, lock_index);
+ _odp_sched_api->schedule_order_unlock_lock(unlock_index, lock_index);
}
void odp_schedule_order_lock_start(uint32_t lock_index)
{
- sched_api->schedule_order_lock_start(lock_index);
+ _odp_sched_api->schedule_order_lock_start(lock_index);
}
void odp_schedule_order_lock_wait(uint32_t lock_index)
{
- sched_api->schedule_order_lock_wait(lock_index);
+ _odp_sched_api->schedule_order_lock_wait(lock_index);
}
int _odp_schedule_init_global(void)
@@ -211,23 +211,23 @@ int _odp_schedule_init_global(void)
ODP_PRINT("Using scheduler '%s'\n", sched);
if (!strcmp(sched, "basic")) {
- sched_fn = &schedule_basic_fn;
- sched_api = &schedule_basic_api;
+ _odp_sched_fn = &_odp_schedule_basic_fn;
+ _odp_sched_api = &_odp_schedule_basic_api;
} else if (!strcmp(sched, "sp")) {
- sched_fn = &schedule_sp_fn;
- sched_api = &schedule_sp_api;
+ _odp_sched_fn = &_odp_schedule_sp_fn;
+ _odp_sched_api = &_odp_schedule_sp_api;
} else if (!strcmp(sched, "scalable")) {
- sched_fn = &schedule_scalable_fn;
- sched_api = &schedule_scalable_api;
+ _odp_sched_fn = &_odp_schedule_scalable_fn;
+ _odp_sched_api = &_odp_schedule_scalable_api;
} else {
ODP_ABORT("Unknown scheduler specified via ODP_SCHEDULER\n");
return -1;
}
- return sched_fn->init_global();
+ return _odp_sched_fn->init_global();
}
int _odp_schedule_term_global(void)
{
- return sched_fn->term_global();
+ return _odp_sched_fn->term_global();
}
diff --git a/platform/linux-generic/odp_schedule_scalable.c b/platform/linux-generic/odp_schedule_scalable.c
index 39fa498e8..6d3cb6996 100644
--- a/platform/linux-generic/odp_schedule_scalable.c
+++ b/platform/linux-generic/odp_schedule_scalable.c
@@ -50,6 +50,7 @@ ODP_STATIC_ASSERT(CHECK_IS_POWER2(CONFIG_MAX_SCHED_QUEUES),
#define SCHED_GROUP_JOIN 0
#define SCHED_GROUP_LEAVE 1
+#define NUM_AUTO_GROUPS (ODP_SCHED_GROUP_CONTROL + 1)
typedef struct {
odp_shm_t shm;
@@ -66,7 +67,7 @@ typedef struct {
static sched_global_t *global;
-__thread sched_scalable_thread_state_t *sched_ts;
+__thread sched_scalable_thread_state_t *_odp_sched_ts;
static int thread_state_init(int tidx)
{
@@ -93,7 +94,7 @@ static int thread_state_init(int tidx)
ts->rvec[i].rvec_free = &ts->rvec_free;
ts->rvec[i].idx = i;
}
- sched_ts = ts;
+ _odp_sched_ts = ts;
return 0;
}
@@ -184,7 +185,7 @@ static inline bool schedq_elem_on_queue(sched_elem_t *elem)
* Shared metadata btwn scheduler and queue
******************************************************************************/
-void sched_update_enq(sched_elem_t *q, uint32_t actual)
+void _odp_sched_update_enq(sched_elem_t *q, uint32_t actual)
{
qschedstate_t oss, nss;
uint32_t ticket;
@@ -244,7 +245,7 @@ void sched_update_enq(sched_elem_t *q, uint32_t actual)
/* Else queue was not empty or atomic queue already busy. */
}
-void sched_update_enq_sp(sched_elem_t *q, uint32_t actual)
+void _odp_sched_update_enq_sp(sched_elem_t *q, uint32_t actual)
{
qschedstate_t oss, nss;
uint32_t ticket;
@@ -315,7 +316,7 @@ sched_update_deq(sched_elem_t *q,
*/
oss = q->qschst;
do {
- ODP_ASSERT(oss.cur_ticket == sched_ts->ticket);
+ ODP_ASSERT(oss.cur_ticket == _odp_sched_ts->ticket);
nss = oss;
nss.numevts -= actual;
if (nss.numevts > 0 && !pushed) {
@@ -327,7 +328,7 @@ sched_update_deq(sched_elem_t *q,
* Unfortunately nxt_ticket will also be included in
* the CAS operation
*/
- nss.cur_ticket = sched_ts->ticket + 1;
+ nss.cur_ticket = _odp_sched_ts->ticket + 1;
} while (odp_unlikely(!__atomic_compare_exchange(
&q->qschst,
&oss, &nss,
@@ -422,10 +423,10 @@ sched_update_deq_sc(sched_elem_t *q,
uint32_t ticket;
if (atomic) {
- ODP_ASSERT(q->qschst.cur_ticket == sched_ts->ticket);
+ ODP_ASSERT(q->qschst.cur_ticket == _odp_sched_ts->ticket);
ODP_ASSERT(q->qschst.cur_ticket != q->qschst.nxt_ticket);
q->qschst.numevts -= actual;
- q->qschst.cur_ticket = sched_ts->ticket + 1;
+ q->qschst.cur_ticket = _odp_sched_ts->ticket + 1;
if (q->qschst.numevts > 0)
schedq_push(q->schedq, q);
return;
@@ -523,7 +524,7 @@ static void signal_threads_add(sched_group_t *sg, uint32_t sgi, uint32_t prio)
}
}
-sched_queue_t *sched_queue_add(odp_schedule_group_t grp, uint32_t prio)
+sched_queue_t *_odp_sched_queue_add(odp_schedule_group_t grp, uint32_t prio)
{
uint32_t sgi;
sched_group_t *sg;
@@ -562,7 +563,7 @@ static uint32_t sched_pktin_add(odp_schedule_group_t grp, uint32_t prio)
sgi = grp;
sg = global->sg_vec[sgi];
- (void)sched_queue_add(grp, ODP_SCHED_PRIO_PKTIN);
+ (void)_odp_sched_queue_add(grp, ODP_SCHED_PRIO_PKTIN);
return (ODP_SCHED_PRIO_PKTIN - prio) * sg->xfactor;
}
@@ -584,7 +585,7 @@ static void signal_threads_rem(sched_group_t *sg, uint32_t sgi, uint32_t prio)
}
}
-void sched_queue_rem(odp_schedule_group_t grp, uint32_t prio)
+void _odp_sched_queue_rem(odp_schedule_group_t grp, uint32_t prio)
{
uint32_t sgi;
sched_group_t *sg;
@@ -609,7 +610,7 @@ void sched_queue_rem(odp_schedule_group_t grp, uint32_t prio)
static void sched_pktin_rem(odp_schedule_group_t grp)
{
- sched_queue_rem(grp, ODP_SCHED_PRIO_PKTIN);
+ _odp_sched_queue_rem(grp, ODP_SCHED_PRIO_PKTIN);
}
static void update_sg_add(sched_scalable_thread_state_t *ts,
@@ -699,7 +700,7 @@ static inline void _schedule_release_atomic(sched_scalable_thread_state_t *ts)
static inline void _schedule_release_ordered(sched_scalable_thread_state_t *ts)
{
ts->out_of_order = false;
- rctx_release(ts->rctx);
+ _odp_rctx_release(ts->rctx);
ts->rctx = NULL;
}
@@ -718,7 +719,7 @@ static void pktio_start(int pktio_idx,
ODP_ASSERT(rxq < PKTIO_MAX_QUEUES);
__atomic_fetch_add(&global->poll_count[pktio_idx], 1,
__ATOMIC_RELAXED);
- qentry = qentry_from_ext(odpq[i]);
+ qentry = _odp_qentry_from_ext(odpq[i]);
elem = &qentry->s.sched_elem;
elem->cons_type |= FLAG_PKTIN; /* Set pktin queue flag */
elem->pktio_idx = pktio_idx;
@@ -737,7 +738,7 @@ static void pktio_stop(sched_elem_t *elem)
1, __ATOMIC_RELAXED) == 0) {
/* Call stop_finalize when all queues
* of the pktio have been removed */
- sched_cb_pktio_stop_finalize(elem->pktio_idx);
+ _odp_sched_cb_pktio_stop_finalize(elem->pktio_idx);
}
}
@@ -771,7 +772,7 @@ static inline bool is_ordered(sched_elem_t *elem)
static int poll_pktin(sched_elem_t *elem, odp_event_t ev[], int num_evts)
{
- sched_scalable_thread_state_t *ts = sched_ts;
+ sched_scalable_thread_state_t *ts = _odp_sched_ts;
int num, i;
/* For ordered queues only */
reorder_context_t *rctx;
@@ -784,7 +785,7 @@ static int poll_pktin(sched_elem_t *elem, odp_event_t ev[], int num_evts)
rwin = queue_get_rwin((queue_entry_t *)elem);
ODP_ASSERT(rwin != NULL);
if (odp_unlikely(!have_reorder_ctx(ts) ||
- !rwin_reserve_sc(rwin, &sn))) {
+ !_odp_rwin_reserve_sc(rwin, &sn))) {
/* Put back queue on source schedq */
schedq_push(ts->src_schedq, elem);
return 0;
@@ -808,7 +809,7 @@ events_dequeued:
ts->priv_rvec_free =
bitset_clr(ts->priv_rvec_free, idx);
rctx = &ts->rvec[idx];
- rctx_init(rctx, idx, rwin, sn);
+ _odp_rctx_init(rctx, idx, rwin, sn);
/* Are we in-order or out-of-order? */
ts->out_of_order = sn != rwin->hc.head;
ts->rctx = rctx;
@@ -820,7 +821,7 @@ events_dequeued:
/* Ingress queue empty => poll pktio RX queue */
odp_event_t rx_evts[QUEUE_MULTI_MAX];
- int num_rx = sched_cb_pktin_poll_one(elem->pktio_idx,
+ int num_rx = _odp_sched_cb_pktin_poll_one(elem->pktio_idx,
elem->rx_queue,
rx_evts);
if (odp_likely(num_rx > 0)) {
@@ -846,7 +847,7 @@ events_dequeued:
if (is_atomic(elem))
ts->atomq = NULL;
else if (is_ordered(elem))
- rwin_unreserve_sc(rwin, sn);
+ _odp_rwin_unreserve_sc(rwin, sn);
if (odp_likely(num_rx == 0)) {
/* RX queue empty, push it to pktin priority schedq */
@@ -879,7 +880,7 @@ static int _schedule(odp_queue_t *from, odp_event_t ev[], int num_evts)
int cpu_id;
uint32_t i;
- ts = sched_ts;
+ ts = _odp_sched_ts;
atomq = ts->atomq;
timer_run(1);
@@ -1050,13 +1051,13 @@ restart_same:
if (odp_unlikely(!have_reorder_ctx(ts)))
continue;
- /* rwin_reserve and odp_queue_deq must be atomic or
+ /* _odp_rwin_reserve and odp_queue_deq must be atomic or
* there will be a potential race condition.
* Allocate a slot in the reorder window.
*/
rwin = queue_get_rwin((queue_entry_t *)elem);
ODP_ASSERT(rwin != NULL);
- if (odp_unlikely(!rwin_reserve(rwin, &sn))) {
+ if (odp_unlikely(!_odp_rwin_reserve(rwin, &sn))) {
/* Reorder window full */
/* Look at next schedq, find other queue */
continue;
@@ -1088,7 +1089,7 @@ restart_same:
/* Need to initialise reorder context or we can't
* release it later.
*/
- rctx_init(rctx, idx, rwin, sn);
+ _odp_rctx_init(rctx, idx, rwin, sn);
/* Was dequeue successful? */
if (odp_likely(num != 0)) {
@@ -1117,7 +1118,7 @@ restart_same:
* the reorder context needs to be released and
* inserted into the reorder window.
*/
- rctx_release(rctx);
+ _odp_rctx_release(rctx);
ODP_ASSERT(ts->rctx == NULL);
}
/* Dequeue from parallel/ordered queue failed
@@ -1143,7 +1144,7 @@ restart_same:
static void schedule_order_lock(uint32_t lock_index)
{
- struct reorder_context *rctx = sched_ts->rctx;
+ struct reorder_context *rctx = _odp_sched_ts->rctx;
if (odp_unlikely(rctx == NULL ||
rctx->rwin == NULL ||
@@ -1165,7 +1166,7 @@ static void schedule_order_unlock(uint32_t lock_index)
{
struct reorder_context *rctx;
- rctx = sched_ts->rctx;
+ rctx = _odp_sched_ts->rctx;
if (odp_unlikely(rctx == NULL ||
rctx->rwin == NULL ||
lock_index >= rctx->rwin->lock_count ||
@@ -1200,7 +1201,7 @@ static void schedule_release_atomic(void)
{
sched_scalable_thread_state_t *ts;
- ts = sched_ts;
+ ts = _odp_sched_ts;
if (odp_likely(ts->atomq != NULL)) {
#ifdef CONFIG_QSCHST_LOCK
sched_elem_t *atomq;
@@ -1219,7 +1220,7 @@ static void schedule_release_ordered(void)
{
sched_scalable_thread_state_t *ts;
- ts = sched_ts;
+ ts = _odp_sched_ts;
if (ts->rctx != NULL)
_schedule_release_ordered(ts);
}
@@ -1233,7 +1234,7 @@ static int schedule_multi(odp_queue_t *from, uint64_t wait, odp_event_t ev[],
odp_time_t delta;
odp_time_t deadline;
- ts = sched_ts;
+ ts = _odp_sched_ts;
/* Release any previous reorder context. */
if (ts->rctx != NULL)
_schedule_release_ordered(ts);
@@ -1293,7 +1294,7 @@ static odp_event_t schedule(odp_queue_t *from, uint64_t wait)
odp_time_t delta;
odp_time_t deadline;
- ts = sched_ts;
+ ts = _odp_sched_ts;
/* Release any previous reorder context. */
if (ts->rctx != NULL)
_schedule_release_ordered(ts);
@@ -1359,12 +1360,12 @@ static int schedule_multi_no_wait(odp_queue_t *from, odp_event_t events[],
static void schedule_pause(void)
{
- sched_ts->pause = true;
+ _odp_sched_ts->pause = true;
}
static void schedule_resume(void)
{
- sched_ts->pause = false;
+ _odp_sched_ts->pause = false;
}
static uint64_t schedule_wait_time(uint64_t ns)
@@ -1544,13 +1545,13 @@ static int schedule_group_destroy(odp_schedule_group_t group)
goto invalid_group;
}
- if (sched_ts &&
- odp_unlikely(__atomic_load_n(&sched_ts->sg_sem,
+ if (_odp_sched_ts &&
+ odp_unlikely(__atomic_load_n(&_odp_sched_ts->sg_sem,
__ATOMIC_RELAXED) != 0)) {
- (void)__atomic_load_n(&sched_ts->sg_sem,
+ (void)__atomic_load_n(&_odp_sched_ts->sg_sem,
__ATOMIC_ACQUIRE);
- sched_ts->sg_sem = 0;
- update_sg_membership(sched_ts);
+ _odp_sched_ts->sg_sem = 0;
+ update_sg_membership(_odp_sched_ts);
}
odp_spinlock_lock(&global->sched_grp_lock);
@@ -1985,12 +1986,12 @@ static int schedule_term_local(void)
ODP_ERR("Failed to leave ODP_SCHED_GROUP_WORKER\n");
}
- update_sg_membership(sched_ts);
+ update_sg_membership(_odp_sched_ts);
/* Check if the thread is still part of any groups */
- if (sched_ts->num_schedq != 0) {
+ if (_odp_sched_ts->num_schedq != 0) {
ODP_ERR("Thread %d still part of scheduler group(s)\n",
- sched_ts->tidx);
+ _odp_sched_ts->tidx);
rc = -1;
}
@@ -2012,7 +2013,7 @@ static int schedule_config(const odp_schedule_config_t *config)
static int num_grps(void)
{
- return MAX_SCHED_GROUP;
+ return MAX_SCHED_GROUP - NUM_AUTO_GROUPS;
}
/*
@@ -2066,11 +2067,11 @@ static int ord_enq_multi(odp_queue_t handle, void *buf_hdr[], int num,
sched_scalable_thread_state_t *ts;
int actual;
- ts = sched_ts;
+ ts = _odp_sched_ts;
queue = qentry_from_int(handle);
if (ts && odp_unlikely(ts->out_of_order) &&
(queue->s.param.order == ODP_QUEUE_ORDER_KEEP)) {
- actual = rctx_save(queue, (odp_buffer_hdr_t **)buf_hdr, num);
+ actual = _odp_rctx_save(queue, (odp_buffer_hdr_t **)buf_hdr, num);
*ret = actual;
return 1;
}
@@ -2091,7 +2092,7 @@ static void order_lock(void)
reorder_window_t *rwin;
uint32_t sn;
- ts = sched_ts;
+ ts = _odp_sched_ts;
if (odp_unlikely(ts->out_of_order)) {
/* We are processing ordered queue and are currently
* out-of-order.
@@ -2137,7 +2138,7 @@ static int schedule_capability(odp_schedule_capability_t *capa)
return 0;
}
-const schedule_fn_t schedule_scalable_fn = {
+const schedule_fn_t _odp_schedule_scalable_fn = {
.pktio_start = pktio_start,
.thr_add = thr_add,
.thr_rem = thr_rem,
@@ -2155,7 +2156,7 @@ const schedule_fn_t schedule_scalable_fn = {
.max_ordered_locks = schedule_max_ordered_locks,
};
-const schedule_api_t schedule_scalable_api = {
+const schedule_api_t _odp_schedule_scalable_api = {
.schedule_wait_time = schedule_wait_time,
.schedule_capability = schedule_capability,
.schedule_config_init = schedule_config_init,
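
A short sketch (illustrative assumptions, not taken from this patch) of querying the capability values that the scalable scheduler fills in above; with the num_grps() change, max_groups no longer counts the predefined ALL/WORKER/CONTROL groups:

#include <odp_api.h>
#include <stdio.h>

static int print_sched_capa(void)
{
        odp_schedule_capability_t capa;

        if (odp_schedule_capability(&capa))
                return -1;

        printf("max groups:     %u\n", capa.max_groups);
        printf("max priorities: %u\n", capa.max_prios);
        printf("max queues:     %u\n", capa.max_queues);
        printf("max queue size: %u\n", capa.max_queue_size);
        return 0;
}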
diff --git a/platform/linux-generic/odp_schedule_scalable_ordered.c b/platform/linux-generic/odp_schedule_scalable_ordered.c
index c70b225a8..239d18239 100644
--- a/platform/linux-generic/odp_schedule_scalable_ordered.c
+++ b/platform/linux-generic/odp_schedule_scalable_ordered.c
@@ -15,9 +15,9 @@
#include <string.h>
-extern __thread sched_scalable_thread_state_t *sched_ts;
+extern __thread sched_scalable_thread_state_t *_odp_sched_ts;
-reorder_window_t *rwin_alloc(_odp_ishm_pool_t *pool, unsigned lock_count)
+reorder_window_t *_odp_rwin_alloc(_odp_ishm_pool_t *pool, unsigned int lock_count)
{
reorder_window_t *rwin;
uint32_t i;
@@ -40,12 +40,12 @@ reorder_window_t *rwin_alloc(_odp_ishm_pool_t *pool, unsigned lock_count)
return rwin;
}
-int rwin_free(_odp_ishm_pool_t *pool, reorder_window_t *rwin)
+int _odp_rwin_free(_odp_ishm_pool_t *pool, reorder_window_t *rwin)
{
return _odp_ishm_pool_free(pool, rwin);
}
-bool rwin_reserve(reorder_window_t *rwin, uint32_t *sn)
+bool _odp_rwin_reserve(reorder_window_t *rwin, uint32_t *sn)
{
uint32_t head;
uint32_t oldt;
@@ -73,7 +73,7 @@ bool rwin_reserve(reorder_window_t *rwin, uint32_t *sn)
return true;
}
-bool rwin_reserve_sc(reorder_window_t *rwin, uint32_t *sn)
+bool _odp_rwin_reserve_sc(reorder_window_t *rwin, uint32_t *sn)
{
uint32_t head;
uint32_t oldt;
@@ -93,7 +93,7 @@ bool rwin_reserve_sc(reorder_window_t *rwin, uint32_t *sn)
return true;
}
-void rwin_unreserve_sc(reorder_window_t *rwin, uint32_t sn)
+void _odp_rwin_unreserve_sc(reorder_window_t *rwin, uint32_t sn)
{
ODP_ASSERT(rwin->tail == sn + 1);
rwin->tail = sn;
@@ -183,8 +183,8 @@ static void rwin_insert(reorder_window_t *rwin,
__ATOMIC_ACQUIRE));
}
-void rctx_init(reorder_context_t *rctx, uint16_t idx,
- reorder_window_t *rwin, uint32_t sn)
+void _odp_rctx_init(reorder_context_t *rctx, uint16_t idx,
+ reorder_window_t *rwin, uint32_t sn)
{
/* rctx->rvec_free and rctx->idx already initialised in
* thread_state_init function.
@@ -210,13 +210,13 @@ static inline void rctx_free(const reorder_context_t *rctx)
ODP_ASSERT(rctx->rwin != NULL);
/* Set free bit */
- if (rctx->rvec_free == &sched_ts->rvec_free)
+ if (rctx->rvec_free == &_odp_sched_ts->rvec_free)
/* Since it is our own reorder context, we can instead
* perform a non-atomic and relaxed update on our private
* rvec_free.
*/
- sched_ts->priv_rvec_free =
- bitset_set(sched_ts->priv_rvec_free, rctx->idx);
+ _odp_sched_ts->priv_rvec_free =
+ bitset_set(_odp_sched_ts->priv_rvec_free, rctx->idx);
else
atom_bitset_set(rctx->rvec_free, rctx->idx, __ATOMIC_RELEASE);
@@ -225,9 +225,9 @@ static inline void rctx_free(const reorder_context_t *rctx)
rctx = &base[next_idx];
next_idx = rctx->next_idx;
/* Set free bit */
- if (rctx->rvec_free == &sched_ts->rvec_free)
- sched_ts->priv_rvec_free =
- bitset_set(sched_ts->priv_rvec_free, rctx->idx);
+ if (rctx->rvec_free == &_odp_sched_ts->rvec_free)
+ _odp_sched_ts->priv_rvec_free =
+ bitset_set(_odp_sched_ts->priv_rvec_free, rctx->idx);
else
atom_bitset_set(rctx->rvec_free, rctx->idx,
__ATOMIC_RELEASE);
@@ -304,7 +304,7 @@ static void rctx_retire(reorder_context_t *first)
rctx_free(first);
}
-void rctx_release(reorder_context_t *rctx)
+void _odp_rctx_release(reorder_context_t *rctx)
{
/* Insert reorder context into reorder window, potentially calling the
* rctx_retire function for all pending reorder_contexts.
@@ -315,7 +315,7 @@ void rctx_release(reorder_context_t *rctx)
/* Save destination queue and events in the reorder context for deferred
* enqueue.
*/
-int rctx_save(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num)
+int _odp_rctx_save(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num)
{
int i;
sched_scalable_thread_state_t *ts;
@@ -323,7 +323,7 @@ int rctx_save(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num)
reorder_context_t *cur;
bitset_t next_idx;
- ts = sched_ts;
+ ts = _odp_sched_ts;
first = ts->rctx;
ODP_ASSERT(ts->rctx != NULL);
cur = &first[(int)first->cur_idx - (int)first->idx];
@@ -361,7 +361,7 @@ int rctx_save(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num)
first->cur_idx = next_idx;
/* Update current to next */
cur = &ts->rvec[next_idx];
- rctx_init(cur, next_idx, NULL, 0);
+ _odp_rctx_init(cur, next_idx, NULL, 0);
/* The last rctx (so far) */
cur->next_idx = first->idx;
}
diff --git a/platform/linux-generic/odp_schedule_sp.c b/platform/linux-generic/odp_schedule_sp.c
index c0eb4a419..08be438f4 100644
--- a/platform/linux-generic/odp_schedule_sp.c
+++ b/platform/linux-generic/odp_schedule_sp.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2016-2018, Linaro Limited
- * Copyright (c) 2019, Nokia
+ * Copyright (c) 2019-2020, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -240,7 +240,7 @@ static int term_global(void)
int report = 1;
if (sched_global->queue_cmd[qi].s.init) {
- while (sched_queue_deq(qi, &event, 1, 1) > 0) {
+ while (_odp_sched_queue_deq(qi, &event, 1, 1) > 0) {
if (report) {
ODP_ERR("Queue not empty\n");
report = 0;
@@ -268,7 +268,7 @@ static int term_local(void)
static void schedule_config_init(odp_schedule_config_t *config)
{
config->num_queues = CONFIG_MAX_SCHED_QUEUES;
- config->queue_size = queue_glb->config.max_queue_size;
+ config->queue_size = _odp_queue_glb->config.max_queue_size;
}
static int schedule_config(const odp_schedule_config_t *config)
@@ -293,7 +293,7 @@ static void add_group(sched_group_t *sched_group, int thr, int group)
thr_group->group[num] = group;
thr_group->num_group = num + 1;
gen_cnt = odp_atomic_load_u32(&thr_group->gen_cnt);
- odp_atomic_store_u32(&thr_group->gen_cnt, gen_cnt + 1);
+ odp_atomic_store_rel_u32(&thr_group->gen_cnt, gen_cnt + 1);
}
static void remove_group(sched_group_t *sched_group, int thr, int group)
@@ -326,7 +326,7 @@ static void remove_group(sched_group_t *sched_group, int thr, int group)
thr_group->num_group = num - 1;
gen_cnt = odp_atomic_load_u32(&thr_group->gen_cnt);
- odp_atomic_store_u32(&thr_group->gen_cnt, gen_cnt + 1);
+ odp_atomic_store_rel_u32(&thr_group->gen_cnt, gen_cnt + 1);
}
}
@@ -571,7 +571,7 @@ static int schedule_multi(odp_queue_t *from, uint64_t wait,
if (sched_local.cmd) {
/* Continue scheduling if queue is not empty */
- if (sched_queue_empty(sched_local.cmd->s.index) == 0)
+ if (_odp_sched_queue_empty(sched_local.cmd->s.index) == 0)
add_tail(sched_local.cmd);
sched_local.cmd = NULL;
@@ -598,13 +598,13 @@ static int schedule_multi(odp_queue_t *from, uint64_t wait,
odp_queue_t *queue = cmd->s.queue;
for (i = 0; i < num_pktin; i++) {
- num_pkt = sched_cb_pktin_poll(pktio_idx,
- pktin_idx[i],
- hdr_tbl, max_num);
+ num_pkt = _odp_sched_cb_pktin_poll(pktio_idx,
+ pktin_idx[i],
+ hdr_tbl, max_num);
if (num_pkt < 0) {
/* Pktio stopped or closed. */
- sched_cb_pktio_stop_finalize(pktio_idx);
+ _odp_sched_cb_pktio_stop_finalize(pktio_idx);
break;
}
@@ -646,7 +646,7 @@ static int schedule_multi(odp_queue_t *from, uint64_t wait,
}
qi = cmd->s.index;
- num = sched_queue_deq(qi, events, 1, 1);
+ num = _odp_sched_queue_deq(qi, events, 1, 1);
if (num <= 0) {
timer_run(1);
@@ -740,7 +740,7 @@ static odp_schedule_group_t schedule_group_create(const char *name,
{
odp_schedule_group_t group = ODP_SCHED_GROUP_INVALID;
sched_group_t *sched_group = &sched_global->sched_group;
- int i;
+ int i, thr;
odp_ticketlock_lock(&sched_group->s.lock);
@@ -755,10 +755,16 @@ static odp_schedule_group_t schedule_group_create(const char *name,
ODP_SCHED_GROUP_NAME_LEN - 1);
grp_name[ODP_SCHED_GROUP_NAME_LEN - 1] = 0;
}
- odp_thrmask_copy(&sched_group->s.group[i].mask,
- thrmask);
+
+ odp_thrmask_copy(&sched_group->s.group[i].mask, thrmask);
sched_group->s.group[i].allocated = 1;
group = i;
+
+ thr = odp_thrmask_first(thrmask);
+ while (thr >= 0) {
+ add_group(sched_group, thr, group);
+ thr = odp_thrmask_next(thrmask, thr);
+ }
break;
}
}
@@ -771,6 +777,8 @@ static odp_schedule_group_t schedule_group_create(const char *name,
static int schedule_group_destroy(odp_schedule_group_t group)
{
sched_group_t *sched_group = &sched_global->sched_group;
+ int thr;
+ const odp_thrmask_t *thrmask;
if (group < NUM_STATIC_GROUP || group >= NUM_GROUP)
return -1;
@@ -782,6 +790,14 @@ static int schedule_group_destroy(odp_schedule_group_t group)
return -1;
}
+ thrmask = &sched_group->s.group[group].mask;
+
+ thr = odp_thrmask_first(thrmask);
+ while (thr >= 0) {
+ remove_group(sched_group, thr, group);
+ thr = odp_thrmask_next(thrmask, thr);
+ }
+
memset(&sched_group->s.group[group], 0,
sizeof(sched_group->s.group[0]));
@@ -965,13 +981,13 @@ static int schedule_capability(odp_schedule_capability_t *capa)
capa->max_groups = num_grps();
capa->max_prios = schedule_num_prio();
capa->max_queues = CONFIG_MAX_SCHED_QUEUES;
- capa->max_queue_size = queue_glb->config.max_queue_size;
+ capa->max_queue_size = _odp_queue_glb->config.max_queue_size;
return 0;
}
/* Fill in scheduler interface */
-const schedule_fn_t schedule_sp_fn = {
+const schedule_fn_t _odp_schedule_sp_fn = {
.pktio_start = pktio_start,
.thr_add = thr_add,
.thr_rem = thr_rem,
@@ -990,7 +1006,7 @@ const schedule_fn_t schedule_sp_fn = {
};
/* Fill in scheduler API calls */
-const schedule_api_t schedule_sp_api = {
+const schedule_api_t _odp_schedule_sp_api = {
.schedule_wait_time = schedule_wait_time,
.schedule_capability = schedule_capability,
.schedule_config_init = schedule_config_init,
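
A minimal sketch (illustrative, not from this patch) of the group life cycle affected by the change above: the SP scheduler now registers every thread set in the creation mask as a group member and removes the members again on destroy:

#include <odp_api.h>

static int group_roundtrip(void)
{
        odp_thrmask_t mask;
        odp_schedule_group_t grp;

        odp_thrmask_zero(&mask);
        odp_thrmask_set(&mask, odp_thread_id()); /* include the calling thread */

        grp = odp_schedule_group_create("example_grp", &mask);
        if (grp == ODP_SCHED_GROUP_INVALID)
                return -1;

        /* ... create scheduled queues with queue_param.sched.group = grp ... */

        return odp_schedule_group_destroy(grp);
}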
diff --git a/platform/linux-generic/odp_system_info.c b/platform/linux-generic/odp_system_info.c
index 4721a2cd7..638c756e8 100644
--- a/platform/linux-generic/odp_system_info.c
+++ b/platform/linux-generic/odp_system_info.c
@@ -392,7 +392,7 @@ int _odp_system_info_init(void)
if (file != NULL) {
/* Read CPU model, and set max cpu frequency
* if not set from cpufreq. */
- cpuinfo_parser(file, &odp_global_ro.system_info);
+ _odp_cpuinfo_parser(file, &odp_global_ro.system_info);
fclose(file);
} else {
_odp_dummy_cpuinfo(&odp_global_ro.system_info);
@@ -536,6 +536,7 @@ int odp_system_info(odp_system_info_t *info)
info->cpu_arch = sys_info->cpu_arch;
info->cpu_isa_sw = sys_info->cpu_isa_sw;
+ info->cpu_isa_hw = sys_info->cpu_isa_hw;
return 0;
}
@@ -576,7 +577,7 @@ void odp_sys_info_print(void)
str[len] = '\0';
ODP_PRINT("%s", str);
- sys_info_print_arch();
+ _odp_sys_info_print_arch();
}
void odp_sys_config_print(void)
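
A small sketch (illustrative, not from this patch) of reading the structure that now also carries cpu_isa_hw; the ISA fields are architecture specific, so the formatted dump is left to odp_sys_info_print():

#include <odp_api.h>
#include <stdio.h>

static void show_cpu_info(void)
{
        odp_system_info_t info;

        if (odp_system_info(&info) == 0)
                printf("cpu_arch enum: %d\n", (int)info.cpu_arch);

        /* cpu_isa_sw / cpu_isa_hw hold per-architecture ISA values */
        odp_sys_info_print();
}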
diff --git a/platform/linux-generic/odp_thread.c b/platform/linux-generic/odp_thread.c
index d88e6da5c..bec7362bf 100644
--- a/platform/linux-generic/odp_thread.c
+++ b/platform/linux-generic/odp_thread.c
@@ -140,10 +140,10 @@ int _odp_thread_init_local(odp_thread_type_t type)
group_worker = 1;
group_control = 1;
- if (sched_fn->get_config) {
+ if (_odp_sched_fn->get_config) {
schedule_config_t schedule_config;
- sched_fn->get_config(&schedule_config);
+ _odp_sched_fn->get_config(&schedule_config);
group_all = schedule_config.group_enable.all;
group_worker = schedule_config.group_enable.worker;
group_control = schedule_config.group_enable.control;
@@ -172,13 +172,13 @@ int _odp_thread_init_local(odp_thread_type_t type)
_odp_this_thread = &thread_globals->thr[id];
if (group_all)
- sched_fn->thr_add(ODP_SCHED_GROUP_ALL, id);
+ _odp_sched_fn->thr_add(ODP_SCHED_GROUP_ALL, id);
if (type == ODP_THREAD_WORKER && group_worker)
- sched_fn->thr_add(ODP_SCHED_GROUP_WORKER, id);
+ _odp_sched_fn->thr_add(ODP_SCHED_GROUP_WORKER, id);
if (type == ODP_THREAD_CONTROL && group_control)
- sched_fn->thr_add(ODP_SCHED_GROUP_CONTROL, id);
+ _odp_sched_fn->thr_add(ODP_SCHED_GROUP_CONTROL, id);
return 0;
}
@@ -194,23 +194,23 @@ int _odp_thread_term_local(void)
group_worker = 1;
group_control = 1;
- if (sched_fn->get_config) {
+ if (_odp_sched_fn->get_config) {
schedule_config_t schedule_config;
- sched_fn->get_config(&schedule_config);
+ _odp_sched_fn->get_config(&schedule_config);
group_all = schedule_config.group_enable.all;
group_worker = schedule_config.group_enable.worker;
group_control = schedule_config.group_enable.control;
}
if (group_all)
- sched_fn->thr_rem(ODP_SCHED_GROUP_ALL, id);
+ _odp_sched_fn->thr_rem(ODP_SCHED_GROUP_ALL, id);
if (type == ODP_THREAD_WORKER && group_worker)
- sched_fn->thr_rem(ODP_SCHED_GROUP_WORKER, id);
+ _odp_sched_fn->thr_rem(ODP_SCHED_GROUP_WORKER, id);
if (type == ODP_THREAD_CONTROL && group_control)
- sched_fn->thr_rem(ODP_SCHED_GROUP_CONTROL, id);
+ _odp_sched_fn->thr_rem(ODP_SCHED_GROUP_CONTROL, id);
odp_spinlock_lock(&thread_globals->lock);
num = free_id(id);
diff --git a/platform/linux-generic/odp_timer.c b/platform/linux-generic/odp_timer.c
index 35a168063..ce33c6787 100644
--- a/platform/linux-generic/odp_timer.c
+++ b/platform/linux-generic/odp_timer.c
@@ -513,7 +513,7 @@ static inline odp_timer_t timer_alloc(timer_pool_t *tp,
_ODP_MEMMODEL_RLS);
hdl = tp_idx_to_handle(tp, idx);
/* Add timer to queue */
- queue_fn->timer_add(queue);
+ _odp_queue_fn->timer_add(queue);
} else {
__odp_errno = ENFILE; /* Reusing file table overflow */
hdl = ODP_TIMER_INVALID;
@@ -534,7 +534,7 @@ static inline odp_buffer_t timer_free(timer_pool_t *tp, uint32_t idx)
odp_buffer_t old_buf = timer_set_unused(tp, idx);
/* Remove timer from queue */
- queue_fn->timer_rem(tim->queue);
+ _odp_queue_fn->timer_rem(tim->queue);
/* Destroy timer */
timer_fini(tim, &tp->tick_buf[idx]);
@@ -1254,6 +1254,8 @@ int odp_timer_capability(odp_timer_clk_src_t clk_src,
capa->max_tmo.res_hz = timer_global->highest_res_hz;
capa->max_tmo.min_tmo = 0;
capa->max_tmo.max_tmo = MAX_TMO_NSEC;
+ capa->queue_type_sched = true;
+ capa->queue_type_plain = true;
return 0;
}
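
A brief sketch (illustrative, not from this patch) of using the timer capability flags added above to pick between scheduled and plain completion queues:

#include <odp_api.h>

static int check_timer_queue_types(void)
{
        odp_timer_capability_t capa;

        if (odp_timer_capability(ODP_CLOCK_CPU, &capa))
                return -1;

        if (capa.queue_type_sched)
                return 0;   /* timeouts may be delivered to scheduled queues */
        if (capa.queue_type_plain)
                return 1;   /* fall back to plain queues and odp_queue_deq() */
        return -1;
}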
diff --git a/platform/linux-generic/odp_traffic_mngr.c b/platform/linux-generic/odp_traffic_mngr.c
index ebd7d1785..e1aff6e8c 100644
--- a/platform/linux-generic/odp_traffic_mngr.c
+++ b/platform/linux-generic/odp_traffic_mngr.c
@@ -2031,9 +2031,9 @@ static int tm_enqueue(tm_system_t *tm_system,
work_item.queue_num = tm_queue_obj->queue_num;
work_item.pkt = pkt;
- sched_fn->order_lock();
+ _odp_sched_fn->order_lock();
rc = input_work_queue_append(tm_system, &work_item);
- sched_fn->order_unlock();
+ _odp_sched_fn->order_unlock();
if (rc < 0) {
ODP_DBG("%s work queue full\n", __func__);
@@ -3985,8 +3985,8 @@ odp_tm_queue_t odp_tm_queue_create(odp_tm_t odp_tm,
queue_obj->queue = queue;
odp_queue_context_set(queue, queue_obj, sizeof(tm_queue_obj_t));
- queue_fn->set_enq_deq_fn(queue, queue_tm_reenq,
- queue_tm_reenq_multi, NULL, NULL);
+ _odp_queue_fn->set_enq_deq_fn(queue, queue_tm_reenq,
+ queue_tm_reenq_multi, NULL, NULL);
tm_system->queue_num_tbl[queue_obj->queue_num - 1] = queue_obj;
diff --git a/platform/linux-generic/pktio/dpdk.c b/platform/linux-generic/pktio/dpdk.c
index 63cce4005..f65bee7a1 100644
--- a/platform/linux-generic/pktio/dpdk.c
+++ b/platform/linux-generic/pktio/dpdk.c
@@ -605,10 +605,10 @@ static inline int mbuf_to_pkt(pktio_entry_t *pktio_entry,
rte_pktmbuf_free(mbuf);
continue;
}
- if (cls_classify_packet(pktio_entry,
- (const uint8_t *)data,
- pkt_len, pkt_len, &pool,
- &parsed_hdr, false))
+ if (_odp_cls_classify_packet(pktio_entry,
+ (const uint8_t *)data,
+ pkt_len, pkt_len, &pool,
+ &parsed_hdr, false))
goto fail;
}
@@ -898,10 +898,10 @@ static inline int mbuf_to_pkt_zero(pktio_entry_t *pktio_entry,
rte_pktmbuf_free(mbuf);
continue;
}
- if (cls_classify_packet(pktio_entry,
- (const uint8_t *)data,
- pkt_len, pkt_len, &pool,
- &parsed_hdr, false)) {
+ if (_odp_cls_classify_packet(pktio_entry,
+ (const uint8_t *)data,
+ pkt_len, pkt_len, &pool,
+ &parsed_hdr, false)) {
ODP_ERR("Unable to classify packet\n");
rte_pktmbuf_free(mbuf);
continue;
@@ -1022,7 +1022,7 @@ static uint32_t dpdk_vdev_mtu_get(uint16_t port_id)
return 0;
}
- mtu = mtu_get_fd(sockfd, ifr.ifr_name);
+ mtu = _odp_mtu_get_fd(sockfd, ifr.ifr_name);
close(sockfd);
return mtu;
}
@@ -1073,7 +1073,7 @@ static int dpdk_vdev_promisc_mode_get(uint16_t port_id)
return -1;
}
- mode = promisc_mode_get_fd(sockfd, ifr.ifr_name);
+ mode = _odp_promisc_mode_get_fd(sockfd, ifr.ifr_name);
close(sockfd);
return mode;
}
@@ -1096,7 +1096,7 @@ static int dpdk_vdev_promisc_mode_set(uint16_t port_id, int enable)
return -1;
}
- mode = promisc_mode_set_fd(sockfd, ifr.ifr_name, enable);
+ mode = _odp_promisc_mode_set_fd(sockfd, ifr.ifr_name, enable);
close(sockfd);
return mode;
}
diff --git a/platform/linux-generic/pktio/dpdk_parse.c b/platform/linux-generic/pktio/dpdk_parse.c
index 9dad1a7ae..2b41b14a2 100644
--- a/platform/linux-generic/pktio/dpdk_parse.c
+++ b/platform/linux-generic/pktio/dpdk_parse.c
@@ -19,6 +19,10 @@
#include <rte_config.h>
#include <rte_mbuf.h>
+/* ppc64 rte_memcpy.h (included through rte_mbuf.h) may define vector */
+#if defined(__PPC64__) && defined(vector)
+ #undef vector
+#endif
#define IP4_CSUM_RESULT(ol_flags) (ol_flags & PKT_RX_IP_CKSUM_MASK)
#define L4_CSUM_RESULT(ol_flags) (ol_flags & PKT_RX_L4_CKSUM_MASK)
diff --git a/platform/linux-generic/pktio/ethtool_rss.c b/platform/linux-generic/pktio/ethtool_rss.c
index b66a385b9..80a66420e 100644
--- a/platform/linux-generic/pktio/ethtool_rss.c
+++ b/platform/linux-generic/pktio/ethtool_rss.c
@@ -53,8 +53,8 @@ static inline int get_rss_hash_options(int fd, const char *name,
return 0;
}
-int rss_conf_get_fd(int fd, const char *name,
- odp_pktin_hash_proto_t *hash_proto)
+int _odp_rss_conf_get_fd(int fd, const char *name,
+ odp_pktin_hash_proto_t *hash_proto)
{
uint64_t options;
int rss_enabled = 0;
@@ -131,14 +131,14 @@ static inline int set_rss_hash(int fd, const char *name,
return 0;
}
-int rss_conf_set_fd(int fd, const char *name,
- const odp_pktin_hash_proto_t *hash_proto)
+int _odp_rss_conf_set_fd(int fd, const char *name,
+ const odp_pktin_hash_proto_t *hash_proto)
{
uint64_t options;
odp_pktin_hash_proto_t cur_hash;
/* Compare to currently set hash protocols */
- rss_conf_get_fd(fd, name, &cur_hash);
+ _odp_rss_conf_get_fd(fd, name, &cur_hash);
if (hash_proto->proto.ipv4_udp && !cur_hash.proto.ipv4_udp) {
options = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3;
@@ -173,8 +173,8 @@ int rss_conf_set_fd(int fd, const char *name,
return 0;
}
-int rss_conf_get_supported_fd(int fd, const char *name,
- odp_pktin_hash_proto_t *hash_proto)
+int _odp_rss_conf_get_supported_fd(int fd, const char *name,
+ odp_pktin_hash_proto_t *hash_proto)
{
uint64_t options;
int rss_supported = 0;
@@ -220,7 +220,7 @@ int rss_conf_get_supported_fd(int fd, const char *name,
return rss_supported;
}
-void rss_conf_print(const odp_pktin_hash_proto_t *hash_proto)
+void _odp_rss_conf_print(const odp_pktin_hash_proto_t *hash_proto)
{ int max_len = 512;
char str[max_len];
int len = 0;
diff --git a/platform/linux-generic/pktio/io_ops.c b/platform/linux-generic/pktio/io_ops.c
index c12146b5f..cd85164e6 100644
--- a/platform/linux-generic/pktio/io_ops.c
+++ b/platform/linux-generic/pktio/io_ops.c
@@ -12,7 +12,7 @@
* will be picked.
* Array must be NULL terminated */
const pktio_if_ops_t * const pktio_if_ops[] = {
- &loopback_pktio_ops,
+ &_odp_loopback_pktio_ops,
#ifdef _ODP_PKTIO_DPDK
&dpdk_pktio_ops,
#endif
@@ -20,12 +20,12 @@ const pktio_if_ops_t * const pktio_if_ops[] = {
&netmap_pktio_ops,
#endif
#ifdef _ODP_PKTIO_PCAP
- &pcap_pktio_ops,
+ &_odp_pcap_pktio_ops,
#endif
- &ipc_pktio_ops,
- &tap_pktio_ops,
- &null_pktio_ops,
- &sock_mmap_pktio_ops,
- &sock_mmsg_pktio_ops,
+ &_odp_ipc_pktio_ops,
+ &_odp_tap_pktio_ops,
+ &_odp_null_pktio_ops,
+ &_odp_sock_mmap_pktio_ops,
+ &_odp_sock_mmsg_pktio_ops,
NULL
};
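
For reference, a minimal sketch (illustrative, not from this patch) showing that the interface name given to odp_pktio_open() is what selects one of the pktio types registered above (for example "loop", "null:0", "pcap:in=file.pcap" or a kernel interface name for the socket/tap drivers):

#include <odp_api.h>

static odp_pktio_t open_loop_interface(odp_pool_t pkt_pool)
{
        odp_pktio_param_t param;

        odp_pktio_param_init(&param);
        param.in_mode  = ODP_PKTIN_MODE_SCHED;
        param.out_mode = ODP_PKTOUT_MODE_DIRECT;

        /* "loop" matches _odp_loopback_pktio_ops in the table above */
        return odp_pktio_open("loop", pkt_pool, &param);
}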
diff --git a/platform/linux-generic/pktio/ipc.c b/platform/linux-generic/pktio/ipc.c
index 972697137..cc2b7db61 100644
--- a/platform/linux-generic/pktio/ipc.c
+++ b/platform/linux-generic/pktio/ipc.c
@@ -964,7 +964,7 @@ static int ipc_close(pktio_entry_t *pktio_entry)
return 0;
}
-const pktio_if_ops_t ipc_pktio_ops = {
+const pktio_if_ops_t _odp_ipc_pktio_ops = {
.name = "ipc",
.print = NULL,
.init_global = NULL,
diff --git a/platform/linux-generic/pktio/loop.c b/platform/linux-generic/pktio/loop.c
index f44df208f..cc4cb1751 100644
--- a/platform/linux-generic/pktio/loop.c
+++ b/platform/linux-generic/pktio/loop.c
@@ -144,9 +144,9 @@ static int loopback_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
pkt_addr = odp_packet_data(pkt);
}
- ret = cls_classify_packet(pktio_entry, pkt_addr,
- pkt_len, seg_len,
- &new_pool, pkt_hdr, true);
+ ret = _odp_cls_classify_packet(pktio_entry, pkt_addr,
+ pkt_len, seg_len,
+ &new_pool, pkt_hdr, true);
if (ret) {
failed++;
odp_packet_free(pkt);
@@ -167,9 +167,9 @@ static int loopback_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
pkt_hdr = packet_hdr(new_pkt);
}
} else {
- packet_parse_layer(pkt_hdr,
- pktio_entry->s.config.parser.layer,
- pktio_entry->s.in_chksums);
+ _odp_packet_parse_layer(pkt_hdr,
+ pktio_entry->s.config.parser.layer,
+ pktio_entry->s.in_chksums);
}
packet_set_ts(pkt_hdr, ts);
@@ -473,7 +473,7 @@ static int loop_init_global(void)
return 0;
}
-const pktio_if_ops_t loopback_pktio_ops = {
+const pktio_if_ops_t _odp_loopback_pktio_ops = {
.name = "loop",
.print = NULL,
.init_global = loop_init_global,
diff --git a/platform/linux-generic/pktio/netmap.c b/platform/linux-generic/pktio/netmap.c
index 3229e69c8..0e01a9a1f 100644
--- a/platform/linux-generic/pktio/netmap.c
+++ b/platform/linux-generic/pktio/netmap.c
@@ -284,9 +284,9 @@ static int netmap_input_queues_config(pktio_entry_t *pktio_entry,
lockless = (p->op_mode == ODP_PKTIO_OP_MT_UNSAFE);
if (p->hash_enable && num_queues > 1) {
- if (rss_conf_set_fd(pkt_priv(pktio_entry)->sockfd,
- pkt_priv(pktio_entry)->if_name,
- &p->hash_proto)) {
+ if (_odp_rss_conf_set_fd(pkt_priv(pktio_entry)->sockfd,
+ pkt_priv(pktio_entry)->if_name,
+ &p->hash_proto)) {
ODP_ERR("Failed to configure input hash\n");
return -1;
}
@@ -357,8 +357,8 @@ static int netmap_link_status(pktio_entry_t *pktio_entry)
if (pkt_priv(pktio_entry)->is_virtual)
return ODP_PKTIO_LINK_STATUS_UP;
- return link_status_fd(pkt_priv(pktio_entry)->sockfd,
- pkt_priv(pktio_entry)->if_name);
+ return _odp_link_status_fd(pkt_priv(pktio_entry)->sockfd,
+ pkt_priv(pktio_entry)->if_name);
}
static int netmap_link_info(pktio_entry_t *pktio_entry, odp_pktio_link_info_t *info)
@@ -379,7 +379,7 @@ static int netmap_link_info(pktio_entry_t *pktio_entry, odp_pktio_link_info_t *i
return 0;
}
- return link_info_fd(pkt_nm->sockfd, pkt_nm->if_name, info);
+ return _odp_link_info_fd(pkt_nm->sockfd, pkt_nm->if_name, info);
}
/**
@@ -543,7 +543,7 @@ static int netmap_open(odp_pktio_t id ODP_UNUSED, pktio_entry_t *pktio_entry,
/* Use either interface MTU or netmap buffer size as MTU,
* whichever is smaller. */
- mtu = mtu_get_fd(pkt_nm->sockfd, pkt_nm->if_name);
+ mtu = _odp_mtu_get_fd(pkt_nm->sockfd, pkt_nm->if_name);
if (mtu == 0) {
ODP_ERR("Unable to read interface MTU\n");
goto error;
@@ -552,8 +552,8 @@ static int netmap_open(odp_pktio_t id ODP_UNUSED, pktio_entry_t *pktio_entry,
/* Netmap requires that interface MTU size <= nm buf size */
if (mtu > nm_buf_size) {
- if (mtu_set_fd(pkt_nm->sockfd, pkt_nm->if_name,
- nm_buf_size)) {
+ if (_odp_mtu_set_fd(pkt_nm->sockfd, pkt_nm->if_name,
+ nm_buf_size)) {
ODP_ERR("Unable to set interface MTU\n");
goto error;
}
@@ -602,8 +602,8 @@ static int netmap_open(odp_pktio_t id ODP_UNUSED, pktio_entry_t *pktio_entry,
}
/* Check if RSS is supported. If not, set 'max_input_queues' to 1. */
- if (rss_conf_get_supported_fd(pkt_nm->sockfd, netdev,
- &hash_proto) == 0) {
+ if (_odp_rss_conf_get_supported_fd(pkt_nm->sockfd, netdev,
+ &hash_proto) == 0) {
ODP_DBG("RSS not supported\n");
pktio_entry->s.capa.max_input_queues = 1;
}
@@ -614,12 +614,12 @@ static int netmap_open(odp_pktio_t id ODP_UNUSED, pktio_entry_t *pktio_entry,
if ((pkt_nm->if_flags & IFF_UP) == 0)
ODP_DBG("%s is down\n", pkt_nm->if_name);
- err = mac_addr_get_fd(pkt_nm->sockfd, netdev, pkt_nm->if_mac);
+ err = _odp_mac_addr_get_fd(pkt_nm->sockfd, netdev, pkt_nm->if_mac);
if (err)
goto error;
/* netmap uses only ethtool to get statistics counters */
- err = ethtool_stats_get_fd(pkt_nm->sockfd, pkt_nm->if_name, &cur_stats);
+ err = _odp_ethtool_stats_get_fd(pkt_nm->sockfd, pkt_nm->if_name, &cur_stats);
if (err) {
ODP_ERR("netmap pktio %s does not support statistics counters\n",
pkt_nm->if_name);
@@ -832,9 +832,9 @@ static inline int netmap_pkt_to_odp(pktio_entry_t *pktio_entry,
odp_prefetch(slot.buf);
if (pktio_cls_enabled(pktio_entry)) {
- if (cls_classify_packet(pktio_entry,
- (const uint8_t *)slot.buf, len,
- len, &pool, &parsed_hdr, true))
+ if (_odp_cls_classify_packet(pktio_entry,
+ (const uint8_t *)slot.buf, len,
+ len, &pool, &parsed_hdr, true))
goto fail;
}
@@ -852,9 +852,9 @@ static inline int netmap_pkt_to_odp(pktio_entry_t *pktio_entry,
if (pktio_cls_enabled(pktio_entry))
copy_packet_cls_metadata(&parsed_hdr, pkt_hdr);
else
- packet_parse_layer(pkt_hdr,
- pktio_entry->s.config.parser.layer,
- pktio_entry->s.in_chksums);
+ _odp_packet_parse_layer(pkt_hdr,
+ pktio_entry->s.config.parser.layer,
+ pktio_entry->s.in_chksums);
packet_set_ts(pkt_hdr, ts);
}
@@ -1178,8 +1178,8 @@ static int netmap_promisc_mode_set(pktio_entry_t *pktio_entry,
return -1;
}
- return promisc_mode_set_fd(pkt_priv(pktio_entry)->sockfd,
- pkt_priv(pktio_entry)->if_name, enable);
+ return _odp_promisc_mode_set_fd(pkt_priv(pktio_entry)->sockfd,
+ pkt_priv(pktio_entry)->if_name, enable);
}
static int netmap_promisc_mode_get(pktio_entry_t *pktio_entry)
@@ -1187,8 +1187,8 @@ static int netmap_promisc_mode_get(pktio_entry_t *pktio_entry)
if (pkt_priv(pktio_entry)->is_virtual)
return 0;
- return promisc_mode_get_fd(pkt_priv(pktio_entry)->sockfd,
- pkt_priv(pktio_entry)->if_name);
+ return _odp_promisc_mode_get_fd(pkt_priv(pktio_entry)->sockfd,
+ pkt_priv(pktio_entry)->if_name);
}
static int netmap_capability(pktio_entry_t *pktio_entry,
@@ -1206,9 +1206,9 @@ static int netmap_stats(pktio_entry_t *pktio_entry,
return 0;
}
- return sock_stats_fd(pktio_entry,
- stats,
- pkt_priv(pktio_entry)->sockfd);
+ return _odp_sock_stats_fd(pktio_entry,
+ stats,
+ pkt_priv(pktio_entry)->sockfd);
}
static int netmap_stats_reset(pktio_entry_t *pktio_entry)
@@ -1219,17 +1219,17 @@ static int netmap_stats_reset(pktio_entry_t *pktio_entry)
return 0;
}
- return sock_stats_reset_fd(pktio_entry,
- pkt_priv(pktio_entry)->sockfd);
+ return _odp_sock_stats_reset_fd(pktio_entry,
+ pkt_priv(pktio_entry)->sockfd);
}
static void netmap_print(pktio_entry_t *pktio_entry)
{
odp_pktin_hash_proto_t hash_proto;
- if (rss_conf_get_fd(pkt_priv(pktio_entry)->sockfd,
- pkt_priv(pktio_entry)->if_name, &hash_proto))
- rss_conf_print(&hash_proto);
+ if (_odp_rss_conf_get_fd(pkt_priv(pktio_entry)->sockfd,
+ pkt_priv(pktio_entry)->if_name, &hash_proto))
+ _odp_rss_conf_print(&hash_proto);
}
static int netmap_init_global(void)
diff --git a/platform/linux-generic/pktio/null.c b/platform/linux-generic/pktio/null.c
index 864276db2..be7ba4991 100644
--- a/platform/linux-generic/pktio/null.c
+++ b/platform/linux-generic/pktio/null.c
@@ -192,7 +192,7 @@ static int null_link_info(pktio_entry_t *pktio_entry ODP_UNUSED, odp_pktio_link_
return 0;
}
-const pktio_if_ops_t null_pktio_ops = {
+const pktio_if_ops_t _odp_null_pktio_ops = {
.name = "null",
.print = NULL,
.init_global = null_init_global,
diff --git a/platform/linux-generic/pktio/pcap.c b/platform/linux-generic/pktio/pcap.c
index 69614a375..4ca0064a3 100644
--- a/platform/linux-generic/pktio/pcap.c
+++ b/platform/linux-generic/pktio/pcap.c
@@ -39,6 +39,7 @@
#include <odp_api.h>
#include <odp/api/plat/packet_inlines.h>
+#include <odp_classification_internal.h>
#include <odp_global_data.h>
#include <odp_packet_internal.h>
#include <odp_packet_io_internal.h>
@@ -226,6 +227,7 @@ static int pcapif_recv_pkt(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
{
int i;
struct pcap_pkthdr *hdr;
+ odp_pool_t new_pool;
const u_char *data;
odp_packet_t pkt;
odp_packet_hdr_t *pkt_hdr;
@@ -276,9 +278,32 @@ static int pcapif_recv_pkt(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
break;
}
- packet_parse_layer(pkt_hdr,
- pktio_entry->s.config.parser.layer,
- pktio_entry->s.in_chksums);
+ if (pktio_cls_enabled(pktio_entry)) {
+ odp_packet_t new_pkt;
+
+ ret = _odp_cls_classify_packet(pktio_entry, data,
+ pkt_len, pkt_len,
+ &new_pool, pkt_hdr, true);
+ if (ret) {
+ odp_packet_free(pkt);
+ continue;
+ }
+ if (new_pool != pcap->pool) {
+ new_pkt = odp_packet_copy(pkt, new_pool);
+
+ odp_packet_free(pkt);
+
+ if (odp_unlikely(new_pkt == ODP_PACKET_INVALID))
+ continue;
+
+ pkt = new_pkt;
+ pkt_hdr = packet_hdr(new_pkt);
+ }
+ } else {
+ _odp_packet_parse_layer(pkt_hdr,
+ pktio_entry->s.config.parser.layer,
+ pktio_entry->s.in_chksums);
+ }
pktio_entry->s.stats.in_octets += pkt_hdr->frame_len;
packet_set_ts(pkt_hdr, ts);
@@ -472,7 +497,7 @@ static int pcapif_link_info(pktio_entry_t *pktio_entry ODP_UNUSED, odp_pktio_lin
return 0;
}
-const pktio_if_ops_t pcap_pktio_ops = {
+const pktio_if_ops_t _odp_pcap_pktio_ops = {
.name = "pcap",
.print = NULL,
.init_global = pcapif_init_global,
diff --git a/platform/linux-generic/pktio/pktio_common.c b/platform/linux-generic/pktio/pktio_common.c
index af2d6e397..4090f8063 100644
--- a/platform/linux-generic/pktio/pktio_common.c
+++ b/platform/linux-generic/pktio/pktio_common.c
@@ -48,10 +48,10 @@ static int sock_recv_mq_tmo_select(pktio_entry_t * const *entry,
return 0;
}
-int sock_recv_mq_tmo_try_int_driven(const struct odp_pktin_queue_t queues[],
- unsigned int num_q, unsigned int *from,
- odp_packet_t packets[], int num,
- uint64_t usecs, int *trial_successful)
+int _odp_sock_recv_mq_tmo_try_int_driven(const struct odp_pktin_queue_t queues[],
+ unsigned int num_q, unsigned int *from,
+ odp_packet_t packets[], int num,
+ uint64_t usecs, int *trial_successful)
{
unsigned int i;
pktio_entry_t *entry[num_q];
diff --git a/platform/linux-generic/pktio/socket.c b/platform/linux-generic/pktio/socket.c
index a248c839e..9d8c5b0bc 100644
--- a/platform/linux-generic/pktio/socket.c
+++ b/platform/linux-generic/pktio/socket.c
@@ -137,11 +137,11 @@ static int sock_setup_pkt(pktio_entry_t *pktio_entry, const char *netdev,
}
if_idx = ethreq.ifr_ifindex;
- err = mac_addr_get_fd(sockfd, netdev, pkt_sock->if_mac);
+ err = _odp_mac_addr_get_fd(sockfd, netdev, pkt_sock->if_mac);
if (err != 0)
goto error;
- pkt_sock->mtu = mtu_get_fd(sockfd, netdev);
+ pkt_sock->mtu = _odp_mtu_get_fd(sockfd, netdev);
if (!pkt_sock->mtu)
goto error;
@@ -156,8 +156,8 @@ static int sock_setup_pkt(pktio_entry_t *pktio_entry, const char *netdev,
goto error;
}
- pktio_entry->s.stats_type = sock_stats_type_fd(pktio_entry,
- pkt_sock->sockfd);
+ pktio_entry->s.stats_type = _odp_sock_stats_type_fd(pktio_entry,
+ pkt_sock->sockfd);
if (pktio_entry->s.stats_type == STATS_UNSUPPORTED)
ODP_DBG("pktio: %s unsupported stats\n", pktio_entry->s.name);
@@ -264,10 +264,10 @@ static int sock_mmsg_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
if (msgvec[i].msg_hdr.msg_iov->iov_len < pkt_len)
seg_len = msgvec[i].msg_hdr.msg_iov->iov_len;
- if (cls_classify_packet(pktio_entry, base, pkt_len,
- seg_len, &pool, pkt_hdr,
- true)) {
- ODP_ERR("cls_classify_packet failed");
+ if (_odp_cls_classify_packet(pktio_entry, base, pkt_len,
+ seg_len, &pool, pkt_hdr,
+ true)) {
+ ODP_ERR("_odp_cls_classify_packet failed");
odp_packet_free(pkt);
continue;
}
@@ -291,9 +291,9 @@ static int sock_mmsg_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
pkt_hdr->input = pktio_entry->s.handle;
if (!pktio_cls_enabled(pktio_entry))
- packet_parse_layer(pkt_hdr,
- pktio_entry->s.config.parser.layer,
- pktio_entry->s.in_chksums);
+ _odp_packet_parse_layer(pkt_hdr,
+ pktio_entry->s.config.parser.layer,
+ pktio_entry->s.in_chksums);
packet_set_ts(pkt_hdr, ts);
@@ -486,25 +486,25 @@ static int sock_mac_addr_get(pktio_entry_t *pktio_entry,
static int sock_promisc_mode_set(pktio_entry_t *pktio_entry,
odp_bool_t enable)
{
- return promisc_mode_set_fd(pkt_priv(pktio_entry)->sockfd,
- pktio_entry->s.name, enable);
+ return _odp_promisc_mode_set_fd(pkt_priv(pktio_entry)->sockfd,
+ pktio_entry->s.name, enable);
}
static int sock_promisc_mode_get(pktio_entry_t *pktio_entry)
{
- return promisc_mode_get_fd(pkt_priv(pktio_entry)->sockfd,
- pktio_entry->s.name);
+ return _odp_promisc_mode_get_fd(pkt_priv(pktio_entry)->sockfd,
+ pktio_entry->s.name);
}
static int sock_link_status(pktio_entry_t *pktio_entry)
{
- return link_status_fd(pkt_priv(pktio_entry)->sockfd,
- pktio_entry->s.name);
+ return _odp_link_status_fd(pkt_priv(pktio_entry)->sockfd,
+ pktio_entry->s.name);
}
static int sock_link_info(pktio_entry_t *pktio_entry, odp_pktio_link_info_t *info)
{
- return link_info_fd(pkt_priv(pktio_entry)->sockfd, pktio_entry->s.name, info);
+ return _odp_link_info_fd(pkt_priv(pktio_entry)->sockfd, pktio_entry->s.name, info);
}
static int sock_capability(pktio_entry_t *pktio_entry ODP_UNUSED,
@@ -533,7 +533,7 @@ static int sock_stats(pktio_entry_t *pktio_entry,
return 0;
}
- return sock_stats_fd(pktio_entry, stats, pkt_priv(pktio_entry)->sockfd);
+ return _odp_sock_stats_fd(pktio_entry, stats, pkt_priv(pktio_entry)->sockfd);
}
static int sock_stats_reset(pktio_entry_t *pktio_entry)
@@ -544,7 +544,7 @@ static int sock_stats_reset(pktio_entry_t *pktio_entry)
return 0;
}
- return sock_stats_reset_fd(pktio_entry, pkt_priv(pktio_entry)->sockfd);
+ return _odp_sock_stats_reset_fd(pktio_entry, pkt_priv(pktio_entry)->sockfd);
}
static int sock_init_global(void)
@@ -560,7 +560,7 @@ static int sock_init_global(void)
return 0;
}
-const pktio_if_ops_t sock_mmsg_pktio_ops = {
+const pktio_if_ops_t _odp_sock_mmsg_pktio_ops = {
.name = "socket",
.print = NULL,
.init_global = sock_init_global,
diff --git a/platform/linux-generic/pktio/socket_common.c b/platform/linux-generic/pktio/socket_common.c
index 703ffe6b6..b9bd97411 100644
--- a/platform/linux-generic/pktio/socket_common.c
+++ b/platform/linux-generic/pktio/socket_common.c
@@ -48,7 +48,7 @@ struct ethtool_link_settings {
* ODP_PACKET_SOCKET_MMAP:
* ODP_PACKET_NETMAP:
*/
-int mac_addr_get_fd(int fd, const char *name, unsigned char mac_dst[])
+int _odp_mac_addr_get_fd(int fd, const char *name, unsigned char mac_dst[])
{
struct ifreq ethreq;
int ret;
@@ -73,7 +73,7 @@ int mac_addr_get_fd(int fd, const char *name, unsigned char mac_dst[])
* ODP_PACKET_SOCKET_MMAP:
* ODP_PACKET_NETMAP:
*/
-uint32_t mtu_get_fd(int fd, const char *name)
+uint32_t _odp_mtu_get_fd(int fd, const char *name)
{
struct ifreq ifr;
int ret;
@@ -92,7 +92,7 @@ uint32_t mtu_get_fd(int fd, const char *name)
/*
* ODP_PACKET_NETMAP:
*/
-int mtu_set_fd(int fd, const char *name, int mtu)
+int _odp_mtu_set_fd(int fd, const char *name, int mtu)
{
struct ifreq ifr;
int ret;
@@ -115,7 +115,7 @@ int mtu_set_fd(int fd, const char *name, int mtu)
* ODP_PACKET_SOCKET_MMAP:
* ODP_PACKET_NETMAP:
*/
-int promisc_mode_set_fd(int fd, const char *name, int enable)
+int _odp_promisc_mode_set_fd(int fd, const char *name, int enable)
{
struct ifreq ifr;
int ret;
@@ -149,7 +149,7 @@ int promisc_mode_set_fd(int fd, const char *name, int enable)
* ODP_PACKET_SOCKET_MMAP:
* ODP_PACKET_NETMAP:
*/
-int promisc_mode_get_fd(int fd, const char *name)
+int _odp_promisc_mode_get_fd(int fd, const char *name)
{
struct ifreq ifr;
int ret;
@@ -166,7 +166,7 @@ int promisc_mode_get_fd(int fd, const char *name)
return !!(ifr.ifr_flags & IFF_PROMISC);
}
-int link_status_fd(int fd, const char *name)
+int _odp_link_status_fd(int fd, const char *name)
{
struct ifreq ifr;
int ret;
@@ -185,7 +185,7 @@ int link_status_fd(int fd, const char *name)
return ODP_PKTIO_LINK_STATUS_DOWN;
}
-int link_info_fd(int fd, const char *name, odp_pktio_link_info_t *info)
+int _odp_link_info_fd(int fd, const char *name, odp_pktio_link_info_t *info)
{
struct ethtool_link_settings hcmd = {.cmd = ETHTOOL_GLINKSETTINGS};
struct ethtool_link_settings *ecmd;
@@ -193,7 +193,7 @@ int link_info_fd(int fd, const char *name, odp_pktio_link_info_t *info)
struct ifreq ifr;
int status;
- status = link_status_fd(fd, name);
+ status = _odp_link_status_fd(fd, name);
if (status < 0)
return -1;
diff --git a/platform/linux-generic/pktio/socket_mmap.c b/platform/linux-generic/pktio/socket_mmap.c
index be6969e5c..9fc992d78 100644
--- a/platform/linux-generic/pktio/socket_mmap.c
+++ b/platform/linux-generic/pktio/socket_mmap.c
@@ -210,9 +210,9 @@ static inline unsigned pkt_mmap_v2_rx(pktio_entry_t *pktio_entry,
}
if (pktio_cls_enabled(pktio_entry)) {
- if (cls_classify_packet(pktio_entry, pkt_buf, pkt_len,
- pkt_len, &pool, &parsed_hdr,
- true)) {
+ if (_odp_cls_classify_packet(pktio_entry, pkt_buf, pkt_len,
+ pkt_len, &pool, &parsed_hdr,
+ true)) {
odp_packet_free(pkt);
tp_hdr->tp_status = TP_STATUS_KERNEL;
frame_num = next_frame_num;
@@ -276,9 +276,9 @@ static inline unsigned pkt_mmap_v2_rx(pktio_entry_t *pktio_entry,
if (pktio_cls_enabled(pktio_entry))
copy_packet_cls_metadata(&parsed_hdr, hdr);
else
- packet_parse_layer(hdr,
- pktio_entry->s.config.parser.layer,
- pktio_entry->s.in_chksums);
+ _odp_packet_parse_layer(hdr,
+ pktio_entry->s.config.parser.layer,
+ pktio_entry->s.in_chksums);
packet_set_ts(hdr, ts);
@@ -611,7 +611,7 @@ static int sock_mmap_open(odp_pktio_t id ODP_UNUSED,
if (ret != 0)
goto error;
- pkt_sock->mtu = mtu_get_fd(pkt_sock->sockfd, netdev);
+ pkt_sock->mtu = _odp_mtu_get_fd(pkt_sock->sockfd, netdev);
if (!pkt_sock->mtu)
goto error;
@@ -631,7 +631,7 @@ static int sock_mmap_open(odp_pktio_t id ODP_UNUSED,
if (ret != 0)
goto error;
- ret = mac_addr_get_fd(pkt_sock->sockfd, netdev, pkt_sock->if_mac);
+ ret = _odp_mac_addr_get_fd(pkt_sock->sockfd, netdev, pkt_sock->if_mac);
if (ret != 0)
goto error;
@@ -642,13 +642,13 @@ static int sock_mmap_open(odp_pktio_t id ODP_UNUSED,
goto error;
}
- pktio_entry->s.stats_type = sock_stats_type_fd(pktio_entry,
- pkt_sock->sockfd);
+ pktio_entry->s.stats_type = _odp_sock_stats_type_fd(pktio_entry,
+ pkt_sock->sockfd);
if (pktio_entry->s.stats_type == STATS_UNSUPPORTED)
ODP_DBG("pktio: %s unsupported stats\n", pktio_entry->s.name);
- ret = sock_stats_reset_fd(pktio_entry,
- pkt_priv(pktio_entry)->sockfd);
+ ret = _odp_sock_stats_reset_fd(pktio_entry,
+ pkt_priv(pktio_entry)->sockfd);
if (ret != 0)
goto error;
@@ -788,8 +788,8 @@ static int sock_mmap_send(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
static uint32_t sock_mmap_mtu_get(pktio_entry_t *pktio_entry)
{
- return mtu_get_fd(pkt_priv(pktio_entry)->sockfd,
- pktio_entry->s.name);
+ return _odp_mtu_get_fd(pkt_priv(pktio_entry)->sockfd,
+ pktio_entry->s.name);
}
static int sock_mmap_mac_addr_get(pktio_entry_t *pktio_entry, void *mac_addr)
@@ -801,25 +801,25 @@ static int sock_mmap_mac_addr_get(pktio_entry_t *pktio_entry, void *mac_addr)
static int sock_mmap_promisc_mode_set(pktio_entry_t *pktio_entry,
odp_bool_t enable)
{
- return promisc_mode_set_fd(pkt_priv(pktio_entry)->sockfd,
- pktio_entry->s.name, enable);
+ return _odp_promisc_mode_set_fd(pkt_priv(pktio_entry)->sockfd,
+ pktio_entry->s.name, enable);
}
static int sock_mmap_promisc_mode_get(pktio_entry_t *pktio_entry)
{
- return promisc_mode_get_fd(pkt_priv(pktio_entry)->sockfd,
- pktio_entry->s.name);
+ return _odp_promisc_mode_get_fd(pkt_priv(pktio_entry)->sockfd,
+ pktio_entry->s.name);
}
static int sock_mmap_link_status(pktio_entry_t *pktio_entry)
{
- return link_status_fd(pkt_priv(pktio_entry)->sockfd,
- pktio_entry->s.name);
+ return _odp_link_status_fd(pkt_priv(pktio_entry)->sockfd,
+ pktio_entry->s.name);
}
static int sock_mmap_link_info(pktio_entry_t *pktio_entry, odp_pktio_link_info_t *info)
{
- return link_info_fd(pkt_priv(pktio_entry)->sockfd, pktio_entry->s.name, info);
+ return _odp_link_info_fd(pkt_priv(pktio_entry)->sockfd, pktio_entry->s.name, info);
}
static int sock_mmap_capability(pktio_entry_t *pktio_entry ODP_UNUSED,
@@ -848,9 +848,9 @@ static int sock_mmap_stats(pktio_entry_t *pktio_entry,
return 0;
}
- return sock_stats_fd(pktio_entry,
- stats,
- pkt_priv(pktio_entry)->sockfd);
+ return _odp_sock_stats_fd(pktio_entry,
+ stats,
+ pkt_priv(pktio_entry)->sockfd);
}
static int sock_mmap_stats_reset(pktio_entry_t *pktio_entry)
@@ -861,8 +861,8 @@ static int sock_mmap_stats_reset(pktio_entry_t *pktio_entry)
return 0;
}
- return sock_stats_reset_fd(pktio_entry,
- pkt_priv(pktio_entry)->sockfd);
+ return _odp_sock_stats_reset_fd(pktio_entry,
+ pkt_priv(pktio_entry)->sockfd);
}
static int sock_mmap_init_global(void)
@@ -878,7 +878,7 @@ static int sock_mmap_init_global(void)
return 0;
}
-const pktio_if_ops_t sock_mmap_pktio_ops = {
+const pktio_if_ops_t _odp_sock_mmap_pktio_ops = {
.name = "socket_mmap",
.print = NULL,
.init_global = sock_mmap_init_global,
diff --git a/platform/linux-generic/pktio/stats/ethtool_stats.c b/platform/linux-generic/pktio/stats/ethtool_stats.c
index f4f03c162..0bd4f2c61 100644
--- a/platform/linux-generic/pktio/stats/ethtool_stats.c
+++ b/platform/linux-generic/pktio/stats/ethtool_stats.c
@@ -164,7 +164,7 @@ static int ethtool_stats(int fd, struct ifreq *ifr, odp_pktio_stats_t *stats)
return 0;
}
-int ethtool_stats_get_fd(int fd, const char *name, odp_pktio_stats_t *stats)
+int _odp_ethtool_stats_get_fd(int fd, const char *name, odp_pktio_stats_t *stats)
{
struct ifreq ifr;
diff --git a/platform/linux-generic/pktio/stats/packet_io_stats.c b/platform/linux-generic/pktio/stats/packet_io_stats.c
index abeec0799..b79cf17aa 100644
--- a/platform/linux-generic/pktio/stats/packet_io_stats.c
+++ b/platform/linux-generic/pktio/stats/packet_io_stats.c
@@ -10,7 +10,7 @@
#include <string.h>
-int sock_stats_reset_fd(pktio_entry_t *pktio_entry, int fd)
+int _odp_sock_stats_reset_fd(pktio_entry_t *pktio_entry, int fd)
{
int err = 0;
odp_pktio_stats_t cur_stats;
@@ -24,11 +24,11 @@ int sock_stats_reset_fd(pktio_entry_t *pktio_entry, int fd)
memset(&cur_stats, 0, sizeof(odp_pktio_stats_t));
if (pktio_entry->s.stats_type == STATS_ETHTOOL) {
- (void)ethtool_stats_get_fd(fd,
- pktio_entry->s.name,
- &cur_stats);
+ (void)_odp_ethtool_stats_get_fd(fd,
+ pktio_entry->s.name,
+ &cur_stats);
} else if (pktio_entry->s.stats_type == STATS_SYSFS) {
- err = sysfs_stats(pktio_entry, &cur_stats);
+ err = _odp_sysfs_stats(pktio_entry, &cur_stats);
if (err != 0)
ODP_ERR("stats error\n");
}
@@ -40,9 +40,9 @@ int sock_stats_reset_fd(pktio_entry_t *pktio_entry, int fd)
return err;
}
-int sock_stats_fd(pktio_entry_t *pktio_entry,
- odp_pktio_stats_t *stats,
- int fd)
+int _odp_sock_stats_fd(pktio_entry_t *pktio_entry,
+ odp_pktio_stats_t *stats,
+ int fd)
{
odp_pktio_stats_t cur_stats;
int ret = 0;
@@ -52,11 +52,11 @@ int sock_stats_fd(pktio_entry_t *pktio_entry,
memset(&cur_stats, 0, sizeof(odp_pktio_stats_t));
if (pktio_entry->s.stats_type == STATS_ETHTOOL) {
- (void)ethtool_stats_get_fd(fd,
- pktio_entry->s.name,
- &cur_stats);
+ (void)_odp_ethtool_stats_get_fd(fd,
+ pktio_entry->s.name,
+ &cur_stats);
} else if (pktio_entry->s.stats_type == STATS_SYSFS) {
- sysfs_stats(pktio_entry, &cur_stats);
+ _odp_sysfs_stats(pktio_entry, &cur_stats);
}
stats->in_octets = cur_stats.in_octets -
@@ -82,14 +82,14 @@ int sock_stats_fd(pktio_entry_t *pktio_entry,
return ret;
}
-pktio_stats_type_t sock_stats_type_fd(pktio_entry_t *pktio_entry, int fd)
+pktio_stats_type_t _odp_sock_stats_type_fd(pktio_entry_t *pktio_entry, int fd)
{
odp_pktio_stats_t cur_stats;
- if (!ethtool_stats_get_fd(fd, pktio_entry->s.name, &cur_stats))
+ if (!_odp_ethtool_stats_get_fd(fd, pktio_entry->s.name, &cur_stats))
return STATS_ETHTOOL;
- if (!sysfs_stats(pktio_entry, &cur_stats))
+ if (!_odp_sysfs_stats(pktio_entry, &cur_stats))
return STATS_SYSFS;
return STATS_UNSUPPORTED;
diff --git a/platform/linux-generic/pktio/stats/sysfs_stats.c b/platform/linux-generic/pktio/stats/sysfs_stats.c
index 2de2bb131..474586e19 100644
--- a/platform/linux-generic/pktio/stats/sysfs_stats.c
+++ b/platform/linux-generic/pktio/stats/sysfs_stats.c
@@ -41,8 +41,8 @@ static int sysfs_get_val(const char *fname, uint64_t *val)
return 0;
}
-int sysfs_stats(pktio_entry_t *pktio_entry,
- odp_pktio_stats_t *stats)
+int _odp_sysfs_stats(pktio_entry_t *pktio_entry,
+ odp_pktio_stats_t *stats)
{
char fname[256];
const char *dev = pktio_entry->s.name;
diff --git a/platform/linux-generic/pktio/tap.c b/platform/linux-generic/pktio/tap.c
index 4ea95ad30..42693bd6d 100644
--- a/platform/linux-generic/pktio/tap.c
+++ b/platform/linux-generic/pktio/tap.c
@@ -167,10 +167,10 @@ static int tap_pktio_open(odp_pktio_t id ODP_UNUSED,
goto tap_err;
}
- mtu = mtu_get_fd(skfd, devname + 4);
+ mtu = _odp_mtu_get_fd(skfd, devname + 4);
if (mtu == 0) {
__odp_errno = errno;
- ODP_ERR("mtu_get_fd failed: %s\n", strerror(errno));
+ ODP_ERR("_odp_mtu_get_fd failed: %s\n", strerror(errno));
goto sock_err;
}
@@ -279,9 +279,9 @@ static odp_packet_t pack_odp_pkt(pktio_entry_t *pktio_entry, const void *data,
uint16_t frame_offset = pktio_entry->s.pktin_frame_offset;
if (pktio_cls_enabled(pktio_entry)) {
- if (cls_classify_packet(pktio_entry, data, len, len,
- &pkt_priv(pktio_entry)->pool,
- &parsed_hdr, true)) {
+ if (_odp_cls_classify_packet(pktio_entry, data, len, len,
+ &pkt_priv(pktio_entry)->pool,
+ &parsed_hdr, true)) {
return ODP_PACKET_INVALID;
}
}
@@ -305,9 +305,9 @@ static odp_packet_t pack_odp_pkt(pktio_entry_t *pktio_entry, const void *data,
if (pktio_cls_enabled(pktio_entry))
copy_packet_cls_metadata(&parsed_hdr, pkt_hdr);
else
- packet_parse_layer(pkt_hdr,
- pktio_entry->s.config.parser.layer,
- pktio_entry->s.in_chksums);
+ _odp_packet_parse_layer(pkt_hdr,
+ pktio_entry->s.config.parser.layer,
+ pktio_entry->s.in_chksums);
packet_set_ts(pkt_hdr, ts);
pkt_hdr->input = pktio_entry->s.handle;
@@ -430,8 +430,8 @@ static uint32_t tap_mtu_get(pktio_entry_t *pktio_entry)
{
uint32_t ret;
- ret = mtu_get_fd(pkt_priv(pktio_entry)->skfd,
- pktio_entry->s.name + 4);
+ ret = _odp_mtu_get_fd(pkt_priv(pktio_entry)->skfd,
+ pktio_entry->s.name + 4);
if (ret > 0)
pkt_priv(pktio_entry)->mtu = ret;
@@ -441,14 +441,14 @@ static uint32_t tap_mtu_get(pktio_entry_t *pktio_entry)
static int tap_promisc_mode_set(pktio_entry_t *pktio_entry,
odp_bool_t enable)
{
- return promisc_mode_set_fd(pkt_priv(pktio_entry)->skfd,
- pktio_entry->s.name + 4, enable);
+ return _odp_promisc_mode_set_fd(pkt_priv(pktio_entry)->skfd,
+ pktio_entry->s.name + 4, enable);
}
static int tap_promisc_mode_get(pktio_entry_t *pktio_entry)
{
- return promisc_mode_get_fd(pkt_priv(pktio_entry)->skfd,
- pktio_entry->s.name + 4);
+ return _odp_promisc_mode_get_fd(pkt_priv(pktio_entry)->skfd,
+ pktio_entry->s.name + 4);
}
static int tap_mac_addr_get(pktio_entry_t *pktio_entry, void *mac_addr)
@@ -469,13 +469,13 @@ static int tap_mac_addr_set(pktio_entry_t *pktio_entry, const void *mac_addr)
static int tap_link_status(pktio_entry_t *pktio_entry)
{
- return link_status_fd(pkt_priv(pktio_entry)->skfd,
- pktio_entry->s.name + 4);
+ return _odp_link_status_fd(pkt_priv(pktio_entry)->skfd,
+ pktio_entry->s.name + 4);
}
static int tap_link_info(pktio_entry_t *pktio_entry, odp_pktio_link_info_t *info)
{
- return link_info_fd(pkt_priv(pktio_entry)->skfd, pktio_entry->s.name + 4, info);
+ return _odp_link_info_fd(pkt_priv(pktio_entry)->skfd, pktio_entry->s.name + 4, info);
}
static int tap_capability(pktio_entry_t *pktio_entry ODP_UNUSED,
@@ -497,7 +497,7 @@ static int tap_capability(pktio_entry_t *pktio_entry ODP_UNUSED,
return 0;
}
-const pktio_if_ops_t tap_pktio_ops = {
+const pktio_if_ops_t _odp_tap_pktio_ops = {
.name = "tap",
.print = NULL,
.init_global = NULL,
diff --git a/platform/linux-generic/test/Makefile.am b/platform/linux-generic/test/Makefile.am
index c32bd274b..d66f5ece3 100644
--- a/platform/linux-generic/test/Makefile.am
+++ b/platform/linux-generic/test/Makefile.am
@@ -9,15 +9,21 @@ endif
SUBDIRS =
-if test_vald
-TESTS = validation/api/pktio/pktio_run.sh \
- validation/api/pktio/pktio_run_tap.sh \
- validation/api/shmem/shmem_linux$(EXEEXT) \
- ipsec/ipsec_api_example.sh \
+if WITH_EXAMPLES
+TESTS = ipsec/ipsec_api_example.sh \
ipsec/ipsec_crypto_example.sh
dist_check_SCRIPTS = ipsec/ipsec_api_example.sh \
ipsec/ipsec_crypto_example.sh
+else
+TESTS =
+dist_check_SCRIPTS =
+endif
+
+if test_vald
+TESTS += validation/api/pktio/pktio_run.sh \
+ validation/api/pktio/pktio_run_tap.sh \
+ validation/api/shmem/shmem_linux$(EXEEXT)
test_SCRIPTS = $(dist_check_SCRIPTS)
diff --git a/platform/linux-generic/test/example/Makefile.am b/platform/linux-generic/test/example/Makefile.am
index 2e6a7ce6c..22b254cd7 100644
--- a/platform/linux-generic/test/example/Makefile.am
+++ b/platform/linux-generic/test/example/Makefile.am
@@ -1,4 +1,6 @@
SUBDIRS = \
+ classifier \
+ generator \
l2fwd_simple \
l3fwd \
packet \
diff --git a/platform/linux-generic/test/example/classifier/Makefile.am b/platform/linux-generic/test/example/classifier/Makefile.am
new file mode 100644
index 000000000..2ffced539
--- /dev/null
+++ b/platform/linux-generic/test/example/classifier/Makefile.am
@@ -0,0 +1 @@
+EXTRA_DIST = pktio_env
diff --git a/platform/linux-generic/test/example/classifier/pktio_env b/platform/linux-generic/test/example/classifier/pktio_env
new file mode 100644
index 000000000..429fef9ae
--- /dev/null
+++ b/platform/linux-generic/test/example/classifier/pktio_env
@@ -0,0 +1,44 @@
+#!/bin/sh
+#
+# Copyright (c) 2020, Marvell
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Script to set up the interfaces used for running the application on linux-generic.
+#
+# For linux-generic the default behavior is to create one pcap interface
+# which uses udp64.pcap to inject traffic.
+#
+# Network set-up
+# +---------+ +-----------+
+# |pcap intf| IF0<---> | Classifier|
+# +---------+ +-----------+
+#
+
+PCAP_IN=`find . ${TEST_DIR} $(dirname $0) -name udp64.pcap -print -quit`
+echo "using PCAP in=${PCAP_IN}"
+
+IF0=pcap:in=${PCAP_IN}
+TIME_OUT_VAL=10
+CPASS_COUNT_ARG1=100
+CPASS_COUNT_ARG2=100
+
+if [ "$0" = "$BASH_SOURCE" ]; then
+ echo "Error: Platform specific env file has to be sourced."
+fi
+
+validate_result()
+{
+ return 0;
+}
+
+setup_interfaces()
+{
+ return 0
+}
+
+cleanup_interfaces()
+{
+ return 0
+}
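
For reference, a minimal sketch (the helper name and pool handling are illustrative, not part of the patch) of how an application would open the pcap replay interface that IF0 above points at:

#include <odp_api.h>

/* Open "pcap:in=<file>", which the linux-generic pcap pktio replays as
 * packet input. udp64.pcap is the capture shipped with the example. */
static odp_pktio_t open_pcap_input(odp_pool_t pool)
{
	odp_pktio_param_t param;

	odp_pktio_param_init(&param);
	param.in_mode = ODP_PKTIN_MODE_SCHED;

	return odp_pktio_open("pcap:in=udp64.pcap", pool, &param);
}
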
diff --git a/platform/linux-generic/test/example/generator/Makefile.am b/platform/linux-generic/test/example/generator/Makefile.am
new file mode 100644
index 000000000..2ffced539
--- /dev/null
+++ b/platform/linux-generic/test/example/generator/Makefile.am
@@ -0,0 +1 @@
+EXTRA_DIST = pktio_env
diff --git a/platform/linux-generic/test/example/generator/pktio_env b/platform/linux-generic/test/example/generator/pktio_env
new file mode 100644
index 000000000..06af667e8
--- /dev/null
+++ b/platform/linux-generic/test/example/generator/pktio_env
@@ -0,0 +1,34 @@
+#!/bin/sh
+#
+# Copyright (C) 2020, Marvell
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Script to set up the interfaces used for running the application on linux-generic.
+#
+# Generator uses null interfaces to validate udp mode.
+#
+# Network set-up
+# IF0 ---> null:0
+
+IF0=null:0
+
+if [ "$0" = "$BASH_SOURCE" ]; then
+ echo "Error: Platform specific env file has to be sourced."
+fi
+
+validate_result()
+{
+ return 0
+}
+
+setup_interfaces()
+{
+ return 0
+}
+
+cleanup_interfaces()
+{
+ return 0
+}
diff --git a/platform/linux-generic/test/example/switch/pktio_env b/platform/linux-generic/test/example/switch/pktio_env
index cbf5c4ddc..78201cec7 100644
--- a/platform/linux-generic/test/example/switch/pktio_env
+++ b/platform/linux-generic/test/example/switch/pktio_env
@@ -35,6 +35,7 @@ validate_result()
do
if [ `stat -c %s pcapout${i}.pcap` -ne `stat -c %s ${PCAP_IN}` ]; then
echo "Error: Output file $i size not matching"
+ exit 1
fi
rm -f pcapout${i}.pcap
done
diff --git a/scripts/ci/build_riscv64.sh b/scripts/ci/build_riscv64.sh
new file mode 100755
index 000000000..474633727
--- /dev/null
+++ b/scripts/ci/build_riscv64.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+set -e
+
+export TARGET_ARCH=riscv64-linux-gnu
+if [ "${CC#clang}" != "${CC}" ] ; then
+ export CC="clang --target=${TARGET_ARCH}"
+ export CXX="clang++ --target=${TARGET_ARCH}"
+else
+ export CC="${TARGET_ARCH}-gcc"
+ export CXX="${TARGET_ARCH}-g++"
+fi
+
+# Use target libraries
+export PKG_CONFIG_PATH=
+export PKG_CONFIG_LIBDIR=/usr/lib/${TARGET_ARCH}/pkgconfig
+
+cd "$(dirname "$0")"/../..
+./bootstrap
+./configure \
+ --host=${TARGET_ARCH} --build=x86_64-linux-gnu \
+ --prefix=/opt/odp \
+ ${CONF}
+
+make clean
+
+make -j $(nproc)
+
+make install
+
+pushd ${HOME}
+${CC} ${CFLAGS} ${OLDPWD}/example/hello/odp_hello.c -o odp_hello_inst_dynamic `PKG_CONFIG_PATH=/opt/odp/lib/pkgconfig:${PKG_CONFIG_PATH} pkg-config --cflags --libs libodp-linux`
+popd
diff --git a/test/common/test_packet_parser.h b/test/common/test_packet_parser.h
index 78a42c391..745a620c3 100644
--- a/test/common/test_packet_parser.h
+++ b/test/common/test_packet_parser.h
@@ -63,6 +63,19 @@ static const uint8_t test_packet_ipv4_udp[] = {
0x0E, 0x0F, 0x10, 0x11
};
+/* ETH SNAP IPv4 UDP */
+static const uint8_t test_packet_snap_ipv4_udp[] = {
+ 0x00, 0x00, 0x09, 0x00, 0x05, 0x00, 0x00, 0x00,
+ 0x09, 0x00, 0x04, 0x00, 0x00, 0x36, 0xAA, 0xAA,
+ 0x03, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x2E, 0x00, 0x00, 0x00, 0x00, 0x40, 0x11,
+ 0xF7, 0x6B, 0xC0, 0xA8, 0x01, 0x02, 0xC0, 0xA8,
+ 0x01, 0x01, 0x00, 0x3F, 0x00, 0x3F, 0x00, 0x1A,
+ 0x33, 0x97, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
+ 0x0E, 0x0F, 0x10, 0x11
+};
+
/* VLAN IPv4 UDP
* - type 0x8100, tag 23
*/
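
The 802.3/SNAP encapsulated UDP frame added above is shared input for the parser tests. As a hedged sketch (the helper name and pool setup are illustrative), feeding such a frame through the ODP packet parser looks roughly like this:

#include <odp_api.h>
#include <string.h>

/* Copy a raw test frame into a packet and parse it up to L4 */
static int check_snap_udp(odp_pool_t pool, const uint8_t *frame, uint32_t len)
{
	odp_packet_parse_param_t param;
	odp_packet_t pkt = odp_packet_alloc(pool, len);
	int ok;

	if (pkt == ODP_PACKET_INVALID)
		return -1;

	odp_packet_copy_from_mem(pkt, 0, len, frame);

	memset(&param, 0, sizeof(param));
	param.proto = ODP_PROTO_ETH;
	param.last_layer = ODP_PROTO_LAYER_ALL;

	if (odp_packet_parse(pkt, 0, &param)) {
		odp_packet_free(pkt);
		return -1;
	}

	/* LLC/SNAP framing should still be reported as IPv4 over UDP */
	ok = odp_packet_has_ipv4(pkt) && odp_packet_has_udp(pkt);
	odp_packet_free(pkt);

	return ok ? 0 : -1;
}
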
diff --git a/test/performance/Makefile.am b/test/performance/Makefile.am
index 57e38a2da..4696b2c5e 100644
--- a/test/performance/Makefile.am
+++ b/test/performance/Makefile.am
@@ -23,7 +23,8 @@ TESTSCRIPTS = odp_l2fwd_run.sh \
odp_packet_gen_run.sh \
odp_sched_latency_run.sh \
odp_sched_pktio_run.sh \
- odp_scheduling_run.sh
+ odp_scheduling_run.sh \
+ odp_timer_perf_run.sh
if ODP_PKTIO_PCAP
TESTSCRIPTS += odp_pktio_ordered_run.sh
diff --git a/test/performance/odp_l2fwd.c b/test/performance/odp_l2fwd.c
index 4cd2e4900..7cbc2cfab 100644
--- a/test/performance/odp_l2fwd.c
+++ b/test/performance/odp_l2fwd.c
@@ -1,5 +1,6 @@
/* Copyright (c) 2014-2018, Linaro Limited
* Copyright (c) 2019-2020, Nokia
+ * Copyright (c) 2020, Marvell
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -41,6 +42,12 @@
/* Maximum pktio index table size */
#define MAX_PKTIO_INDEXES 1024
+/* Default vector size */
+#define DEFAULT_VEC_SIZE MAX_PKT_BURST
+
+/* Default vector timeout */
+#define DEFAULT_VEC_TMO ODP_TIME_MSEC_IN_NS
+
/* Packet input mode */
typedef enum pktin_mode_t {
DIRECT_RECV,
@@ -93,6 +100,10 @@ typedef struct {
int burst_rx; /* Receive burst size */
int pool_per_if; /* Create pool per interface */
uint32_t num_pkt; /* Number of packets per pool */
+ bool vector_mode; /* Vector mode enabled */
+ uint32_t num_vec; /* Number of vectors per pool */
+ uint64_t vec_tmo_ns; /* Vector formation timeout in ns */
+ uint32_t vec_size; /* Vector size */
int verbose; /* Verbose output */
int promisc_mode; /* Promiscuous mode enabled */
} appl_args_t;
@@ -264,9 +275,9 @@ static inline int event_queue_send(odp_queue_t queue, odp_packet_t *pkt_tbl,
while (sent < pkts) {
ret = odp_queue_enq_multi(queue, &ev_tbl[sent], pkts - sent);
- if (ret < 0) {
- ODPH_ERR("Failed to send packet as events\n");
- break;
+ if (odp_unlikely(ret <= 0)) {
+ if (ret < 0 || odp_atomic_load_u32(&gbl_args->exit_threads))
+ break;
}
sent += ret;
@@ -310,6 +321,187 @@ static inline int copy_packets(odp_packet_t *pkt_tbl, int pkts)
}
/*
+ * Packet IO worker thread using scheduled queues and vector mode.
+ *
+ * arg thread arguments of type 'thread_args_t *'
+ */
+static int run_worker_sched_mode_vector(void *arg)
+{
+ int thr;
+ int i;
+ int pktio, num_pktio;
+ uint16_t max_burst;
+ odp_pktout_queue_t pktout[MAX_PKTIOS];
+ odp_queue_t tx_queue[MAX_PKTIOS];
+ thread_args_t *thr_args = arg;
+ stats_t *stats = &thr_args->stats;
+ int use_event_queue = gbl_args->appl.out_mode;
+ pktin_mode_t in_mode = gbl_args->appl.in_mode;
+
+ thr = odp_thread_id();
+ max_burst = gbl_args->appl.burst_rx;
+
+ if (gbl_args->appl.num_groups) {
+ odp_thrmask_t mask;
+
+ odp_thrmask_zero(&mask);
+ odp_thrmask_set(&mask, thr);
+
+ /* Join non-default groups */
+ for (i = 0; i < thr_args->num_groups; i++) {
+ if (odp_schedule_group_join(thr_args->group[i],
+ &mask)) {
+ ODPH_ERR("Join failed\n");
+ return -1;
+ }
+ }
+ }
+
+ num_pktio = thr_args->num_pktio;
+
+ if (num_pktio > MAX_PKTIOS) {
+ ODPH_ERR("Too many pktios %i\n", num_pktio);
+ return -1;
+ }
+
+ for (pktio = 0; pktio < num_pktio; pktio++) {
+ tx_queue[pktio] = thr_args->pktio[pktio].tx_queue;
+ pktout[pktio] = thr_args->pktio[pktio].pktout;
+ }
+
+ printf("[%02i] PKTIN_SCHED_%s_VECTOR, %s\n", thr,
+ (in_mode == SCHED_PARALLEL) ? "PARALLEL" :
+ ((in_mode == SCHED_ATOMIC) ? "ATOMIC" : "ORDERED"),
+ (use_event_queue) ? "PKTOUT_QUEUE" : "PKTOUT_DIRECT");
+
+ odp_barrier_wait(&gbl_args->init_barrier);
+
+ /* Loop packets */
+ while (!odp_atomic_load_u32(&gbl_args->exit_threads)) {
+ odp_event_t ev_tbl[MAX_PKT_BURST];
+ int pktvs;
+
+ pktvs = odp_schedule_multi_no_wait(NULL, ev_tbl, max_burst);
+
+ if (pktvs <= 0)
+ continue;
+
+ for (i = 0; i < pktvs; i++) {
+ odp_packet_vector_t pkt_vec;
+ odp_packet_t *pkt_tbl;
+ unsigned int tx_drops;
+ int src_idx, dst_idx;
+ int pkts, sent;
+
+ ODPH_ASSERT(odp_event_type(ev_tbl[i]) == ODP_EVENT_PACKET_VECTOR);
+ pkt_vec = odp_packet_vector_from_event(ev_tbl[i]);
+ pkts = odp_packet_vector_tbl(pkt_vec, &pkt_tbl);
+
+ if (odp_unlikely(gbl_args->appl.extra_feat)) {
+ if (gbl_args->appl.packet_copy) {
+ int fails;
+
+ fails = copy_packets(pkt_tbl, pkts);
+ stats->s.copy_fails += fails;
+ }
+
+ if (gbl_args->appl.chksum)
+ chksum_insert(pkt_tbl, pkts);
+
+ if (gbl_args->appl.error_check) {
+ int rx_drops;
+
+ /* Drop packets with errors */
+ rx_drops = drop_err_pkts(pkt_tbl, pkts);
+
+ if (odp_unlikely(rx_drops)) {
+ stats->s.rx_drops += rx_drops;
+ if (pkts == rx_drops) {
+ odp_packet_vector_free(pkt_vec);
+ continue;
+ }
+
+ pkts -= rx_drops;
+ odp_packet_vector_size_set(pkt_vec, pkts);
+ }
+ }
+ }
+
+ /* packets from the same queue are from the same interface */
+ src_idx = odp_packet_input_index(pkt_tbl[0]);
+ ODPH_ASSERT(src_idx >= 0);
+ dst_idx = gbl_args->dst_port_from_idx[src_idx];
+ fill_eth_addrs(pkt_tbl, pkts, dst_idx);
+
+ if (odp_unlikely(use_event_queue)) {
+ odp_event_t event = odp_packet_vector_to_event(pkt_vec);
+
+ sent = odp_queue_enq(tx_queue[dst_idx], event);
+ sent = odp_likely(sent == 0) ? pkts : 0;
+ } else {
+ sent = odp_pktout_send(pktout[dst_idx], pkt_tbl, pkts);
+ sent = odp_unlikely(sent < 0) ? 0 : sent;
+ }
+
+ tx_drops = pkts - sent;
+ if (odp_unlikely(tx_drops)) {
+ int j;
+
+ stats->s.tx_drops += tx_drops;
+ /* Drop rejected packets */
+ for (j = sent; j < pkts; j++)
+ odp_packet_free(pkt_tbl[j]);
+ }
+
+ /* Free packet vector if sending failed or in direct mode. */
+ if (tx_drops || !use_event_queue)
+ odp_packet_vector_free(pkt_vec);
+
+ stats->s.packets += pkts;
+ }
+ }
+
+ /*
+ * Free prefetched packets before entering the thread barrier.
+ * Such packets can block sending of later packets in other threads
+ * that then would never enter the thread barrier and we would
+ * end up in a dead-lock.
+ */
+ odp_schedule_pause();
+ while (1) {
+ odp_event_t ev;
+
+ ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+ if (ev == ODP_EVENT_INVALID)
+ break;
+
+ odp_event_free(ev);
+ }
+
+ /* Make sure that latest stat writes are visible to other threads */
+ odp_mb_full();
+
+ /* Wait until pktio devices are stopped */
+ odp_barrier_wait(&gbl_args->term_barrier);
+
+ /* Free remaining events in queues */
+ odp_schedule_resume();
+ while (1) {
+ odp_event_t ev;
+
+ ev = odp_schedule(NULL,
+ odp_schedule_wait_time(ODP_TIME_SEC_IN_NS));
+
+ if (ev == ODP_EVENT_INVALID)
+ break;
+
+ odp_event_free(ev);
+ }
+
+ return 0;
+}
+
+/*
* Packet IO worker thread using scheduled queues
*
* arg thread arguments of type 'thread_args_t *'
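
As a compact reference for the vector receive path above, a stripped-down sketch of handling one packet vector event (forwarding and statistics are left out; the function name is illustrative):

#include <odp_api.h>

static void drain_one_vector(void)
{
	odp_event_t ev = odp_schedule(NULL, ODP_SCHED_WAIT);
	odp_packet_vector_t pktv;
	odp_packet_t *pkt_tbl;
	uint32_t num;

	if (odp_event_type(ev) != ODP_EVENT_PACKET_VECTOR) {
		odp_event_free(ev);
		return;
	}

	pktv = odp_packet_vector_from_event(ev);
	num = odp_packet_vector_tbl(pktv, &pkt_tbl);

	/* ... process pkt_tbl[0..num-1] ..., here they are simply dropped */
	odp_packet_free_multi(pkt_tbl, (int)num);

	/* The vector only references the packets and is freed separately */
	odp_packet_vector_free(pktv);
}
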
@@ -721,6 +913,57 @@ static int run_worker_direct_mode(void *arg)
return 0;
}
+static int set_pktin_vector_params(odp_pktin_queue_param_t *pktin_param, odp_pool_t vec_pool,
+ odp_pktio_capability_t pktio_capa)
+{
+ uint64_t vec_tmo_ns;
+ uint32_t vec_size;
+
+ pktin_param->vector.enable = true;
+ pktin_param->vector.pool = vec_pool;
+
+ if (gbl_args->appl.vec_size == 0)
+ vec_size = DEFAULT_VEC_SIZE;
+ else
+ vec_size = gbl_args->appl.vec_size;
+
+ if (vec_size > pktio_capa.vector.max_size ||
+ vec_size < pktio_capa.vector.min_size) {
+ if (gbl_args->appl.vec_size == 0) {
+ vec_size = (vec_size > pktio_capa.vector.max_size) ?
+ pktio_capa.vector.max_size : pktio_capa.vector.min_size;
+ printf("\nWarning: Modified vector size to %u\n\n", vec_size);
+ } else {
+ ODPH_ERR("Error: Invalid pktio vector size %u, valid range [%u, %u]\n",
+ vec_size, pktio_capa.vector.min_size, pktio_capa.vector.max_size);
+ return -1;
+ }
+ }
+ pktin_param->vector.max_size = vec_size;
+
+ if (gbl_args->appl.vec_tmo_ns == 0)
+ vec_tmo_ns = DEFAULT_VEC_TMO;
+ else
+ vec_tmo_ns = gbl_args->appl.vec_tmo_ns;
+
+ if (vec_tmo_ns > pktio_capa.vector.max_tmo_ns ||
+ vec_tmo_ns < pktio_capa.vector.min_tmo_ns) {
+ if (gbl_args->appl.vec_tmo_ns == 0) {
+ vec_tmo_ns = (vec_tmo_ns > pktio_capa.vector.max_tmo_ns) ?
+ pktio_capa.vector.max_tmo_ns : pktio_capa.vector.min_tmo_ns;
+ printf("\nWarning: Modified vector timeout to %" PRIu64 "\n\n", vec_tmo_ns);
+ } else {
+ ODPH_ERR("Error: Invalid vector timeout %" PRIu64 ", valid range [%" PRIu64
+ ", %" PRIu64 "]\n", vec_tmo_ns,
+ pktio_capa.vector.min_tmo_ns, pktio_capa.vector.max_tmo_ns);
+ return -1;
+ }
+ }
+ pktin_param->vector.max_tmo_ns = vec_tmo_ns;
+
+ return 0;
+}
+
/*
* Create a pktio handle, optionally associating a default input queue.
*
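
After the clamping above, the net effect on the input queue configuration is small; a hedged sketch of the fields involved (handles and sizes are illustrative):

#include <odp_api.h>

/* Enable packet vector aggregation on packet input */
static int enable_pktin_vectors(odp_pktio_t pktio, odp_pool_t vec_pool)
{
	odp_pktin_queue_param_t pktin_param;

	odp_pktin_queue_param_init(&pktin_param);
	pktin_param.vector.enable = true;
	pktin_param.vector.pool = vec_pool;          /* ODP_POOL_VECTOR pool */
	pktin_param.vector.max_size = 32;            /* packets per vector */
	pktin_param.vector.max_tmo_ns = ODP_TIME_MSEC_IN_NS;

	return odp_pktin_queue_config(pktio, &pktin_param);
}
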
@@ -730,8 +973,8 @@ static int run_worker_direct_mode(void *arg)
*
* Returns 0 on success, -1 on failure
*/
-static int create_pktio(const char *dev, int idx, int num_rx, int num_tx,
- odp_pool_t pool, odp_schedule_group_t group)
+static int create_pktio(const char *dev, int idx, int num_rx, int num_tx, odp_pool_t pool,
+ odp_pool_t vec_pool, odp_schedule_group_t group)
{
odp_pktio_t pktio;
odp_pktio_param_t pktio_param;
@@ -852,6 +1095,15 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx,
pktout_param.op_mode = mode_tx;
pktout_param.num_queues = num_tx;
+ if (gbl_args->appl.vector_mode) {
+ if (!pktio_capa.vector.supported) {
+ ODPH_ERR("Error: packet vector input not supported %s\n", dev);
+ return -1;
+ }
+ if (set_pktin_vector_params(&pktin_param, vec_pool, pktio_capa))
+ return -1;
+ }
+
if (odp_pktin_queue_config(pktio, &pktin_param)) {
ODPH_ERR("Error: input queue config failed %s\n", dev);
return -1;
@@ -1243,14 +1495,22 @@ static void usage(char *progname)
" -p, --packet_copy 0: Don't copy packet (default)\n"
" 1: Create and send copy of the received packet.\n"
" Free the original packet.\n"
- " -y, --pool_per_if 0: Share a single pool between all interfaces (default)\n"
+ " -y, --pool_per_if Create a packet (and packet vector) pool per interface.\n"
+ " 0: Share a single pool between all interfaces (default)\n"
" 1: Create a pool per interface\n"
" -n, --num_pkt <num> Number of packets per pool. Default is 16k or\n"
" the maximum capability. Use 0 for the default.\n"
+ " -u, --vector_mode Enable vector mode.\n"
+ " Supported only with scheduler packet input modes (1-3).\n"
+ " -w, --num_vec <num> Number of vectors per pool.\n"
+ " Default is num_pkts divided by vec_size.\n"
+ " -x, --vec_size <num> Vector size (default %i).\n"
+ " -z, --vec_tmo_ns <ns> Vector timeout in ns (default %llu ns).\n"
" -P, --promisc_mode Enable promiscuous mode.\n"
" -v, --verbose Verbose output.\n"
" -h, --help Display help and exit.\n\n"
- "\n", NO_PATH(progname), NO_PATH(progname), MAX_PKTIOS
+ "\n", NO_PATH(progname), NO_PATH(progname), MAX_PKTIOS, DEFAULT_VEC_SIZE,
+ DEFAULT_VEC_TMO
);
}
@@ -1286,13 +1546,17 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
{"packet_copy", required_argument, NULL, 'p'},
{"pool_per_if", required_argument, NULL, 'y'},
{"num_pkt", required_argument, NULL, 'n'},
+ {"num_vec", required_argument, NULL, 'w'},
+ {"vec_size", required_argument, NULL, 'x'},
+ {"vec_tmo_ns", required_argument, NULL, 'z'},
+ {"vector_mode", no_argument, NULL, 'u'},
{"promisc_mode", no_argument, NULL, 'P'},
{"verbose", no_argument, NULL, 'v'},
{"help", no_argument, NULL, 'h'},
{NULL, 0, NULL, 0}
};
- static const char *shortopts = "+c:t:a:i:m:o:r:d:s:e:k:g:b:p:y:n:Pvh";
+ static const char *shortopts = "+c:t:a:i:m:o:r:d:s:e:k:g:b:p:y:n:w:x:z:uPvh";
appl_args->time = 0; /* loop forever if time to run is 0 */
appl_args->accuracy = 1; /* get and print pps stats second */
@@ -1308,6 +1572,10 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
appl_args->pool_per_if = 0;
appl_args->num_pkt = 0;
appl_args->promisc_mode = 0;
+ appl_args->vector_mode = 0;
+ appl_args->num_vec = 0;
+ appl_args->vec_size = 0;
+ appl_args->vec_tmo_ns = 0;
while (1) {
opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
@@ -1451,6 +1719,18 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
case 'P':
appl_args->promisc_mode = 1;
break;
+ case 'u':
+ appl_args->vector_mode = 1;
+ break;
+ case 'w':
+ appl_args->num_vec = atoi(optarg);
+ break;
+ case 'x':
+ appl_args->vec_size = atoi(optarg);
+ break;
+ case 'z':
+ appl_args->vec_tmo_ns = atoi(optarg);
+ break;
case 'v':
appl_args->verbose = 1;
break;
@@ -1574,6 +1854,54 @@ static void create_groups(int num, odp_schedule_group_t *group)
}
}
+static int set_vector_pool_params(odp_pool_param_t *params, odp_pool_capability_t pool_capa)
+{
+ uint32_t num_vec, vec_size;
+
+ if (gbl_args->appl.vec_size == 0)
+ vec_size = DEFAULT_VEC_SIZE;
+ else
+ vec_size = gbl_args->appl.vec_size;
+
+ ODPH_ASSERT(pool_capa.vector.max_size > 0);
+ if (vec_size > pool_capa.vector.max_size) {
+ if (gbl_args->appl.vec_size == 0) {
+ vec_size = pool_capa.vector.max_size;
+ printf("\nWarning: Vector size reduced to %u\n\n", vec_size);
+ } else {
+ ODPH_ERR("Error: Vector size too big %u. Maximum is %u.\n",
+ vec_size, pool_capa.vector.max_size);
+ return -1;
+ }
+ }
+
+ if (gbl_args->appl.num_vec == 0) {
+ uint32_t num_pkt = gbl_args->appl.num_pkt ?
+ gbl_args->appl.num_pkt : DEFAULT_NUM_PKT;
+
+ num_vec = (num_pkt + vec_size - 1) / vec_size;
+ } else {
+ num_vec = gbl_args->appl.num_vec;
+ }
+
+ if (pool_capa.vector.max_num && num_vec > pool_capa.vector.max_num) {
+ if (gbl_args->appl.num_vec == 0) {
+ num_vec = pool_capa.vector.max_num;
+ printf("\nWarning: number of vectors reduced to %u\n\n", num_vec);
+ } else {
+ ODPH_ERR("Error: Too many vectors (%u) per pool. Maximum is %u.\n",
+ num_vec, pool_capa.vector.max_num);
+ return -1;
+ }
+ }
+
+ params->vector.num = num_vec;
+ params->vector.max_size = vec_size;
+ params->type = ODP_POOL_VECTOR;
+
+ return 0;
+}
+
/*
* L2 forwarding main function
*/
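
These parameters go into a pool of the new ODP_POOL_VECTOR type; a minimal creation sketch (the capability clamping performed above is skipped and the sizes are only illustrative):

#include <odp_api.h>

/* Vector pool: holds vector headers, not packet data */
static odp_pool_t create_vector_pool(void)
{
	odp_pool_param_t param;

	odp_pool_param_init(&param);
	param.type = ODP_POOL_VECTOR;
	param.vector.num = 2048;      /* number of vectors in the pool */
	param.vector.max_size = 32;   /* max packets carried by one vector */

	return odp_pool_create("vector pool", &param);
}
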
@@ -1591,13 +1919,13 @@ int main(int argc, char *argv[])
odp_pool_param_t params;
int ret;
stats_t *stats[MAX_WORKERS];
- int if_count, num_pools;
+ int if_count, num_pools, num_vec_pools;
int (*thr_run_func)(void *);
odp_instance_t instance;
int num_groups;
odp_schedule_group_t group[MAX_PKTIOS];
- odp_pool_t pool_tbl[MAX_PKTIOS];
- odp_pool_t pool;
+ odp_pool_t pool_tbl[MAX_PKTIOS], vec_pool_tbl[MAX_PKTIOS];
+ odp_pool_t pool, vec_pool;
odp_init_t init;
odp_pool_capability_t pool_capa;
uint32_t pkt_len, num_pkt;
@@ -1755,6 +2083,41 @@ int main(int argc, char *argv[])
odp_pool_print(pool_tbl[i]);
}
+ /* Create vector pool */
+ num_vec_pools = 0;
+ if (gbl_args->appl.vector_mode) {
+ if (!sched_mode(gbl_args->appl.in_mode)) {
+ ODPH_ERR("Error: vector mode only supports scheduler pktin modes (1-3)\n");
+ return -1;
+ }
+
+ num_vec_pools = gbl_args->appl.pool_per_if ? if_count : 1;
+ if (num_vec_pools > (int)pool_capa.vector.max_pools) {
+ ODPH_ERR("Error: Too many vector pools %i\n", num_vec_pools);
+ return -1;
+ }
+
+ odp_pool_param_init(&params);
+ if (set_vector_pool_params(&params, pool_capa))
+ return -1;
+
+ printf("Vectors per pool: %u\n", params.vector.num);
+ printf("Vector size: %u\n", params.vector.max_size);
+ printf("\n\n");
+
+ for (i = 0; i < num_vec_pools; i++) {
+ vec_pool_tbl[i] = odp_pool_create("vector pool", &params);
+
+ if (vec_pool_tbl[i] == ODP_POOL_INVALID) {
+ ODPH_ERR("Error: vector pool create failed %i\n", i);
+ exit(EXIT_FAILURE);
+ }
+
+ if (gbl_args->appl.verbose)
+ odp_pool_print(vec_pool_tbl[i]);
+ }
+ }
+
if (odp_pktio_max_index() >= MAX_PKTIO_INDEXES)
ODPH_DBG("Warning: max pktio index (%u) is too large\n",
odp_pktio_max_index());
@@ -1772,6 +2135,7 @@ int main(int argc, char *argv[])
}
pool = pool_tbl[0];
+ vec_pool = vec_pool_tbl[0];
for (i = 0; i < if_count; ++i) {
const char *dev = gbl_args->appl.if_names[i];
@@ -1791,10 +2155,12 @@ int main(int argc, char *argv[])
/* Round robin pktios to groups */
grp = group[i % num_groups];
- if (gbl_args->appl.pool_per_if)
+ if (gbl_args->appl.pool_per_if) {
pool = pool_tbl[i];
+ vec_pool = vec_pool_tbl[i];
+ }
- if (create_pktio(dev, i, num_rx, num_tx, pool, grp))
+ if (create_pktio(dev, i, num_rx, num_tx, pool, vec_pool, grp))
exit(EXIT_FAILURE);
/* Save interface ethernet address */
@@ -1837,7 +2203,8 @@ int main(int argc, char *argv[])
else if (gbl_args->appl.in_mode == PLAIN_QUEUE)
thr_run_func = run_worker_plain_queue_mode;
else /* SCHED_PARALLEL / SCHED_ATOMIC / SCHED_ORDERED */
- thr_run_func = run_worker_sched_mode;
+ thr_run_func = gbl_args->appl.vector_mode ?
+ run_worker_sched_mode_vector : run_worker_sched_mode;
/* Create worker threads */
memset(thr_param, 0, sizeof(thr_param));
@@ -1927,6 +2294,13 @@ int main(int argc, char *argv[])
}
}
+ for (i = 0; i < num_vec_pools; i++) {
+ if (odp_pool_destroy(vec_pool_tbl[i])) {
+ ODPH_ERR("Error: vector pool destroy failed %i\n", i);
+ exit(EXIT_FAILURE);
+ }
+ }
+
if (odp_shm_free(shm)) {
ODPH_ERR("Error: shm free\n");
exit(EXIT_FAILURE);
diff --git a/test/performance/odp_sched_latency.c b/test/performance/odp_sched_latency.c
index 241b3828a..c6b659aac 100644
--- a/test/performance/odp_sched_latency.c
+++ b/test/performance/odp_sched_latency.c
@@ -367,12 +367,12 @@ static int test_schedule(int thr, test_globals_t *globals)
odp_queue_t src_queue;
odp_queue_t dst_queue;
uint64_t latency;
- uint32_t i;
+ uint64_t i;
test_event_t *event;
test_stat_t *stats;
int dst_idx, change_queue;
int warm_up_rounds = globals->args.warm_up_rounds;
- uint64_t test_rounds = globals->args.test_rounds * 1000000;
+ uint64_t test_rounds = globals->args.test_rounds * (uint64_t)1000000;
memset(&globals->core_stat[thr], 0, sizeof(core_stat_t));
globals->core_stat[thr].prio[HI_PRIO].min = UINT64_MAX;
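
Both this change and the ctx_size fix in the next odp_sched_perf.c hunk address the same pitfall: the multiplication is evaluated in 32-bit arithmetic and wraps before being assigned to the 64-bit variable. A small self-contained illustration (values are arbitrary):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t rounds = 10000;   /* e.g. "-r 10000" on the command line */

	/* Wraps: both operands are 32-bit, so the product is computed mod 2^32 */
	uint64_t wrong = rounds * 1000000;
	/* Correct: widening one operand makes the whole product 64-bit */
	uint64_t right = rounds * (uint64_t)1000000;

	printf("%" PRIu64 " vs %" PRIu64 "\n", wrong, right);
	return 0;
}
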
diff --git a/test/performance/odp_sched_perf.c b/test/performance/odp_sched_perf.c
index 29f6e4ac1..fa93aa18d 100644
--- a/test/performance/odp_sched_perf.c
+++ b/test/performance/odp_sched_perf.c
@@ -1117,7 +1117,7 @@ int main(int argc, char **argv)
odp_sys_info_print();
if (global->test_options.ctx_size) {
- uint64_t size = global->test_options.ctx_size *
+ uint64_t size = (uint64_t)global->test_options.ctx_size *
global->test_options.tot_queue;
global->ctx_shm = odp_shm_reserve("queue contexts", size,
diff --git a/test/performance/odp_scheduling_run.sh b/test/performance/odp_scheduling_run.sh
index 577922767..082dc4521 100755
--- a/test/performance/odp_scheduling_run.sh
+++ b/test/performance/odp_scheduling_run.sh
@@ -9,7 +9,6 @@
# launched by 'make check'
TEST_DIR="${TEST_DIR:-$(dirname $0)}"
-ret=0
ALL=0
run()
@@ -19,9 +18,10 @@ run()
$TEST_DIR/odp_scheduling${EXEEXT} -c $1
- if [ $? -ne 0 ]; then
+ RET_VAL=$?
+ if [ $RET_VAL -ne 0 ]; then
echo odp_scheduling FAILED
- exit $?
+ exit $RET_VAL
fi
}
diff --git a/test/performance/odp_timer_perf.c b/test/performance/odp_timer_perf.c
index 443c53a2b..be0fc363a 100644
--- a/test/performance/odp_timer_perf.c
+++ b/test/performance/odp_timer_perf.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2019, Nokia
+/* Copyright (c) 2019-2020, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -15,9 +15,11 @@
#include <odp_api.h>
#include <odp/helper/odph_api.h>
-#define MAX_TIMER_POOLS 32
-#define MAX_TIMERS 10000
-#define START_NS (100 * ODP_TIME_MSEC_IN_NS)
+#define MODE_SCHED_OVERH 0
+#define MODE_SET_CANCEL 1
+#define MAX_TIMER_POOLS 32
+#define MAX_TIMERS 10000
+#define START_NS (100 * ODP_TIME_MSEC_IN_NS)
typedef struct test_options_t {
uint32_t num_cpu;
@@ -26,6 +28,8 @@ typedef struct test_options_t {
uint64_t res_ns;
uint64_t period_ns;
int shared;
+ int mode;
+ uint64_t test_rounds;
} test_options_t;
@@ -42,28 +46,59 @@ typedef struct test_stat_t {
uint64_t nsec;
uint64_t cycles;
+ uint64_t cancels;
+ uint64_t sets;
+
time_stat_t before;
time_stat_t after;
} test_stat_t;
+typedef struct test_stat_sum_t {
+ uint64_t rounds;
+ uint64_t events;
+ uint64_t nsec;
+ uint64_t cycles;
+
+ uint64_t cancels;
+ uint64_t sets;
+
+ time_stat_t before;
+ time_stat_t after;
+
+ double time_ave;
+ uint32_t num;
+
+} test_stat_sum_t;
+
typedef struct thread_arg_t {
void *global;
+ int worker_idx;
} thread_arg_t;
typedef struct timer_ctx_t {
uint64_t target_ns;
+ uint32_t tp_idx;
+ uint32_t timer_idx;
int last;
} timer_ctx_t;
+typedef struct timer_pool_t {
+ odp_timer_pool_t tp;
+ uint64_t start_tick;
+ uint64_t period_tick;
+
+} timer_pool_t;
+
typedef struct test_global_t {
test_options_t test_options;
odp_atomic_u32_t exit_test;
+ odp_atomic_u32_t timers_started;
odp_barrier_t barrier;
odp_cpumask_t cpumask;
- odp_timer_pool_t tp[MAX_TIMER_POOLS];
+ timer_pool_t timer_pool[MAX_TIMER_POOLS];
odp_pool_t pool[MAX_TIMER_POOLS];
odp_queue_t queue[MAX_TIMER_POOLS];
odp_timer_t timer[MAX_TIMER_POOLS][MAX_TIMERS];
@@ -71,6 +106,7 @@ typedef struct test_global_t {
odph_thread_t thread_tbl[ODP_THREAD_COUNT_MAX];
test_stat_t stat[ODP_THREAD_COUNT_MAX];
thread_arg_t thread_arg[ODP_THREAD_COUNT_MAX];
+ test_stat_sum_t stat_sum;
} test_global_t;
@@ -92,6 +128,11 @@ static void print_usage(void)
" tested only with single CPU. Default: 1\n"
" 0: Private timer pools\n"
" 1: Shared timer pools\n"
+ " -m, --mode Select test mode. Default: 0\n"
+ " 0: Measure odp_schedule() overhead when using timers\n"
+ " 1: Measure timer set + cancel performance\n"
+ " -R, --rounds Number of test rounds in timer set + cancel test.\n"
+ " Default: 100000\n"
" -h, --help This help\n"
"\n");
}
@@ -109,11 +150,13 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
{"res_ns", required_argument, NULL, 'r'},
{"period_ns", required_argument, NULL, 'p'},
{"shared", required_argument, NULL, 's'},
+ {"mode", required_argument, NULL, 'm'},
+ {"rounds", required_argument, NULL, 'R'},
{"help", no_argument, NULL, 'h'},
{NULL, 0, NULL, 0}
};
- static const char *shortopts = "+c:n:t:r:p:s:h";
+ static const char *shortopts = "+c:n:t:r:p:s:m:R:h";
test_options->num_cpu = 1;
test_options->num_tp = 1;
@@ -121,6 +164,8 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
test_options->res_ns = 10 * ODP_TIME_MSEC_IN_NS;
test_options->period_ns = 100 * ODP_TIME_MSEC_IN_NS;
test_options->shared = 1;
+ test_options->mode = 0;
+ test_options->test_rounds = 100000;
while (1) {
opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
@@ -147,6 +192,12 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
case 's':
test_options->shared = atoi(optarg);
break;
+ case 'm':
+ test_options->mode = atoi(optarg);
+ break;
+ case 'R':
+ test_options->test_rounds = atoll(optarg);
+ break;
case 'h':
/* fall through */
default:
@@ -157,7 +208,7 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
}
if (test_options->num_timer > MAX_TIMERS) {
- printf("Error: too many timers. Max %u\n", MAX_TIMERS);
+ ODPH_ERR("Too many timers. Max %u\n", MAX_TIMERS);
ret = -1;
}
@@ -173,20 +224,19 @@ static int set_num_cpu(test_global_t *global)
/* One thread used for the main thread */
if (num_cpu > ODP_THREAD_COUNT_MAX - 1) {
- printf("Error: Too many workers. Maximum is %i.\n",
- ODP_THREAD_COUNT_MAX - 1);
+ ODPH_ERR("Too many workers. Maximum is %i.\n", ODP_THREAD_COUNT_MAX - 1);
return -1;
}
ret = odp_cpumask_default_worker(&global->cpumask, num_cpu);
if (num_cpu && ret != num_cpu) {
- printf("Error: Too many workers. Max supported %i\n.", ret);
+ ODPH_ERR("Too many workers. Max supported %i\n.", ret);
return -1;
}
if (shared == 0 && num_cpu != 1) {
- printf("Error: Private pool test supports only single CPU\n.");
+ ODPH_ERR("Private pool test supports only single CPU\n.");
return -1;
}
@@ -207,44 +257,49 @@ static int set_num_cpu(test_global_t *global)
static int create_timer_pools(test_global_t *global)
{
odp_timer_capability_t timer_capa;
+ odp_timer_res_capability_t timer_res_capa;
odp_timer_pool_param_t timer_pool_param;
odp_timer_pool_t tp;
odp_queue_param_t queue_param;
odp_queue_t queue;
odp_pool_param_t pool_param;
odp_pool_t pool;
- uint64_t max_tmo_ns;
+ uint64_t max_tmo_ns, min_tmo_ns;
uint32_t i, j;
uint32_t max_timers;
+ int priv;
test_options_t *test_options = &global->test_options;
uint32_t num_cpu = test_options->num_cpu;
uint32_t num_tp = test_options->num_tp;
uint32_t num_timer = test_options->num_timer;
uint64_t res_ns = test_options->res_ns;
uint64_t period_ns = test_options->period_ns;
- int priv;
+ int mode = test_options->mode;
+ char tp_name[] = "timer_pool_00";
max_tmo_ns = START_NS + (num_timer * period_ns);
+ min_tmo_ns = START_NS / 2;
priv = 0;
if (test_options->shared == 0)
priv = 1;
printf("\nTimer performance test\n");
+ printf(" mode %i\n", mode);
printf(" num cpu %u\n", num_cpu);
printf(" private pool %i\n", priv);
printf(" num timer pool %u\n", num_tp);
printf(" num timer %u\n", num_timer);
printf(" resolution %" PRIu64 " nsec\n", res_ns);
printf(" period %" PRIu64 " nsec\n", period_ns);
- printf(" first timer at %.2f sec\n", (double)START_NS /
- ODP_TIME_SEC_IN_NS);
- printf(" test duration %.2f sec\n", (double)max_tmo_ns /
- ODP_TIME_SEC_IN_NS);
- printf("\n");
+ printf(" first timer at %.2f sec\n", (double)START_NS / ODP_TIME_SEC_IN_NS);
+ if (mode == MODE_SCHED_OVERH)
+ printf(" test duration %.2f sec\n", (double)max_tmo_ns / ODP_TIME_SEC_IN_NS);
+ else
+ printf(" test rounds %" PRIu64 "\n", test_options->test_rounds);
for (i = 0; i < MAX_TIMER_POOLS; i++) {
- global->tp[i] = ODP_TIMER_POOL_INVALID;
+ global->timer_pool[i].tp = ODP_TIMER_POOL_INVALID;
global->pool[i] = ODP_POOL_INVALID;
global->queue[i] = ODP_QUEUE_INVALID;
@@ -253,41 +308,47 @@ static int create_timer_pools(test_global_t *global)
}
if (odp_timer_capability(ODP_CLOCK_CPU, &timer_capa)) {
- printf("Error: timer capability failed\n");
+ ODPH_ERR("Timer capability failed\n");
+ return -1;
+ }
+
+ memset(&timer_res_capa, 0, sizeof(odp_timer_res_capability_t));
+ timer_res_capa.res_ns = res_ns;
+ if (odp_timer_res_capability(ODP_CLOCK_CPU, &timer_res_capa)) {
+ ODPH_ERR("Timer resolution capability failed\n");
return -1;
}
if (res_ns < timer_capa.max_res.res_ns) {
- printf("Error: too high resolution\n");
+ ODPH_ERR("Too high resolution\n");
return -1;
}
- if (START_NS < timer_capa.max_res.min_tmo) {
- printf("Error: too short min timeout\n");
+ if (min_tmo_ns < timer_res_capa.min_tmo) {
+ ODPH_ERR("Too short min timeout\n");
return -1;
}
- if (max_tmo_ns > timer_capa.max_res.max_tmo) {
- printf("Error: too long max timeout\n");
+ if (max_tmo_ns > timer_res_capa.max_tmo) {
+ ODPH_ERR("Too long max timeout\n");
return -1;
}
max_timers = timer_capa.max_timers;
if (max_timers && num_timer > max_timers) {
- printf("Error: too many timers (max %u)\n", max_timers);
+ ODPH_ERR("Too many timers (max %u)\n", max_timers);
return -1;
}
if (num_tp > timer_capa.max_pools) {
- printf("Error: too many timer pools (max %u)\n",
- timer_capa.max_pools);
+ ODPH_ERR("Too many timer pools (max %u)\n", timer_capa.max_pools);
return -1;
}
memset(&timer_pool_param, 0, sizeof(odp_timer_pool_param_t));
timer_pool_param.res_ns = res_ns;
- timer_pool_param.min_tmo = START_NS;
+ timer_pool_param.min_tmo = min_tmo_ns;
timer_pool_param.max_tmo = max_tmo_ns;
timer_pool_param.num_timers = num_timer;
timer_pool_param.priv = priv;
@@ -304,30 +365,43 @@ static int create_timer_pools(test_global_t *global)
queue_param.sched.group = ODP_SCHED_GROUP_ALL;
for (i = 0; i < num_tp; i++) {
- tp = odp_timer_pool_create(NULL, &timer_pool_param);
- global->tp[i] = tp;
+ if (num_tp < 100) {
+ tp_name[11] = '0' + i / 10;
+ tp_name[12] = '0' + i % 10;
+ }
+
+ tp = odp_timer_pool_create(tp_name, &timer_pool_param);
+ global->timer_pool[i].tp = tp;
if (tp == ODP_TIMER_POOL_INVALID) {
- printf("Error: timer pool create failed (%u)\n", i);
+ ODPH_ERR("Timer pool create failed (%u)\n", i);
return -1;
}
- pool = odp_pool_create(NULL, &pool_param);
+ pool = odp_pool_create(tp_name, &pool_param);
global->pool[i] = pool;
if (pool == ODP_POOL_INVALID) {
- printf("Error: pool create failed (%u)\n", i);
+ ODPH_ERR("Pool create failed (%u)\n", i);
return -1;
}
- queue = odp_queue_create(NULL, &queue_param);
+ queue = odp_queue_create(tp_name, &queue_param);
global->queue[i] = queue;
if (queue == ODP_QUEUE_INVALID) {
- printf("Error: queue create failed (%u)\n", i);
+ ODPH_ERR("Queue create failed (%u)\n", i);
return -1;
}
+
+ global->timer_pool[i].period_tick = odp_timer_ns_to_tick(tp,
+ test_options->period_ns);
+ global->timer_pool[i].start_tick = odp_timer_ns_to_tick(tp, START_NS);
}
odp_timer_pool_start();
+ printf(" start %" PRIu64 " tick\n", global->timer_pool[0].start_tick);
+ printf(" period %" PRIu64 " ticks\n", global->timer_pool[0].period_tick);
+ printf("\n");
+
return 0;
}
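
The checks above now use odp_timer_res_capability(), which reports the supported timeout range for a specific resolution instead of only the global maximums. A minimal sketch of the call pattern (the helper name is illustrative):

#include <odp_api.h>
#include <string.h>

/* Verify that [min_ns, max_ns] timeouts are supported at resolution res_ns */
static int check_timer_range(uint64_t res_ns, uint64_t min_ns, uint64_t max_ns)
{
	odp_timer_res_capability_t res_capa;

	memset(&res_capa, 0, sizeof(res_capa));
	res_capa.res_ns = res_ns;

	if (odp_timer_res_capability(ODP_CLOCK_CPU, &res_capa))
		return -1;

	if (min_ns < res_capa.min_tmo || max_ns > res_capa.max_tmo)
		return -1;   /* requested range not supported at this resolution */

	return 0;
}
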
@@ -350,7 +424,7 @@ static int set_timers(test_global_t *global)
max_tmo_ns = START_NS + (num_timer * period_ns);
for (i = 0; i < num_tp; i++) {
- tp = global->tp[i];
+ tp = global->timer_pool[i].tp;
pool = global->pool[i];
queue = global->queue[i];
@@ -371,6 +445,8 @@ static int set_timers(test_global_t *global)
ctx->last = 1;
ctx->target_ns = time_ns + nsec;
+ ctx->tp_idx = i;
+ ctx->timer_idx = j;
timeout = odp_timeout_alloc(pool);
ev = odp_timeout_to_event(timeout);
@@ -385,14 +461,13 @@ static int set_timers(test_global_t *global)
&ev);
if (status != ODP_TIMER_SUCCESS) {
- printf("Error: Timer set %i/%i (ret %i)\n",
- i, j, status);
+ ODPH_ERR("Timer set %i/%i (ret %i)\n", i, j, status);
return -1;
}
}
if (odp_timer_pool_info(tp, &timer_pool_info)) {
- printf("Error: timer pool info failed\n");
+ ODPH_ERR("Timer pool info failed\n");
return -1;
}
@@ -427,7 +502,8 @@ static int destroy_timer_pool(test_global_t *global)
ev = odp_timer_free(timer);
if (ev != ODP_EVENT_INVALID) {
- printf("Event from timer free %i/%i\n", i, j);
+ if (test_options->mode == MODE_SCHED_OVERH)
+ printf("Event from timer free %i/%i\n", i, j);
odp_event_free(ev);
}
}
@@ -440,7 +516,7 @@ static int destroy_timer_pool(test_global_t *global)
if (pool != ODP_POOL_INVALID)
odp_pool_destroy(pool);
- tp = global->tp[i];
+ tp = global->timer_pool[i].tp;
if (tp != ODP_TIMER_POOL_INVALID)
odp_timer_pool_destroy(tp);
}
@@ -448,7 +524,7 @@ static int destroy_timer_pool(test_global_t *global)
return 0;
}
-static int test_worker(void *arg)
+static int sched_mode_worker(void *arg)
{
int thr;
uint32_t exit_test;
@@ -545,6 +621,171 @@ static int test_worker(void *arg)
return ret;
}
+static void cancel_timers(test_global_t *global, uint32_t worker_idx)
+{
+ uint32_t i, j;
+ odp_timer_t timer;
+ odp_event_t ev;
+ test_options_t *test_options = &global->test_options;
+ uint32_t num_tp = test_options->num_tp;
+ uint32_t num_timer = test_options->num_timer;
+ uint32_t num_worker = test_options->num_cpu;
+
+ for (i = 0; i < num_tp; i++) {
+ for (j = 0; j < num_timer; j++) {
+ if ((j % num_worker) != worker_idx)
+ continue;
+
+ timer = global->timer[i][j];
+ if (timer == ODP_TIMER_INVALID)
+ continue;
+
+ if (odp_timer_cancel(timer, &ev) == 0)
+ odp_event_free(ev);
+ }
+ }
+}
+
+static int set_cancel_mode_worker(void *arg)
+{
+ uint64_t tick, start_tick, period_tick, nsec;
+ uint64_t c1, c2, diff;
+ int thr, status;
+ uint32_t i, j, worker_idx;
+ odp_event_t ev;
+ odp_time_t t1, t2;
+ odp_timer_t timer;
+ odp_timer_pool_t tp;
+ odp_timeout_t tmo;
+ thread_arg_t *thread_arg = arg;
+ test_global_t *global = thread_arg->global;
+ test_options_t *test_options = &global->test_options;
+ uint32_t num_tp = test_options->num_tp;
+ uint32_t num_timer = test_options->num_timer;
+ uint32_t num_worker = test_options->num_cpu;
+ int ret = 0;
+ int started = 0;
+ uint64_t test_rounds = test_options->test_rounds;
+ uint64_t num_tmo = 0;
+ uint64_t num_cancel = 0;
+ uint64_t num_set = 0;
+
+ thr = odp_thread_id();
+ worker_idx = thread_arg->worker_idx;
+ t1 = ODP_TIME_NULL;
+ c1 = 0;
+
+ /* Start all workers at the same time */
+ odp_barrier_wait(&global->barrier);
+
+ while (1) {
+ ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+
+ if (odp_unlikely(ev != ODP_EVENT_INVALID)) {
+ /* Timeout, set timer again. When start_tick is large enough, this should
+ * not happen. */
+ timer_ctx_t *ctx;
+
+ tmo = odp_timeout_from_event(ev);
+ ctx = odp_timeout_user_ptr(tmo);
+ i = ctx->tp_idx;
+ j = ctx->timer_idx;
+ timer = global->timer[i][j];
+ start_tick = global->timer_pool[i].start_tick;
+ period_tick = global->timer_pool[i].period_tick;
+ tick = start_tick + j * period_tick;
+
+ status = odp_timer_set_rel(timer, tick, &ev);
+ num_tmo++;
+ num_set++;
+
+ if (status != ODP_TIMER_SUCCESS) {
+ ODPH_ERR("Timer set (tmo) failed (ret %i)\n", status);
+ ret = -1;
+ break;
+ }
+
+ continue;
+ }
+
+ if (odp_unlikely(odp_atomic_load_u32(&global->exit_test)))
+ break;
+
+ if (odp_unlikely(started == 0)) {
+ /* Run schedule loop while waiting for timers to be created */
+ if (odp_atomic_load_acq_u32(&global->timers_started) == 0)
+ continue;
+
+ /* Start measurements */
+ started = 1;
+ t1 = odp_time_local();
+ c1 = odp_cpu_cycles();
+ }
+
+ /* Cancel and set timers again */
+ for (i = 0; i < num_tp; i++) {
+ tp = global->timer_pool[i].tp;
+ if (tp == ODP_TIMER_POOL_INVALID)
+ continue;
+
+ start_tick = global->timer_pool[i].start_tick;
+ period_tick = global->timer_pool[i].period_tick;
+
+ tick = odp_timer_current_tick(tp) + start_tick;
+
+ for (j = 0; j < num_timer; j++) {
+ if ((j % num_worker) != worker_idx)
+ continue;
+
+ timer = global->timer[i][j];
+ if (timer == ODP_TIMER_INVALID)
+ continue;
+
+ status = odp_timer_cancel(timer, &ev);
+ num_cancel++;
+
+ if (status < 0)
+ continue;
+
+ status = odp_timer_set_abs(timer, tick + j * period_tick, &ev);
+ num_set++;
+
+ if (status != ODP_TIMER_SUCCESS) {
+ ODPH_ERR("Timer (%u/%u) set failed (ret %i)\n", i, j,
+ status);
+ ret = -1;
+ break;
+ }
+ }
+ }
+
+ if (test_rounds) {
+ test_rounds--;
+ if (test_rounds == 0)
+ break;
+ }
+ }
+
+ t2 = odp_time_local();
+ c2 = odp_cpu_cycles();
+ nsec = odp_time_diff_ns(t2, t1);
+ diff = odp_cpu_cycles_diff(c2, c1);
+
+ /* Cancel all timers that belong to this thread */
+ cancel_timers(global, worker_idx);
+
+ /* Update stats */
+ global->stat[thr].events = num_tmo;
+ global->stat[thr].rounds = test_options->test_rounds - test_rounds;
+ global->stat[thr].nsec = nsec;
+ global->stat[thr].cycles = diff;
+
+ global->stat[thr].cancels = num_cancel;
+ global->stat[thr].sets = num_set;
+
+ return ret;
+}
+
static int start_workers(test_global_t *global, odp_instance_t instance)
{
odph_thread_common_param_t thr_common;
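
The operation that the new set + cancel mode times for each timer is essentially the pair below; a hedged sketch assuming the timer and its timeout event were armed earlier, as in set_timers() (the helper name is illustrative):

#include <odp_api.h>

/* Cancel an armed timer and immediately re-arm it at a new absolute tick */
static int cancel_and_set(odp_timer_pool_t tp, odp_timer_t timer,
			  uint64_t offset_tick)
{
	odp_event_t ev;
	uint64_t tick = odp_timer_current_tick(tp) + offset_tick;

	if (odp_timer_cancel(timer, &ev) < 0)
		return -1;   /* too late to cancel, the timeout is already queued */

	if (odp_timer_set_abs(timer, tick, &ev) != ODP_TIMER_SUCCESS) {
		odp_event_free(ev);
		return -1;
	}

	return 0;
}
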
@@ -561,7 +802,11 @@ static int start_workers(test_global_t *global, odp_instance_t instance)
thr_common.cpumask = &global->cpumask;
for (i = 0; i < num_cpu; i++) {
- thr_param[i].start = test_worker;
+ if (test_options->mode == MODE_SCHED_OVERH)
+ thr_param[i].start = sched_mode_worker;
+ else
+ thr_param[i].start = set_cancel_mode_worker;
+
thr_param[i].arg = &global->thread_arg[i];
thr_param[i].thr_type = ODP_THREAD_WORKER;
}
@@ -570,30 +815,57 @@ static int start_workers(test_global_t *global, odp_instance_t instance)
num_cpu);
if (ret != num_cpu) {
- printf("Error: thread create failed %i\n", ret);
+ ODPH_ERR("Thread create failed %i\n", ret);
return -1;
}
return 0;
}
-static void print_stat(test_global_t *global)
+static void sum_stat(test_global_t *global)
{
int i;
- time_stat_t before, after;
- double time_ave = 0.0;
+ test_stat_sum_t *sum = &global->stat_sum;
+
+ memset(sum, 0, sizeof(test_stat_sum_t));
+
+ for (i = 0; i < ODP_THREAD_COUNT_MAX; i++) {
+ if (global->stat[i].rounds == 0)
+ continue;
+
+ sum->num++;
+ sum->events += global->stat[i].events;
+ sum->rounds += global->stat[i].rounds;
+ sum->cycles += global->stat[i].cycles;
+ sum->nsec += global->stat[i].nsec;
+ sum->cancels += global->stat[i].cancels;
+ sum->sets += global->stat[i].sets;
+
+ sum->before.num += global->stat[i].before.num;
+ sum->before.sum_ns += global->stat[i].before.sum_ns;
+ sum->after.num += global->stat[i].after.num;
+ sum->after.sum_ns += global->stat[i].after.sum_ns;
+
+ if (global->stat[i].before.max_ns > sum->before.max_ns)
+ sum->before.max_ns = global->stat[i].before.max_ns;
+
+ if (global->stat[i].after.max_ns > sum->after.max_ns)
+ sum->after.max_ns = global->stat[i].after.max_ns;
+ }
+
+ if (sum->num)
+ sum->time_ave = ((double)sum->nsec / sum->num) / ODP_TIME_SEC_IN_NS;
+}
+
+static void print_stat_sched_mode(test_global_t *global)
+{
+ int i;
+ test_stat_sum_t *sum = &global->stat_sum;
double round_ave = 0.0;
double before_ave = 0.0;
double after_ave = 0.0;
- uint64_t time_sum = 0;
- uint64_t event_sum = 0;
- uint64_t round_sum = 0;
- uint64_t cycle_sum = 0;
int num = 0;
- memset(&before, 0, sizeof(time_stat_t));
- memset(&after, 0, sizeof(time_stat_t));
-
printf("\n");
printf("RESULTS - schedule() cycles per thread:\n");
printf("----------------------------------------------\n");
@@ -604,53 +876,69 @@ static void print_stat(test_global_t *global)
if ((num % 10) == 0)
printf("\n ");
- printf("%6.1f ", (double)global->stat[i].cycles /
- global->stat[i].rounds);
+ printf("%6.1f ", (double)global->stat[i].cycles / global->stat[i].rounds);
num++;
}
}
printf("\n\n");
- for (i = 0; i < ODP_THREAD_COUNT_MAX; i++) {
- event_sum += global->stat[i].events;
- round_sum += global->stat[i].rounds;
- cycle_sum += global->stat[i].cycles;
- time_sum += global->stat[i].nsec;
- before.num += global->stat[i].before.num;
- before.sum_ns += global->stat[i].before.sum_ns;
- after.num += global->stat[i].after.num;
- after.sum_ns += global->stat[i].after.sum_ns;
+ if (sum->num)
+ round_ave = (double)sum->rounds / sum->num;
- if (global->stat[i].before.max_ns > before.max_ns)
- before.max_ns = global->stat[i].before.max_ns;
+ if (sum->before.num)
+ before_ave = (double)sum->before.sum_ns / sum->before.num;
- if (global->stat[i].after.max_ns > after.max_ns)
- after.max_ns = global->stat[i].after.max_ns;
- }
+ if (sum->after.num)
+ after_ave = (double)sum->after.sum_ns / sum->after.num;
- if (num) {
- time_ave = ((double)time_sum / num) / ODP_TIME_SEC_IN_NS;
- round_ave = (double)round_sum / num;
- }
+ printf("TOTAL (%i workers)\n", sum->num);
+ printf(" events: %" PRIu64 "\n", sum->events);
+ printf(" ave time: %.2f sec\n", sum->time_ave);
+ printf(" ave rounds per sec: %.2fM\n", (round_ave / sum->time_ave) / 1000000.0);
+ printf(" num before: %" PRIu64 "\n", sum->before.num);
+ printf(" ave before: %.1f nsec\n", before_ave);
+ printf(" max before: %" PRIu64 " nsec\n", sum->before.max_ns);
+ printf(" num after: %" PRIu64 "\n", sum->after.num);
+ printf(" ave after: %.1f nsec\n", after_ave);
+ printf(" max after: %" PRIu64 " nsec\n", sum->after.max_ns);
+ printf("\n");
+}
- if (before.num)
- before_ave = (double)before.sum_ns / before.num;
+static void print_stat_set_cancel_mode(test_global_t *global)
+{
+ int i;
+ test_stat_sum_t *sum = &global->stat_sum;
+ double set_ave = 0.0;
+ int num = 0;
- if (after.num)
- after_ave = (double)after.sum_ns / after.num;
+ printf("\n");
+ printf("RESULTS - timer cancel + set cycles per thread:\n");
+ printf("-----------------------------------------------\n");
+ printf(" 1 2 3 4 5 6 7 8 9 10");
- printf("TOTAL (%i workers)\n", num);
- printf(" events: %" PRIu64 "\n", event_sum);
- printf(" ave time: %.2f sec\n", time_ave);
- printf(" ave rounds per sec: %.2fM\n",
- (round_ave / time_ave) / 1000000.0);
- printf(" num before: %" PRIu64 "\n", before.num);
- printf(" ave before: %.1f nsec\n", before_ave);
- printf(" max before: %" PRIu64 " nsec\n", before.max_ns);
- printf(" num after: %" PRIu64 "\n", after.num);
- printf(" ave after: %.1f nsec\n", after_ave);
- printf(" max after: %" PRIu64 " nsec\n", after.max_ns);
+ for (i = 0; i < ODP_THREAD_COUNT_MAX; i++) {
+ if (global->stat[i].rounds) {
+ if ((num % 10) == 0)
+ printf("\n ");
+
+ printf("%6.1f ", (double)global->stat[i].cycles / global->stat[i].sets);
+ num++;
+ }
+ }
+
+ if (sum->num)
+ set_ave = (double)sum->sets / sum->num;
+
+ printf("\n\n");
+ printf("TOTAL (%i workers)\n", sum->num);
+ printf(" rounds: %" PRIu64 "\n", sum->rounds);
+ printf(" timeouts: %" PRIu64 "\n", sum->events);
+ printf(" timer cancels: %" PRIu64 "\n", sum->cancels);
+ printf(" cancels failed: %" PRIu64 "\n", sum->cancels - sum->sets);
+ printf(" timer sets: %" PRIu64 "\n", sum->sets);
+ printf(" ave time: %.2f sec\n", sum->time_ave);
+ printf(" cancel+set per cpu: %.2fM per sec\n", (set_ave / sum->time_ave) / 1000000.0);
printf("\n");
}
@@ -667,14 +955,17 @@ int main(int argc, char **argv)
odp_init_t init;
test_global_t *global;
test_options_t *test_options;
- int i, shared;
+ int i, shared, mode;
global = &test_global;
memset(global, 0, sizeof(test_global_t));
odp_atomic_init_u32(&global->exit_test, 0);
+ odp_atomic_init_u32(&global->timers_started, 0);
- for (i = 0; i < ODP_THREAD_COUNT_MAX; i++)
+ for (i = 0; i < ODP_THREAD_COUNT_MAX; i++) {
global->thread_arg[i].global = global;
+ global->thread_arg[i].worker_idx = i;
+ }
signal(SIGINT, sig_handler);
@@ -683,6 +974,7 @@ int main(int argc, char **argv)
test_options = &global->test_options;
shared = test_options->shared;
+ mode = test_options->mode;
/* List features not to be used */
odp_init_param_init(&init);
@@ -694,13 +986,13 @@ int main(int argc, char **argv)
/* Init ODP before calling anything else */
if (odp_init_global(&instance, &init, NULL)) {
- printf("Error: Global init failed.\n");
+ ODPH_ERR("Global init failed.\n");
return -1;
}
/* Init this thread */
if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
- printf("Error: Local init failed.\n");
+ ODPH_ERR("Local init failed.\n");
return -1;
}
@@ -729,12 +1021,21 @@ int main(int argc, char **argv)
/* Set timers. Force workers to exit on failure. */
if (set_timers(global))
odp_atomic_add_u32(&global->exit_test, MAX_TIMER_POOLS);
+ else
+ odp_atomic_store_rel_u32(&global->timers_started, 1);
if (!shared) {
/* Test private pools on the master thread */
- if (test_worker(&global->thread_arg[0])) {
- printf("Error: test loop failed\n");
- return -1;
+ if (mode == MODE_SCHED_OVERH) {
+ if (sched_mode_worker(&global->thread_arg[0])) {
+ ODPH_ERR("Sched_mode_worker failed\n");
+ return -1;
+ }
+ } else {
+ if (set_cancel_mode_worker(&global->thread_arg[0])) {
+ ODPH_ERR("Set_cancel_mode_worker failed\n");
+ return -1;
+ }
}
} else {
/* Wait workers to exit */
@@ -742,17 +1043,22 @@ int main(int argc, char **argv)
global->test_options.num_cpu);
}
- print_stat(global);
+ sum_stat(global);
+
+ if (mode == MODE_SCHED_OVERH)
+ print_stat_sched_mode(global);
+ else
+ print_stat_set_cancel_mode(global);
destroy_timer_pool(global);
if (odp_term_local()) {
- printf("Error: term local failed.\n");
+ ODPH_ERR("Term local failed.\n");
return -1;
}
if (odp_term_global(instance)) {
- printf("Error: term global failed.\n");
+ ODPH_ERR("Term global failed.\n");
return -1;
}
diff --git a/test/performance/odp_timer_perf_run.sh b/test/performance/odp_timer_perf_run.sh
new file mode 100755
index 000000000..4f1cd6977
--- /dev/null
+++ b/test/performance/odp_timer_perf_run.sh
@@ -0,0 +1,33 @@
+#!/bin/sh
+#
+# Copyright (c) 2020, Nokia
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+TEST_DIR="${TEST_DIR:-$(dirname $0)}"
+
+echo odp_timer_perf: odp_schedule overhead mode
+echo ===============================================
+
+$TEST_DIR/odp_timer_perf${EXEEXT} -m 0 -c 1 -s 0
+
+RET_VAL=$?
+if [ $RET_VAL -ne 0 ]; then
+ echo odp_timer_perf -m 0: FAILED
+ exit $RET_VAL
+fi
+
+echo odp_timer_perf: timer set + cancel mode
+echo ===============================================
+
+$TEST_DIR/odp_timer_perf${EXEEXT} -m 1 -c 1 -t 10 -R 50 -s 0
+
+RET_VAL=$?
+if [ $RET_VAL -ne 0 ]; then
+ echo odp_timer_perf -m 1: FAILED
+ exit $RET_VAL
+fi
+
+exit 0
diff --git a/test/validation/api/atomic/atomic.c b/test/validation/api/atomic/atomic.c
index 914e0257e..36484295f 100644
--- a/test/validation/api/atomic/atomic.c
+++ b/test/validation/api/atomic/atomic.c
@@ -16,8 +16,8 @@
#define ADD_SUB_CNT 5
-#define CNT 10
-#define U32_INIT_VAL (1UL << 10)
+#define CNT 50000
+#define U32_INIT_VAL (1UL << 28)
#define U64_INIT_VAL (1ULL << 33)
#define U32_MAGIC 0xa23f65b2
#define U64_MAGIC 0xf2e1c5430cb6a52e
@@ -45,6 +45,8 @@ typedef struct {
uint32_t g_num_threads;
uint32_t g_iterations;
uint32_t g_verbose;
+
+ odp_barrier_t global_barrier;
} global_shared_mem_t;
/* Per-thread memory */
@@ -95,6 +97,8 @@ static void test_atomic_inc_32(void)
{
int i;
+ odp_barrier_wait(&global_mem->global_barrier);
+
for (i = 0; i < CNT; i++)
odp_atomic_inc_u32(&global_mem->a32u);
}
@@ -103,6 +107,8 @@ static void test_atomic_inc_64(void)
{
int i;
+ odp_barrier_wait(&global_mem->global_barrier);
+
for (i = 0; i < CNT; i++)
odp_atomic_inc_u64(&global_mem->a64u);
}
@@ -111,6 +117,8 @@ static void test_atomic_dec_32(void)
{
int i;
+ odp_barrier_wait(&global_mem->global_barrier);
+
for (i = 0; i < CNT; i++)
odp_atomic_dec_u32(&global_mem->a32u);
}
@@ -119,6 +127,8 @@ static void test_atomic_dec_64(void)
{
int i;
+ odp_barrier_wait(&global_mem->global_barrier);
+
for (i = 0; i < CNT; i++)
odp_atomic_dec_u64(&global_mem->a64u);
}
@@ -127,6 +137,8 @@ static void test_atomic_fetch_inc_32(void)
{
int i;
+ odp_barrier_wait(&global_mem->global_barrier);
+
for (i = 0; i < CNT; i++)
odp_atomic_fetch_inc_u32(&global_mem->a32u);
}
@@ -135,6 +147,8 @@ static void test_atomic_fetch_inc_64(void)
{
int i;
+ odp_barrier_wait(&global_mem->global_barrier);
+
for (i = 0; i < CNT; i++)
odp_atomic_fetch_inc_u64(&global_mem->a64u);
}
@@ -143,6 +157,8 @@ static void test_atomic_fetch_dec_32(void)
{
int i;
+ odp_barrier_wait(&global_mem->global_barrier);
+
for (i = 0; i < CNT; i++)
odp_atomic_fetch_dec_u32(&global_mem->a32u);
}
@@ -151,6 +167,8 @@ static void test_atomic_fetch_dec_64(void)
{
int i;
+ odp_barrier_wait(&global_mem->global_barrier);
+
for (i = 0; i < CNT; i++)
odp_atomic_fetch_dec_u64(&global_mem->a64u);
}
@@ -159,6 +177,8 @@ static void test_atomic_add_32(void)
{
int i;
+ odp_barrier_wait(&global_mem->global_barrier);
+
for (i = 0; i < CNT; i++)
odp_atomic_add_u32(&global_mem->a32u, ADD_SUB_CNT);
}
@@ -167,6 +187,8 @@ static void test_atomic_add_64(void)
{
int i;
+ odp_barrier_wait(&global_mem->global_barrier);
+
for (i = 0; i < CNT; i++)
odp_atomic_add_u64(&global_mem->a64u, ADD_SUB_CNT);
}
@@ -175,6 +197,8 @@ static void test_atomic_sub_32(void)
{
int i;
+ odp_barrier_wait(&global_mem->global_barrier);
+
for (i = 0; i < CNT; i++)
odp_atomic_sub_u32(&global_mem->a32u, ADD_SUB_CNT);
}
@@ -183,6 +207,8 @@ static void test_atomic_sub_64(void)
{
int i;
+ odp_barrier_wait(&global_mem->global_barrier);
+
for (i = 0; i < CNT; i++)
odp_atomic_sub_u64(&global_mem->a64u, ADD_SUB_CNT);
}
@@ -191,6 +217,8 @@ static void test_atomic_fetch_add_32(void)
{
int i;
+ odp_barrier_wait(&global_mem->global_barrier);
+
for (i = 0; i < CNT; i++)
odp_atomic_fetch_add_u32(&global_mem->a32u, ADD_SUB_CNT);
}
@@ -199,6 +227,8 @@ static void test_atomic_fetch_add_64(void)
{
int i;
+ odp_barrier_wait(&global_mem->global_barrier);
+
for (i = 0; i < CNT; i++)
odp_atomic_fetch_add_u64(&global_mem->a64u, ADD_SUB_CNT);
}
@@ -207,6 +237,8 @@ static void test_atomic_fetch_sub_32(void)
{
int i;
+ odp_barrier_wait(&global_mem->global_barrier);
+
for (i = 0; i < CNT; i++)
odp_atomic_fetch_sub_u32(&global_mem->a32u, ADD_SUB_CNT);
}
@@ -215,6 +247,8 @@ static void test_atomic_fetch_sub_64(void)
{
int i;
+ odp_barrier_wait(&global_mem->global_barrier);
+
for (i = 0; i < CNT; i++)
odp_atomic_fetch_sub_u64(&global_mem->a64u, ADD_SUB_CNT);
}
@@ -224,6 +258,8 @@ static void test_atomic_min_32(void)
int i;
uint32_t tmp;
+ odp_barrier_wait(&global_mem->global_barrier);
+
for (i = 0; i < CNT; i++) {
tmp = odp_atomic_fetch_dec_u32(&global_mem->a32u);
odp_atomic_min_u32(&global_mem->a32u_min, tmp);
@@ -235,6 +271,8 @@ static void test_atomic_min_64(void)
int i;
uint64_t tmp;
+ odp_barrier_wait(&global_mem->global_barrier);
+
for (i = 0; i < CNT; i++) {
tmp = odp_atomic_fetch_dec_u64(&global_mem->a64u);
odp_atomic_min_u64(&global_mem->a64u_min, tmp);
@@ -246,6 +284,8 @@ static void test_atomic_max_32(void)
int i;
uint32_t tmp;
+ odp_barrier_wait(&global_mem->global_barrier);
+
for (i = 0; i < CNT; i++) {
tmp = odp_atomic_fetch_inc_u32(&global_mem->a32u);
odp_atomic_max_u32(&global_mem->a32u_max, tmp);
@@ -257,6 +297,8 @@ static void test_atomic_max_64(void)
int i;
uint64_t tmp;
+ odp_barrier_wait(&global_mem->global_barrier);
+
for (i = 0; i < CNT; i++) {
tmp = odp_atomic_fetch_inc_u64(&global_mem->a64u);
odp_atomic_max_u64(&global_mem->a64u_max, tmp);
@@ -269,6 +311,8 @@ static void test_atomic_cas_inc_32(void)
uint32_t old;
odp_atomic_u32_t *a32u = &global_mem->a32u;
+ odp_barrier_wait(&global_mem->global_barrier);
+
for (i = 0; i < CNT; i++) {
old = odp_atomic_load_u32(a32u);
@@ -283,6 +327,8 @@ static void test_atomic_cas_dec_32(void)
uint32_t old;
odp_atomic_u32_t *a32u = &global_mem->a32u;
+ odp_barrier_wait(&global_mem->global_barrier);
+
for (i = 0; i < CNT; i++) {
old = odp_atomic_load_u32(a32u);
@@ -297,6 +343,8 @@ static void test_atomic_cas_inc_64(void)
uint64_t old;
odp_atomic_u64_t *a64u = &global_mem->a64u;
+ odp_barrier_wait(&global_mem->global_barrier);
+
for (i = 0; i < CNT; i++) {
old = odp_atomic_load_u64(a64u);
@@ -311,6 +359,8 @@ static void test_atomic_cas_dec_64(void)
uint64_t old;
odp_atomic_u64_t *a64u = &global_mem->a64u;
+ odp_barrier_wait(&global_mem->global_barrier);
+
for (i = 0; i < CNT; i++) {
old = odp_atomic_load_u64(a64u);
@@ -326,6 +376,8 @@ static void test_atomic_xchg_32(void)
odp_atomic_u32_t *a32u = &global_mem->a32u;
odp_atomic_u32_t *a32u_xchg = &global_mem->a32u_xchg;
+ odp_barrier_wait(&global_mem->global_barrier);
+
for (i = 0; i < CNT; i++) {
new = odp_atomic_fetch_inc_u32(a32u);
old = odp_atomic_xchg_u32(a32u_xchg, new);
@@ -347,6 +399,8 @@ static void test_atomic_xchg_64(void)
odp_atomic_u64_t *a64u = &global_mem->a64u;
odp_atomic_u64_t *a64u_xchg = &global_mem->a64u_xchg;
+ odp_barrier_wait(&global_mem->global_barrier);
+
for (i = 0; i < CNT; i++) {
new = odp_atomic_fetch_inc_u64(a64u);
old = odp_atomic_xchg_u64(a64u_xchg, new);
@@ -370,6 +424,8 @@ static void test_atomic_non_relaxed_32(void)
odp_atomic_u32_t *a32u_max = &global_mem->a32u_max;
odp_atomic_u32_t *a32u_xchg = &global_mem->a32u_xchg;
+ odp_barrier_wait(&global_mem->global_barrier);
+
for (i = 0; i < CNT; i++) {
tmp = odp_atomic_load_acq_u32(a32u);
odp_atomic_store_rel_u32(a32u, tmp);
@@ -405,6 +461,8 @@ static void test_atomic_non_relaxed_64(void)
odp_atomic_u64_t *a64u_max = &global_mem->a64u_max;
odp_atomic_u64_t *a64u_xchg = &global_mem->a64u_xchg;
+ odp_barrier_wait(&global_mem->global_barrier);
+
for (i = 0; i < CNT; i++) {
tmp = odp_atomic_load_acq_u64(a64u);
odp_atomic_store_rel_u64(a64u, tmp);
@@ -604,6 +662,8 @@ static int atomic_init(odp_instance_t *inst)
printf("Num of threads used = %" PRIu32 "\n",
global_mem->g_num_threads);
+ odp_barrier_init(&global_mem->global_barrier, global_mem->g_num_threads);
+
return ret;
}
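
Overall, the atomic.c changes turn each case into a real concurrency stress: a much larger iteration count plus a barrier so every worker starts hammering the shared variable at the same moment. A condensed sketch of the resulting pattern (names are illustrative):

#include <odp_api.h>

#define CNT 50000

static odp_barrier_t barrier;      /* odp_barrier_init(&barrier, num_threads) at init */
static odp_atomic_u32_t counter;   /* odp_atomic_init_u32(&counter, 0) at init */

/* Each worker blocks on the barrier first, so all CNT-iteration loops
 * run concurrently instead of threads finishing one after another. */
static void atomic_inc_worker(void)
{
	odp_barrier_wait(&barrier);

	for (int i = 0; i < CNT; i++)
		odp_atomic_inc_u32(&counter);
}
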
diff --git a/test/validation/api/classification/classification.c b/test/validation/api/classification/classification.c
index 59d9ec00e..ef35377dc 100644
--- a/test/validation/api/classification/classification.c
+++ b/test/validation/api/classification/classification.c
@@ -23,6 +23,11 @@ odp_suiteinfo_t classification_suites[] = {
.init_func = classification_suite_init,
.term_func = classification_suite_term,
},
+ { .name = "classification packet vector tests",
+ .testinfo_tbl = classification_suite_pktv,
+ .init_func = classification_suite_pktv_init,
+ .term_func = classification_suite_pktv_term,
+ },
ODP_SUITE_INFO_NULL,
};
diff --git a/test/validation/api/classification/classification.h b/test/validation/api/classification/classification.h
index c16b340c9..4c8613555 100644
--- a/test/validation/api/classification/classification.h
+++ b/test/validation/api/classification/classification.h
@@ -21,6 +21,7 @@
#define CLS_DEFAULT_DPORT 2048
#define CLS_DEFAULT_DMAC {0x01, 0x02, 0x03, 0x04, 0x05, 0x06}
#define CLS_DEFAULT_SMAC {0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c}
+#define CLS_MAGIC_VAL 0xdeadbeef
/* Config values for Error CoS */
#define TEST_ERROR 1
diff --git a/test/validation/api/classification/odp_classification_common.c b/test/validation/api/classification/odp_classification_common.c
index bb5d94ff1..b7a4b84ba 100644
--- a/test/validation/api/classification/odp_classification_common.c
+++ b/test/validation/api/classification/odp_classification_common.c
@@ -1,4 +1,5 @@
/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2020, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -24,6 +25,8 @@ static uint8_t IPV6_DST_ADDR[ODPH_IPV6ADDR_LEN] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, 10, 0, 0, 100
};
+#define ODP_GTPU_UDP_PORT 2152
+
odp_pktio_t create_pktio(odp_queue_type_t q_type, odp_pool_t pool,
odp_bool_t cls_enable)
{
@@ -45,7 +48,7 @@ odp_pktio_t create_pktio(odp_queue_type_t q_type, odp_pool_t pool,
if (pktio == ODP_PKTIO_INVALID) {
ret = odp_pool_destroy(pool);
if (ret)
- fprintf(stderr, "unable to destroy pool.\n");
+ ODPH_ERR("Unable to destroy pool\n");
return ODP_PKTIO_INVALID;
}
@@ -55,12 +58,12 @@ odp_pktio_t create_pktio(odp_queue_type_t q_type, odp_pool_t pool,
pktin_param.hash_enable = false;
if (odp_pktin_queue_config(pktio, &pktin_param)) {
- fprintf(stderr, "pktin queue config failed.\n");
+ ODPH_ERR("Pktin queue config failed\n");
return ODP_PKTIO_INVALID;
}
if (odp_pktout_queue_config(pktio, NULL)) {
- fprintf(stderr, "pktout queue config failed.\n");
+ ODPH_ERR("Pktout queue config failed\n");
return ODP_PKTIO_INVALID;
}
@@ -72,7 +75,7 @@ int stop_pktio(odp_pktio_t pktio)
odp_event_t ev;
if (odp_pktio_stop(pktio)) {
- fprintf(stderr, "pktio stop failed.\n");
+ ODPH_ERR("Pktio stop failed\n");
return -1;
}
@@ -95,6 +98,9 @@ int cls_pkt_set_seq(odp_packet_t pkt)
uint32_t offset;
odph_ipv4hdr_t *ip;
odph_tcphdr_t *tcp;
+ odph_udphdr_t *udp;
+ uint16_t port = 0;
+ uint32_t hlen = 0;
int status;
data.magic = DATA_MAGIC;
@@ -104,10 +110,30 @@ int cls_pkt_set_seq(odp_packet_t pkt)
offset = odp_packet_l4_offset(pkt);
CU_ASSERT_FATAL(offset != ODP_PACKET_OFFSET_INVALID);
- if (ip->proto == ODPH_IPPROTO_UDP)
- status = odp_packet_copy_from_mem(pkt, offset + ODPH_UDPHDR_LEN,
+ if (ip->proto == ODPH_IPPROTO_IGMP) {
+ status = odp_packet_copy_from_mem(pkt, offset + ODP_IGMP_HLEN,
+ sizeof(data), &data);
+ } else if (ip->proto == ODPH_IPPROTO_ICMPV4) {
+ status = odp_packet_copy_from_mem(pkt, offset + ODPH_ICMPHDR_LEN,
+ sizeof(data), &data);
+ } else if (ip->proto == ODPH_IPPROTO_SCTP) {
+ /* Create an invalid SCTP packet for testing, under the assumption
+ * that no implementation really cares about its validity
+ */
+ status = odp_packet_copy_from_mem(pkt, offset + ODPH_SCTPHDR_LEN,
sizeof(data), &data);
- else {
+ } else if (ip->proto == ODPH_IPPROTO_UDP) {
+ udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ port = odp_be_to_cpu_16(udp->dst_port);
+ if (port == ODP_GTPU_UDP_PORT) {
+ hlen = offset + ODPH_UDPHDR_LEN + ODP_GTP_HLEN;
+ status = odp_packet_copy_from_mem(pkt, hlen,
+ sizeof(data), &data);
+ } else {
+ status = odp_packet_copy_from_mem(pkt, offset + ODPH_UDPHDR_LEN,
+ sizeof(data), &data);
+ }
+ } else {
tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
status = odp_packet_copy_from_mem(pkt, offset + tcp->hl * 4,
sizeof(data), &data);
@@ -122,6 +148,9 @@ uint32_t cls_pkt_get_seq(odp_packet_t pkt)
cls_test_packet_t data;
odph_ipv4hdr_t *ip;
odph_tcphdr_t *tcp;
+ odph_udphdr_t *udp;
+ uint32_t hlen = 0;
+ uint16_t port = 0;
ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, NULL);
offset = odp_packet_l4_offset(pkt);
@@ -129,10 +158,27 @@ uint32_t cls_pkt_get_seq(odp_packet_t pkt)
if (offset == ODP_PACKET_OFFSET_INVALID || ip == NULL)
return TEST_SEQ_INVALID;
- if (ip->proto == ODPH_IPPROTO_UDP)
- odp_packet_copy_to_mem(pkt, offset + ODPH_UDPHDR_LEN,
+ if (ip->proto == ODPH_IPPROTO_IGMP) {
+ odp_packet_copy_to_mem(pkt, offset + ODP_IGMP_HLEN,
+ sizeof(data), &data);
+
+ } else if (ip->proto == ODPH_IPPROTO_ICMPV4) {
+ odp_packet_copy_to_mem(pkt, offset + ODPH_ICMPHDR_LEN,
sizeof(data), &data);
- else {
+ } else if (ip->proto == ODPH_IPPROTO_SCTP) {
+ odp_packet_copy_to_mem(pkt, offset + ODPH_SCTPHDR_LEN,
+ sizeof(data), &data);
+ } else if (ip->proto == ODPH_IPPROTO_UDP) {
+ udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ port = odp_be_to_cpu_16(udp->dst_port);
+ if (port == ODP_GTPU_UDP_PORT) {
+ hlen = offset + ODPH_UDPHDR_LEN + ODP_GTP_HLEN;
+ odp_packet_copy_to_mem(pkt, hlen, sizeof(data), &data);
+ } else {
+ odp_packet_copy_to_mem(pkt, offset + ODPH_UDPHDR_LEN,
+ sizeof(data), &data);
+ }
+ } else {
tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
odp_packet_copy_to_mem(pkt, offset + tcp->hl * 4,
sizeof(data), &data);
@@ -190,13 +236,38 @@ void enqueue_pktio_interface(odp_packet_t pkt, odp_pktio_t pktio)
CU_ASSERT(odp_pktout_send(pktout, &pkt, 1) == 1);
}
-odp_packet_t receive_packet(odp_queue_t *queue, uint64_t ns)
+odp_packet_t receive_packet(odp_queue_t *queue, uint64_t ns, odp_bool_t enable_pktv)
{
odp_event_t ev;
uint64_t wait = odp_schedule_wait_time(ns);
ev = odp_schedule(queue, wait);
- return odp_packet_from_event(ev);
+ if (ev == ODP_EVENT_INVALID)
+ return ODP_PACKET_INVALID;
+
+ if (!enable_pktv && odp_event_type(ev) == ODP_EVENT_PACKET) {
+ return odp_packet_from_event(ev);
+ } else if (enable_pktv && odp_event_type(ev) == ODP_EVENT_PACKET_VECTOR) {
+ odp_packet_vector_t pktv;
+ odp_packet_t *pkt_tbl;
+ odp_packet_t pkt;
+ uint32_t pktv_len;
+
+ pktv = odp_packet_vector_from_event(ev);
+ pktv_len = odp_packet_vector_tbl(pktv, &pkt_tbl);
+
+ CU_ASSERT_FATAL(pktv_len > 0);
+
+ pkt = pkt_tbl[0];
+ if (pktv_len > 1)
+ odp_packet_free_multi(&pkt_tbl[1], pktv_len - 1);
+ odp_packet_vector_free(pktv);
+ return pkt;
+ }
+
+ odp_event_free(ev);
+ return ODP_PACKET_INVALID;
+
}
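Callers of the reworked receive_packet() see the same contract in both modes: at most one packet is returned, and anything else the scheduler delivered (remaining vector entries, the vector event itself, or an unexpected event type) has already been freed. A short, hypothetical usage sketch, with enable_pktv set to true for a test whose pktin queue delivers packet vectors:

/* Hypothetical helper built on receive_packet(); expected_queue and
 * expected_seqno come from the test that sent the packet. */
static void recv_and_check(odp_queue_t expected_queue, uint32_t expected_seqno)
{
	odp_queue_t retqueue;
	odp_packet_t pkt;

	/* true: this test configured its pktin queue for packet vectors */
	pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS, true);

	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
	CU_ASSERT(retqueue == expected_queue);
	CU_ASSERT(cls_pkt_get_seq(pkt) == expected_seqno);
	odp_packet_free(pkt);
}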
odp_queue_t queue_create(const char *queuename, bool sched)
@@ -232,14 +303,48 @@ odp_pool_t pool_create(const char *poolname)
return odp_pool_create(poolname, &param);
}
+odp_pool_t pktv_pool_create(const char *poolname)
+{
+ odp_pool_capability_t capa;
+ odp_pool_param_t param;
+
+ if (odp_pool_capability(&capa)) {
+ ODPH_ERR("Pool capability failed\n");
+ return ODP_POOL_INVALID;
+ }
+
+ if (capa.vector.max_pools == 0) {
+ ODPH_ERR("No packet vector pools available\n");
+ return ODP_POOL_INVALID;
+ }
+
+ if (capa.vector.max_num && capa.vector.max_num < SHM_PKT_NUM_BUFS) {
+ ODPH_ERR("Unable to create large enough (%d) packet vector pool\n",
+ SHM_PKT_NUM_BUFS);
+ return ODP_POOL_INVALID;
+ }
+
+ odp_pool_param_init(&param);
+ param.type = ODP_POOL_VECTOR;
+ param.vector.num = SHM_PKT_NUM_BUFS;
+ param.vector.max_size = capa.vector.max_size;
+
+ return odp_pool_create(poolname, &param);
+}
+
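pktv_pool_create() above only provides the vector pool; for the scheduler to actually hand out ODP_EVENT_PACKET_VECTOR events (the case receive_packet() handles), the packet input queue must also be configured for vector aggregation. A hedged sketch of that step follows; the vector.* field names are taken from the ODP v1.25 packet vector additions to odp_pktin_queue_param_t and should be verified against the spec, and max_size/max_tmo_ns would normally come from odp_pktio_capability():

#include <odp_api.h>

/* Sketch: enable packet vector delivery on a pktin queue (assumed API). */
static int enable_pktin_vectors(odp_pktio_t pktio, odp_pool_t pktv_pool,
				uint32_t max_size, uint64_t max_tmo_ns)
{
	odp_pktin_queue_param_t pktin_param;

	odp_pktin_queue_param_init(&pktin_param);
	pktin_param.classifier_enable = true;

	/* Aggregate received packets into vectors allocated from pktv_pool */
	pktin_param.vector.enable = true;
	pktin_param.vector.pool = pktv_pool;
	pktin_param.vector.max_size = max_size;
	pktin_param.vector.max_tmo_ns = max_tmo_ns;

	return odp_pktin_queue_config(pktio, &pktin_param);
}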
odp_packet_t create_packet(cls_packet_info_t pkt_info)
{
uint32_t seqno;
odph_ethhdr_t *ethhdr;
odph_udphdr_t *udp;
odph_tcphdr_t *tcp;
+ odph_sctphdr_t *sctp;
+ odph_icmphdr_t *icmp;
odph_ipv4hdr_t *ip;
odph_ipv6hdr_t *ipv6;
+ odph_gtphdr_t *gtpu;
+ odph_igmphdr_t *igmp;
+ uint8_t *hlen = 0;
uint16_t payload_len;
uint32_t addr = 0;
uint32_t mask;
@@ -258,14 +363,44 @@ odp_packet_t create_packet(cls_packet_info_t pkt_info)
uint8_t dst_mac[] = CLS_DEFAULT_DMAC;
payload_len = sizeof(cls_test_packet_t) + pkt_info.len;
+ if (pkt_info.l4_type == CLS_PKT_L4_GTP)
+ payload_len += sizeof(odph_gtphdr_t);
+
seqno = odp_atomic_fetch_inc_u32(pkt_info.seq);
vlan_hdr_len = pkt_info.vlan ? ODPH_VLANHDR_LEN : 0;
vlan_hdr_len = pkt_info.vlan_qinq ? 2 * vlan_hdr_len : vlan_hdr_len;
l3_hdr_len = pkt_info.ipv6 ? ODPH_IPV6HDR_LEN : ODPH_IPV4HDR_LEN;
- l4_hdr_len = pkt_info.udp ? ODPH_UDPHDR_LEN : ODPH_TCPHDR_LEN;
eth_type = pkt_info.ipv6 ? ODPH_ETHTYPE_IPV6 : ODPH_ETHTYPE_IPV4;
- next_hdr = pkt_info.udp ? ODPH_IPPROTO_UDP : ODPH_IPPROTO_TCP;
+ next_hdr = ODPH_IPPROTO_TCP;
+ l4_hdr_len = ODPH_TCPHDR_LEN;
+
+ switch (pkt_info.l4_type) {
+ case CLS_PKT_L4_TCP:
+ next_hdr = ODPH_IPPROTO_TCP;
+ l4_hdr_len = ODPH_TCPHDR_LEN;
+ break;
+ case CLS_PKT_L4_GTP:
+ case CLS_PKT_L4_UDP:
+ next_hdr = ODPH_IPPROTO_UDP;
+ l4_hdr_len = ODPH_UDPHDR_LEN;
+ break;
+ case CLS_PKT_L4_SCTP:
+ next_hdr = ODPH_IPPROTO_SCTP;
+ l4_hdr_len = ODPH_SCTPHDR_LEN;
+ break;
+ case CLS_PKT_L4_ICMP:
+ next_hdr = ODPH_IPPROTO_ICMPV4;
+ l4_hdr_len = ODPH_ICMPHDR_LEN;
+ break;
+ case CLS_PKT_L4_IGMP:
+ next_hdr = ODPH_IPPROTO_IGMP;
+ l4_hdr_len = ODP_IGMP_HLEN;
+ break;
+ default:
+ ODPH_ASSERT(0);
+ }
+
l2_hdr_len = ODPH_ETHHDR_LEN + vlan_hdr_len;
l4_len = l4_hdr_len + payload_len;
l3_len = l3_hdr_len + l4_len;
@@ -344,9 +479,33 @@ odp_packet_t create_packet(cls_packet_info_t pkt_info)
odp_packet_l4_offset_set(pkt, l4_offset);
tcp = (odph_tcphdr_t *)(buf + l4_offset);
udp = (odph_udphdr_t *)(buf + l4_offset);
-
- /* udp */
- if (pkt_info.udp) {
+ sctp = (odph_sctphdr_t *)(buf + l4_offset);
+ icmp = (odph_icmphdr_t *)(buf + l4_offset);
+
+ if (pkt_info.l4_type == CLS_PKT_L4_IGMP) {
+ igmp = (odph_igmphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ igmp->group = odp_cpu_to_be_32(CLS_MAGIC_VAL);
+ igmp->type = 0x12;
+ igmp->code = 0;
+ igmp->csum = 0;
+ } else if (pkt_info.l4_type == CLS_PKT_L4_ICMP) {
+ icmp->type = ICMP_ECHO;
+ icmp->code = 0;
+ icmp->un.echo.id = 0;
+ icmp->un.echo.sequence = 0;
+ icmp->chksum = 0;
+ } else if (pkt_info.l4_type == CLS_PKT_L4_SCTP) {
+ sctp->src_port = odp_cpu_to_be_16(CLS_DEFAULT_SPORT);
+ sctp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT);
+ sctp->tag = 0;
+ sctp->chksum = 0;
+ odp_packet_has_sctp_set(pkt, 1);
+ if (odph_sctp_chksum_set(pkt) != 0) {
+ ODPH_ERR("odph_sctp_chksum failed\n");
+ return ODP_PACKET_INVALID;
+ }
+ } else if (pkt_info.l4_type == CLS_PKT_L4_UDP) {
+ /* udp */
udp->src_port = odp_cpu_to_be_16(CLS_DEFAULT_SPORT);
udp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT);
udp->length = odp_cpu_to_be_16(payload_len + ODPH_UDPHDR_LEN);
@@ -356,6 +515,24 @@ odp_packet_t create_packet(cls_packet_info_t pkt_info)
ODPH_ERR("odph_udp_tcp_chksum failed\n");
return ODP_PACKET_INVALID;
}
+ } else if (pkt_info.l4_type == CLS_PKT_L4_GTP) {
+ udp->src_port = odp_cpu_to_be_16(CLS_DEFAULT_SPORT);
+ udp->dst_port = odp_cpu_to_be_16(ODP_GTPU_UDP_PORT);
+ udp->length = odp_cpu_to_be_16(payload_len + ODPH_UDPHDR_LEN);
+ udp->chksum = 0;
+ odp_packet_has_udp_set(pkt, 1);
+ hlen = (uint8_t *)odp_packet_l4_ptr(pkt, NULL);
+ gtpu = (odph_gtphdr_t *)(hlen + sizeof(odph_udphdr_t));
+ gtpu->teid = odp_cpu_to_be_32(CLS_MAGIC_VAL);
+ /* GTPv1 without optional headers */
+ gtpu->gtp_hdr_info = 0x30;
+ /* GTP echo request */
+ gtpu->msg_type = 1;
+ gtpu->plen = sizeof(cls_test_packet_t);
+ if (odph_udp_tcp_chksum(pkt, ODPH_CHKSUM_GENERATE, NULL) != 0) {
+ ODPH_ERR("odph_udp_tcp_chksum failed\n");
+ return ODP_PACKET_INVALID;
+ }
} else {
tcp->src_port = odp_cpu_to_be_16(CLS_DEFAULT_SPORT);
tcp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT);
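For reference, the GTP-U variant added above gives the test packet the layout Ethernet / IPv4 / UDP (destination port 2152) / GTPv1 header / cls_test_packet_t, which is why cls_pkt_set_seq() and cls_pkt_get_seq() skip ODPH_UDPHDR_LEN + ODP_GTP_HLEN bytes past the L4 offset. A small sketch of that offset arithmetic; gtpu_payload_offset() is a hypothetical helper, and ODP_GTP_HLEN is assumed to be the 8-byte GTPv1 header without optional fields, matching the gtp_hdr_info value 0x30 used above:

#include <odp_api.h>
#include <odp/helper/odph_api.h>

#define ODP_GTPU_UDP_PORT 2152
#define ODP_GTP_HLEN 8      /* assumed: GTPv1 header, no optional fields */

/* Hypothetical helper mirroring the offset math in cls_pkt_set_seq() and
 * cls_pkt_get_seq(): the test payload sits right after the GTP-U header. */
static int gtpu_payload_offset(odp_packet_t pkt, uint32_t *payload_offset)
{
	uint32_t l4_offset = odp_packet_l4_offset(pkt);
	odph_udphdr_t *udp = odp_packet_l4_ptr(pkt, NULL);

	if (l4_offset == ODP_PACKET_OFFSET_INVALID || udp == NULL)
		return -1;

	if (odp_be_to_cpu_16(udp->dst_port) != ODP_GTPU_UDP_PORT)
		return -1;

	/* Eth/IP headers ... UDP header ... GTPv1 header ... payload */
	*payload_offset = l4_offset + ODPH_UDPHDR_LEN + ODP_GTP_HLEN;
	return 0;
}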
diff --git a/test/validation/api/classification/odp_classification_test_pmr.c b/test/validation/api/classification/odp_classification_test_pmr.c
index b5a68058f..068e2112c 100644
--- a/test/validation/api/classification/odp_classification_test_pmr.c
+++ b/test/validation/api/classification/odp_classification_test_pmr.c
@@ -8,6 +8,7 @@
#include "odp_classification_testsuites.h"
#include "classification.h"
#include <odp_cunit_common.h>
+#include <odp/helper/odph_api.h>
#define MAX_NUM_UDP 4
#define MARK_IP 1
@@ -25,13 +26,13 @@ int classification_suite_pmr_init(void)
memset(&cls_capa, 0, sizeof(odp_cls_capability_t));
if (odp_cls_capability(&cls_capa)) {
- fprintf(stderr, "Classifier capability call failed.\n");
+ ODPH_ERR("Classifier capability call failed\n");
return -1;
}
pkt_pool = pool_create("classification_pmr_pool");
if (ODP_POOL_INVALID == pkt_pool) {
- fprintf(stderr, "Packet pool creation failed.\n");
+ ODPH_ERR("Packet pool creation failed\n");
return -1;
}
@@ -47,7 +48,7 @@ int classification_suite_pmr_init(void)
static int start_pktio(odp_pktio_t pktio)
{
if (odp_pktio_start(pktio)) {
- fprintf(stderr, "unable to start loop\n");
+ ODPH_ERR("Unable to start loop\n");
return -1;
}
@@ -92,7 +93,7 @@ int classification_suite_pmr_term(void)
int ret = 0;
if (0 != odp_pool_destroy(pkt_pool)) {
- fprintf(stderr, "pkt_pool destroy failed.\n");
+ ODPH_ERR("Packet pool destroy failed\n");
ret += -1;
}
@@ -177,7 +178,7 @@ static void classification_test_pktin_classifier_flag(void)
/* since classifier flag is disabled in pktin queue configuration
packet will not be delivered in classifier queues */
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS, false);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
pool_recv = odp_packet_pool(pkt);
/* since classifier is disabled packet should not be received in
@@ -272,7 +273,7 @@ static void _classification_test_pmr_term_tcp_dport(int num_pkt)
}
for (i = 0; i < num_pkt; i++) {
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS, false);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
pool_recv = odp_packet_pool(pkt);
CU_ASSERT(pool == pool_recv);
@@ -299,7 +300,7 @@ static void _classification_test_pmr_term_tcp_dport(int num_pkt)
}
for (i = 0; i < num_pkt; i++) {
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS, false);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
CU_ASSERT(seqno[i] == cls_pkt_get_seq(pkt));
CU_ASSERT(retqueue == default_queue);
@@ -337,7 +338,7 @@ static void _classification_test_pmr_term_tcp_dport(int num_pkt)
recv_default = 0;
for (i = 0; i < 2 * num_pkt; i++) {
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS, false);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
CU_ASSERT(retqueue == queue || retqueue == default_queue);
@@ -368,13 +369,22 @@ static void _classification_test_pmr_term_tcp_dport(int num_pkt)
odp_pktio_close(pktio);
}
-static void classification_test_pmr_term_tcp_sport(void)
+typedef enum match_t {
+ MATCH,
+ NO_MATCH
+} match_t;
+
+/*
+ * Test that a PMR created with the given parameters matches or does not match
+ * the given packet. The packet, which gets consumed, must have been created
+ * with create_packet() so that it carries the test sequence number.
+ *
+ * Ethernet addresses of the packet will be overwritten.
+ */
+static void test_pmr(const odp_pmr_param_t *pmr_param, odp_packet_t pkt,
+ match_t match)
{
- odp_packet_t pkt;
- odph_tcphdr_t *tcp;
uint32_t seqno;
- uint16_t val;
- uint16_t mask;
int retval;
odp_pktio_t pktio;
odp_queue_t queue;
@@ -386,15 +396,9 @@ static void classification_test_pmr_term_tcp_sport(void)
odp_pool_t recvpool;
odp_pmr_t pmr;
odp_cos_t cos;
- char cosname[ODP_COS_NAME_LEN];
odp_cls_cos_param_t cls_param;
- odp_pmr_param_t pmr_param;
odph_ethhdr_t *eth;
- val = odp_cpu_to_be_16(CLS_DEFAULT_SPORT);
- mask = odp_cpu_to_be_16(0xffff);
- seqno = 0;
-
pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool, true);
CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
retval = start_pktio(pktio);
@@ -403,75 +407,49 @@ static void classification_test_pmr_term_tcp_sport(void)
configure_default_cos(pktio, &default_cos,
&default_queue, &default_pool);
- queue = queue_create("tcp_sport", true);
+ queue = queue_create("PMR test queue", true);
CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
- pool = pool_create("tcp_sport");
+ pool = pool_create("PMR test pool");
CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
- sprintf(cosname, "tcp_sport");
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool;
cls_param.queue = queue;
cls_param.drop_policy = ODP_COS_DROP_POOL;
- cos = odp_cls_cos_create(cosname, &cls_param);
+ cos = odp_cls_cos_create("PMR test cos", &cls_param);
CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
- odp_cls_pmr_param_init(&pmr_param);
- pmr_param.term = ODP_PMR_TCP_SPORT;
- pmr_param.match.value = &val;
- pmr_param.match.mask = &mask;
- pmr_param.val_sz = sizeof(val);
-
- pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
+ pmr = odp_cls_pmr_create(pmr_param, 1, default_cos, cos);
CU_ASSERT(pmr != ODP_PMR_INVALID);
- pkt = create_packet(default_pkt_info);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
- tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
- tcp->src_port = val;
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- CU_ASSERT(retqueue == queue);
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == pool);
- odp_packet_free(pkt);
-
- pkt = create_packet(default_pkt_info);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
seqno = cls_pkt_get_seq(pkt);
CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
-
- tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
- tcp->src_port = odp_cpu_to_be_16(CLS_DEFAULT_SPORT + 1);
enqueue_pktio_interface(pkt, pktio);
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS, false);
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- CU_ASSERT(retqueue == default_queue);
recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == default_pool);
+
+ if (match == MATCH) {
+ CU_ASSERT(retqueue == queue);
+ CU_ASSERT(recvpool == pool);
+ } else {
+ CU_ASSERT(retqueue == default_queue);
+ CU_ASSERT(recvpool == default_pool);
+ }
odp_packet_free(pkt);
odp_cos_destroy(cos);
odp_cos_destroy(default_cos);
- odp_cls_pmr_destroy(pmr);
+ odp_cls_pmr_destroy(pmr); /* XXX ordering */
stop_pktio(pktio);
odp_pool_destroy(default_pool);
odp_pool_destroy(pool);
@@ -480,56 +458,49 @@ static void classification_test_pmr_term_tcp_sport(void)
odp_pktio_close(pktio);
}
-static void classification_test_pmr_term_udp_dport(void)
+static void classification_test_pmr_term_tcp_sport(void)
{
odp_packet_t pkt;
- odph_udphdr_t *udp;
- uint32_t seqno;
+ odph_tcphdr_t *tcp;
uint16_t val;
uint16_t mask;
- int retval;
- odp_pktio_t pktio;
- odp_pool_t pool;
- odp_pool_t recvpool;
- odp_queue_t queue;
- odp_queue_t retqueue;
- odp_queue_t default_queue;
- odp_cos_t default_cos;
- odp_pool_t default_pool;
- odp_pmr_t pmr;
- odp_cos_t cos;
- char cosname[ODP_COS_NAME_LEN];
odp_pmr_param_t pmr_param;
- odp_cls_cos_param_t cls_param;
- odph_ethhdr_t *eth;
- cls_packet_info_t pkt_info;
- val = odp_cpu_to_be_16(CLS_DEFAULT_DPORT);
+ val = odp_cpu_to_be_16(CLS_DEFAULT_SPORT);
mask = odp_cpu_to_be_16(0xffff);
- seqno = 0;
- pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool, true);
- CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
- retval = start_pktio(pktio);
- CU_ASSERT(retval == 0);
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_TCP_SPORT;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
+ pkt = create_packet(default_pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ tcp->src_port = val;
- queue = queue_create("udp_dport", true);
- CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+ test_pmr(&pmr_param, pkt, MATCH);
- pool = pool_create("udp_dport");
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+ pkt = create_packet(default_pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ tcp->src_port = odp_cpu_to_be_16(CLS_DEFAULT_SPORT + 1);
- sprintf(cosname, "udp_dport");
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = pool;
- cls_param.queue = queue;
- cls_param.drop_policy = ODP_COS_DROP_POOL;
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+}
- cos = odp_cls_cos_create(cosname, &cls_param);
- CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
+static void classification_test_pmr_term_udp_dport(void)
+{
+ odp_packet_t pkt;
+ odph_udphdr_t *udp;
+ uint16_t val;
+ uint16_t mask;
+ odp_pmr_param_t pmr_param;
+ cls_packet_info_t pkt_info;
+
+ val = odp_cpu_to_be_16(CLS_DEFAULT_DPORT);
+ mask = odp_cpu_to_be_16(0xffff);
odp_cls_pmr_param_init(&pmr_param);
pmr_param.term = ODP_PMR_UDP_DPORT;
@@ -537,115 +508,34 @@ static void classification_test_pmr_term_udp_dport(void)
pmr_param.match.mask = &mask;
pmr_param.val_sz = sizeof(val);
- pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
- CU_ASSERT(pmr != ODP_PMR_INVALID);
-
pkt_info = default_pkt_info;
- pkt_info.udp = true;
+ pkt_info.l4_type = CLS_PKT_L4_UDP;
pkt = create_packet(pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
-
udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
udp->dst_port = val;
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- CU_ASSERT(retqueue == queue);
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == pool);
- odp_packet_free(pkt);
+ test_pmr(&pmr_param, pkt, MATCH);
- /* Other packets received in default queue */
pkt = create_packet(pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
-
udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
udp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT + 1);
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- CU_ASSERT(retqueue == default_queue);
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == default_pool);
-
- odp_packet_free(pkt);
- odp_cos_destroy(cos);
- odp_cos_destroy(default_cos);
- odp_cls_pmr_destroy(pmr);
- stop_pktio(pktio);
- odp_queue_destroy(queue);
- odp_queue_destroy(default_queue);
- odp_pool_destroy(default_pool);
- odp_pool_destroy(pool);
- odp_pktio_close(pktio);
+ test_pmr(&pmr_param, pkt, NO_MATCH);
}
static void classification_test_pmr_term_udp_sport(void)
{
odp_packet_t pkt;
odph_udphdr_t *udp;
- uint32_t seqno;
uint16_t val;
uint16_t mask;
- int retval;
- odp_pktio_t pktio;
- odp_queue_t queue;
- odp_queue_t retqueue;
- odp_queue_t default_queue;
- odp_cos_t default_cos;
- odp_pool_t default_pool;
- odp_pool_t pool;
- odp_pool_t recvpool;
- odp_pmr_t pmr;
- odp_cos_t cos;
- char cosname[ODP_COS_NAME_LEN];
odp_pmr_param_t pmr_param;
- odp_cls_cos_param_t cls_param;
- odph_ethhdr_t *eth;
cls_packet_info_t pkt_info;
val = odp_cpu_to_be_16(CLS_DEFAULT_SPORT);
mask = odp_cpu_to_be_16(0xffff);
- seqno = 0;
-
- pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool, true);
- CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
- retval = start_pktio(pktio);
- CU_ASSERT(retval == 0);
-
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
-
- queue = queue_create("udp_sport", true);
- CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
-
- pool = pool_create("udp_sport");
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- sprintf(cosname, "udp_sport");
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = pool;
- cls_param.queue = queue;
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-
- cos = odp_cls_cos_create(cosname, &cls_param);
- CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
odp_cls_pmr_param_init(&pmr_param);
pmr_param.term = ODP_PMR_UDP_SPORT;
@@ -653,113 +543,33 @@ static void classification_test_pmr_term_udp_sport(void)
pmr_param.match.mask = &mask;
pmr_param.val_sz = sizeof(val);
- pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
- CU_ASSERT(pmr != ODP_PMR_INVALID);
-
pkt_info = default_pkt_info;
- pkt_info.udp = true;
+ pkt_info.l4_type = CLS_PKT_L4_UDP;
pkt = create_packet(pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
-
udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
udp->src_port = val;
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- CU_ASSERT(retqueue == queue);
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == pool);
- odp_packet_free(pkt);
+ test_pmr(&pmr_param, pkt, MATCH);
pkt = create_packet(pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
-
udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
udp->src_port = odp_cpu_to_be_16(CLS_DEFAULT_SPORT + 1);
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- CU_ASSERT(retqueue == default_queue);
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == default_pool);
- odp_packet_free(pkt);
-
- odp_cos_destroy(cos);
- odp_cos_destroy(default_cos);
- odp_cls_pmr_destroy(pmr);
- stop_pktio(pktio);
- odp_pool_destroy(default_pool);
- odp_pool_destroy(pool);
- odp_queue_destroy(queue);
- odp_queue_destroy(default_queue);
- odp_pktio_close(pktio);
+ test_pmr(&pmr_param, pkt, NO_MATCH);
}
static void classification_test_pmr_term_ipproto(void)
{
odp_packet_t pkt;
- uint32_t seqno;
uint8_t val;
uint8_t mask;
- int retval;
- odp_pktio_t pktio;
- odp_queue_t queue;
- odp_queue_t retqueue;
- odp_queue_t default_queue;
- odp_cos_t default_cos;
- odp_pool_t default_pool;
- odp_pool_t pool;
- odp_pool_t recvpool;
- odp_pmr_t pmr;
- odp_cos_t cos;
- char cosname[ODP_COS_NAME_LEN];
- odp_cls_cos_param_t cls_param;
odp_pmr_param_t pmr_param;
- odph_ethhdr_t *eth;
cls_packet_info_t pkt_info;
val = ODPH_IPPROTO_UDP;
mask = 0xff;
- seqno = 0;
-
- pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool, true);
- CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
- retval = start_pktio(pktio);
- CU_ASSERT(retval == 0);
-
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
-
- queue = queue_create("ipproto", true);
- CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
-
- pool = pool_create("ipproto");
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- sprintf(cosname, "ipproto");
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = pool;
- cls_param.queue = queue;
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-
- cos = odp_cls_cos_create(cosname, &cls_param);
- CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
odp_cls_pmr_param_init(&pmr_param);
pmr_param.term = ODP_PMR_IPPROTO;
@@ -767,57 +577,17 @@ static void classification_test_pmr_term_ipproto(void)
pmr_param.match.mask = &mask;
pmr_param.val_sz = sizeof(val);
- pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
- CU_ASSERT(pmr != ODP_PMR_INVALID);
-
pkt_info = default_pkt_info;
- pkt_info.udp = true;
+ pkt_info.l4_type = CLS_PKT_L4_UDP;
pkt = create_packet(pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
- enqueue_pktio_interface(pkt, pktio);
+ test_pmr(&pmr_param, pkt, MATCH);
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == pool);
- CU_ASSERT(retqueue == queue);
- odp_packet_free(pkt);
-
- /* Other packets delivered to default queue */
pkt = create_packet(default_pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == default_pool);
- CU_ASSERT(retqueue == default_queue);
- odp_cos_destroy(cos);
- odp_cos_destroy(default_cos);
- odp_cls_pmr_destroy(pmr);
- odp_packet_free(pkt);
- stop_pktio(pktio);
- odp_pool_destroy(default_pool);
- odp_pool_destroy(pool);
- odp_queue_destroy(queue);
- odp_queue_destroy(default_queue);
- odp_pktio_close(pktio);
+ test_pmr(&pmr_param, pkt, NO_MATCH);
}
static void classification_test_pmr_term_dmac(void)
@@ -877,7 +647,7 @@ static void classification_test_pmr_term_dmac(void)
CU_ASSERT(pmr != ODP_PMR_INVALID);
pkt_info = default_pkt_info;
- pkt_info.udp = true;
+ pkt_info.l4_type = CLS_PKT_L4_UDP;
pkt = create_packet(pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
@@ -887,7 +657,7 @@ static void classification_test_pmr_term_dmac(void)
enqueue_pktio_interface(pkt, pktio);
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS, false);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
recvpool = odp_packet_pool(pkt);
@@ -903,7 +673,7 @@ static void classification_test_pmr_term_dmac(void)
enqueue_pktio_interface(pkt, pktio);
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS, false);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
recvpool = odp_packet_pool(pkt);
@@ -925,53 +695,14 @@ static void classification_test_pmr_term_dmac(void)
static void classification_test_pmr_term_packet_len(void)
{
odp_packet_t pkt;
- uint32_t seqno;
uint32_t val;
uint32_t mask;
- int retval;
- odp_pktio_t pktio;
- odp_queue_t queue;
- odp_queue_t retqueue;
- odp_queue_t default_queue;
- odp_cos_t default_cos;
- odp_pool_t default_pool;
- odp_pool_t pool;
- odp_pool_t recvpool;
- odp_pmr_t pmr;
- odp_cos_t cos;
- char cosname[ODP_COS_NAME_LEN];
- odp_cls_cos_param_t cls_param;
odp_pmr_param_t pmr_param;
- odph_ethhdr_t *eth;
cls_packet_info_t pkt_info;
val = 1024;
/*Mask value will match any packet of length 1000 - 1099*/
mask = 0xff00;
- seqno = 0;
-
- pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool, true);
- CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
- retval = start_pktio(pktio);
- CU_ASSERT(retval == 0);
-
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
-
- queue = queue_create("packet_len", true);
- CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
-
- pool = pool_create("packet_len");
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- sprintf(cosname, "packet_len");
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = pool;
- cls_param.queue = queue;
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-
- cos = odp_cls_cos_create(cosname, &cls_param);
- CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
odp_cls_pmr_param_init(&pmr_param);
pmr_param.term = ODP_PMR_LEN;
@@ -979,80 +710,26 @@ static void classification_test_pmr_term_packet_len(void)
pmr_param.match.mask = &mask;
pmr_param.val_sz = sizeof(val);
- pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
- CU_ASSERT(pmr != ODP_PMR_INVALID);
-
/* create packet of payload length 1024 */
pkt_info = default_pkt_info;
- pkt_info.udp = true;
+ pkt_info.l4_type = CLS_PKT_L4_UDP;
pkt_info.len = 1024;
pkt = create_packet(pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
- enqueue_pktio_interface(pkt, pktio);
+ test_pmr(&pmr_param, pkt, MATCH);
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == pool);
- CU_ASSERT(retqueue == queue);
- odp_packet_free(pkt);
-
- /* Other packets delivered to default queue */
pkt = create_packet(default_pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == default_pool);
- CU_ASSERT(retqueue == default_queue);
- odp_cos_destroy(cos);
- odp_cos_destroy(default_cos);
- odp_cls_pmr_destroy(pmr);
- odp_packet_free(pkt);
- stop_pktio(pktio);
- odp_pool_destroy(default_pool);
- odp_pool_destroy(pool);
- odp_queue_destroy(queue);
- odp_queue_destroy(default_queue);
- odp_pktio_close(pktio);
+ test_pmr(&pmr_param, pkt, NO_MATCH);
}
static void classification_test_pmr_term_vlan_id_0(void)
{
odp_packet_t pkt;
- uint32_t seqno;
uint16_t val;
uint16_t mask;
- int retval;
- odp_pktio_t pktio;
- odp_queue_t queue;
- odp_queue_t retqueue;
- odp_queue_t default_queue;
- odp_cos_t default_cos;
- odp_pool_t default_pool;
- odp_pool_t pool;
- odp_pool_t recvpool;
- odp_pmr_t pmr;
- odp_cos_t cos;
- char cosname[ODP_COS_NAME_LEN];
- odp_cls_cos_param_t cls_param;
odp_pmr_param_t pmr_param;
odph_ethhdr_t *eth;
odph_vlanhdr_t *vlan_0;
@@ -1060,30 +737,6 @@ static void classification_test_pmr_term_vlan_id_0(void)
val = odp_cpu_to_be_16(0x123);
mask = odp_cpu_to_be_16(0xfff);
- seqno = 0;
-
- pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool, true);
- CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
- retval = start_pktio(pktio);
- CU_ASSERT(retval == 0);
-
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
-
- queue = queue_create("vlan_id_0", true);
- CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
-
- pool = pool_create("vlan_id_0");
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- sprintf(cosname, "vlan_id_0");
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = pool;
- cls_param.queue = queue;
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-
- cos = odp_cls_cos_create(cosname, &cls_param);
- CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
odp_cls_pmr_param_init(&pmr_param);
pmr_param.term = ODP_PMR_VLAN_ID_0;
@@ -1091,79 +744,27 @@ static void classification_test_pmr_term_vlan_id_0(void)
pmr_param.match.mask = &mask;
pmr_param.val_sz = sizeof(val);
- pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
- CU_ASSERT(pmr != ODP_PMR_INVALID);
-
pkt_info = default_pkt_info;
pkt_info.vlan = true;
pkt = create_packet(pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
vlan_0 = (odph_vlanhdr_t *)(eth + 1);
vlan_0->tci = val;
- enqueue_pktio_interface(pkt, pktio);
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == pool);
- CU_ASSERT(retqueue == queue);
- odp_packet_free(pkt);
+ test_pmr(&pmr_param, pkt, MATCH);
- /* Other packets delivered to default queue */
pkt = create_packet(default_pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == default_pool);
- CU_ASSERT(retqueue == default_queue);
- odp_cos_destroy(cos);
- odp_cos_destroy(default_cos);
- odp_cls_pmr_destroy(pmr);
- odp_packet_free(pkt);
- stop_pktio(pktio);
- odp_pool_destroy(default_pool);
- odp_pool_destroy(pool);
- odp_queue_destroy(queue);
- odp_queue_destroy(default_queue);
- odp_pktio_close(pktio);
+ test_pmr(&pmr_param, pkt, NO_MATCH);
}
static void classification_test_pmr_term_vlan_id_x(void)
{
odp_packet_t pkt;
- uint32_t seqno;
uint16_t val;
uint16_t mask;
- int retval;
- odp_pktio_t pktio;
- odp_queue_t queue;
- odp_queue_t retqueue;
- odp_queue_t default_queue;
- odp_cos_t default_cos;
- odp_pool_t default_pool;
- odp_pool_t pool;
- odp_pool_t recvpool;
- odp_pmr_t pmr;
- odp_cos_t cos;
- char cosname[ODP_COS_NAME_LEN];
- odp_cls_cos_param_t cls_param;
odp_pmr_param_t pmr_param;
odph_ethhdr_t *eth;
odph_vlanhdr_t *vlan_x;
@@ -1171,30 +772,6 @@ static void classification_test_pmr_term_vlan_id_x(void)
val = odp_cpu_to_be_16(0x345);
mask = odp_cpu_to_be_16(0xfff);
- seqno = 0;
-
- pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool, true);
- CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
- retval = start_pktio(pktio);
- CU_ASSERT(retval == 0);
-
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
-
- queue = queue_create("vlan_id_x", true);
- CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
-
- pool = pool_create("vlan_id_x");
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- sprintf(cosname, "vlan_id_x");
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = pool;
- cls_param.queue = queue;
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-
- cos = odp_cls_cos_create(cosname, &cls_param);
- CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
odp_cls_pmr_param_init(&pmr_param);
pmr_param.term = ODP_PMR_VLAN_ID_X;
@@ -1202,111 +779,34 @@ static void classification_test_pmr_term_vlan_id_x(void)
pmr_param.match.mask = &mask;
pmr_param.val_sz = sizeof(val);
- pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
- CU_ASSERT(pmr != ODP_PMR_INVALID);
-
pkt_info = default_pkt_info;
pkt_info.vlan = true;
pkt_info.vlan_qinq = true;
pkt = create_packet(pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
vlan_x = (odph_vlanhdr_t *)(eth + 1);
vlan_x++;
vlan_x->tci = val;
- enqueue_pktio_interface(pkt, pktio);
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == pool);
- CU_ASSERT(retqueue == queue);
- odp_packet_free(pkt);
+ test_pmr(&pmr_param, pkt, MATCH);
- /* Other packets delivered to default queue */
pkt = create_packet(default_pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == default_pool);
- CU_ASSERT(retqueue == default_queue);
- odp_cos_destroy(cos);
- odp_cos_destroy(default_cos);
- odp_cls_pmr_destroy(pmr);
- odp_packet_free(pkt);
- stop_pktio(pktio);
- odp_pool_destroy(default_pool);
- odp_pool_destroy(pool);
- odp_queue_destroy(queue);
- odp_queue_destroy(default_queue);
- odp_pktio_close(pktio);
+ test_pmr(&pmr_param, pkt, NO_MATCH);
}
static void classification_test_pmr_term_eth_type_0(void)
{
odp_packet_t pkt;
- uint32_t seqno;
uint16_t val;
uint16_t mask;
- int retval;
- odp_pktio_t pktio;
- odp_queue_t queue;
- odp_queue_t retqueue;
- odp_queue_t default_queue;
- odp_cos_t default_cos;
- odp_pool_t default_pool;
- odp_pool_t pool;
- odp_pool_t recvpool;
- odp_pmr_t pmr;
- odp_cos_t cos;
- char cosname[ODP_COS_NAME_LEN];
- odp_cls_cos_param_t cls_param;
odp_pmr_param_t pmr_param;
- odph_ethhdr_t *eth;
cls_packet_info_t pkt_info;
val = odp_cpu_to_be_16(ODPH_ETHTYPE_IPV6);
mask = odp_cpu_to_be_16(0xffff);
- seqno = 0;
-
- pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool, true);
- CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
- retval = start_pktio(pktio);
- CU_ASSERT(retval == 0);
-
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
-
- queue = queue_create("eth_type_0", true);
- CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
-
- pool = pool_create("eth_type_0");
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- sprintf(cosname, "eth_type_0");
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = pool;
- cls_param.queue = queue;
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-
- cos = odp_cls_cos_create(cosname, &cls_param);
- CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
odp_cls_pmr_param_init(&pmr_param);
pmr_param.term = ODP_PMR_ETHTYPE_0;
@@ -1314,77 +814,24 @@ static void classification_test_pmr_term_eth_type_0(void)
pmr_param.match.mask = &mask;
pmr_param.val_sz = sizeof(val);
- pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
- CU_ASSERT(pmr != ODP_PMR_INVALID);
-
pkt_info = default_pkt_info;
pkt_info.ipv6 = true;
pkt = create_packet(pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
- enqueue_pktio_interface(pkt, pktio);
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == pool);
- CU_ASSERT(retqueue == queue);
- odp_packet_free(pkt);
+ test_pmr(&pmr_param, pkt, MATCH);
- /* Other packets delivered to default queue */
pkt = create_packet(default_pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == default_pool);
- CU_ASSERT(retqueue == default_queue);
- odp_cos_destroy(cos);
- odp_cos_destroy(default_cos);
- odp_cls_pmr_destroy(pmr);
- odp_packet_free(pkt);
- stop_pktio(pktio);
- odp_pool_destroy(default_pool);
- odp_pool_destroy(pool);
- odp_queue_destroy(queue);
- odp_queue_destroy(default_queue);
- odp_pktio_close(pktio);
+ test_pmr(&pmr_param, pkt, NO_MATCH);
}
static void classification_test_pmr_term_eth_type_x(void)
{
odp_packet_t pkt;
- uint32_t seqno;
uint16_t val;
uint16_t mask;
- int retval;
- odp_pktio_t pktio;
- odp_queue_t queue;
- odp_queue_t retqueue;
- odp_queue_t default_queue;
- odp_cos_t default_cos;
- odp_pool_t default_pool;
- odp_pool_t pool;
- odp_pool_t recvpool;
- odp_pmr_t pmr;
- odp_cos_t cos;
- char cosname[ODP_COS_NAME_LEN];
- odp_cls_cos_param_t cls_param;
odp_pmr_param_t pmr_param;
odph_ethhdr_t *eth;
odph_vlanhdr_t *vlan_x;
@@ -1392,30 +839,6 @@ static void classification_test_pmr_term_eth_type_x(void)
val = odp_cpu_to_be_16(0x0800);
mask = odp_cpu_to_be_16(0xffff);
- seqno = 0;
-
- pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool, true);
- CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
- retval = start_pktio(pktio);
- CU_ASSERT(retval == 0);
-
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
-
- queue = queue_create("eth_type_x", true);
- CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
-
- pool = pool_create("eth_type_x");
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- sprintf(cosname, "eth_type_x");
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = pool;
- cls_param.queue = queue;
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-
- cos = odp_cls_cos_create(cosname, &cls_param);
- CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
odp_cls_pmr_param_init(&pmr_param);
pmr_param.term = ODP_PMR_ETHTYPE_X;
@@ -1423,62 +846,23 @@ static void classification_test_pmr_term_eth_type_x(void)
pmr_param.match.mask = &mask;
pmr_param.val_sz = sizeof(val);
- pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
- CU_ASSERT(pmr != ODP_PMR_INVALID);
-
pkt_info = default_pkt_info;
pkt_info.vlan = true;
pkt_info.vlan_qinq = true;
pkt = create_packet(pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
vlan_x = (odph_vlanhdr_t *)(eth + 1);
vlan_x++;
vlan_x->tci = odp_cpu_to_be_16(0x123);
vlan_x->type = val;
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == pool);
- CU_ASSERT(retqueue == queue);
- odp_packet_free(pkt);
+ test_pmr(&pmr_param, pkt, MATCH);
- /* Other packets delivered to default queue */
pkt = create_packet(default_pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == default_pool);
- CU_ASSERT(retqueue == default_queue);
- odp_cos_destroy(cos);
- odp_cos_destroy(default_cos);
- odp_cls_pmr_destroy(pmr);
- odp_packet_free(pkt);
- stop_pktio(pktio);
- odp_pool_destroy(default_pool);
- odp_pool_destroy(pool);
- odp_queue_destroy(queue);
- odp_queue_destroy(default_queue);
- odp_pktio_close(pktio);
+ test_pmr(&pmr_param, pkt, NO_MATCH);
}
static void classification_test_pmr_pool_set(void)
@@ -1549,7 +933,7 @@ static void classification_test_pmr_pool_set(void)
CU_ASSERT(pmr != ODP_PMR_INVALID);
pkt_info = default_pkt_info;
- pkt_info.udp = true;
+ pkt_info.l4_type = CLS_PKT_L4_UDP;
pkt = create_packet(pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
seqno = cls_pkt_get_seq(pkt);
@@ -1560,7 +944,7 @@ static void classification_test_pmr_pool_set(void)
enqueue_pktio_interface(pkt, pktio);
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS, false);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
recvpool = odp_packet_pool(pkt);
@@ -1647,7 +1031,7 @@ static void classification_test_pmr_queue_set(void)
pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
CU_ASSERT(pmr != ODP_PMR_INVALID);
pkt_info = default_pkt_info;
- pkt_info.udp = true;
+ pkt_info.l4_type = CLS_PKT_L4_UDP;
pkt = create_packet(pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
seqno = cls_pkt_get_seq(pkt);
@@ -1658,7 +1042,7 @@ static void classification_test_pmr_queue_set(void)
enqueue_pktio_interface(pkt, pktio);
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS, false);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
recvpool = odp_packet_pool(pkt);
@@ -1681,48 +1065,12 @@ static void classification_test_pmr_queue_set(void)
static void test_pmr_term_ipv4_addr(int dst)
{
odp_packet_t pkt;
- uint32_t seqno;
- int retval;
- odp_pktio_t pktio;
- odp_queue_t queue;
- odp_queue_t retqueue;
- odp_queue_t default_queue;
- odp_pool_t pool;
- odp_pool_t default_pool;
- odp_pmr_t pmr;
- odp_cos_t cos;
- odp_cos_t default_cos;
uint32_t dst_addr, src_addr;
uint32_t dst_mask, src_mask;
- char cosname[ODP_QUEUE_NAME_LEN];
odp_pmr_param_t pmr_param;
- odp_cls_cos_param_t cls_param;
odph_ipv4hdr_t *ip;
const char *src_str = "10.0.0.88/32";
const char *dst_str = "10.0.0.99/32";
- odph_ethhdr_t *eth;
-
- pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool, true);
- retval = start_pktio(pktio);
- CU_ASSERT(retval == 0);
-
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
-
- queue = queue_create("ipv4 addr", true);
- CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
-
- pool = pool_create("ipv4 addr");
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- sprintf(cosname, "ipv4 addr");
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = pool;
- cls_param.queue = queue;
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-
- cos = odp_cls_cos_create(cosname, &cls_param);
- CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
parse_ipv4_string(src_str, &src_addr, &src_mask);
parse_ipv4_string(dst_str, &dst_addr, &dst_mask);
@@ -1745,58 +1093,19 @@ static void test_pmr_term_ipv4_addr(int dst)
pmr_param.val_sz = sizeof(src_addr);
}
- pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
- CU_ASSERT_FATAL(pmr != ODP_PMR_INVALID);
-
- /* packet with IP address matching PMR rule to be
- * received in the CoS queue */
pkt = create_packet(default_pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, NULL);
ip->src_addr = src_addr;
ip->dst_addr = dst_addr;
odph_ipv4_csum_update(pkt);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- CU_ASSERT(retqueue == queue);
- odp_packet_free(pkt);
+ test_pmr(&pmr_param, pkt, MATCH);
- /* Other packets delivered to default queue */
pkt = create_packet(default_pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- CU_ASSERT(retqueue == default_queue);
- odp_cos_destroy(cos);
- odp_cos_destroy(default_cos);
- odp_cls_pmr_destroy(pmr);
- odp_packet_free(pkt);
- stop_pktio(pktio);
- odp_pool_destroy(default_pool);
- odp_pool_destroy(pool);
- odp_queue_destroy(queue);
- odp_queue_destroy(default_queue);
- odp_pktio_close(pktio);
+ test_pmr(&pmr_param, pkt, NO_MATCH);
}
static void classification_test_pmr_term_ipv4_saddr(void)
@@ -1812,22 +1121,8 @@ static void classification_test_pmr_term_ipv4_daddr(void)
static void classification_test_pmr_term_ipv6daddr(void)
{
odp_packet_t pkt;
- uint32_t seqno;
- int retval;
- odp_pktio_t pktio;
- odp_queue_t queue;
- odp_queue_t retqueue;
- odp_queue_t default_queue;
- odp_pool_t pool;
- odp_pool_t default_pool;
- odp_pmr_t pmr;
- odp_cos_t cos;
- odp_cos_t default_cos;
- char cosname[ODP_QUEUE_NAME_LEN];
odp_pmr_param_t pmr_param;
- odp_cls_cos_param_t cls_param;
odph_ipv6hdr_t *ip;
- odph_ethhdr_t *eth;
cls_packet_info_t pkt_info;
uint8_t IPV6_DST_ADDR[ODPH_IPV6ADDR_LEN] = {
@@ -1838,107 +1133,32 @@ static void classification_test_pmr_term_ipv6daddr(void)
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
};
- pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool, true);
- retval = start_pktio(pktio);
- CU_ASSERT(retval == 0);
-
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
-
- queue = queue_create("daddr", true);
- CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
-
- pool = pool_create("daddr");
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- sprintf(cosname, "daddr");
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = pool;
- cls_param.queue = queue;
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-
- cos = odp_cls_cos_create(cosname, &cls_param);
- CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
-
odp_cls_pmr_param_init(&pmr_param);
pmr_param.term = ODP_PMR_DIP6_ADDR;
pmr_param.match.value = IPV6_DST_ADDR;
pmr_param.match.mask = ipv6_mask;
pmr_param.val_sz = ODPH_IPV6ADDR_LEN;
- pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
- CU_ASSERT_FATAL(pmr != ODP_PMR_INVALID);
-
- /* packet with dst ip address matching PMR rule to be
- received in the CoS queue*/
pkt_info = default_pkt_info;
pkt_info.ipv6 = true;
pkt = create_packet(pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
ip = (odph_ipv6hdr_t *)odp_packet_l3_ptr(pkt, NULL);
memcpy(ip->dst_addr, IPV6_DST_ADDR, ODPH_IPV6ADDR_LEN);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- CU_ASSERT(retqueue == queue);
- odp_packet_free(pkt);
+ test_pmr(&pmr_param, pkt, MATCH);
- /* Other packets delivered to default queue */
pkt = create_packet(pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- CU_ASSERT(retqueue == default_queue);
- odp_cos_destroy(cos);
- odp_cos_destroy(default_cos);
- odp_cls_pmr_destroy(pmr);
- odp_packet_free(pkt);
- stop_pktio(pktio);
- odp_pool_destroy(default_pool);
- odp_pool_destroy(pool);
- odp_queue_destroy(queue);
- odp_queue_destroy(default_queue);
- odp_pktio_close(pktio);
+ test_pmr(&pmr_param, pkt, NO_MATCH);
}
static void classification_test_pmr_term_ipv6saddr(void)
{
odp_packet_t pkt;
- uint32_t seqno;
- int retval;
- odp_pktio_t pktio;
- odp_queue_t queue;
- odp_queue_t retqueue;
- odp_queue_t default_queue;
- odp_pool_t pool;
- odp_pool_t default_pool;
- odp_pmr_t pmr;
- odp_cos_t cos;
- odp_cos_t default_cos;
- char cosname[ODP_QUEUE_NAME_LEN];
odp_pmr_param_t pmr_param;
- odp_cls_cos_param_t cls_param;
odph_ipv6hdr_t *ip;
- odph_ethhdr_t *eth;
cls_packet_info_t pkt_info;
uint8_t IPV6_SRC_ADDR[ODPH_IPV6ADDR_LEN] = {
/* I.e. ::ffff:10.0.0.100 */
@@ -1948,87 +1168,25 @@ static void classification_test_pmr_term_ipv6saddr(void)
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
};
- pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool, true);
- retval = start_pktio(pktio);
- CU_ASSERT(retval == 0);
-
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
-
- queue = queue_create("saddr", true);
- CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
-
- pool = pool_create("saddr");
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- sprintf(cosname, "saddr");
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = pool;
- cls_param.queue = queue;
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-
- cos = odp_cls_cos_create(cosname, &cls_param);
- CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
-
odp_cls_pmr_param_init(&pmr_param);
pmr_param.term = ODP_PMR_SIP6_ADDR;
pmr_param.match.value = IPV6_SRC_ADDR;
pmr_param.match.mask = ipv6_mask;
pmr_param.val_sz = ODPH_IPV6ADDR_LEN;
- pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
- CU_ASSERT_FATAL(pmr != ODP_PMR_INVALID);
-
- /* packet with dst ip address matching PMR rule to be
- received in the CoS queue*/
pkt_info = default_pkt_info;
pkt_info.ipv6 = true;
-
pkt = create_packet(pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
ip = (odph_ipv6hdr_t *)odp_packet_l3_ptr(pkt, NULL);
memcpy(ip->src_addr, IPV6_SRC_ADDR, ODPH_IPV6ADDR_LEN);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
-
- enqueue_pktio_interface(pkt, pktio);
+ test_pmr(&pmr_param, pkt, MATCH);
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- CU_ASSERT(retqueue == queue);
- odp_packet_free(pkt);
-
- /* Other packets delivered to default queue */
pkt = create_packet(pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- CU_ASSERT(retqueue == default_queue);
- odp_cos_destroy(cos);
- odp_cos_destroy(default_cos);
- odp_cls_pmr_destroy(pmr);
- odp_packet_free(pkt);
- stop_pktio(pktio);
- odp_pool_destroy(default_pool);
- odp_pool_destroy(pool);
- odp_queue_destroy(queue);
- odp_queue_destroy(default_queue);
- odp_pktio_close(pktio);
+ test_pmr(&pmr_param, pkt, NO_MATCH);
}
static void classification_test_pmr_term_tcp_dport(void)
@@ -2044,52 +1202,16 @@ static void classification_test_pmr_term_tcp_dport_multi(void)
static void test_pmr_term_custom(int custom_l3)
{
odp_packet_t pkt;
- uint32_t seqno;
- int retval;
- odp_pktio_t pktio;
- odp_queue_t queue;
- odp_queue_t retqueue;
- odp_queue_t default_queue;
- odp_pool_t pool;
- odp_pool_t default_pool;
- odp_pmr_t pmr;
- odp_cos_t cos;
- odp_cos_t default_cos;
uint32_t dst_addr, src_addr;
uint32_t addr_be, mask_be;
uint32_t dst_mask, src_mask;
- char cosname[ODP_QUEUE_NAME_LEN];
odp_pmr_param_t pmr_param;
- odp_cls_cos_param_t cls_param;
odph_ipv4hdr_t *ip;
- odph_ethhdr_t *eth;
const char *pmr_src_str = "10.0.8.0/24";
const char *pmr_dst_str = "10.0.9.0/24";
const char *pkt_src_str = "10.0.8.88/32";
const char *pkt_dst_str = "10.0.9.99/32";
- pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool, true);
- retval = start_pktio(pktio);
- CU_ASSERT(retval == 0);
-
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
-
- queue = queue_create("ipv4 addr", true);
- CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
-
- pool = pool_create("ipv4 addr");
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- sprintf(cosname, "ipv4 addr");
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = pool;
- cls_param.queue = queue;
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-
- cos = odp_cls_cos_create(cosname, &cls_param);
- CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
-
/* Match values for custom PMR rules are passed in network byte order */
parse_ipv4_string(pmr_src_str, &src_addr, &src_mask);
parse_ipv4_string(pmr_dst_str, &dst_addr, &dst_mask);
@@ -2117,59 +1239,22 @@ static void test_pmr_term_custom(int custom_l3)
pmr_param.offset = 26;
}
- pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
- CU_ASSERT_FATAL(pmr != ODP_PMR_INVALID);
-
/* IPv4 packet with matching addresses */
parse_ipv4_string(pkt_src_str, &src_addr, NULL);
parse_ipv4_string(pkt_dst_str, &dst_addr, NULL);
pkt = create_packet(default_pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, NULL);
ip->src_addr = odp_cpu_to_be_32(src_addr);
ip->dst_addr = odp_cpu_to_be_32(dst_addr);
odph_ipv4_csum_update(pkt);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ test_pmr(&pmr_param, pkt, MATCH);
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- CU_ASSERT(retqueue == queue);
- odp_packet_free(pkt);
-
- /* Other packets delivered to default queue */
pkt = create_packet(default_pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
-
- enqueue_pktio_interface(pkt, pktio);
-
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- CU_ASSERT(retqueue == default_queue);
- odp_cos_destroy(cos);
- odp_cos_destroy(default_cos);
- odp_cls_pmr_destroy(pmr);
- odp_packet_free(pkt);
- stop_pktio(pktio);
- odp_pool_destroy(default_pool);
- odp_pool_destroy(pool);
- odp_queue_destroy(queue);
- odp_queue_destroy(default_queue);
- odp_pktio_close(pktio);
+ test_pmr(&pmr_param, pkt, NO_MATCH);
}
/*
@@ -2296,7 +1381,7 @@ static void test_pmr_series(const int num_udp, int marking)
/* Matching TCP/IP packet */
pkt_info = default_pkt_info;
- pkt_info.udp = false;
+ pkt_info.l4_type = CLS_PKT_L4_TCP;
pkt = create_packet(pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
@@ -2314,7 +1399,7 @@ static void test_pmr_series(const int num_udp, int marking)
enqueue_pktio_interface(pkt, pktio);
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS, false);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
CU_ASSERT(retqueue == queue_ip);
@@ -2332,7 +1417,7 @@ static void test_pmr_series(const int num_udp, int marking)
/* Matching UDP/IP packets */
pkt_info = default_pkt_info;
- pkt_info.udp = true;
+ pkt_info.l4_type = CLS_PKT_L4_UDP;
for (i = 0; i < num_udp; i++) {
pkt = create_packet(pkt_info);
@@ -2353,7 +1438,7 @@ static void test_pmr_series(const int num_udp, int marking)
enqueue_pktio_interface(pkt, pktio);
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS, false);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
CU_ASSERT(retqueue == queue_udp[i]);
@@ -2379,7 +1464,7 @@ static void test_pmr_series(const int num_udp, int marking)
enqueue_pktio_interface(pkt, pktio);
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS, false);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
CU_ASSERT(retqueue == default_queue);
@@ -2406,6 +1491,268 @@ static void test_pmr_series(const int num_udp, int marking)
odp_pktio_close(pktio);
}
+static void classification_test_pmr_term_sctp(bool is_dport)
+{
+ odp_packet_t pkt;
+ odph_sctphdr_t *sctp;
+ uint16_t val;
+ uint16_t mask;
+ odp_pmr_param_t pmr_param;
+ cls_packet_info_t pkt_info;
+
+ val = odp_cpu_to_be_16(CLS_DEFAULT_SPORT);
+ if (is_dport)
+ val = odp_cpu_to_be_16(CLS_DEFAULT_DPORT);
+ mask = odp_cpu_to_be_16(0xffff);
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_SCTP_SPORT;
+ if (is_dport)
+ pmr_param.term = ODP_PMR_SCTP_DPORT;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ pkt_info = default_pkt_info;
+ pkt_info.l4_type = CLS_PKT_L4_SCTP;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ sctp = (odph_sctphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ if (is_dport)
+ sctp->dst_port = val;
+ else
+ sctp->src_port = val;
+ CU_ASSERT(odph_sctp_chksum_set(pkt) == 0);
+
+ test_pmr(&pmr_param, pkt, MATCH);
+
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ sctp = (odph_sctphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ if (is_dport)
+ sctp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT + 1);
+ else
+ sctp->src_port = odp_cpu_to_be_16(CLS_DEFAULT_SPORT + 1);
+ CU_ASSERT(odph_sctp_chksum_set(pkt) == 0);
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+}
+
+static void classification_test_pmr_term_sctp_sport(void)
+{
+ classification_test_pmr_term_sctp(0);
+}
+
+static void classification_test_pmr_term_sctp_dport(void)
+{
+ classification_test_pmr_term_sctp(1);
+}
+
+static void classification_test_pmr_term_icmp_type(void)
+{
+ odp_packet_t pkt;
+ odph_icmphdr_t *icmp;
+ uint8_t val;
+ uint8_t mask;
+ odp_pmr_param_t pmr_param;
+ cls_packet_info_t pkt_info;
+
+ val = ICMP_ECHO;
+ mask = 0xff;
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_ICMP_TYPE;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ pkt_info = default_pkt_info;
+ pkt_info.l4_type = CLS_PKT_L4_ICMP;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ icmp = (odph_icmphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ icmp->type = val;
+
+ test_pmr(&pmr_param, pkt, MATCH);
+
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ icmp = (odph_icmphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ icmp->type = ICMP_ECHOREPLY;
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+}
+
+static void classification_test_pmr_term_icmp_code(void)
+{
+ odp_packet_t pkt;
+ odph_icmphdr_t *icmp;
+ uint8_t val;
+ uint8_t mask;
+ odp_pmr_param_t pmr_param;
+ cls_packet_info_t pkt_info;
+
+ val = 0x1;
+ mask = 0xff;
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_ICMP_CODE;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ pkt_info = default_pkt_info;
+ pkt_info.l4_type = CLS_PKT_L4_ICMP;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ icmp = (odph_icmphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ icmp->code = 0x1;
+
+ test_pmr(&pmr_param, pkt, MATCH);
+
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ icmp = (odph_icmphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ icmp->code = 0;
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+}
+
+static void classification_test_pmr_term_icmp_id(void)
+{
+ odp_packet_t pkt;
+ odph_icmphdr_t *icmp;
+ uint16_t val;
+ uint16_t mask;
+ odp_pmr_param_t pmr_param;
+ cls_packet_info_t pkt_info;
+
+ val = odp_cpu_to_be_16(0x1234);
+ mask = odp_cpu_to_be_16(0xffff);
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_ICMP_ID;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ pkt_info = default_pkt_info;
+ pkt_info.l4_type = CLS_PKT_L4_ICMP;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ icmp = (odph_icmphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ icmp->un.echo.id = odp_cpu_to_be_16(0x1234);
+
+ test_pmr(&pmr_param, pkt, MATCH);
+
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ icmp = (odph_icmphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ icmp->un.echo.id = 0x4567;
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+}
+
+static void classification_test_pmr_term_gtpu_teid(void)
+{
+ odp_packet_t pkt;
+ odph_gtphdr_t *gtpu;
+ odph_udphdr_t *udp;
+ uint32_t val;
+ uint32_t mask;
+ odp_pmr_param_t pmr_param;
+ cls_packet_info_t pkt_info;
+ uint8_t *hlen = 0;
+
+ val = odp_cpu_to_be_32(CLS_MAGIC_VAL);
+ mask = odp_cpu_to_be_32(0xffffffff);
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_GTPV1_TEID;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ pkt_info = default_pkt_info;
+ pkt_info.l4_type = CLS_PKT_L4_GTP;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ test_pmr(&pmr_param, pkt, MATCH);
+
+ /* Check packet with wrong UDP port, packets should go to the default CoS */
+ pkt_info = default_pkt_info;
+ pkt_info.l4_type = CLS_PKT_L4_GTP;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ udp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT);
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+
+ /* Check GTPv2 packets, which should go to the default CoS */
+ pkt_info = default_pkt_info;
+ pkt_info.l4_type = CLS_PKT_L4_GTP;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ hlen = (uint8_t *)odp_packet_l4_ptr(pkt, NULL);
+ gtpu = (odph_gtphdr_t *)(hlen + ODPH_UDPHDR_LEN);
+ /* Version:2, piggybacking:1, teid:1 */
+ gtpu->gtp_hdr_info = 0x58;
+ CU_ASSERT(odph_udp_tcp_chksum(pkt, ODPH_CHKSUM_GENERATE, NULL) == 0);
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+
+ /* All other packets should go to the default CoS */
+ pkt_info = default_pkt_info;
+ pkt_info.l4_type = CLS_PKT_L4_GTP;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ hlen = (uint8_t *)odp_packet_l4_ptr(pkt, NULL);
+ gtpu = (odph_gtphdr_t *)(hlen + ODPH_UDPHDR_LEN);
+ gtpu->teid = odp_cpu_to_be_32(CLS_MAGIC_VAL + 1);
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+}
+
+static void classification_test_pmr_term_igmp_grpaddr(void)
+{
+ odp_packet_t pkt;
+ odph_igmphdr_t *igmp;
+ uint32_t val;
+ uint32_t mask;
+ odp_pmr_param_t pmr_param;
+ cls_packet_info_t pkt_info;
+
+ val = odp_cpu_to_be_32(CLS_MAGIC_VAL);
+ mask = odp_cpu_to_be_32(0xffffffff);
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_IGMP_GRP_ADDR;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ pkt_info = default_pkt_info;
+ pkt_info.l4_type = CLS_PKT_L4_IGMP;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ test_pmr(&pmr_param, pkt, MATCH);
+
+ pkt_info = default_pkt_info;
+ pkt_info.l4_type = CLS_PKT_L4_IGMP;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ igmp = (odph_igmphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ igmp->group = odp_cpu_to_be_32(CLS_MAGIC_VAL + 1);
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+}
+
static void classification_test_pmr_serial(void)
{
test_pmr_series(1, 0);
@@ -2540,6 +1887,41 @@ static int check_capa_pmr_marking(void)
return 0;
}
+static int check_capa_sctp_sport(void)
+{
+ return cls_capa.supported_terms.bit.sctp_sport;
+}
+
+static int check_capa_sctp_dport(void)
+{
+ return cls_capa.supported_terms.bit.sctp_dport;
+}
+
+static int check_capa_icmp_type(void)
+{
+ return cls_capa.supported_terms.bit.icmp_type;
+}
+
+static int check_capa_icmp_code(void)
+{
+ return cls_capa.supported_terms.bit.icmp_code;
+}
+
+static int check_capa_icmp_id(void)
+{
+ return cls_capa.supported_terms.bit.icmp_id;
+}
+
+static int check_capa_gtpu_teid(void)
+{
+ return cls_capa.supported_terms.bit.gtpv1_teid;
+}
+
+static int check_capa_igmp_grpaddr(void)
+{
+ return cls_capa.supported_terms.bit.igmp_grp_addr;
+}
+
odp_testinfo_t classification_suite_pmr[] = {
ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_tcp_dport,
check_capa_tcp_dport),
@@ -2549,12 +1931,28 @@ odp_testinfo_t classification_suite_pmr[] = {
check_capa_udp_dport),
ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_udp_sport,
check_capa_udp_sport),
+ ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_gtpu_teid,
+ check_capa_gtpu_teid),
+ ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_igmp_grpaddr,
+ check_capa_igmp_grpaddr),
+ ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_sctp_sport,
+ check_capa_sctp_sport),
+ ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_sctp_dport,
+ check_capa_sctp_dport),
+ ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_icmp_type,
+ check_capa_icmp_type),
+ ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_icmp_code,
+ check_capa_icmp_code),
+ ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_icmp_id,
+ check_capa_icmp_id),
ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_ipproto,
check_capa_ip_proto),
ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_dmac,
check_capa_dmac),
- ODP_TEST_INFO(classification_test_pmr_pool_set),
- ODP_TEST_INFO(classification_test_pmr_queue_set),
+ ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_pool_set,
+ check_capa_ip_proto),
+ ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_queue_set,
+ check_capa_ip_proto),
ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_ipv4_saddr,
check_capa_ipv4_saddr),
ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_ipv4_daddr,
diff --git a/test/validation/api/classification/odp_classification_tests.c b/test/validation/api/classification/odp_classification_tests.c
index b24d23df9..d9c44aea7 100644
--- a/test/validation/api/classification/odp_classification_tests.c
+++ b/test/validation/api/classification/odp_classification_tests.c
@@ -1,4 +1,5 @@
/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2020, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -7,6 +8,7 @@
#include "odp_classification_testsuites.h"
#include "classification.h"
#include <odp_cunit_common.h>
+#include <odp/helper/odph_api.h>
static odp_cos_t cos_list[CLS_ENTRIES];
static odp_pmr_t pmr_list[CLS_ENTRIES];
@@ -25,23 +27,29 @@ static int global_num_l2_qos;
#define NUM_COS_L2_PRIO CLS_L2_QOS_MAX
#define NUM_COS_PMR 1
#define NUM_COS_COMPOSITE 1
+#define PKTV_DEFAULT_SIZE 8
+
/** sequence number of IP packets */
static odp_atomic_u32_t seq;
/* default packet info */
static cls_packet_info_t default_pkt_info;
-int classification_suite_init(void)
+/* Packet vector configuration */
+static odp_pktin_vector_config_t pktv_config;
+
+static int classification_suite_common_init(odp_bool_t enable_pktv)
{
int i;
int ret;
odp_pktio_param_t pktio_param;
odp_pktin_queue_param_t pktin_param;
+
tc.all_bits = 0;
pool_default = pool_create("classification_pool");
if (ODP_POOL_INVALID == pool_default) {
- fprintf(stderr, "Packet pool creation failed.\n");
+ ODPH_ERR("Packet pool creation failed\n");
return -1;
}
@@ -52,7 +60,7 @@ int classification_suite_init(void)
if (pktio_loop == ODP_PKTIO_INVALID) {
ret = odp_pool_destroy(pool_default);
if (ret)
- fprintf(stderr, "unable to destroy pool.\n");
+ ODPH_ERR("Unable to destroy pool\n");
return -1;
}
@@ -71,13 +79,44 @@ int classification_suite_init(void)
pktin_param.classifier_enable = true;
pktin_param.hash_enable = false;
+ if (enable_pktv) {
+ odp_pktio_capability_t capa;
+ odp_pool_t pktv_pool;
+
+ pktv_pool = pktv_pool_create("packet_vector_pool");
+ if (pktv_pool == ODP_POOL_INVALID) {
+ ODPH_ERR("Packet vector pool creation failed\n");
+ return -1;
+ }
+
+ if (odp_pktio_capability(pktio_loop, &capa)) {
+ ODPH_ERR("Pktio capability failed\n");
+ return -1;
+ }
+
+ if (!capa.vector.supported) {
+ printf("Packet vector mode is not supported. Test suite skipped.\n");
+ pktv_config.enable = false;
+ pktv_config.pool = pktv_pool;
+ } else {
+ pktin_param.vector.enable = true;
+ pktin_param.vector.pool = pktv_pool;
+ pktin_param.vector.max_size = capa.vector.max_size < PKTV_DEFAULT_SIZE ?
+ capa.vector.max_size : PKTV_DEFAULT_SIZE;
+ pktin_param.vector.max_tmo_ns = capa.vector.min_tmo_ns;
+
+ /* Copy packet vector config for global access */
+ pktv_config = pktin_param.vector;
+ }
+ }
+
if (odp_pktin_queue_config(pktio_loop, &pktin_param)) {
- fprintf(stderr, "pktin queue config failed.\n");
+ ODPH_ERR("Pktin queue config failed\n");
return -1;
}
if (odp_pktout_queue_config(pktio_loop, NULL)) {
- fprintf(stderr, "pktout queue config failed.\n");
+ ODPH_ERR("Pktout queue config failed\n");
return -1;
}
@@ -97,33 +136,40 @@ int classification_suite_init(void)
ret = odp_pktio_start(pktio_loop);
if (ret) {
- fprintf(stderr, "unable to start loop\n");
+ ODPH_ERR("Unable to start loop\n");
return -1;
}
return 0;
}
-int classification_suite_term(void)
+static int classification_suite_common_term(odp_bool_t enable_pktv)
{
int i;
int retcode = 0;
if (0 > stop_pktio(pktio_loop)) {
- fprintf(stderr, "stop pktio failed.\n");
+ ODPH_ERR("Stop pktio failed\n");
retcode = -1;
}
if (0 > odp_pktio_close(pktio_loop)) {
- fprintf(stderr, "pktio close failed.\n");
+ ODPH_ERR("Pktio close failed\n");
retcode = -1;
}
if (0 != odp_pool_destroy(pool_default)) {
- fprintf(stderr, "pool_default destroy failed.\n");
+ ODPH_ERR("Pool_default destroy failed\n");
retcode = -1;
}
+ if (enable_pktv) {
+ if (odp_pool_destroy(pktv_config.pool)) {
+ ODPH_ERR("Packet vector pool destroy failed\n");
+ retcode = -1;
+ }
+ }
+
for (i = 0; i < CLS_ENTRIES; i++) {
if (cos_list[i] != ODP_COS_INVALID)
odp_cos_destroy(cos_list[i]);
@@ -150,7 +196,27 @@ int classification_suite_term(void)
return retcode;
}
-void configure_cls_pmr_chain(void)
+int classification_suite_init(void)
+{
+ return classification_suite_common_init(false);
+}
+
+int classification_suite_term(void)
+{
+ return classification_suite_common_term(false);
+}
+
+int classification_suite_pktv_init(void)
+{
+ return classification_suite_common_init(true);
+}
+
+int classification_suite_pktv_term(void)
+{
+ return classification_suite_common_term(true);
+}
+
+void configure_cls_pmr_chain(odp_bool_t enable_pktv)
{
/* PKTIO --> PMR_SRC(SRC IP ADDR) --> PMR_DST (TCP SPORT) */
@@ -194,6 +260,13 @@ void configure_cls_pmr_chain(void)
cls_param.queue = queue_list[CLS_PMR_CHAIN_SRC];
cls_param.drop_policy = ODP_COS_DROP_POOL;
+ if (enable_pktv) {
+ cls_param.vector.enable = true;
+ cls_param.vector.pool = pktv_config.pool;
+ cls_param.vector.max_size = pktv_config.max_size;
+ cls_param.vector.max_tmo_ns = pktv_config.max_tmo_ns;
+ }
+
cos_list[CLS_PMR_CHAIN_SRC] = odp_cls_cos_create(cosname, &cls_param);
CU_ASSERT_FATAL(cos_list[CLS_PMR_CHAIN_SRC] != ODP_COS_INVALID);
@@ -216,6 +289,14 @@ void configure_cls_pmr_chain(void)
cls_param.pool = pool_list[CLS_PMR_CHAIN_DST];
cls_param.queue = queue_list[CLS_PMR_CHAIN_DST];
cls_param.drop_policy = ODP_COS_DROP_POOL;
+
+ if (enable_pktv) {
+ cls_param.vector.enable = true;
+ cls_param.vector.pool = pktv_config.pool;
+ cls_param.vector.max_size = pktv_config.max_size;
+ cls_param.vector.max_tmo_ns = pktv_config.max_tmo_ns;
+ }
+
cos_list[CLS_PMR_CHAIN_DST] = odp_cls_cos_create(cosname, &cls_param);
CU_ASSERT_FATAL(cos_list[CLS_PMR_CHAIN_DST] != ODP_COS_INVALID);
@@ -246,7 +327,7 @@ void configure_cls_pmr_chain(void)
CU_ASSERT_FATAL(pmr_list[CLS_PMR_CHAIN_DST] != ODP_PMR_INVALID);
}
-void test_cls_pmr_chain(void)
+void test_cls_pmr_chain(odp_bool_t enable_pktv)
{
odp_packet_t pkt;
odph_ipv4hdr_t *ip;
@@ -258,7 +339,7 @@ void test_cls_pmr_chain(void)
cls_packet_info_t pkt_info;
pkt_info = default_pkt_info;
- pkt_info.udp = true;
+ pkt_info.l4_type = CLS_PKT_L4_UDP;
pkt = create_packet(pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
seqno = cls_pkt_get_seq(pkt);
@@ -273,7 +354,7 @@ void test_cls_pmr_chain(void)
enqueue_pktio_interface(pkt, pktio_loop);
- pkt = receive_packet(&queue, ODP_TIME_SEC_IN_NS);
+ pkt = receive_packet(&queue, ODP_TIME_SEC_IN_NS, enable_pktv);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
CU_ASSERT(queue == queue_list[CLS_PMR_CHAIN_DST]);
CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
@@ -292,7 +373,7 @@ void test_cls_pmr_chain(void)
odph_ipv4_csum_update(pkt);
enqueue_pktio_interface(pkt, pktio_loop);
- pkt = receive_packet(&queue, ODP_TIME_SEC_IN_NS);
+ pkt = receive_packet(&queue, ODP_TIME_SEC_IN_NS, enable_pktv);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
CU_ASSERT(queue == queue_list[CLS_PMR_CHAIN_SRC]);
CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
@@ -301,7 +382,7 @@ void test_cls_pmr_chain(void)
odp_packet_free(pkt);
}
-void configure_pktio_default_cos(void)
+void configure_pktio_default_cos(odp_bool_t enable_pktv)
{
int retval;
odp_queue_param_t qparam;
@@ -328,6 +409,14 @@ void configure_pktio_default_cos(void)
cls_param.pool = pool_list[CLS_DEFAULT];
cls_param.queue = queue_list[CLS_DEFAULT];
cls_param.drop_policy = ODP_COS_DROP_POOL;
+
+ if (enable_pktv) {
+ cls_param.vector.enable = true;
+ cls_param.vector.pool = pktv_config.pool;
+ cls_param.vector.max_size = pktv_config.max_size;
+ cls_param.vector.max_tmo_ns = pktv_config.max_tmo_ns;
+ }
+
cos_list[CLS_DEFAULT] = odp_cls_cos_create(cosname, &cls_param);
CU_ASSERT_FATAL(cos_list[CLS_DEFAULT] != ODP_COS_INVALID);
@@ -335,7 +424,7 @@ void configure_pktio_default_cos(void)
CU_ASSERT(retval == 0);
}
-void test_pktio_default_cos(void)
+void test_pktio_default_cos(odp_bool_t enable_pktv)
{
odp_packet_t pkt;
odp_queue_t queue;
@@ -345,7 +434,7 @@ void test_pktio_default_cos(void)
/* create a default packet */
pkt_info = default_pkt_info;
- pkt_info.udp = true;
+ pkt_info.l4_type = CLS_PKT_L4_UDP;
pkt = create_packet(pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
seqno = cls_pkt_get_seq(pkt);
@@ -353,7 +442,7 @@ void test_pktio_default_cos(void)
enqueue_pktio_interface(pkt, pktio_loop);
- pkt = receive_packet(&queue, ODP_TIME_SEC_IN_NS);
+ pkt = receive_packet(&queue, ODP_TIME_SEC_IN_NS, enable_pktv);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
/* Default packet should be received in default queue */
CU_ASSERT(queue == queue_list[CLS_DEFAULT]);
@@ -364,7 +453,7 @@ void test_pktio_default_cos(void)
odp_packet_free(pkt);
}
-void configure_pktio_error_cos(void)
+void configure_pktio_error_cos(odp_bool_t enable_pktv)
{
int retval;
odp_queue_param_t qparam;
@@ -392,6 +481,14 @@ void configure_pktio_error_cos(void)
cls_param.pool = pool_list[CLS_ERROR];
cls_param.queue = queue_list[CLS_ERROR];
cls_param.drop_policy = ODP_COS_DROP_POOL;
+
+ if (enable_pktv) {
+ cls_param.vector.enable = true;
+ cls_param.vector.pool = pktv_config.pool;
+ cls_param.vector.max_size = pktv_config.max_size;
+ cls_param.vector.max_tmo_ns = pktv_config.max_tmo_ns;
+ }
+
cos_list[CLS_ERROR] = odp_cls_cos_create(cosname, &cls_param);
CU_ASSERT_FATAL(cos_list[CLS_ERROR] != ODP_COS_INVALID);
@@ -399,7 +496,7 @@ void configure_pktio_error_cos(void)
CU_ASSERT(retval == 0);
}
-void test_pktio_error_cos(void)
+void test_pktio_error_cos(odp_bool_t enable_pktv)
{
odp_queue_t queue;
odp_packet_t pkt;
@@ -408,7 +505,7 @@ void test_pktio_error_cos(void)
/* Create an error packet */
pkt_info = default_pkt_info;
- pkt_info.udp = true;
+ pkt_info.l4_type = CLS_PKT_L4_UDP;
pkt = create_packet(pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
odph_ipv4hdr_t *ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, NULL);
@@ -418,7 +515,7 @@ void test_pktio_error_cos(void)
ip->chksum = 0;
enqueue_pktio_interface(pkt, pktio_loop);
- pkt = receive_packet(&queue, ODP_TIME_SEC_IN_NS);
+ pkt = receive_packet(&queue, ODP_TIME_SEC_IN_NS, enable_pktv);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
/* Error packet should be received in error queue */
CU_ASSERT(queue == queue_list[CLS_ERROR]);
@@ -458,7 +555,7 @@ static void classification_test_pktio_set_headroom(void)
CU_ASSERT(retval < 0);
}
-void configure_cos_with_l2_priority(void)
+void configure_cos_with_l2_priority(odp_bool_t enable_pktv)
{
uint8_t num_qos = CLS_L2_QOS_MAX;
odp_cos_t cos_tbl[CLS_L2_QOS_MAX];
@@ -503,6 +600,14 @@ void configure_cos_with_l2_priority(void)
cls_param.pool = pool;
cls_param.queue = queue_tbl[i];
cls_param.drop_policy = ODP_COS_DROP_POOL;
+
+ if (enable_pktv) {
+ cls_param.vector.enable = true;
+ cls_param.vector.pool = pktv_config.pool;
+ cls_param.vector.max_size = pktv_config.max_size;
+ cls_param.vector.max_tmo_ns = pktv_config.max_tmo_ns;
+ }
+
cos_tbl[i] = odp_cls_cos_create(cosname, &cls_param);
if (cos_tbl[i] == ODP_COS_INVALID)
break;
@@ -516,7 +621,7 @@ void configure_cos_with_l2_priority(void)
CU_ASSERT(retval == 0);
}
-void test_cos_with_l2_priority(void)
+void test_cos_with_l2_priority(odp_bool_t enable_pktv)
{
odp_packet_t pkt;
odph_ethhdr_t *ethhdr;
@@ -528,7 +633,7 @@ void test_cos_with_l2_priority(void)
uint8_t i;
pkt_info = default_pkt_info;
- pkt_info.udp = true;
+ pkt_info.l4_type = CLS_PKT_L4_UDP;
pkt_info.vlan = true;
for (i = 0; i < global_num_l2_qos; i++) {
@@ -540,7 +645,7 @@ void test_cos_with_l2_priority(void)
vlan = (odph_vlanhdr_t *)(ethhdr + 1);
vlan->tci = odp_cpu_to_be_16(i << 13);
enqueue_pktio_interface(pkt, pktio_loop);
- pkt = receive_packet(&queue, ODP_TIME_SEC_IN_NS);
+ pkt = receive_packet(&queue, ODP_TIME_SEC_IN_NS, enable_pktv);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
CU_ASSERT(queue == queue_list[CLS_L2_QOS_0 + i]);
pool = odp_packet_pool(pkt);
@@ -550,7 +655,7 @@ void test_cos_with_l2_priority(void)
}
}
-void configure_pmr_cos(void)
+void configure_pmr_cos(odp_bool_t enable_pktv)
{
uint16_t val;
uint16_t mask;
@@ -580,6 +685,14 @@ void configure_pmr_cos(void)
cls_param.pool = pool_list[CLS_PMR];
cls_param.queue = queue_list[CLS_PMR];
cls_param.drop_policy = ODP_COS_DROP_POOL;
+
+ if (enable_pktv) {
+ cls_param.vector.enable = true;
+ cls_param.vector.pool = pktv_config.pool;
+ cls_param.vector.max_size = pktv_config.max_size;
+ cls_param.vector.max_tmo_ns = pktv_config.max_tmo_ns;
+ }
+
cos_list[CLS_PMR] = odp_cls_cos_create(cosname, &cls_param);
CU_ASSERT_FATAL(cos_list[CLS_PMR] != ODP_COS_INVALID);
@@ -597,7 +710,7 @@ void configure_pmr_cos(void)
CU_ASSERT_FATAL(pmr_list[CLS_PMR] != ODP_PMR_INVALID);
}
-void test_pmr_cos(void)
+void test_pmr_cos(odp_bool_t enable_pktv)
{
odp_packet_t pkt;
odp_queue_t queue;
@@ -606,14 +719,14 @@ void test_pmr_cos(void)
cls_packet_info_t pkt_info;
pkt_info = default_pkt_info;
- pkt_info.udp = true;
+ pkt_info.l4_type = CLS_PKT_L4_UDP;
pkt = create_packet(pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
seqno = cls_pkt_get_seq(pkt);
CU_ASSERT(seqno != TEST_SEQ_INVALID);
set_first_supported_pmr_port(pkt, CLS_PMR_PORT);
enqueue_pktio_interface(pkt, pktio_loop);
- pkt = receive_packet(&queue, ODP_TIME_SEC_IN_NS);
+ pkt = receive_packet(&queue, ODP_TIME_SEC_IN_NS, enable_pktv);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
CU_ASSERT(queue == queue_list[CLS_PMR]);
pool = odp_packet_pool(pkt);
@@ -622,7 +735,7 @@ void test_pmr_cos(void)
odp_packet_free(pkt);
}
-void configure_pktio_pmr_composite(void)
+void configure_pktio_pmr_composite(odp_bool_t enable_pktv)
{
odp_pmr_param_t pmr_params[2];
uint16_t val;
@@ -655,6 +768,14 @@ void configure_pktio_pmr_composite(void)
cls_param.pool = pool_list[CLS_PMR_SET];
cls_param.queue = queue_list[CLS_PMR_SET];
cls_param.drop_policy = ODP_COS_DROP_POOL;
+
+ if (enable_pktv) {
+ cls_param.vector.enable = true;
+ cls_param.vector.pool = pktv_config.pool;
+ cls_param.vector.max_size = pktv_config.max_size;
+ cls_param.vector.max_tmo_ns = pktv_config.max_tmo_ns;
+ }
+
cos_list[CLS_PMR_SET] = odp_cls_cos_create(cosname, &cls_param);
CU_ASSERT_FATAL(cos_list[CLS_PMR_SET] != ODP_COS_INVALID);
@@ -683,7 +804,7 @@ void configure_pktio_pmr_composite(void)
CU_ASSERT_FATAL(pmr_list[CLS_PMR_SET] != ODP_PMR_INVALID);
}
-void test_pktio_pmr_composite_cos(void)
+void test_pktio_pmr_composite_cos(odp_bool_t enable_pktv)
{
uint32_t addr = 0;
uint32_t mask;
@@ -695,7 +816,7 @@ void test_pktio_pmr_composite_cos(void)
cls_packet_info_t pkt_info;
pkt_info = default_pkt_info;
- pkt_info.udp = true;
+ pkt_info.l4_type = CLS_PKT_L4_UDP;
pkt = create_packet(pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
seqno = cls_pkt_get_seq(pkt);
@@ -708,7 +829,7 @@ void test_pktio_pmr_composite_cos(void)
set_first_supported_pmr_port(pkt, CLS_PMR_SET_PORT);
enqueue_pktio_interface(pkt, pktio_loop);
- pkt = receive_packet(&queue, ODP_TIME_SEC_IN_NS);
+ pkt = receive_packet(&queue, ODP_TIME_SEC_IN_NS, enable_pktv);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
CU_ASSERT(queue == queue_list[CLS_PMR_SET]);
pool = odp_packet_pool(pkt);
@@ -717,7 +838,7 @@ void test_pktio_pmr_composite_cos(void)
odp_packet_free(pkt);
}
-static void classification_test_pktio_configure(void)
+static void classification_test_pktio_configure_common(odp_bool_t enable_pktv)
{
odp_cls_capability_t capa;
int num_cos;
@@ -727,53 +848,78 @@ static void classification_test_pktio_configure(void)
/* Configure the Different CoS for the pktio interface */
if (num_cos >= NUM_COS_DEFAULT && TEST_DEFAULT) {
- configure_pktio_default_cos();
+ configure_pktio_default_cos(enable_pktv);
tc.default_cos = 1;
num_cos -= NUM_COS_DEFAULT;
}
if (num_cos >= NUM_COS_ERROR && TEST_ERROR) {
- configure_pktio_error_cos();
+ configure_pktio_error_cos(enable_pktv);
tc.error_cos = 1;
num_cos -= NUM_COS_ERROR;
}
if (num_cos >= NUM_COS_PMR_CHAIN && TEST_PMR_CHAIN) {
- configure_cls_pmr_chain();
+ configure_cls_pmr_chain(enable_pktv);
tc.pmr_chain = 1;
num_cos -= NUM_COS_PMR_CHAIN;
}
if (num_cos >= NUM_COS_L2_PRIO && TEST_L2_QOS) {
- configure_cos_with_l2_priority();
+ configure_cos_with_l2_priority(enable_pktv);
tc.l2_priority = 1;
num_cos -= NUM_COS_L2_PRIO;
}
if (num_cos >= NUM_COS_PMR && TEST_PMR) {
- configure_pmr_cos();
+ configure_pmr_cos(enable_pktv);
tc.pmr_cos = 1;
num_cos -= NUM_COS_PMR;
}
if (num_cos >= NUM_COS_COMPOSITE && TEST_PMR_SET) {
- configure_pktio_pmr_composite();
+ configure_pktio_pmr_composite(enable_pktv);
tc.pmr_composite_cos = 1;
num_cos -= NUM_COS_COMPOSITE;
}
}
-static void classification_test_pktio_test(void)
+static void classification_test_pktio_configure(void)
+{
+ classification_test_pktio_configure_common(false);
+}
+
+static void classification_test_pktio_configure_pktv(void)
+{
+ classification_test_pktio_configure_common(true);
+}
+
+static void classification_test_pktio_test_common(odp_bool_t enable_pktv)
{
/* Test Different CoS on the pktio interface */
if (tc.default_cos && TEST_DEFAULT)
- test_pktio_default_cos();
+ test_pktio_default_cos(enable_pktv);
if (tc.error_cos && TEST_ERROR)
- test_pktio_error_cos();
+ test_pktio_error_cos(enable_pktv);
if (tc.pmr_chain && TEST_PMR_CHAIN)
- test_cls_pmr_chain();
+ test_cls_pmr_chain(enable_pktv);
if (tc.l2_priority && TEST_L2_QOS)
- test_cos_with_l2_priority();
+ test_cos_with_l2_priority(enable_pktv);
if (tc.pmr_cos && TEST_PMR)
- test_pmr_cos();
+ test_pmr_cos(enable_pktv);
if (tc.pmr_composite_cos && TEST_PMR_SET)
- test_pktio_pmr_composite_cos();
+ test_pktio_pmr_composite_cos(enable_pktv);
+}
+
+static void classification_test_pktio_test(void)
+{
+ classification_test_pktio_test_common(false);
+}
+
+static void classification_test_pktio_test_pktv(void)
+{
+ classification_test_pktio_test_common(true);
+}
+
+static int classification_check_pktv(void)
+{
+ return pktv_config.enable ? ODP_TEST_ACTIVE : ODP_TEST_INACTIVE;
}
static int check_capa_skip_offset(void)
@@ -789,3 +935,11 @@ odp_testinfo_t classification_suite[] = {
ODP_TEST_INFO(classification_test_pktio_test),
ODP_TEST_INFO_NULL,
};
+
+odp_testinfo_t classification_suite_pktv[] = {
+ ODP_TEST_INFO_CONDITIONAL(classification_test_pktio_configure_pktv,
+ classification_check_pktv),
+ ODP_TEST_INFO_CONDITIONAL(classification_test_pktio_test_pktv,
+ classification_check_pktv),
+ ODP_TEST_INFO_NULL,
+};
diff --git a/test/validation/api/classification/odp_classification_testsuites.h b/test/validation/api/classification/odp_classification_testsuites.h
index 2c6153678..2b5da94e2 100644
--- a/test/validation/api/classification/odp_classification_testsuites.h
+++ b/test/validation/api/classification/odp_classification_testsuites.h
@@ -12,12 +12,21 @@
#include <odp_cunit_common.h>
#include <stdbool.h>
+typedef enum cls_packet_l4_info {
+ CLS_PKT_L4_TCP,
+ CLS_PKT_L4_UDP,
+ CLS_PKT_L4_SCTP,
+ CLS_PKT_L4_ICMP,
+ CLS_PKT_L4_GTP,
+ CLS_PKT_L4_IGMP,
+} cls_packet_l4_info;
+
typedef struct cls_packet_info {
odp_pool_t pool;
bool vlan;
bool vlan_qinq;
odp_atomic_u32_t *seq;
- bool udp;
+ cls_packet_l4_info l4_type;
bool ipv6;
uint32_t len;
} cls_packet_info_t;
@@ -37,6 +46,7 @@ typedef union odp_cls_testcase {
extern odp_testinfo_t classification_suite[];
extern odp_testinfo_t classification_suite_basic[];
extern odp_testinfo_t classification_suite_pmr[];
+extern odp_testinfo_t classification_suite_pktv[];
int classification_suite_init(void);
int classification_suite_term(void);
@@ -44,6 +54,9 @@ int classification_suite_term(void);
int classification_suite_pmr_term(void);
int classification_suite_pmr_init(void);
+int classification_suite_pktv_init(void);
+int classification_suite_pktv_term(void);
+
odp_packet_t create_packet(cls_packet_info_t pkt_info);
int cls_pkt_set_seq(odp_packet_t pkt);
uint32_t cls_pkt_get_seq(odp_packet_t pkt);
@@ -53,21 +66,22 @@ void configure_default_cos(odp_pktio_t pktio, odp_cos_t *cos,
odp_queue_t *queue, odp_pool_t *pool);
int parse_ipv4_string(const char *ipaddress, uint32_t *addr, uint32_t *mask);
void enqueue_pktio_interface(odp_packet_t pkt, odp_pktio_t pktio);
-odp_packet_t receive_packet(odp_queue_t *queue, uint64_t ns);
+odp_packet_t receive_packet(odp_queue_t *queue, uint64_t ns, odp_bool_t enable_pktv);
odp_pool_t pool_create(const char *poolname);
+odp_pool_t pktv_pool_create(const char *poolname);
odp_queue_t queue_create(const char *queuename, bool sched);
-void configure_pktio_default_cos(void);
-void test_pktio_default_cos(void);
-void configure_pktio_error_cos(void);
-void test_pktio_error_cos(void);
-void configure_cls_pmr_chain(void);
-void test_cls_pmr_chain(void);
-void configure_cos_with_l2_priority(void);
-void test_cos_with_l2_priority(void);
-void configure_pmr_cos(void);
-void test_pmr_cos(void);
-void configure_pktio_pmr_composite(void);
-void test_pktio_pmr_composite_cos(void);
+void configure_pktio_default_cos(odp_bool_t enable_pktv);
+void test_pktio_default_cos(odp_bool_t enable_pktv);
+void configure_pktio_error_cos(odp_bool_t enable_pktv);
+void test_pktio_error_cos(odp_bool_t enable_pktv);
+void configure_cls_pmr_chain(odp_bool_t enable_pktv);
+void test_cls_pmr_chain(odp_bool_t enable_pktv);
+void configure_cos_with_l2_priority(odp_bool_t enable_pktv);
+void test_cos_with_l2_priority(odp_bool_t enable_pktv);
+void configure_pmr_cos(odp_bool_t enable_pktv);
+void test_pmr_cos(odp_bool_t enable_pktv);
+void configure_pktio_pmr_composite(odp_bool_t enable_pktv);
+void test_pktio_pmr_composite_cos(odp_bool_t enable_pktv);
int stop_pktio(odp_pktio_t pktio);
odp_cls_pmr_term_t find_first_supported_l3_pmr(void);
int set_first_supported_pmr_port(odp_packet_t pkt, uint16_t port);
diff --git a/test/validation/api/crypto/odp_crypto_test_inp.c b/test/validation/api/crypto/odp_crypto_test_inp.c
index b04c4d830..dd5813c61 100644
--- a/test/validation/api/crypto/odp_crypto_test_inp.c
+++ b/test/validation/api/crypto/odp_crypto_test_inp.c
@@ -213,6 +213,7 @@ static int alg_op(odp_packet_t pkt,
event = odp_queue_deq(suite_context.queue);
} while (event == ODP_EVENT_INVALID);
+ CU_ASSERT(odp_event_is_valid(event) == 1);
CU_ASSERT(ODP_EVENT_CRYPTO_COMPL == odp_event_type(event));
CU_ASSERT(ODP_EVENT_NO_SUBTYPE == odp_event_subtype(event));
CU_ASSERT(ODP_EVENT_CRYPTO_COMPL ==
diff --git a/test/validation/api/event/event.c b/test/validation/api/event/event.c
index fbd360350..1d6422f96 100644
--- a/test/validation/api/event/event.c
+++ b/test/validation/api/event/event.c
@@ -33,6 +33,7 @@ static void event_test_free(void)
for (i = 0; i < EVENT_BURST; i++) {
buf = odp_buffer_alloc(pool);
+ CU_ASSERT(odp_event_is_valid(odp_buffer_to_event(buf)) == 1);
CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
event[i] = odp_buffer_to_event(buf);
CU_ASSERT(odp_event_type(event[i]) == ODP_EVENT_BUFFER);
@@ -58,6 +59,7 @@ static void event_test_free(void)
for (i = 0; i < EVENT_BURST; i++) {
pkt = odp_packet_alloc(pool, EVENT_SIZE);
+ CU_ASSERT(odp_event_is_valid(odp_packet_to_event(pkt)) == 1);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
event[i] = odp_packet_to_event(pkt);
CU_ASSERT(odp_event_type(event[i]) == ODP_EVENT_PACKET);
@@ -83,6 +85,7 @@ static void event_test_free(void)
for (i = 0; i < EVENT_BURST; i++) {
tmo = odp_timeout_alloc(pool);
+ CU_ASSERT(odp_event_is_valid(odp_timeout_to_event(tmo)) == 1);
CU_ASSERT_FATAL(tmo != ODP_TIMEOUT_INVALID);
event[i] = odp_timeout_to_event(tmo);
CU_ASSERT(odp_event_type(event[i]) == ODP_EVENT_TIMEOUT);
@@ -384,12 +387,21 @@ static void event_test_filter_packet(void)
CU_ASSERT(odp_pool_destroy(pkt_pool) == 0);
}
+static void event_test_is_valid(void)
+{
+ CU_ASSERT(odp_event_is_valid(ODP_EVENT_INVALID) == 0);
+ CU_ASSERT(odp_buffer_is_valid(ODP_BUFFER_INVALID) == 0);
+ CU_ASSERT(odp_packet_is_valid(ODP_PACKET_INVALID) == 0);
+ CU_ASSERT(odp_packet_vector_valid(ODP_PACKET_VECTOR_INVALID) == 0);
+}
+
odp_testinfo_t event_suite[] = {
ODP_TEST_INFO(event_test_free),
ODP_TEST_INFO(event_test_free_multi),
ODP_TEST_INFO(event_test_free_multi_mixed),
ODP_TEST_INFO(event_test_type_multi),
ODP_TEST_INFO(event_test_filter_packet),
+ ODP_TEST_INFO(event_test_is_valid),
ODP_TEST_INFO_NULL,
};
diff --git a/test/validation/api/ipsec/ipsec.c b/test/validation/api/ipsec/ipsec.c
index 5ae429272..8f53ecc41 100644
--- a/test/validation/api/ipsec/ipsec.c
+++ b/test/validation/api/ipsec/ipsec.c
@@ -1,5 +1,6 @@
/* Copyright (c) 2017-2018, Linaro Limited
* Copyright (c) 2019-2020, Nokia
+ * Copyright (c) 2020, Marvell
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -308,12 +309,30 @@ int ipsec_check_esp_aes_cbc_128_null(void)
ODP_AUTH_ALG_NULL, 0);
}
+int ipsec_check_esp_aes_cbc_128_sha1(void)
+{
+ return ipsec_check_esp(ODP_CIPHER_ALG_AES_CBC, 128,
+ ODP_AUTH_ALG_SHA1_HMAC, 160);
+}
+
int ipsec_check_esp_aes_cbc_128_sha256(void)
{
return ipsec_check_esp(ODP_CIPHER_ALG_AES_CBC, 128,
ODP_AUTH_ALG_SHA256_HMAC, 256);
}
+int ipsec_check_esp_aes_cbc_128_sha384(void)
+{
+ return ipsec_check_esp(ODP_CIPHER_ALG_AES_CBC, 128,
+ ODP_AUTH_ALG_SHA384_HMAC, 384);
+}
+
+int ipsec_check_esp_aes_cbc_128_sha512(void)
+{
+ return ipsec_check_esp(ODP_CIPHER_ALG_AES_CBC, 128,
+ ODP_AUTH_ALG_SHA512_HMAC, 512);
+}
+
int ipsec_check_esp_aes_ctr_128_null(void)
{
return ipsec_check_esp(ODP_CIPHER_ALG_AES_CTR, 128,
@@ -326,12 +345,6 @@ int ipsec_check_esp_aes_gcm_128(void)
ODP_AUTH_ALG_AES_GCM, 0);
}
-int ipsec_check_esp_aes_gcm_192(void)
-{
- return ipsec_check_esp(ODP_CIPHER_ALG_AES_GCM, 192,
- ODP_AUTH_ALG_AES_GCM, 0);
-}
-
int ipsec_check_esp_aes_gcm_256(void)
{
return ipsec_check_esp(ODP_CIPHER_ALG_AES_GCM, 256,
@@ -374,24 +387,6 @@ int ipsec_check_esp_null_aes_gmac_256(void)
ODP_AUTH_ALG_AES_GMAC, 256);
}
-int ipsec_check_esp_aes_ccm_128(void)
-{
- return ipsec_check_esp(ODP_CIPHER_ALG_AES_CCM, 128,
- ODP_AUTH_ALG_AES_CCM, 0);
-}
-
-int ipsec_check_esp_aes_ccm_192(void)
-{
- return ipsec_check_esp(ODP_CIPHER_ALG_AES_CCM, 192,
- ODP_AUTH_ALG_AES_CCM, 0);
-}
-
-int ipsec_check_esp_aes_ccm_256(void)
-{
- return ipsec_check_esp(ODP_CIPHER_ALG_AES_CCM, 256,
- ODP_AUTH_ALG_AES_CCM, 0);
-}
-
int ipsec_check_esp_chacha20_poly1305(void)
{
return ipsec_check_esp(ODP_CIPHER_ALG_CHACHA20_POLY1305, 256,
@@ -463,6 +458,7 @@ void ipsec_sa_destroy(odp_ipsec_sa_t sa)
event = odp_queue_deq(suite_context.queue);
} while (event == ODP_EVENT_INVALID);
+ CU_ASSERT(odp_event_is_valid(event) == 1);
CU_ASSERT_EQUAL(ODP_EVENT_IPSEC_STATUS, odp_event_type(event));
ret = odp_ipsec_status(&status, event);
@@ -506,6 +502,28 @@ odp_packet_t ipsec_packet(const ipsec_test_packet *itp)
return pkt;
}
+static void check_l2_header(const ipsec_test_packet *itp, odp_packet_t pkt)
+{
+ uint32_t len = odp_packet_len(pkt);
+ uint8_t data[len];
+ uint32_t l2 = odp_packet_l2_offset(pkt);
+ uint32_t l3 = odp_packet_l3_offset(pkt);
+ uint32_t hdr_len;
+
+ if (!itp)
+ return;
+
+ hdr_len = itp->l3_offset - itp->l2_offset;
+
+ CU_ASSERT_FATAL(l2 != ODP_PACKET_OFFSET_INVALID);
+ CU_ASSERT_FATAL(l3 != ODP_PACKET_OFFSET_INVALID);
+ CU_ASSERT_EQUAL(l3 - l2, hdr_len);
+ odp_packet_copy_to_mem(pkt, 0, len, data);
+ CU_ASSERT_EQUAL(0, memcmp(data + l2,
+ itp->data + itp->l2_offset,
+ hdr_len));
+}
+
/*
* Compare packets, ignoring everything before the L3 header
*/
@@ -600,14 +618,14 @@ static int ipsec_send_in_one(const ipsec_test_part *part,
odp_packet_t *pkto)
{
odp_ipsec_in_param_t param;
- int num_out = part->out_pkt;
+ int num_out = part->num_pkt;
odp_packet_t pkt;
int i;
pkt = ipsec_packet(part->pkt_in);
memset(&param, 0, sizeof(param));
- if (!part->lookup) {
+ if (!part->flags.lookup) {
param.num_sa = 1;
param.sa = &sa;
} else {
@@ -616,10 +634,10 @@ static int ipsec_send_in_one(const ipsec_test_part *part,
}
if (ODP_IPSEC_OP_MODE_SYNC == suite_context.inbound_op_mode) {
- CU_ASSERT_EQUAL(part->out_pkt, odp_ipsec_in(&pkt, 1,
+ CU_ASSERT_EQUAL(part->num_pkt, odp_ipsec_in(&pkt, 1,
pkto, &num_out,
&param));
- CU_ASSERT_EQUAL(num_out, part->out_pkt);
+ CU_ASSERT_EQUAL(num_out, part->num_pkt);
CU_ASSERT(odp_packet_subtype(*pkto) == ODP_EVENT_PACKET_IPSEC);
} else if (ODP_IPSEC_OP_MODE_ASYNC == suite_context.inbound_op_mode) {
num_out = odp_ipsec_in_enq(&pkt, 1, &param);
@@ -635,6 +653,7 @@ static int ipsec_send_in_one(const ipsec_test_part *part,
event = odp_queue_deq(suite_context.queue);
} while (event == ODP_EVENT_INVALID);
+ CU_ASSERT(odp_event_is_valid(event) == 1);
CU_ASSERT_EQUAL(ODP_EVENT_PACKET,
odp_event_types(event, &subtype));
CU_ASSERT_EQUAL(ODP_EVENT_PACKET_IPSEC, subtype);
@@ -663,11 +682,12 @@ static int ipsec_send_in_one(const ipsec_test_part *part,
ev = odp_queue_deq(queue);
if (ODP_EVENT_INVALID != ev) {
+ CU_ASSERT(odp_event_is_valid(ev) == 1);
CU_ASSERT_EQUAL(ODP_EVENT_PACKET,
odp_event_types(ev, &subtype));
CU_ASSERT_EQUAL(ODP_EVENT_PACKET_BASIC,
subtype);
- CU_ASSERT(part->out[i].status.error.sa_lookup);
+ CU_ASSERT(part->in[i].status.error.sa_lookup);
pkto[i++] = odp_ipsec_packet_from_event(ev);
continue;
@@ -675,11 +695,12 @@ static int ipsec_send_in_one(const ipsec_test_part *part,
ev = odp_queue_deq(suite_context.queue);
if (ODP_EVENT_INVALID != ev) {
+ CU_ASSERT(odp_event_is_valid(ev) == 1);
CU_ASSERT_EQUAL(ODP_EVENT_PACKET,
odp_event_types(ev, &subtype));
CU_ASSERT_EQUAL(ODP_EVENT_PACKET_IPSEC,
subtype);
- CU_ASSERT(!part->out[i].status.error.sa_lookup);
+ CU_ASSERT(!part->in[i].status.error.sa_lookup);
pkto[i] = odp_ipsec_packet_from_event(ev);
CU_ASSERT(odp_packet_subtype(pkto[i]) ==
@@ -698,7 +719,7 @@ static int ipsec_send_out_one(const ipsec_test_part *part,
odp_packet_t *pkto)
{
odp_ipsec_out_param_t param;
- int num_out = part->out_pkt;
+ int num_out = part->num_pkt;
odp_packet_t pkt;
int i;
@@ -732,6 +753,7 @@ static int ipsec_send_out_one(const ipsec_test_part *part,
event = odp_queue_deq(suite_context.queue);
} while (event == ODP_EVENT_INVALID);
+ CU_ASSERT(odp_event_is_valid(event) == 1);
CU_ASSERT_EQUAL(ODP_EVENT_PACKET,
odp_event_types(event, &subtype));
CU_ASSERT_EQUAL(ODP_EVENT_PACKET_IPSEC, subtype);
@@ -745,10 +767,15 @@ static int ipsec_send_out_one(const ipsec_test_part *part,
uint8_t hdr[32];
odp_queue_t queue = ODP_QUEUE_INVALID;
- if (NULL != part->out[0].pkt_out) {
- hdr_len = part->out[0].pkt_out->l3_offset;
+ if (NULL != part->out[0].pkt_res) {
+ /*
+ * Take L2 header from the expected result.
+ * This way the ethertype will be correct for input
+ * processing even with IPv4-in-IPv6 tunnels, etc.
+ */
+ hdr_len = part->out[0].pkt_res->l3_offset;
CU_ASSERT_FATAL(hdr_len <= sizeof(hdr));
- memcpy(hdr, part->out[0].pkt_out->data, hdr_len);
+ memcpy(hdr, part->out[0].pkt_res->data, hdr_len);
} else if (part->pkt_in->l3_offset !=
ODP_PACKET_OFFSET_INVALID) {
hdr_len = part->pkt_in->l3_offset;
@@ -759,9 +786,39 @@ static int ipsec_send_out_one(const ipsec_test_part *part,
hdr_len = 14;
memset(hdr, 0xff, hdr_len);
}
+
+ if (part->flags.inline_hdr_in_packet) {
+ /*
+ * Provide the to-be-prepended header to ODP in
+ * the packet data. Use a nonzero L2 offset for better
+ * test coverage.
+ */
+ uint32_t new_l2_offset = 100;
+ uint32_t l3_offset = odp_packet_l3_offset(pkt);
+ uint32_t new_l3_offset = new_l2_offset + hdr_len;
+ uint32_t l4_offset = odp_packet_l4_offset(pkt);
+ int ret;
+
+ ret = odp_packet_trunc_head(&pkt, l3_offset,
+ NULL, NULL);
+ CU_ASSERT_FATAL(ret >= 0);
+ ret = odp_packet_extend_head(&pkt, new_l3_offset,
+ NULL, NULL);
+ CU_ASSERT_FATAL(ret >= 0);
+ odp_packet_l2_offset_set(pkt, new_l2_offset);
+ odp_packet_l3_offset_set(pkt, new_l3_offset);
+ odp_packet_copy_from_mem(pkt, new_l2_offset, hdr_len, hdr);
+ if (l4_offset != ODP_PACKET_OFFSET_INVALID)
+ odp_packet_l4_offset_set(pkt, new_l3_offset +
+ l4_offset - l3_offset);
+
+ inline_param.outer_hdr.ptr = NULL;
+ } else {
+ inline_param.outer_hdr.ptr = hdr;
+ }
+
inline_param.pktio = suite_context.pktio;
inline_param.tm_queue = ODP_TM_INVALID;
- inline_param.outer_hdr.ptr = hdr;
inline_param.outer_hdr.len = hdr_len;
CU_ASSERT_EQUAL(1, odp_ipsec_out_inline(&pkt, 1, &param,
@@ -777,6 +834,7 @@ static int ipsec_send_out_one(const ipsec_test_part *part,
ev = odp_queue_deq(queue);
if (ODP_EVENT_INVALID != ev) {
+ CU_ASSERT(odp_event_is_valid(ev) == 1);
CU_ASSERT_EQUAL(ODP_EVENT_PACKET,
odp_event_types(ev, &subtype));
CU_ASSERT_EQUAL(ODP_EVENT_PACKET_BASIC,
@@ -789,6 +847,7 @@ static int ipsec_send_out_one(const ipsec_test_part *part,
ev = odp_queue_deq(suite_context.queue);
if (ODP_EVENT_INVALID != ev) {
+ CU_ASSERT(odp_event_is_valid(ev) == 1);
CU_ASSERT_EQUAL(ODP_EVENT_PACKET,
odp_event_types(ev, &subtype));
CU_ASSERT_EQUAL(ODP_EVENT_PACKET_IPSEC,
@@ -807,9 +866,38 @@ static int ipsec_send_out_one(const ipsec_test_part *part,
return num_out;
}
+static void ipsec_pkt_proto_err_set(odp_packet_t pkt)
+{
+ uint32_t l3_off = odp_packet_l3_offset(pkt);
+ odph_ipv4hdr_t ip;
+
+ /* Simulate proto error by corrupting protocol field */
+
+ odp_packet_copy_to_mem(pkt, l3_off, sizeof(ip), &ip);
+
+ if (ip.proto == ODPH_IPPROTO_ESP)
+ ip.proto = ODPH_IPPROTO_AH;
+ else
+ ip.proto = ODPH_IPPROTO_ESP;
+
+ odp_packet_copy_from_mem(pkt, l3_off, sizeof(ip), &ip);
+}
+
+static void ipsec_pkt_auth_err_set(odp_packet_t pkt)
+{
+ uint32_t data, len;
+
+ /* Simulate auth error by corrupting ICV */
+
+ len = odp_packet_len(pkt);
+ odp_packet_copy_to_mem(pkt, len - sizeof(data), sizeof(data), &data);
+ data = ~data;
+ odp_packet_copy_from_mem(pkt, len - sizeof(data), sizeof(data), &data);
+}
+
void ipsec_check_in_one(const ipsec_test_part *part, odp_ipsec_sa_t sa)
{
- int num_out = part->out_pkt;
+ int num_out = part->num_pkt;
odp_packet_t pkto[num_out];
int i;
@@ -826,13 +914,20 @@ void ipsec_check_in_one(const ipsec_test_part *part, odp_ipsec_sa_t sa)
if (ODP_EVENT_PACKET_IPSEC !=
odp_event_subtype(odp_packet_to_event(pkto[i]))) {
/* Inline packet went through loop */
- CU_ASSERT_EQUAL(1, part->out[i].status.error.sa_lookup);
+ CU_ASSERT_EQUAL(1, part->in[i].status.error.sa_lookup);
} else {
CU_ASSERT_EQUAL(0, odp_ipsec_result(&result, pkto[i]));
- CU_ASSERT_EQUAL(part->out[i].status.error.all,
+ CU_ASSERT_EQUAL(part->in[i].status.error.all,
result.status.error.all);
- CU_ASSERT(!result.status.error.all ==
- !odp_packet_has_error(pkto[i]));
+
+ if (part->in[i].status.error.all != 0) {
+ odp_packet_free(pkto[i]);
+ return;
+ }
+
+ if (0 == result.status.error.all)
+ CU_ASSERT_EQUAL(0,
+ odp_packet_has_error(pkto[i]));
CU_ASSERT_EQUAL(suite_context.inbound_op_mode ==
ODP_IPSEC_OP_MODE_INLINE,
result.flag.inline_mode);
@@ -841,16 +936,16 @@ void ipsec_check_in_one(const ipsec_test_part *part, odp_ipsec_sa_t sa)
CU_ASSERT_EQUAL(IPSEC_SA_CTX,
odp_ipsec_sa_context(sa));
}
- ipsec_check_packet(part->out[i].pkt_out,
+ ipsec_check_packet(part->in[i].pkt_res,
pkto[i],
false);
- if (part->out[i].pkt_out != NULL &&
- part->out[i].l3_type != _ODP_PROTO_L3_TYPE_UNDEF)
- CU_ASSERT_EQUAL(part->out[i].l3_type,
+ if (part->in[i].pkt_res != NULL &&
+ part->in[i].l3_type != _ODP_PROTO_L3_TYPE_UNDEF)
+ CU_ASSERT_EQUAL(part->in[i].l3_type,
odp_packet_l3_type(pkto[i]));
- if (part->out[i].pkt_out != NULL &&
- part->out[i].l4_type != _ODP_PROTO_L4_TYPE_UNDEF)
- CU_ASSERT_EQUAL(part->out[i].l4_type,
+ if (part->in[i].pkt_res != NULL &&
+ part->in[i].l4_type != _ODP_PROTO_L4_TYPE_UNDEF)
+ CU_ASSERT_EQUAL(part->in[i].l4_type,
odp_packet_l4_type(pkto[i]));
odp_packet_free(pkto[i]);
}
@@ -858,7 +953,7 @@ void ipsec_check_in_one(const ipsec_test_part *part, odp_ipsec_sa_t sa)
void ipsec_check_out_one(const ipsec_test_part *part, odp_ipsec_sa_t sa)
{
- int num_out = part->out_pkt;
+ int num_out = part->num_pkt;
odp_packet_t pkto[num_out];
int i;
@@ -876,18 +971,21 @@ void ipsec_check_out_one(const ipsec_test_part *part, odp_ipsec_sa_t sa)
odp_event_subtype(odp_packet_to_event(pkto[i]))) {
/* Inline packet went through loop */
CU_ASSERT_EQUAL(0, part->out[i].status.error.all);
+ /* L2 header must match the requested one */
+ check_l2_header(part->out[i].pkt_res, pkto[i]);
} else {
/* IPsec packet */
CU_ASSERT_EQUAL(0, odp_ipsec_result(&result, pkto[i]));
CU_ASSERT_EQUAL(part->out[i].status.error.all,
result.status.error.all);
- CU_ASSERT(!result.status.error.all ==
- !odp_packet_has_error(pkto[i]));
+ if (0 == result.status.error.all)
+ CU_ASSERT_EQUAL(0,
+ odp_packet_has_error(pkto[i]));
CU_ASSERT_EQUAL(sa, result.sa);
CU_ASSERT_EQUAL(IPSEC_SA_CTX,
odp_ipsec_sa_context(sa));
}
- ipsec_check_packet(part->out[i].pkt_out,
+ ipsec_check_packet(part->out[i].pkt_res,
pkto[i],
true);
odp_packet_free(pkto[i]);
@@ -898,7 +996,7 @@ void ipsec_check_out_in_one(const ipsec_test_part *part,
odp_ipsec_sa_t sa,
odp_ipsec_sa_t sa_in)
{
- int num_out = part->out_pkt;
+ int num_out = part->num_pkt;
odp_packet_t pkto[num_out];
int i;
@@ -918,6 +1016,8 @@ void ipsec_check_out_in_one(const ipsec_test_part *part,
odp_event_subtype(odp_packet_to_event(pkto[i]))) {
/* Inline packet went through loop */
CU_ASSERT_EQUAL(0, part->out[i].status.error.all);
+ /* L2 header must match that of input packet */
+ check_l2_header(part->out[i].pkt_res, pkto[i]);
} else {
/* IPsec packet */
CU_ASSERT_EQUAL(0, odp_ipsec_result(&result, pkto[i]));
@@ -930,6 +1030,12 @@ void ipsec_check_out_in_one(const ipsec_test_part *part,
CU_ASSERT_FATAL(odp_packet_len(pkto[i]) <=
sizeof(pkt_in.data));
+ if (part->flags.stats == IPSEC_TEST_STATS_PROTO_ERR)
+ ipsec_pkt_proto_err_set(pkto[i]);
+
+ if (part->flags.stats == IPSEC_TEST_STATS_AUTH_ERR)
+ ipsec_pkt_auth_err_set(pkto[i]);
+
pkt_in.len = odp_packet_len(pkto[i]);
pkt_in.l2_offset = odp_packet_l2_offset(pkto[i]);
pkt_in.l3_offset = odp_packet_l3_offset(pkto[i]);
@@ -1093,6 +1199,7 @@ int ipsec_config(odp_instance_t ODP_UNUSED inst)
ipsec_config.inbound.default_queue = suite_context.queue;
ipsec_config.inbound.parse_level = ODP_PROTO_LAYER_ALL;
ipsec_config.inbound.chksums.all_chksum = ~0;
+ ipsec_config.stats_en = true;
if (ODP_IPSEC_OK != odp_ipsec_config(&ipsec_config))
return -1;
diff --git a/test/validation/api/ipsec/ipsec.h b/test/validation/api/ipsec/ipsec.h
index f36608ac5..a9213b420 100644
--- a/test/validation/api/ipsec/ipsec.h
+++ b/test/validation/api/ipsec/ipsec.h
@@ -1,4 +1,5 @@
/* Copyright (c) 2017-2018, Linaro Limited
+ * Copyright (c) 2020, Marvell
* Copyright (c) 2020, Nokia
* All rights reserved.
*
@@ -46,18 +47,39 @@ typedef struct {
#define _ODP_PROTO_L3_TYPE_UNDEF ((odp_proto_l3_type_t)-1)
#define _ODP_PROTO_L4_TYPE_UNDEF ((odp_proto_l4_type_t)-1)
+enum ipsec_test_stats {
+ IPSEC_TEST_STATS_NONE = 0,
+ IPSEC_TEST_STATS_SUCCESS,
+ IPSEC_TEST_STATS_PROTO_ERR,
+ IPSEC_TEST_STATS_AUTH_ERR,
+};
+
typedef struct {
- const ipsec_test_packet *pkt_in;
+ odp_bool_t display_algo;
odp_bool_t lookup;
+ odp_bool_t ah;
+ odp_bool_t inline_hdr_in_packet;
+ enum ipsec_test_stats stats;
+} ipsec_test_flags;
+
+typedef struct {
+ const ipsec_test_packet *pkt_in;
+ ipsec_test_flags flags;
int num_opt;
odp_ipsec_out_opt_t opt;
- int out_pkt;
+ int num_pkt;
struct {
odp_ipsec_op_status_t status;
- const ipsec_test_packet *pkt_out;
+ const ipsec_test_packet *pkt_res;
odp_proto_l3_type_t l3_type;
odp_proto_l4_type_t l4_type;
} out[1];
+ struct {
+ odp_ipsec_op_status_t status;
+ const ipsec_test_packet *pkt_res;
+ odp_proto_l3_type_t l3_type;
+ odp_proto_l4_type_t l4_type;
+ } in[1];
} ipsec_test_part;
void ipsec_sa_param_fill(odp_ipsec_sa_param_t *param,
@@ -92,10 +114,12 @@ int ipsec_check(odp_bool_t ah,
int ipsec_check_ah_sha256(void);
int ipsec_check_esp_null_sha256(void);
int ipsec_check_esp_aes_cbc_128_null(void);
+int ipsec_check_esp_aes_cbc_128_sha1(void);
int ipsec_check_esp_aes_cbc_128_sha256(void);
+int ipsec_check_esp_aes_cbc_128_sha384(void);
+int ipsec_check_esp_aes_cbc_128_sha512(void);
int ipsec_check_esp_aes_ctr_128_null(void);
int ipsec_check_esp_aes_gcm_128(void);
-int ipsec_check_esp_aes_gcm_192(void);
int ipsec_check_esp_aes_gcm_256(void);
int ipsec_check_ah_aes_gmac_128(void);
int ipsec_check_ah_aes_gmac_192(void);
@@ -103,9 +127,6 @@ int ipsec_check_ah_aes_gmac_256(void);
int ipsec_check_esp_null_aes_gmac_128(void);
int ipsec_check_esp_null_aes_gmac_192(void);
int ipsec_check_esp_null_aes_gmac_256(void);
-int ipsec_check_esp_aes_ccm_128(void);
-int ipsec_check_esp_aes_ccm_192(void);
-int ipsec_check_esp_aes_ccm_256(void);
int ipsec_check_esp_chacha20_poly1305(void);
#endif
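
The reworked ipsec_test_part above splits the expected results into separate out[] and in[] arrays, renames out_pkt to num_pkt and pkt_out to pkt_res, and gathers the loose booleans into an ipsec_test_flags block. A minimal sketch of how an inbound case is now declared, assuming a valid SA and the existing reference packets from test_vectors.h:

	/* Sketch only: illustrates the new ipsec_test_part layout. */
	ipsec_test_part test = {
		.pkt_in  = &pkt_ipv4_icmp_0_esp_null_sha256_1,	/* encoded input */
		.flags   = { .lookup = 1 },	/* look the SA up instead of passing it in the op */
		.num_pkt = 1,			/* replaces the old .out_pkt */
		.in = {				/* inbound expectations, one entry per packet */
			{ .status.error.all = 0,
			  .l3_type = ODP_PROTO_L3_TYPE_IPV4,
			  .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
			  .pkt_res = &pkt_ipv4_icmp_0 },	/* replaces .pkt_out */
		},
	};

	ipsec_check_in_one(&test, sa);
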
diff --git a/test/validation/api/ipsec/ipsec_test_in.c b/test/validation/api/ipsec/ipsec_test_in.c
index 0e6497887..f31f77244 100644
--- a/test/validation/api/ipsec/ipsec_test_in.c
+++ b/test/validation/api/ipsec/ipsec_test_in.c
@@ -1,4 +1,5 @@
/* Copyright (c) 2017-2018, Linaro Limited
+ * Copyright (c) 2020, Marvell
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -25,13 +26,13 @@ static void test_in_ipv4_ah_sha256(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_ah_sha256_1,
- .out_pkt = 1,
- .out = {
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV4,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
- .pkt_out = &pkt_ipv4_icmp_0 },
+ .pkt_res = &pkt_ipv4_icmp_0 },
},
};
@@ -60,13 +61,13 @@ static void test_in_ipv4_ah_sha256_tun_ipv4(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_ah_tun_ipv4_sha256_1,
- .out_pkt = 1,
- .out = {
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV4,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
- .pkt_out = &pkt_ipv4_icmp_0 },
+ .pkt_res = &pkt_ipv4_icmp_0 },
},
};
@@ -95,13 +96,13 @@ static void test_in_ipv4_ah_sha256_tun_ipv6(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_ah_tun_ipv6_sha256_1,
- .out_pkt = 1,
- .out = {
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV4,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
- .pkt_out = &pkt_ipv4_icmp_0 },
+ .pkt_res = &pkt_ipv4_icmp_0 },
},
};
@@ -127,14 +128,14 @@ static void test_in_ipv4_ah_sha256_tun_ipv4_notun(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_ah_tun_ipv4_sha256_1,
- .out_pkt = 1,
- .out = {
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV4,
/* It is L4_TYPE_IPV4 */
.l4_type = _ODP_PROTO_L4_TYPE_UNDEF,
- .pkt_out = &pkt_ipv4_icmp_0_ipip },
+ .pkt_res = &pkt_ipv4_icmp_0_ipip },
},
};
@@ -160,13 +161,13 @@ static void test_in_ipv4_esp_null_sha256(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_esp_null_sha256_1,
- .out_pkt = 1,
- .out = {
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV4,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
- .pkt_out = &pkt_ipv4_icmp_0 },
+ .pkt_res = &pkt_ipv4_icmp_0 },
},
};
@@ -192,13 +193,45 @@ static void test_in_ipv4_esp_aes_cbc_null(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_esp_aes_cbc_null_1,
- .out_pkt = 1,
- .out = {
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV4,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
- .pkt_out = &pkt_ipv4_icmp_0 },
+ .pkt_res = &pkt_ipv4_icmp_0 },
+ },
+ };
+
+ ipsec_check_in_one(&test, sa);
+
+ ipsec_sa_destroy(sa);
+}
+
+static void test_in_ipv4_esp_aes_cbc_sha1(void)
+{
+ odp_ipsec_sa_param_t param;
+ odp_ipsec_sa_t sa;
+
+ ipsec_sa_param_fill(&param,
+ true, false, 123, NULL,
+ ODP_CIPHER_ALG_AES_CBC, &key_a5_128,
+ ODP_AUTH_ALG_SHA1_HMAC, &key_5a_160,
+ NULL, NULL);
+
+ sa = odp_ipsec_sa_create(&param);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+ ipsec_test_part test = {
+ .pkt_in = &pkt_ipv4_icmp_0_esp_aes_cbc_sha1_1,
+ .num_pkt = 1,
+ .in = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+ .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+ .pkt_res = &pkt_ipv4_icmp_0 },
},
};
@@ -224,13 +257,77 @@ static void test_in_ipv4_esp_aes_cbc_sha256(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_esp_aes_cbc_sha256_1,
- .out_pkt = 1,
- .out = {
+ .num_pkt = 1,
+ .in = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+ .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+ .pkt_res = &pkt_ipv4_icmp_0 },
+ },
+ };
+
+ ipsec_check_in_one(&test, sa);
+
+ ipsec_sa_destroy(sa);
+}
+
+static void test_in_ipv4_esp_aes_cbc_sha384(void)
+{
+ odp_ipsec_sa_param_t param;
+ odp_ipsec_sa_t sa;
+
+ ipsec_sa_param_fill(&param,
+ true, false, 123, NULL,
+ ODP_CIPHER_ALG_AES_CBC, &key_a5_128,
+ ODP_AUTH_ALG_SHA384_HMAC, &key_5a_384,
+ NULL, NULL);
+
+ sa = odp_ipsec_sa_create(&param);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+ ipsec_test_part test = {
+ .pkt_in = &pkt_ipv4_icmp_0_esp_aes_cbc_sha384_1,
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV4,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
- .pkt_out = &pkt_ipv4_icmp_0 },
+ .pkt_res = &pkt_ipv4_icmp_0 },
+ },
+ };
+
+ ipsec_check_in_one(&test, sa);
+
+ ipsec_sa_destroy(sa);
+}
+
+static void test_in_ipv4_esp_aes_cbc_sha512(void)
+{
+ odp_ipsec_sa_param_t param;
+ odp_ipsec_sa_t sa;
+
+ ipsec_sa_param_fill(&param,
+ true, false, 123, NULL,
+ ODP_CIPHER_ALG_AES_CBC, &key_a5_128,
+ ODP_AUTH_ALG_SHA512_HMAC, &key_5a_512,
+ NULL, NULL);
+
+ sa = odp_ipsec_sa_create(&param);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+
+ ipsec_test_part test = {
+ .pkt_in = &pkt_ipv4_icmp_0_esp_aes_cbc_sha512_1,
+ .num_pkt = 1,
+ .in = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+ .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+ .pkt_res = &pkt_ipv4_icmp_0 },
},
};
@@ -256,13 +353,13 @@ static void test_in_ipv4_esp_aes_ctr_null(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_esp_aes_ctr_null_1,
- .out_pkt = 1,
- .out = {
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV4,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
- .pkt_out = &pkt_ipv4_icmp_0 },
+ .pkt_res = &pkt_ipv4_icmp_0 },
},
};
@@ -288,14 +385,16 @@ static void test_in_ipv4_ah_sha256_lookup(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_ah_sha256_1,
- .lookup = 1,
- .out_pkt = 1,
- .out = {
+ .flags = {
+ .lookup = 1,
+ },
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV4,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
- .pkt_out = &pkt_ipv4_icmp_0 },
+ .pkt_res = &pkt_ipv4_icmp_0 },
},
};
@@ -321,14 +420,16 @@ static void test_in_ipv4_esp_null_sha256_lookup(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_esp_null_sha256_1,
- .lookup = 1,
- .out_pkt = 1,
- .out = {
+ .flags = {
+ .lookup = 1,
+ },
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV4,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
- .pkt_out = &pkt_ipv4_icmp_0 },
+ .pkt_res = &pkt_ipv4_icmp_0 },
},
};
@@ -357,13 +458,13 @@ static void test_in_ipv4_esp_null_sha256_tun_ipv4(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_esp_tun_ipv4_null_sha256_1,
- .out_pkt = 1,
- .out = {
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV4,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
- .pkt_out = &pkt_ipv4_icmp_0 },
+ .pkt_res = &pkt_ipv4_icmp_0 },
},
};
@@ -392,13 +493,13 @@ static void test_in_ipv4_esp_null_sha256_tun_ipv6(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_esp_tun_ipv6_null_sha256_1,
- .out_pkt = 1,
- .out = {
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV4,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
- .pkt_out = &pkt_ipv4_icmp_0 },
+ .pkt_res = &pkt_ipv4_icmp_0 },
},
};
@@ -425,13 +526,13 @@ static void test_in_ipv4_esp_udp_null_sha256(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_esp_udp_null_sha256_1,
- .out_pkt = 1,
- .out = {
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV4,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
- .pkt_out = &pkt_ipv4_icmp_0 },
+ .pkt_res = &pkt_ipv4_icmp_0 },
},
};
@@ -458,14 +559,16 @@ static void test_in_ipv4_esp_udp_null_sha256_lookup(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_esp_udp_null_sha256_1,
- .lookup = 1,
- .out_pkt = 1,
- .out = {
+ .flags = {
+ .lookup = 1,
+ },
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV4,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
- .pkt_out = &pkt_ipv4_icmp_0 },
+ .pkt_res = &pkt_ipv4_icmp_0 },
},
};
@@ -492,25 +595,25 @@ static void test_in_ipv4_ah_sha256_noreplay(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_ah_sha256_1,
- .out_pkt = 1,
- .out = {
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV4,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
- .pkt_out = &pkt_ipv4_icmp_0 },
+ .pkt_res = &pkt_ipv4_icmp_0 },
},
};
ipsec_test_part test_1235 = {
.pkt_in = &pkt_ipv4_icmp_0_ah_sha256_1235,
- .out_pkt = 1,
- .out = {
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV4,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
- .pkt_out = &pkt_ipv4_icmp_0 },
+ .pkt_res = &pkt_ipv4_icmp_0 },
},
};
@@ -543,29 +646,29 @@ static void test_in_ipv4_ah_sha256_replay(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_ah_sha256_1,
- .out_pkt = 1,
- .out = {
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV4,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
- .pkt_out = &pkt_ipv4_icmp_0 },
+ .pkt_res = &pkt_ipv4_icmp_0 },
},
};
test_repl.pkt_in = &pkt_ipv4_icmp_0_ah_sha256_1;
- test_repl.out_pkt = 1;
- test_repl.out[0].status.error.antireplay = 1;
+ test_repl.num_pkt = 1;
+ test_repl.in[0].status.error.antireplay = 1;
ipsec_test_part test_1235 = {
.pkt_in = &pkt_ipv4_icmp_0_ah_sha256_1235,
- .out_pkt = 1,
- .out = {
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV4,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
- .pkt_out = &pkt_ipv4_icmp_0 },
+ .pkt_res = &pkt_ipv4_icmp_0 },
},
};
@@ -595,25 +698,25 @@ static void test_in_ipv4_esp_null_sha256_noreplay(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_esp_null_sha256_1,
- .out_pkt = 1,
- .out = {
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV4,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
- .pkt_out = &pkt_ipv4_icmp_0 },
+ .pkt_res = &pkt_ipv4_icmp_0 },
},
};
ipsec_test_part test_1235 = {
.pkt_in = &pkt_ipv4_icmp_0_esp_null_sha256_1235,
- .out_pkt = 1,
- .out = {
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV4,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
- .pkt_out = &pkt_ipv4_icmp_0 },
+ .pkt_res = &pkt_ipv4_icmp_0 },
},
};
@@ -646,29 +749,43 @@ static void test_in_ipv4_esp_null_sha256_replay(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_esp_null_sha256_1,
- .out_pkt = 1,
+ .num_pkt = 1,
.out = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV4,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
- .pkt_out = &pkt_ipv4_icmp_0 },
+ .pkt_res = &pkt_ipv4_icmp_0 },
+ },
+ .in = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+ .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+ .pkt_res = &pkt_ipv4_icmp_0 },
},
};
test_repl.pkt_in = &pkt_ipv4_icmp_0_esp_null_sha256_1;
- test_repl.out_pkt = 1;
- test_repl.out[0].status.error.antireplay = 1;
+ test_repl.num_pkt = 1;
+ test_repl.in[0].status.error.antireplay = 1;
ipsec_test_part test_1235 = {
.pkt_in = &pkt_ipv4_icmp_0_esp_null_sha256_1235,
- .out_pkt = 1,
+ .num_pkt = 1,
.out = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV4,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
- .pkt_out = &pkt_ipv4_icmp_0 },
+ .pkt_res = &pkt_ipv4_icmp_0 },
+ },
+ .in = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+ .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+ .pkt_res = &pkt_ipv4_icmp_0 },
},
};
@@ -704,8 +821,8 @@ static void test_in_ipv4_ah_esp_pkt(void)
CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
test.pkt_in = &pkt_ipv4_icmp_0_esp_null_sha256_1;
- test.out_pkt = 1;
- test.out[0].status.error.proto = 1;
+ test.num_pkt = 1;
+ test.in[0].status.error.proto = 1;
ipsec_check_in_one(&test, sa);
@@ -736,8 +853,8 @@ static void test_in_ipv4_esp_ah_pkt(void)
CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
test.pkt_in = &pkt_ipv4_icmp_0_ah_sha256_1;
- test.out_pkt = 1;
- test.out[0].status.error.proto = 1;
+ test.num_pkt = 1;
+ test.in[0].status.error.proto = 1;
ipsec_check_in_one(&test, sa);
@@ -763,9 +880,9 @@ static void test_in_ipv4_ah_esp_pkt_lookup(void)
CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
test.pkt_in = &pkt_ipv4_icmp_0_esp_null_sha256_1;
- test.lookup = 1;
- test.out_pkt = 1;
- test.out[0].status.error.sa_lookup = 1;
+ test.flags.lookup = 1;
+ test.num_pkt = 1;
+ test.in[0].status.error.sa_lookup = 1;
ipsec_check_in_one(&test, ODP_IPSEC_SA_INVALID);
@@ -791,9 +908,9 @@ static void test_in_ipv4_esp_ah_pkt_lookup(void)
CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
test.pkt_in = &pkt_ipv4_icmp_0_ah_sha256_1;
- test.lookup = 1;
- test.out_pkt = 1;
- test.out[0].status.error.sa_lookup = 1;
+ test.flags.lookup = 1;
+ test.num_pkt = 1;
+ test.in[0].status.error.sa_lookup = 1;
ipsec_check_in_one(&test, ODP_IPSEC_SA_INVALID);
@@ -819,8 +936,8 @@ static void test_in_ipv4_ah_sha256_bad1(void)
CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
test.pkt_in = &pkt_ipv4_icmp_0_ah_sha256_1_bad1;
- test.out_pkt = 1;
- test.out[0].status.error.auth = 1;
+ test.num_pkt = 1;
+ test.in[0].status.error.auth = 1;
ipsec_check_in_one(&test, sa);
@@ -846,8 +963,8 @@ static void test_in_ipv4_ah_sha256_bad2(void)
CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
test.pkt_in = &pkt_ipv4_icmp_0_ah_sha256_1_bad2;
- test.out_pkt = 1;
- test.out[0].status.error.auth = 1;
+ test.num_pkt = 1;
+ test.in[0].status.error.auth = 1;
ipsec_check_in_one(&test, sa);
@@ -873,8 +990,8 @@ static void test_in_ipv4_esp_null_sha256_bad1(void)
CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
test.pkt_in = &pkt_ipv4_icmp_0_esp_null_sha256_1_bad1;
- test.out_pkt = 1;
- test.out[0].status.error.auth = 1;
+ test.num_pkt = 1;
+ test.in[0].status.error.auth = 1;
ipsec_check_in_one(&test, sa);
@@ -898,13 +1015,13 @@ static void test_in_ipv4_rfc3602_5_esp(void)
ipsec_test_part test = {
.pkt_in = &pkt_rfc3602_5_esp,
- .out_pkt = 1,
- .out = {
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV4,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
- .pkt_out = &pkt_rfc3602_5 },
+ .pkt_res = &pkt_rfc3602_5 },
},
};
@@ -930,13 +1047,13 @@ static void test_in_ipv4_rfc3602_6_esp(void)
ipsec_test_part test = {
.pkt_in = &pkt_rfc3602_6_esp,
- .out_pkt = 1,
- .out = {
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV4,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
- .pkt_out = &pkt_rfc3602_6 },
+ .pkt_res = &pkt_rfc3602_6 },
},
};
@@ -965,13 +1082,13 @@ static void test_in_ipv4_rfc3602_7_esp(void)
ipsec_test_part test = {
.pkt_in = &pkt_rfc3602_7_esp,
- .out_pkt = 1,
- .out = {
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV4,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
- .pkt_out = &pkt_rfc3602_7 },
+ .pkt_res = &pkt_rfc3602_7 },
},
};
@@ -1000,13 +1117,13 @@ static void test_in_ipv4_rfc3602_8_esp(void)
ipsec_test_part test = {
.pkt_in = &pkt_rfc3602_8_esp,
- .out_pkt = 1,
- .out = {
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV4,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
- .pkt_out = &pkt_rfc3602_8 },
+ .pkt_res = &pkt_rfc3602_8 },
},
};
@@ -1035,13 +1152,13 @@ static void test_in_ipv4_mcgrew_gcm_2_esp(void)
ipsec_test_part test = {
.pkt_in = &pkt_mcgrew_gcm_test_2_esp,
- .out_pkt = 1,
- .out = {
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV4,
.l4_type = ODP_PROTO_L4_TYPE_UDP,
- .pkt_out = &pkt_mcgrew_gcm_test_2},
+ .pkt_res = &pkt_mcgrew_gcm_test_2},
},
};
@@ -1070,13 +1187,13 @@ static void test_in_ipv4_mcgrew_gcm_3_esp(void)
ipsec_test_part test = {
.pkt_in = &pkt_mcgrew_gcm_test_3_esp,
- .out_pkt = 1,
- .out = {
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV4,
.l4_type = _ODP_PROTO_L4_TYPE_UNDEF,
- .pkt_out = &pkt_mcgrew_gcm_test_3},
+ .pkt_res = &pkt_mcgrew_gcm_test_3},
},
};
@@ -1105,13 +1222,13 @@ static void test_in_ipv4_mcgrew_gcm_4_esp(void)
ipsec_test_part test = {
.pkt_in = &pkt_mcgrew_gcm_test_4_esp,
- .out_pkt = 1,
- .out = {
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV4,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
- .pkt_out = &pkt_mcgrew_gcm_test_4},
+ .pkt_res = &pkt_mcgrew_gcm_test_4},
},
};
@@ -1145,13 +1262,13 @@ static void test_in_ipv4_mcgrew_gcm_12_esp(void)
ipsec_test_part test = {
.pkt_in = &pkt_mcgrew_gcm_test_12_esp,
- .out_pkt = 1,
- .out = {
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_NONE,
.l4_type = _ODP_PROTO_L4_TYPE_UNDEF,
- .pkt_out = &pkt_mcgrew_gcm_test_12},
+ .pkt_res = &pkt_mcgrew_gcm_test_12},
},
};
@@ -1177,13 +1294,13 @@ static void test_in_ipv4_mcgrew_gcm_12_esp_notun(void)
ipsec_test_part test = {
.pkt_in = &pkt_mcgrew_gcm_test_12_esp,
- .out_pkt = 1,
- .out = {
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV4,
.l4_type = ODP_PROTO_L4_TYPE_NO_NEXT,
- .pkt_out = &pkt_mcgrew_gcm_test_12_notun },
+ .pkt_res = &pkt_mcgrew_gcm_test_12_notun },
},
};
@@ -1212,13 +1329,13 @@ static void test_in_ipv4_mcgrew_gcm_15_esp(void)
ipsec_test_part test = {
.pkt_in = &pkt_mcgrew_gcm_test_15_esp,
- .out_pkt = 1,
- .out = {
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV4,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
- .pkt_out = &pkt_mcgrew_gcm_test_15},
+ .pkt_res = &pkt_mcgrew_gcm_test_15},
},
};
@@ -1247,13 +1364,13 @@ static void test_in_ipv4_rfc7634_chacha(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_rfc7634_esp,
- .out_pkt = 1,
- .out = {
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV4,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
- .pkt_out = &pkt_ipv4_rfc7634},
+ .pkt_res = &pkt_ipv4_rfc7634},
},
};
@@ -1279,13 +1396,13 @@ static void test_in_ipv4_ah_aes_gmac_128(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_ah_aes_gmac_128_1,
- .out_pkt = 1,
- .out = {
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV4,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
- .pkt_out = &pkt_ipv4_icmp_0 },
+ .pkt_res = &pkt_ipv4_icmp_0 },
},
};
@@ -1311,13 +1428,13 @@ static void test_in_ipv4_esp_null_aes_gmac_128(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_esp_null_aes_gmac_128_1,
- .out_pkt = 1,
- .out = {
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV4,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
- .pkt_out = &pkt_ipv4_icmp_0 },
+ .pkt_res = &pkt_ipv4_icmp_0 },
},
};
@@ -1343,13 +1460,13 @@ static void test_in_ipv6_ah_sha256(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv6_icmp_0_ah_sha256_1,
- .out_pkt = 1,
- .out = {
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV6,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV6,
- .pkt_out = &pkt_ipv6_icmp_0 },
+ .pkt_res = &pkt_ipv6_icmp_0 },
},
};
@@ -1378,13 +1495,13 @@ static void test_in_ipv6_ah_sha256_tun_ipv4(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv6_icmp_0_ah_tun_ipv4_sha256_1,
- .out_pkt = 1,
- .out = {
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV6,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV6,
- .pkt_out = &pkt_ipv6_icmp_0 },
+ .pkt_res = &pkt_ipv6_icmp_0 },
},
};
@@ -1413,13 +1530,13 @@ static void test_in_ipv6_ah_sha256_tun_ipv6(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv6_icmp_0_ah_tun_ipv6_sha256_1,
- .out_pkt = 1,
- .out = {
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV6,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV6,
- .pkt_out = &pkt_ipv6_icmp_0 },
+ .pkt_res = &pkt_ipv6_icmp_0 },
},
};
@@ -1445,13 +1562,13 @@ static void test_in_ipv6_esp_null_sha256(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv6_icmp_0_esp_null_sha256_1,
- .out_pkt = 1,
- .out = {
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV6,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV6,
- .pkt_out = &pkt_ipv6_icmp_0 },
+ .pkt_res = &pkt_ipv6_icmp_0 },
},
};
@@ -1480,13 +1597,13 @@ static void test_in_ipv6_esp_null_sha256_tun_ipv4(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv6_icmp_0_esp_tun_ipv4_null_sha256_1,
- .out_pkt = 1,
- .out = {
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV6,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV6,
- .pkt_out = &pkt_ipv6_icmp_0 },
+ .pkt_res = &pkt_ipv6_icmp_0 },
},
};
@@ -1515,13 +1632,13 @@ static void test_in_ipv6_esp_null_sha256_tun_ipv6(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv6_icmp_0_esp_tun_ipv6_null_sha256_1,
- .out_pkt = 1,
- .out = {
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV6,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV6,
- .pkt_out = &pkt_ipv6_icmp_0 },
+ .pkt_res = &pkt_ipv6_icmp_0 },
},
};
@@ -1548,13 +1665,13 @@ static void test_in_ipv6_esp_udp_null_sha256(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv6_icmp_0_esp_udp_null_sha256_1,
- .out_pkt = 1,
- .out = {
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV6,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV6,
- .pkt_out = &pkt_ipv6_icmp_0 },
+ .pkt_res = &pkt_ipv6_icmp_0 },
},
};
@@ -1581,14 +1698,16 @@ static void test_in_ipv6_esp_udp_null_sha256_lookup(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv6_icmp_0_esp_udp_null_sha256_1,
- .lookup = 1,
- .out_pkt = 1,
- .out = {
+ .flags = {
+ .lookup = 1,
+ },
+ .num_pkt = 1,
+ .in = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV6,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV6,
- .pkt_out = &pkt_ipv6_icmp_0 },
+ .pkt_res = &pkt_ipv6_icmp_0 },
},
};
@@ -1597,6 +1716,31 @@ static void test_in_ipv6_esp_udp_null_sha256_lookup(void)
ipsec_sa_destroy(sa);
}
+static void test_ipsec_print(void)
+{
+ odp_ipsec_print();
+}
+
+static void test_ipsec_sa_print(void)
+{
+ odp_ipsec_sa_param_t param_in;
+ odp_ipsec_sa_t in_sa;
+
+ ipsec_sa_param_fill(&param_in,
+ true, false, 123, NULL,
+ ODP_CIPHER_ALG_AES_CBC, &key_a5_128,
+ ODP_AUTH_ALG_SHA1_HMAC, &key_5a_160,
+ NULL, NULL);
+
+ in_sa = odp_ipsec_sa_create(&param_in);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, in_sa);
+
+ odp_ipsec_sa_print(in_sa);
+
+ ipsec_sa_destroy(in_sa);
+}
+
static void ipsec_test_capability(void)
{
odp_ipsec_capability_t capa;
@@ -1642,8 +1786,14 @@ odp_testinfo_t ipsec_in_suite[] = {
ipsec_check_esp_null_sha256),
ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_esp_aes_cbc_null,
ipsec_check_esp_aes_cbc_128_null),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_esp_aes_cbc_sha1,
+ ipsec_check_esp_aes_cbc_128_sha1),
ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_esp_aes_cbc_sha256,
ipsec_check_esp_aes_cbc_128_sha256),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_esp_aes_cbc_sha384,
+ ipsec_check_esp_aes_cbc_128_sha384),
+ ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_esp_aes_cbc_sha512,
+ ipsec_check_esp_aes_cbc_128_sha512),
ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_esp_aes_ctr_null,
ipsec_check_esp_aes_ctr_128_null),
ODP_TEST_INFO_CONDITIONAL(test_in_ipv4_ah_sha256_lookup,
@@ -1700,5 +1850,8 @@ odp_testinfo_t ipsec_in_suite[] = {
ipsec_check_esp_null_sha256),
ODP_TEST_INFO_CONDITIONAL(test_in_ipv6_esp_udp_null_sha256_lookup,
ipsec_check_esp_null_sha256),
+ ODP_TEST_INFO(test_ipsec_print),
+ ODP_TEST_INFO_CONDITIONAL(test_ipsec_sa_print,
+ ipsec_check_esp_aes_cbc_128_sha1),
ODP_TEST_INFO_NULL,
};
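
Each of the new AES-CBC inbound cases (SHA-1, SHA-384 and SHA-512 HMAC) added above follows the same shape: fill the parameters for an inbound transport-mode ESP SA with SPI 123, create the SA, decode one known-good packet and expect the plain ICMP reference. Condensed, using the SHA-384 case as the example (the test part is built as in the earlier sketch):

	odp_ipsec_sa_param_t param;
	odp_ipsec_sa_t sa;

	ipsec_sa_param_fill(&param,
			    true, false, 123, NULL,	/* inbound, ESP, SPI 123, no tunnel */
			    ODP_CIPHER_ALG_AES_CBC, &key_a5_128,
			    ODP_AUTH_ALG_SHA384_HMAC, &key_5a_384,
			    NULL, NULL);

	sa = odp_ipsec_sa_create(&param);
	CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);

	/* decode pkt_ipv4_icmp_0_esp_aes_cbc_sha384_1, expect pkt_ipv4_icmp_0 */
	ipsec_check_in_one(&test, sa);

	ipsec_sa_destroy(sa);
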
diff --git a/test/validation/api/ipsec/ipsec_test_out.c b/test/validation/api/ipsec/ipsec_test_out.c
index 9a3ba2bd3..b4065d667 100644
--- a/test/validation/api/ipsec/ipsec_test_out.c
+++ b/test/validation/api/ipsec/ipsec_test_out.c
@@ -1,4 +1,5 @@
/* Copyright (c) 2017-2018, Linaro Limited
+ * Copyright (c) 2020, Marvell
* Copyright (c) 2020, Nokia
* All rights reserved.
*
@@ -55,6 +56,54 @@ static struct auth_param auths[] = {
ALG(ODP_AUTH_ALG_AES_XCBC_MAC, &key_5a_128, NULL)
};
+struct cipher_auth_comb_param {
+ struct cipher_param cipher;
+ struct auth_param auth;
+};
+
+static struct cipher_auth_comb_param cipher_auth_comb[] = {
+ {
+ ALG(ODP_CIPHER_ALG_AES_GCM, &key_a5_128, &key_mcgrew_gcm_salt_2),
+ ALG(ODP_AUTH_ALG_AES_GCM, NULL, NULL),
+ },
+ {
+ ALG(ODP_CIPHER_ALG_AES_GCM, &key_a5_192, &key_mcgrew_gcm_salt_2),
+ ALG(ODP_AUTH_ALG_AES_GCM, NULL, NULL),
+ },
+ {
+ ALG(ODP_CIPHER_ALG_AES_GCM, &key_a5_256, &key_mcgrew_gcm_salt_2),
+ ALG(ODP_AUTH_ALG_AES_GCM, NULL, NULL),
+ },
+ {
+ ALG(ODP_CIPHER_ALG_NULL, NULL, NULL),
+ ALG(ODP_AUTH_ALG_AES_GMAC, &key_a5_128, &key_mcgrew_gcm_salt_2),
+ },
+ {
+ ALG(ODP_CIPHER_ALG_NULL, NULL, NULL),
+ ALG(ODP_AUTH_ALG_AES_GMAC, &key_a5_192, &key_mcgrew_gcm_salt_2),
+ },
+ {
+ ALG(ODP_CIPHER_ALG_NULL, NULL, NULL),
+ ALG(ODP_AUTH_ALG_AES_GMAC, &key_a5_256, &key_mcgrew_gcm_salt_2),
+ },
+ {
+ ALG(ODP_CIPHER_ALG_AES_CCM, &key_a5_128, &key_3byte_salt),
+ ALG(ODP_AUTH_ALG_AES_CCM, NULL, NULL),
+ },
+ {
+ ALG(ODP_CIPHER_ALG_AES_CCM, &key_a5_192, &key_3byte_salt),
+ ALG(ODP_AUTH_ALG_AES_CCM, NULL, NULL),
+ },
+ {
+ ALG(ODP_CIPHER_ALG_AES_CCM, &key_a5_256, &key_3byte_salt),
+ ALG(ODP_AUTH_ALG_AES_CCM, NULL, NULL),
+ },
+ {
+ ALG(ODP_CIPHER_ALG_CHACHA20_POLY1305, &key_rfc7634, &key_rfc7634_salt),
+ ALG(ODP_AUTH_ALG_CHACHA20_POLY1305, NULL, NULL),
+ },
+};
+
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
static void test_out_ipv4_ah_sha256(void)
@@ -74,11 +123,11 @@ static void test_out_ipv4_ah_sha256(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0,
- .out_pkt = 1,
+ .num_pkt = 1,
.out = {
{ .status.warn.all = 0,
.status.error.all = 0,
- .pkt_out = &pkt_ipv4_icmp_0_ah_sha256_1 },
+ .pkt_res = &pkt_ipv4_icmp_0_ah_sha256_1 },
},
};
@@ -118,11 +167,11 @@ static void test_out_ipv4_ah_sha256_tun_ipv4(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0,
- .out_pkt = 1,
+ .num_pkt = 1,
.out = {
{ .status.warn.all = 0,
.status.error.all = 0,
- .pkt_out = &pkt_ipv4_icmp_0_ah_tun_ipv4_sha256_1 },
+ .pkt_res = &pkt_ipv4_icmp_0_ah_tun_ipv4_sha256_1 },
},
};
@@ -163,11 +212,11 @@ static void test_out_ipv4_ah_sha256_tun_ipv6(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0,
- .out_pkt = 1,
+ .num_pkt = 1,
.out = {
{ .status.warn.all = 0,
.status.error.all = 0,
- .pkt_out = &pkt_ipv4_icmp_0_ah_tun_ipv6_sha256_1 },
+ .pkt_res = &pkt_ipv4_icmp_0_ah_tun_ipv6_sha256_1 },
},
};
@@ -193,11 +242,11 @@ static void test_out_ipv4_esp_null_sha256(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0,
- .out_pkt = 1,
+ .num_pkt = 1,
.out = {
{ .status.warn.all = 0,
.status.error.all = 0,
- .pkt_out = &pkt_ipv4_icmp_0_esp_null_sha256_1 },
+ .pkt_res = &pkt_ipv4_icmp_0_esp_null_sha256_1 },
},
};
@@ -232,11 +281,11 @@ static void test_out_ipv4_esp_null_sha256_tun_ipv4(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0,
- .out_pkt = 1,
+ .num_pkt = 1,
.out = {
{ .status.warn.all = 0,
.status.error.all = 0,
- .pkt_out =
+ .pkt_res =
&pkt_ipv4_icmp_0_esp_tun_ipv4_null_sha256_1 },
},
};
@@ -278,11 +327,11 @@ static void test_out_ipv4_esp_null_sha256_tun_ipv6(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0,
- .out_pkt = 1,
+ .num_pkt = 1,
.out = {
{ .status.warn.all = 0,
.status.error.all = 0,
- .pkt_out =
+ .pkt_res =
&pkt_ipv4_icmp_0_esp_tun_ipv6_null_sha256_1 },
},
};
@@ -292,7 +341,53 @@ static void test_out_ipv4_esp_null_sha256_tun_ipv6(void)
ipsec_sa_destroy(sa);
}
-static void test_out_in_common(odp_bool_t ah,
+static void test_ipsec_stats_zero_assert(odp_ipsec_stats_t *stats)
+{
+ CU_ASSERT_EQUAL(stats->success, 0);
+ CU_ASSERT_EQUAL(stats->proto_err, 0);
+ CU_ASSERT_EQUAL(stats->auth_err, 0);
+ CU_ASSERT_EQUAL(stats->antireplay_err, 0);
+ CU_ASSERT_EQUAL(stats->alg_err, 0);
+ CU_ASSERT_EQUAL(stats->mtu_err, 0);
+ CU_ASSERT_EQUAL(stats->hard_exp_bytes_err, 0);
+ CU_ASSERT_EQUAL(stats->hard_exp_pkts_err, 0);
+}
+
+static void test_ipsec_stats_test_assert(odp_ipsec_stats_t *stats,
+ enum ipsec_test_stats test)
+{
+ if (test == IPSEC_TEST_STATS_SUCCESS) {
+ /* Braces needed by CU macro */
+ CU_ASSERT_EQUAL(stats->success, 1);
+ } else {
+ /* Braces needed by CU macro */
+ CU_ASSERT_EQUAL(stats->success, 0);
+ }
+
+ if (test == IPSEC_TEST_STATS_PROTO_ERR) {
+ /* Braces needed by CU macro */
+ CU_ASSERT_EQUAL(stats->proto_err, 1);
+ } else {
+ /* Braces needed by CU macro */
+ CU_ASSERT_EQUAL(stats->proto_err, 0);
+ }
+
+ if (test == IPSEC_TEST_STATS_AUTH_ERR) {
+ /* Braces needed by CU macro */
+ CU_ASSERT_EQUAL(stats->auth_err, 1);
+ } else {
+ /* Braces needed by CU macro */
+ CU_ASSERT_EQUAL(stats->auth_err, 0);
+ }
+
+ CU_ASSERT_EQUAL(stats->antireplay_err, 0);
+ CU_ASSERT_EQUAL(stats->alg_err, 0);
+ CU_ASSERT_EQUAL(stats->mtu_err, 0);
+ CU_ASSERT_EQUAL(stats->hard_exp_bytes_err, 0);
+ CU_ASSERT_EQUAL(stats->hard_exp_pkts_err, 0);
+}
+
+static void test_out_in_common(ipsec_test_flags *flags,
odp_cipher_alg_t cipher,
const odp_crypto_key_t *cipher_key,
odp_auth_alg_t auth,
@@ -301,11 +396,19 @@ static void test_out_in_common(odp_bool_t ah,
const odp_crypto_key_t *auth_key_extra)
{
odp_ipsec_sa_param_t param;
+ odp_ipsec_stats_t stats;
odp_ipsec_sa_t sa_out;
odp_ipsec_sa_t sa_in;
+ CU_ASSERT_NOT_EQUAL_FATAL(flags, NULL);
+
+ /* ICV won't be generated for NULL AUTH */
+ if ((flags->stats == IPSEC_TEST_STATS_AUTH_ERR) &&
+ (auth == ODP_AUTH_ALG_NULL))
+ return;
+
ipsec_sa_param_fill(&param,
- false, ah, 123, NULL,
+ false, flags->ah, 123, NULL,
cipher, cipher_key,
auth, auth_key,
cipher_key_extra, auth_key_extra);
@@ -315,7 +418,7 @@ static void test_out_in_common(odp_bool_t ah,
CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa_out);
ipsec_sa_param_fill(&param,
- true, ah, 123, NULL,
+ true, flags->ah, 123, NULL,
cipher, cipher_key,
auth, auth_key,
cipher_key_extra, auth_key_extra);
@@ -326,24 +429,62 @@ static void test_out_in_common(odp_bool_t ah,
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0,
- .out_pkt = 1,
+ .num_pkt = 1,
.out = {
{ .status.warn.all = 0,
.status.error.all = 0,
.l3_type = ODP_PROTO_L3_TYPE_IPV4,
.l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
- .pkt_out = &pkt_ipv4_icmp_0 },
+ .pkt_res = &pkt_ipv4_icmp_0 },
+ },
+ .in = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+ .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+ .pkt_res = &pkt_ipv4_icmp_0 },
},
};
+ test.flags = *flags;
+
+ if (flags->stats == IPSEC_TEST_STATS_PROTO_ERR)
+ test.in[0].status.error.proto = 1;
+ if (flags->stats == IPSEC_TEST_STATS_AUTH_ERR)
+ test.in[0].status.error.auth = 1;
+
+ if (flags->stats != IPSEC_TEST_STATS_NONE) {
+ CU_ASSERT_EQUAL(odp_ipsec_stats(sa_out, &stats), 0);
+ test_ipsec_stats_zero_assert(&stats);
+ CU_ASSERT_EQUAL(odp_ipsec_stats(sa_in, &stats), 0);
+ test_ipsec_stats_zero_assert(&stats);
+ }
+
ipsec_check_out_in_one(&test, sa_out, sa_in);
+ if (flags->stats == IPSEC_TEST_STATS_SUCCESS) {
+ CU_ASSERT_EQUAL(odp_ipsec_stats(sa_in, &stats), 0);
+ test_ipsec_stats_test_assert(&stats, flags->stats);
+ }
+
+ if (flags->stats != IPSEC_TEST_STATS_NONE) {
+ /* All stats tests have outbound operation success and inbound
+ * varying.
+ */
+ CU_ASSERT_EQUAL(odp_ipsec_stats(sa_out, &stats), 0);
+ test_ipsec_stats_test_assert(&stats, IPSEC_TEST_STATS_SUCCESS);
+
+ CU_ASSERT_EQUAL(odp_ipsec_stats(sa_in, &stats), 0);
+ test_ipsec_stats_test_assert(&stats, flags->stats);
+ }
+
ipsec_sa_destroy(sa_out);
ipsec_sa_destroy(sa_in);
}
static void test_esp_out_in(struct cipher_param *cipher,
- struct auth_param *auth)
+ struct auth_param *auth,
+ ipsec_test_flags *flags)
{
int cipher_keylen = cipher->key ? 8 * cipher->key->length : 0;
int auth_keylen = auth->key ? 8 * auth->key->length : 0;
@@ -352,43 +493,75 @@ static void test_esp_out_in(struct cipher_param *cipher,
auth->algo, auth_keylen) != ODP_TEST_ACTIVE)
return;
- printf("\n %s (keylen %d) %s (keylen %d) ",
- cipher->name, cipher_keylen, auth->name, auth_keylen);
+ if (flags->display_algo)
+ printf("\n %s (keylen %d) %s (keylen %d) ",
+ cipher->name, cipher_keylen, auth->name, auth_keylen);
- test_out_in_common(false /* ESP */,
- cipher->algo, cipher->key,
+ test_out_in_common(flags, cipher->algo, cipher->key,
auth->algo, auth->key,
cipher->key_extra, auth->key_extra);
}
-/*
- * Test ESP output followed by input with all combinations of normal
- * mode (not combined mode) ciphers and integrity algorithms.
- *
- * Combined mode algorithms are tested one-by-one in their own test cases.
- */
-static void test_esp_out_in_all(void)
+static void test_esp_out_in_all(ipsec_test_flags *flags)
{
uint32_t c;
uint32_t a;
+ flags->ah = false;
+
for (c = 0; c < ARRAY_SIZE(ciphers); c++)
for (a = 0; a < ARRAY_SIZE(auths); a++)
- test_esp_out_in(&ciphers[c], &auths[a]);
+ test_esp_out_in(&ciphers[c], &auths[a], flags);
+
+ for (c = 0; c < ARRAY_SIZE(cipher_auth_comb); c++)
+ test_esp_out_in(&cipher_auth_comb[c].cipher,
+ &cipher_auth_comb[c].auth,
+ flags);
+}
+
+/*
+ * Test ESP output followed by input with all combinations of ciphers and
+ * integrity algorithms.
+ */
+static void test_esp_out_in_all_basic(void)
+{
+ ipsec_test_flags flags;
+
+ memset(&flags, 0, sizeof(flags));
+ flags.display_algo = true;
+
+ test_esp_out_in_all(&flags);
+
printf("\n ");
}
+static int is_out_mode_inline(void)
+{
+ return suite_context.outbound_op_mode == ODP_IPSEC_OP_MODE_INLINE;
+}
+
+static void test_esp_out_in_all_hdr_in_packet(void)
+{
+ ipsec_test_flags flags = {
+ .inline_hdr_in_packet = true,
+ };
+ test_esp_out_in_all(&flags);
+}
+
static void test_ah_out_in(struct auth_param *auth)
{
int auth_keylen = auth->key ? 8 * auth->key->length : 0;
+ ipsec_test_flags flags;
if (ipsec_check_ah(auth->algo, auth_keylen) != ODP_TEST_ACTIVE)
return;
printf("\n %s (keylen %d) ", auth->name, auth_keylen);
- test_out_in_common(true /* AH */,
- ODP_CIPHER_ALG_NULL, NULL,
+ memset(&flags, 0, sizeof(flags));
+ flags.ah = true;
+
+ test_out_in_common(&flags, ODP_CIPHER_ALG_NULL, NULL,
auth->algo, auth->key,
NULL, auth->key_extra);
}
@@ -420,11 +593,11 @@ static void test_out_ipv4_esp_udp_null_sha256(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0,
- .out_pkt = 1,
+ .num_pkt = 1,
.out = {
{ .status.warn.all = 0,
.status.error.all = 0,
- .pkt_out = &pkt_ipv4_icmp_0_esp_udp_null_sha256_1 },
+ .pkt_res = &pkt_ipv4_icmp_0_esp_udp_null_sha256_1 },
},
};
@@ -433,110 +606,42 @@ static void test_out_ipv4_esp_udp_null_sha256(void)
ipsec_sa_destroy(sa);
}
-static void test_out_ipv4_esp_aes_gcm128(void)
-{
- test_out_in_common(false,
- ODP_CIPHER_ALG_AES_GCM, &key_a5_128,
- ODP_AUTH_ALG_AES_GCM, NULL,
- &key_mcgrew_gcm_salt_2, NULL);
-}
-
-static void test_out_ipv4_esp_aes_gcm192(void)
+static void test_out_ipv4_ah_aes_gmac_128(void)
{
- test_out_in_common(false,
- ODP_CIPHER_ALG_AES_GCM, &key_a5_192,
- ODP_AUTH_ALG_AES_GCM, NULL,
- &key_mcgrew_gcm_salt_2, NULL);
-}
+ ipsec_test_flags flags;
-static void test_out_ipv4_esp_aes_gcm256(void)
-{
- test_out_in_common(false,
- ODP_CIPHER_ALG_AES_GCM, &key_a5_256,
- ODP_AUTH_ALG_AES_GCM, NULL,
- &key_mcgrew_gcm_salt_2, NULL);
-}
+ memset(&flags, 0, sizeof(flags));
+ flags.ah = true;
-static void test_out_ipv4_ah_aes_gmac_128(void)
-{
- test_out_in_common(true,
- ODP_CIPHER_ALG_NULL, NULL,
+ test_out_in_common(&flags, ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_AES_GMAC, &key_a5_128,
NULL, &key_mcgrew_gcm_salt_2);
}
static void test_out_ipv4_ah_aes_gmac_192(void)
{
- test_out_in_common(true,
- ODP_CIPHER_ALG_NULL, NULL,
+ ipsec_test_flags flags;
+
+ memset(&flags, 0, sizeof(flags));
+ flags.ah = true;
+
+ test_out_in_common(&flags, ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_AES_GMAC, &key_a5_192,
NULL, &key_mcgrew_gcm_salt_2);
}
static void test_out_ipv4_ah_aes_gmac_256(void)
{
- test_out_in_common(true,
- ODP_CIPHER_ALG_NULL, NULL,
- ODP_AUTH_ALG_AES_GMAC, &key_a5_256,
- NULL, &key_mcgrew_gcm_salt_2);
-}
-
-static void test_out_ipv4_esp_null_aes_gmac_128(void)
-{
- test_out_in_common(false,
- ODP_CIPHER_ALG_NULL, NULL,
- ODP_AUTH_ALG_AES_GMAC, &key_a5_128,
- NULL, &key_mcgrew_gcm_salt_2);
-}
+ ipsec_test_flags flags;
-static void test_out_ipv4_esp_null_aes_gmac_192(void)
-{
- test_out_in_common(false,
- ODP_CIPHER_ALG_NULL, NULL,
- ODP_AUTH_ALG_AES_GMAC, &key_a5_192,
- NULL, &key_mcgrew_gcm_salt_2);
-}
+ memset(&flags, 0, sizeof(flags));
+ flags.ah = true;
-static void test_out_ipv4_esp_null_aes_gmac_256(void)
-{
- test_out_in_common(false,
- ODP_CIPHER_ALG_NULL, NULL,
+ test_out_in_common(&flags, ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_AES_GMAC, &key_a5_256,
NULL, &key_mcgrew_gcm_salt_2);
}
-static void test_out_ipv4_esp_aes_ccm128(void)
-{
- test_out_in_common(false,
- ODP_CIPHER_ALG_AES_CCM, &key_a5_128,
- ODP_AUTH_ALG_AES_CCM, NULL,
- &key_3byte_salt, NULL);
-}
-
-static void test_out_ipv4_esp_aes_ccm192(void)
-{
- test_out_in_common(false,
- ODP_CIPHER_ALG_AES_CCM, &key_a5_192,
- ODP_AUTH_ALG_AES_CCM, NULL,
- &key_3byte_salt, NULL);
-}
-
-static void test_out_ipv4_esp_aes_ccm256(void)
-{
- test_out_in_common(false,
- ODP_CIPHER_ALG_AES_CCM, &key_a5_256,
- ODP_AUTH_ALG_AES_CCM, NULL,
- &key_3byte_salt, NULL);
-}
-
-static void test_out_ipv4_esp_chacha20_poly1305(void)
-{
- test_out_in_common(false,
- ODP_CIPHER_ALG_CHACHA20_POLY1305, &key_rfc7634,
- ODP_AUTH_ALG_CHACHA20_POLY1305, NULL,
- &key_rfc7634_salt, NULL);
-}
-
static void test_out_ipv4_ah_sha256_frag_check(void)
{
odp_ipsec_sa_param_t param;
@@ -560,15 +665,15 @@ static void test_out_ipv4_ah_sha256_frag_check(void)
CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
test.pkt_in = &pkt_ipv4_icmp_0;
- test.out_pkt = 1;
+ test.num_pkt = 1;
test.out[0].status.error.mtu = 1;
test2.pkt_in = &pkt_ipv4_icmp_0;
test2.num_opt = 1;
test2.opt.flag.frag_mode = 1;
test2.opt.frag_mode = ODP_IPSEC_FRAG_DISABLED;
- test2.out_pkt = 1;
- test2.out[0].pkt_out = &pkt_ipv4_icmp_0_ah_sha256_1;
+ test2.num_pkt = 1;
+ test2.out[0].pkt_res = &pkt_ipv4_icmp_0_ah_sha256_1;
ipsec_check_out_one(&test, sa);
@@ -598,16 +703,16 @@ static void test_out_ipv4_ah_sha256_frag_check_2(void)
CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
test.pkt_in = &pkt_ipv4_icmp_0;
- test.out_pkt = 1;
+ test.num_pkt = 1;
test.out[0].status.error.mtu = 1;
ipsec_test_part test2 = {
.pkt_in = &pkt_ipv4_icmp_0,
- .out_pkt = 1,
+ .num_pkt = 1,
.out = {
{ .status.warn.all = 0,
.status.error.all = 0,
- .pkt_out = &pkt_ipv4_icmp_0_ah_sha256_1 },
+ .pkt_res = &pkt_ipv4_icmp_0_ah_sha256_1 },
},
};
@@ -644,15 +749,15 @@ static void test_out_ipv4_esp_null_sha256_frag_check(void)
CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
test.pkt_in = &pkt_ipv4_icmp_0;
- test.out_pkt = 1;
+ test.num_pkt = 1;
test.out[0].status.error.mtu = 1;
test2.pkt_in = &pkt_ipv4_icmp_0;
test2.num_opt = 1;
test2.opt.flag.frag_mode = 1;
test2.opt.frag_mode = ODP_IPSEC_FRAG_DISABLED;
- test2.out_pkt = 1;
- test2.out[0].pkt_out = &pkt_ipv4_icmp_0_esp_null_sha256_1;
+ test2.num_pkt = 1;
+ test2.out[0].pkt_res = &pkt_ipv4_icmp_0_esp_null_sha256_1;
ipsec_check_out_one(&test, sa);
@@ -683,16 +788,16 @@ static void test_out_ipv4_esp_null_sha256_frag_check_2(void)
CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
test.pkt_in = &pkt_ipv4_icmp_0;
- test.out_pkt = 1;
+ test.num_pkt = 1;
test.out[0].status.error.mtu = 1;
ipsec_test_part test2 = {
.pkt_in = &pkt_ipv4_icmp_0,
- .out_pkt = 1,
+ .num_pkt = 1,
.out = {
{ .status.warn.all = 0,
.status.error.all = 0,
- .pkt_out = &pkt_ipv4_icmp_0_esp_null_sha256_1 },
+ .pkt_res = &pkt_ipv4_icmp_0_esp_null_sha256_1 },
},
};
@@ -722,11 +827,11 @@ static void test_out_ipv6_ah_sha256(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv6_icmp_0,
- .out_pkt = 1,
+ .num_pkt = 1,
.out = {
{ .status.warn.all = 0,
.status.error.all = 0,
- .pkt_out = &pkt_ipv6_icmp_0_ah_sha256_1 },
+ .pkt_res = &pkt_ipv6_icmp_0_ah_sha256_1 },
},
};
@@ -761,11 +866,11 @@ static void test_out_ipv6_ah_sha256_tun_ipv4(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv6_icmp_0,
- .out_pkt = 1,
+ .num_pkt = 1,
.out = {
{ .status.warn.all = 0,
.status.error.all = 0,
- .pkt_out = &pkt_ipv6_icmp_0_ah_tun_ipv4_sha256_1 },
+ .pkt_res = &pkt_ipv6_icmp_0_ah_tun_ipv4_sha256_1 },
},
};
@@ -806,11 +911,11 @@ static void test_out_ipv6_ah_sha256_tun_ipv6(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv6_icmp_0,
- .out_pkt = 1,
+ .num_pkt = 1,
.out = {
{ .status.warn.all = 0,
.status.error.all = 0,
- .pkt_out = &pkt_ipv6_icmp_0_ah_tun_ipv6_sha256_1 },
+ .pkt_res = &pkt_ipv6_icmp_0_ah_tun_ipv6_sha256_1 },
},
};
@@ -836,11 +941,11 @@ static void test_out_ipv6_esp_null_sha256(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv6_icmp_0,
- .out_pkt = 1,
+ .num_pkt = 1,
.out = {
{ .status.warn.all = 0,
.status.error.all = 0,
- .pkt_out = &pkt_ipv6_icmp_0_esp_null_sha256_1 },
+ .pkt_res = &pkt_ipv6_icmp_0_esp_null_sha256_1 },
},
};
@@ -875,11 +980,11 @@ static void test_out_ipv6_esp_null_sha256_tun_ipv4(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv6_icmp_0,
- .out_pkt = 1,
+ .num_pkt = 1,
.out = {
{ .status.warn.all = 0,
.status.error.all = 0,
- .pkt_out =
+ .pkt_res =
&pkt_ipv6_icmp_0_esp_tun_ipv4_null_sha256_1 },
},
};
@@ -921,11 +1026,11 @@ static void test_out_ipv6_esp_null_sha256_tun_ipv6(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv6_icmp_0,
- .out_pkt = 1,
+ .num_pkt = 1,
.out = {
{ .status.warn.all = 0,
.status.error.all = 0,
- .pkt_out =
+ .pkt_res =
&pkt_ipv6_icmp_0_esp_tun_ipv6_null_sha256_1 },
},
};
@@ -953,11 +1058,11 @@ static void test_out_ipv6_esp_udp_null_sha256(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv6_icmp_0,
- .out_pkt = 1,
+ .num_pkt = 1,
.out = {
{ .status.warn.all = 0,
.status.error.all = 0,
- .pkt_out = &pkt_ipv6_icmp_0_esp_udp_null_sha256_1 },
+ .pkt_res = &pkt_ipv6_icmp_0_esp_udp_null_sha256_1 },
},
};
@@ -1015,7 +1120,7 @@ static void test_out_dummy_esp_null_sha256_tun_ipv4(void)
test.num_opt = 1;
test.opt .flag.tfc_dummy = 1;
test.opt.tfc_pad_len = 16;
- test.out_pkt = 1;
+ test.num_pkt = 1;
test.out[0].l3_type = ODP_PROTO_L3_TYPE_IPV4;
test.out[0].l4_type = ODP_PROTO_L4_TYPE_NO_NEXT;
@@ -1023,7 +1128,7 @@ static void test_out_dummy_esp_null_sha256_tun_ipv4(void)
test_empty.num_opt = 1;
test_empty.opt.flag.tfc_dummy = 1;
test_empty.opt.tfc_pad_len = 16;
- test_empty.out_pkt = 1;
+ test_empty.num_pkt = 1;
test_empty.out[0].l3_type = ODP_PROTO_L3_TYPE_IPV4;
test_empty.out[0].l4_type = ODP_PROTO_L4_TYPE_NO_NEXT;
@@ -1089,7 +1194,7 @@ static void test_out_dummy_esp_null_sha256_tun_ipv6(void)
test.num_opt = 1;
test.opt .flag.tfc_dummy = 1;
test.opt.tfc_pad_len = 16;
- test.out_pkt = 1;
+ test.num_pkt = 1;
test.out[0].l3_type = ODP_PROTO_L3_TYPE_IPV4;
test.out[0].l4_type = ODP_PROTO_L4_TYPE_NO_NEXT;
@@ -1097,7 +1202,7 @@ static void test_out_dummy_esp_null_sha256_tun_ipv6(void)
test_empty.num_opt = 1;
test_empty.opt.flag.tfc_dummy = 1;
test_empty.opt.tfc_pad_len = 16;
- test_empty.out_pkt = 1;
+ test_empty.num_pkt = 1;
test_empty.out[0].l3_type = ODP_PROTO_L3_TYPE_IPV4;
test_empty.out[0].l4_type = ODP_PROTO_L4_TYPE_NO_NEXT;
@@ -1125,11 +1230,11 @@ static void test_out_ipv4_udp_esp_null_sha256(void)
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_udp,
- .out_pkt = 1,
+ .num_pkt = 1,
.out = {
{ .status.warn.all = 0,
.status.error.all = 0,
- .pkt_out = &pkt_ipv4_udp_esp_null_sha256 },
+ .pkt_res = &pkt_ipv4_udp_esp_null_sha256 },
},
};
@@ -1138,6 +1243,127 @@ static void test_out_ipv4_udp_esp_null_sha256(void)
ipsec_sa_destroy(sa);
}
+static void test_sa_info(void)
+{
+ uint32_t src = IPV4ADDR(10, 0, 111, 2);
+ uint32_t dst = IPV4ADDR(10, 0, 222, 2);
+ odp_ipsec_tunnel_param_t tunnel_out;
+ odp_ipsec_tunnel_param_t tunnel_in;
+ odp_ipsec_sa_param_t param_out;
+ odp_ipsec_sa_param_t param_in;
+ odp_ipsec_sa_info_t info_out;
+ odp_ipsec_sa_info_t info_in;
+ odp_ipsec_capability_t capa;
+ odp_ipsec_sa_t sa_out;
+ odp_ipsec_sa_t sa_in;
+
+ CU_ASSERT_EQUAL(0, odp_ipsec_capability(&capa));
+
+ memset(&tunnel_out, 0, sizeof(tunnel_out));
+ memset(&tunnel_in, 0, sizeof(tunnel_in));
+
+ tunnel_out.type = ODP_IPSEC_TUNNEL_IPV4;
+ tunnel_out.ipv4.src_addr = &src;
+ tunnel_out.ipv4.dst_addr = &dst;
+
+ ipsec_sa_param_fill(&param_out,
+ false, false, 123, &tunnel_out,
+ ODP_CIPHER_ALG_AES_CBC, &key_a5_128,
+ ODP_AUTH_ALG_SHA1_HMAC, &key_5a_160,
+ NULL, NULL);
+
+ sa_out = odp_ipsec_sa_create(&param_out);
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa_out);
+
+ ipsec_sa_param_fill(&param_in,
+ true, false, 123, &tunnel_in,
+ ODP_CIPHER_ALG_AES_CBC, &key_a5_128,
+ ODP_AUTH_ALG_SHA1_HMAC, &key_5a_160,
+ NULL, NULL);
+
+ param_in.inbound.antireplay_ws = 32;
+ sa_in = odp_ipsec_sa_create(&param_in);
+
+ memset(&info_out, 0, sizeof(info_out));
+ CU_ASSERT_EQUAL_FATAL(0, odp_ipsec_sa_info(sa_out, &info_out));
+
+ CU_ASSERT_EQUAL(info_out.param.dir, param_out.dir);
+ CU_ASSERT_EQUAL(info_out.param.proto, param_out.proto);
+ CU_ASSERT_EQUAL(info_out.param.mode, param_out.mode);
+
+ CU_ASSERT_EQUAL(info_out.param.crypto.cipher_alg,
+ param_out.crypto.cipher_alg);
+ CU_ASSERT_EQUAL(info_out.param.crypto.auth_alg,
+ param_out.crypto.auth_alg);
+ CU_ASSERT_EQUAL(info_out.param.opt.udp_encap, param_out.opt.udp_encap);
+ CU_ASSERT_EQUAL(info_out.param.spi, param_out.spi);
+ CU_ASSERT_EQUAL(info_out.param.opt.esn, param_out.opt.esn);
+ CU_ASSERT_EQUAL(info_out.param.opt.udp_encap, param_out.opt.udp_encap);
+ CU_ASSERT_EQUAL(info_out.param.opt.copy_dscp, param_out.opt.copy_dscp);
+ CU_ASSERT_EQUAL(info_out.param.opt.copy_flabel, param_out.opt.copy_flabel);
+ CU_ASSERT_EQUAL(info_out.param.opt.copy_df, param_out.opt.copy_df);
+
+ CU_ASSERT_EQUAL(ODP_IPSEC_MODE_TUNNEL, info_out.param.mode);
+
+ CU_ASSERT_EQUAL(info_out.param.outbound.tunnel.type,
+ param_out.outbound.tunnel.type);
+ CU_ASSERT_EQUAL(info_out.param.outbound.tunnel.ipv4.dscp,
+ param_out.outbound.tunnel.ipv4.dscp);
+ CU_ASSERT_EQUAL(info_out.param.outbound.tunnel.ipv4.df,
+ param_out.outbound.tunnel.ipv4.df);
+ CU_ASSERT_NOT_EQUAL_FATAL(NULL,
+ info_out.param.outbound.tunnel.ipv4.src_addr);
+ CU_ASSERT_EQUAL(0, memcmp(info_out.param.outbound.tunnel.ipv4.src_addr,
+ param_out.outbound.tunnel.ipv4.src_addr,
+ ODP_IPV4_ADDR_SIZE));
+ CU_ASSERT_NOT_EQUAL_FATAL(NULL,
+ info_out.param.outbound.tunnel.ipv4.dst_addr);
+ CU_ASSERT_EQUAL(0, memcmp(info_out.param.outbound.tunnel.ipv4.dst_addr,
+ param_out.outbound.tunnel.ipv4.dst_addr,
+ ODP_IPV4_ADDR_SIZE));
+
+ CU_ASSERT_EQUAL(info_out.param.lifetime.soft_limit.bytes,
+ param_out.lifetime.soft_limit.bytes);
+ CU_ASSERT_EQUAL(info_out.param.lifetime.hard_limit.bytes,
+ param_out.lifetime.hard_limit.bytes);
+ CU_ASSERT_EQUAL(info_out.param.lifetime.soft_limit.packets,
+ param_out.lifetime.soft_limit.packets);
+ CU_ASSERT_EQUAL(info_out.param.lifetime.hard_limit.packets,
+ param_out.lifetime.hard_limit.packets);
+
+ CU_ASSERT_EQUAL(0, info_out.outbound.seq_num);
+
+ memset(&info_in, 0, sizeof(info_in));
+ CU_ASSERT_EQUAL_FATAL(0, odp_ipsec_sa_info(sa_in, &info_in));
+ CU_ASSERT_EQUAL(0, info_in.inbound.antireplay_window_top);
+
+ ipsec_test_part test = {
+ .pkt_in = &pkt_ipv4_icmp_0,
+ .num_pkt = 1,
+ .out = {
+ { .status.warn.all = 0,
+ .status.error.all = 0,
+ .l3_type = ODP_PROTO_L3_TYPE_IPV4,
+ .l4_type = ODP_PROTO_L4_TYPE_ICMPV4,
+ .pkt_res = &pkt_ipv4_icmp_0 },
+ },
+ };
+
+ ipsec_check_out_in_one(&test, sa_out, sa_in);
+
+ memset(&info_out, 0, sizeof(info_out));
+ CU_ASSERT_EQUAL_FATAL(0, odp_ipsec_sa_info(sa_out, &info_out));
+ CU_ASSERT_EQUAL(1, info_out.outbound.seq_num);
+
+ memset(&info_in, 0, sizeof(info_in));
+ CU_ASSERT_EQUAL_FATAL(0, odp_ipsec_sa_info(sa_in, &info_in));
+ CU_ASSERT_EQUAL(1, info_in.inbound.antireplay_window_top);
+
+ ipsec_sa_destroy(sa_out);
+ ipsec_sa_destroy(sa_in);
+}
+
static void ipsec_test_capability(void)
{
odp_ipsec_capability_t capa;
@@ -1145,6 +1371,27 @@ static void ipsec_test_capability(void)
CU_ASSERT(odp_ipsec_capability(&capa) == 0);
}
+static void test_ipsec_stats(void)
+{
+ ipsec_test_flags flags;
+
+ memset(&flags, 0, sizeof(flags));
+
+ printf("\n Stats : success");
+ flags.stats = IPSEC_TEST_STATS_SUCCESS;
+ test_esp_out_in_all(&flags);
+
+ printf("\n Stats : proto err");
+ flags.stats = IPSEC_TEST_STATS_PROTO_ERR;
+ test_esp_out_in_all(&flags);
+
+ printf("\n Stats : auth err");
+ flags.stats = IPSEC_TEST_STATS_AUTH_ERR;
+ test_esp_out_in_all(&flags);
+
+ printf("\n ");
+}
+
odp_testinfo_t ipsec_out_suite[] = {
ODP_TEST_INFO(ipsec_test_capability),
ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_ah_sha256,
@@ -1161,32 +1408,12 @@ odp_testinfo_t ipsec_out_suite[] = {
ipsec_check_esp_null_sha256),
ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_esp_udp_null_sha256,
ipsec_check_esp_null_sha256),
- ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_esp_aes_gcm128,
- ipsec_check_esp_aes_gcm_128),
- ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_esp_aes_gcm192,
- ipsec_check_esp_aes_gcm_192),
- ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_esp_aes_gcm256,
- ipsec_check_esp_aes_gcm_256),
ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_ah_aes_gmac_128,
ipsec_check_ah_aes_gmac_128),
ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_ah_aes_gmac_192,
ipsec_check_ah_aes_gmac_192),
ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_ah_aes_gmac_256,
ipsec_check_ah_aes_gmac_256),
- ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_esp_null_aes_gmac_128,
- ipsec_check_esp_null_aes_gmac_128),
- ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_esp_null_aes_gmac_192,
- ipsec_check_esp_null_aes_gmac_192),
- ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_esp_null_aes_gmac_256,
- ipsec_check_esp_null_aes_gmac_256),
- ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_esp_aes_ccm128,
- ipsec_check_esp_aes_ccm_128),
- ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_esp_aes_ccm192,
- ipsec_check_esp_aes_ccm_192),
- ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_esp_aes_ccm256,
- ipsec_check_esp_aes_ccm_256),
- ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_esp_chacha20_poly1305,
- ipsec_check_esp_chacha20_poly1305),
ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_ah_sha256_frag_check,
ipsec_check_ah_sha256),
ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_ah_sha256_frag_check_2,
@@ -1215,7 +1442,12 @@ odp_testinfo_t ipsec_out_suite[] = {
ipsec_check_esp_null_sha256),
ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_udp_esp_null_sha256,
ipsec_check_esp_null_sha256),
- ODP_TEST_INFO(test_esp_out_in_all),
+ ODP_TEST_INFO_CONDITIONAL(test_sa_info,
+ ipsec_check_esp_aes_cbc_128_sha1),
+ ODP_TEST_INFO(test_esp_out_in_all_basic),
+ ODP_TEST_INFO_CONDITIONAL(test_esp_out_in_all_hdr_in_packet,
+ is_out_mode_inline),
ODP_TEST_INFO(test_ah_out_in_all),
+ ODP_TEST_INFO(test_ipsec_stats),
ODP_TEST_INFO_NULL,
};
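
The new statistics coverage depends on stats_en being set in odp_ipsec_config() (see the ipsec.c hunk earlier in this patch) and reads the per-SA counters around a single out-then-in round trip. An outline of the verification flow inside test_out_in_common(), with the assert helpers and error injection as added above:

	odp_ipsec_stats_t stats;

	/* counters start from zero on freshly created SAs */
	CU_ASSERT_EQUAL(odp_ipsec_stats(sa_out, &stats), 0);
	test_ipsec_stats_zero_assert(&stats);
	CU_ASSERT_EQUAL(odp_ipsec_stats(sa_in, &stats), 0);
	test_ipsec_stats_zero_assert(&stats);

	/* one packet out through sa_out and back in through sa_in; for the
	 * PROTO_ERR/AUTH_ERR cases the packet is corrupted in between */
	ipsec_check_out_in_one(&test, sa_out, sa_in);

	/* outbound side always succeeds, inbound side matches the injected case */
	CU_ASSERT_EQUAL(odp_ipsec_stats(sa_out, &stats), 0);
	test_ipsec_stats_test_assert(&stats, IPSEC_TEST_STATS_SUCCESS);
	CU_ASSERT_EQUAL(odp_ipsec_stats(sa_in, &stats), 0);
	test_ipsec_stats_test_assert(&stats, flags->stats);
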
diff --git a/test/validation/api/ipsec/test_vectors.h b/test/validation/api/ipsec/test_vectors.h
index f16956d1f..136794c75 100644
--- a/test/validation/api/ipsec/test_vectors.h
+++ b/test/validation/api/ipsec/test_vectors.h
@@ -726,6 +726,51 @@ static const ODP_UNUSED ipsec_test_packet pkt_ipv4_icmp_0_esp_aes_cbc_null_1 = {
};
static const ODP_UNUSED ipsec_test_packet
+ pkt_ipv4_icmp_0_esp_aes_cbc_sha1_1 = {
+ .len = 182,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0xa8, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x32, 0xab, 0xce, 0xc0, 0xa8, 0x6f, 0x02,
+ 0xc0, 0xa8, 0xde, 0x02,
+
+ /* ESP */
+ 0x00, 0x00, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x01,
+
+ /* IV */
+ 0x17, 0xc3, 0xfa, 0xaf, 0x1d, 0xeb, 0x94, 0x06,
+ 0x4e, 0xf8, 0x62, 0xb4, 0x1f, 0xa0, 0x17, 0x62,
+
+ /* data */
+ 0xba, 0xf3, 0xfb, 0x10, 0x86, 0xee, 0x80, 0x6f,
+ 0x44, 0xff, 0x94, 0x7f, 0xee, 0xd8, 0x50, 0x62,
+ 0x40, 0x3f, 0x7c, 0x76, 0xb4, 0x65, 0xca, 0x32,
+ 0x91, 0x0e, 0xba, 0xf2, 0xc1, 0x9d, 0x3b, 0xcb,
+ 0x0f, 0xc9, 0xc9, 0xae, 0x33, 0x42, 0x16, 0x36,
+ 0xd3, 0xc8, 0x6c, 0x23, 0xac, 0xbf, 0x98, 0xf2,
+ 0xda, 0x10, 0x95, 0xbc, 0xe8, 0x38, 0xbf, 0x4b,
+ 0x19, 0xd0, 0x58, 0x67, 0xd9, 0xab, 0xd0, 0xf5,
+ 0x59, 0xc9, 0xdc, 0xbb, 0x46, 0xcc, 0x34, 0x26,
+ 0xe6, 0xd6, 0xee, 0x5c, 0xc8, 0xe2, 0x46, 0xc9,
+ 0x14, 0xe9, 0x98, 0xe4, 0xb9, 0xec, 0xf0, 0xa7,
+ 0x12, 0x94, 0x54, 0x4e, 0x56, 0xfd, 0xe8, 0x07,
+ 0xd8, 0x83, 0xf9, 0x78, 0x5f, 0xa6, 0x1a, 0xce,
+ 0xbb, 0xda, 0xbc, 0x7c, 0xd8, 0xb6, 0x7b, 0x4f,
+
+ /* ICV */
+ 0x78, 0x4e, 0xfe, 0xbd, 0x42, 0x7f, 0x42, 0x96,
+ 0x65, 0xe7, 0x60, 0x2f,
+ },
+};
+
+static const ODP_UNUSED ipsec_test_packet
pkt_ipv4_icmp_0_esp_aes_cbc_sha256_1 = {
.len = 186,
.l2_offset = 0,
@@ -770,6 +815,99 @@ static const ODP_UNUSED ipsec_test_packet
},
};
+static const ODP_UNUSED ipsec_test_packet
+ pkt_ipv4_icmp_0_esp_aes_cbc_sha384_1 = {
+ .len = 194,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x32, 0xab, 0xc2, 0xc0, 0xa8, 0x6f, 0x02,
+ 0xc0, 0xa8, 0xde, 0x02,
+
+ /* ESP */
+ 0x00, 0x00, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x01,
+
+ /* IV */
+ 0xcd, 0xc7, 0x8d, 0x99, 0xf7, 0x65, 0x21, 0xf6,
+ 0x40, 0xe3, 0x4c, 0x5e, 0x90, 0x84, 0x4c, 0xf3,
+
+ /* data */
+ 0xb5, 0x9c, 0xa2, 0x3d, 0xb6, 0x09, 0x4f, 0x40,
+ 0x73, 0x4a, 0x33, 0x12, 0x90, 0xb2, 0xf1, 0x24,
+ 0x1f, 0xd3, 0xa3, 0x89, 0x53, 0x12, 0xb0, 0x98,
+ 0x6e, 0xec, 0xde, 0xb8, 0xf2, 0xbb, 0xe0, 0x03,
+ 0xee, 0x86, 0x1c, 0x2c, 0xe2, 0x12, 0x26, 0x89,
+ 0x4d, 0x8a, 0x6a, 0x89, 0xd0, 0x31, 0x68, 0x66,
+ 0xe8, 0x14, 0xe7, 0xd7, 0xaa, 0xd8, 0x2a, 0x61,
+ 0x03, 0x62, 0xb7, 0x46, 0x8e, 0x98, 0xa7, 0xfd,
+ 0x96, 0xe7, 0xbb, 0x5d, 0xf0, 0xc7, 0x42, 0xe1,
+ 0xef, 0x96, 0x1c, 0x79, 0xc0, 0xa4, 0x60, 0x69,
+ 0x2c, 0xc8, 0x02, 0x1f, 0xf4, 0xbf, 0x8f, 0xa4,
+ 0x0e, 0xb5, 0x35, 0xca, 0x51, 0x23, 0xc5, 0x62,
+ 0x13, 0x54, 0xbb, 0xcb, 0x2a, 0x4a, 0xdd, 0x79,
+ 0x32, 0x9f, 0x72, 0xa6, 0xeb, 0xe9, 0x04, 0x61,
+
+ /* ICV */
+ 0x79, 0xbc, 0xb6, 0x2d, 0xcc, 0x14, 0xc8, 0xea,
+ 0xfa, 0x5b, 0x57, 0x8d, 0x0a, 0xec, 0x56, 0xb7,
+ 0xca, 0xb2, 0x38, 0x9b, 0x05, 0x79, 0xf8, 0xdd,
+ },
+};
+
+static const ODP_UNUSED ipsec_test_packet
+ pkt_ipv4_icmp_0_esp_aes_cbc_sha512_1 = {
+ .len = 202,
+ .l2_offset = 0,
+ .l3_offset = 14,
+ .l4_offset = 34,
+ .data = {
+ /* ETH */
+ 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
+ 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0x08, 0x00,
+
+ /* IP */
+ 0x45, 0x00, 0x00, 0xbc, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x32, 0xab, 0xba, 0xc0, 0xa8, 0x6f, 0x02,
+ 0xc0, 0xa8, 0xde, 0x02,
+
+ /* ESP */
+ 0x00, 0x00, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x01,
+
+ /* IV */
+ 0xf9, 0x6b, 0x50, 0xa9, 0x7b, 0x4e, 0xc9, 0xdf,
+ 0x70, 0x29, 0xe2, 0x76, 0xdc, 0x12, 0x1e, 0x8f,
+
+ /* data */
+ 0x9a, 0x7c, 0x5b, 0x96, 0xcd, 0xcb, 0x76, 0x07,
+ 0xf3, 0xb0, 0x86, 0x31, 0xa4, 0xf0, 0xa3, 0xdb,
+ 0xb6, 0x08, 0x46, 0xd4, 0xb2, 0x2c, 0x15, 0x86,
+ 0xdf, 0x4e, 0xb9, 0xd2, 0x75, 0xb5, 0x18, 0x30,
+ 0x25, 0x15, 0x38, 0xbb, 0xbd, 0x17, 0x8b, 0x01,
+ 0xc6, 0xc4, 0x14, 0xe8, 0xe7, 0xc2, 0xc7, 0x63,
+ 0x70, 0x4d, 0xcb, 0x02, 0x95, 0x68, 0x36, 0x85,
+ 0x11, 0x66, 0x76, 0xa0, 0x73, 0xd4, 0xa9, 0x1c,
+ 0x33, 0xff, 0xe6, 0x04, 0x80, 0x47, 0x6d, 0xa4,
+ 0x63, 0x1a, 0x15, 0x89, 0x57, 0xb7, 0x39, 0x4f,
+ 0x61, 0x71, 0x8f, 0x4b, 0xaf, 0x3c, 0x31, 0x0d,
+ 0x9b, 0x1a, 0xea, 0x21, 0x38, 0xb8, 0x64, 0x89,
+ 0x96, 0x76, 0xc7, 0xd2, 0xfc, 0x8e, 0x36, 0x02,
+ 0x35, 0xfe, 0xde, 0x40, 0xc7, 0xd8, 0x60, 0x8d,
+
+ /* ICV */
+ 0xe8, 0x66, 0x6b, 0xb7, 0x4f, 0xb2, 0xa5, 0x08,
+ 0xf1, 0x76, 0x82, 0xa9, 0x3e, 0xed, 0x39, 0xac,
+ 0x17, 0x8f, 0xa8, 0xfe, 0x58, 0x4d, 0x40, 0xed,
+ 0xfe, 0xd9, 0x35, 0x60, 0x13, 0xb5, 0x20, 0xf8,
+ },
+};
+
static const ODP_UNUSED ipsec_test_packet pkt_ipv4_icmp_0_esp_aes_ctr_null_1 = {
.len = 162,
.l2_offset = 0,
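
The three reference packets added above share the layout of the existing vectors: Ethernet at offset 0, IPv4 at 14, ESP at 34 (SPI 0x7b, sequence number 1), a 16-byte AES-CBC IV, the encrypted payload, and a truncated HMAC ICV of 12, 24 and 32 bytes for SHA-1, SHA-384 and SHA-512 respectively. A skeleton of such a vector, with a hypothetical name and the byte arrays elided:

	/* Skeleton only; the real byte values are in the vectors above. */
	static const ODP_UNUSED ipsec_test_packet pkt_example_esp_cbc_hmac = {
		.len = 182,		/* total frame length incl. Ethernet header */
		.l2_offset = 0,		/* Ethernet */
		.l3_offset = 14,	/* IPv4 */
		.l4_offset = 34,	/* ESP */
		.data = {
			/* ETH, IP, ESP header, IV, encrypted payload, ICV */
			0x00,
		},
	};
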
diff --git a/test/validation/api/packet/packet.c b/test/validation/api/packet/packet.c
index b521e1654..87961f6d1 100644
--- a/test/validation/api/packet/packet.c
+++ b/test/validation/api/packet/packet.c
@@ -1,5 +1,6 @@
/* Copyright (c) 2014-2018, Linaro Limited
- * Copyright (c) 2019, Nokia
+ * Copyright (c) 2019-2020, Nokia
+ * Copyright (c) 2020, Marvell
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -11,6 +12,8 @@
#include <odp_cunit_common.h>
#include <test_packet_parser.h>
+#include <odp/helper/odph_api.h>
+
/* Reserve some tailroom for tests */
#define TAILROOM_RESERVE 4
/* Number of packets in the test packet pool */
@@ -24,6 +27,13 @@ ODP_STATIC_ASSERT(PACKET_POOL_NUM_SEG > 1 &&
/* Number of packets in parse test */
#define PARSE_TEST_NUM_PKT 10
+/* Default packet vector size */
+#define PKT_VEC_SIZE 64
+/* Number of packet vectors in default pool */
+#define PKT_VEC_NUM 10
+/* Number of preallocated packet vector test packets */
+#define PKT_VEC_PACKET_NUM PKT_VEC_NUM
+
static odp_pool_capability_t pool_capa;
static odp_pool_param_t default_param;
static odp_pool_t default_pool;
@@ -33,6 +43,10 @@ static uint32_t segmented_packet_len;
static odp_bool_t segmentation_supported = true;
odp_packet_t test_packet, segmented_test_packet;
+/* Packet vector globals */
+static odp_packet_t pkt_vec[PKT_VEC_PACKET_NUM];
+static odp_packet_vector_t pktv_default = ODP_PACKET_VECTOR_INVALID;
+static odp_pool_t vector_default_pool = ODP_POOL_INVALID;
static struct udata_struct {
uint64_t u64;
@@ -2614,6 +2628,329 @@ static void packet_test_ref(void)
odp_packet_free(ref_pkt[1]);
}
+static void packet_vector_test_event_conversion(void)
+{
+ odp_packet_vector_t pktv0 = pktv_default;
+ odp_packet_vector_t pktv1;
+ odp_event_t event;
+
+ event = odp_packet_vector_to_event(pktv0);
+ CU_ASSERT_FATAL(event != ODP_EVENT_INVALID);
+ CU_ASSERT(odp_event_type(event) == ODP_EVENT_PACKET_VECTOR);
+
+ pktv1 = odp_packet_vector_from_event(event);
+ CU_ASSERT_FATAL(pktv1 != ODP_PACKET_VECTOR_INVALID);
+ CU_ASSERT(pktv1 == pktv0);
+}
+
+static int remove_invalid_pkts_tbl(odp_packet_t *pkt_tbl, int num_pkts)
+{
+ int i, j, count = 0;
+
+ for (i = 0; i < (num_pkts - count); i++) {
+ if (pkt_tbl[i] == ODP_PACKET_INVALID) {
+ for (j = i; j < num_pkts - 1; j++)
+ pkt_tbl[j] = pkt_tbl[j + 1];
+
+ count++;
+ }
+ }
+
+ return count;
+}
+
+static void packet_vector_test_tbl(void)
+{
+ odp_packet_vector_t pktv = ODP_PACKET_VECTOR_INVALID;
+ odp_packet_t *pkt_tbl, packet;
+ odp_packet_t clone_packet = ODP_PACKET_INVALID;
+ odp_packet_t orig_pkt_tbl[PKT_VEC_SIZE];
+ odp_pool_param_t params;
+ odp_pool_capability_t capa;
+ odp_pool_t pool;
+ uint32_t i, num;
+ uint32_t max_size = PKT_VEC_SIZE;
+
+ CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
+ CU_ASSERT_FATAL(capa.vector.max_size > 0);
+
+ if (capa.vector.max_size < max_size)
+ max_size = capa.vector.max_size;
+
+ odp_pool_param_init(&params);
+ params.type = ODP_POOL_VECTOR;
+ params.vector.num = 1;
+ params.vector.max_size = max_size;
+
+ pool = odp_pool_create("vector_pool_alloc", &params);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ /* Allocate the only vector from the pool */
+ pktv = odp_packet_vector_alloc(pool);
+ /* Check that the packet vector is valid */
+ CU_ASSERT_FATAL(odp_packet_vector_valid(pktv) == 1);
+ CU_ASSERT(odp_packet_vector_to_u64(pktv) !=
+ odp_packet_vector_to_u64(ODP_PACKET_VECTOR_INVALID));
+
+ /* Allocate packets */
+ for (i = 0; i < max_size; i++) {
+ orig_pkt_tbl[i] = odp_packet_alloc(default_pool,
+ default_param.pkt.len);
+ CU_ASSERT_FATAL(orig_pkt_tbl[i] != ODP_PACKET_INVALID);
+ }
+
+ /* Get packet vector table */
+ num = odp_packet_vector_tbl(pktv, &pkt_tbl);
+ /* Make sure there are initially no packets in the vector */
+ CU_ASSERT(num == 0);
+
+ /* Fill the allocated packets in the vector */
+ for (i = 0; i < max_size; i++)
+ pkt_tbl[i] = orig_pkt_tbl[i];
+
+ /* Set number of packets stored in the vector */
+ odp_packet_vector_size_set(pktv, max_size);
+
+ /* Get number of packets in the vector */
+ num = odp_packet_vector_size(pktv);
+ CU_ASSERT(num == max_size);
+
+ if (max_size < 4) {
+ printf("Max vector size too small to run all tests.\n");
+ goto cleanup;
+ }
+
+ /* Prepare a copy of the first packet */
+ packet = orig_pkt_tbl[0];
+ clone_packet = odp_packet_copy(packet, odp_packet_pool(packet));
+ CU_ASSERT_FATAL(clone_packet != ODP_PACKET_INVALID);
+ CU_ASSERT(odp_packet_to_u64(clone_packet) != odp_packet_to_u64(packet));
+
+ /* Change one packet handle in the table */
+ pkt_tbl[1] = clone_packet;
+ /* Read packet vector table. */
+ num = odp_packet_vector_tbl(pktv, &pkt_tbl);
+ /* Number of packets should match the size set above */
+ CU_ASSERT(num == max_size);
+ /* Check if packet handle still corresponds to cloned packet */
+ CU_ASSERT(odp_packet_to_u64(pkt_tbl[1]) ==
+ odp_packet_to_u64(clone_packet));
+
+ /* Mark the first packet as invalid */
+ pkt_tbl[0] = ODP_PACKET_INVALID;
+ /* Re-read the table to confirm that the first packet is now invalid */
+ num = odp_packet_vector_tbl(pktv, &pkt_tbl);
+ CU_ASSERT(odp_packet_is_valid(pkt_tbl[0]) == 0);
+
+ /* An invalid packet should never be present in the table. The following
+ * logic compacts the pkt_tbl array and returns the number of invalid
+ * packets removed. */
+ num = remove_invalid_pkts_tbl(pkt_tbl, odp_packet_vector_size(pktv));
+ CU_ASSERT(num == 1);
+ /* Update number of valid packets in the table */
+ odp_packet_vector_size_set(pktv, odp_packet_vector_size(pktv) - num);
+ CU_ASSERT(odp_packet_vector_size(pktv) == max_size - num);
+ /* The first packet should be valid now */
+ CU_ASSERT(odp_packet_is_valid(pkt_tbl[0]) == 1);
+
+cleanup:
+ if (clone_packet != ODP_PACKET_INVALID)
+ odp_packet_free(clone_packet);
+ odp_packet_free_multi(orig_pkt_tbl, max_size);
+ odp_packet_vector_free(pktv);
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+static void packet_vector_test_debug(void)
+{
+ CU_ASSERT_FATAL(odp_packet_vector_valid(pktv_default) == 1);
+ printf("\n\n");
+ odp_packet_vector_print(pktv_default);
+}
+
+static void packet_vector_test_alloc_free(void)
+{
+ odp_packet_vector_t pktv = ODP_PACKET_VECTOR_INVALID;
+ odp_pool_param_t params;
+ odp_pool_capability_t capa;
+ odp_pool_t pool;
+ odp_packet_t pkt;
+ odp_packet_t *pkts_tbl;
+ uint32_t max_size = PKT_VEC_SIZE;
+
+ CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
+ CU_ASSERT_FATAL(capa.vector.max_size > 0);
+
+ if (capa.vector.max_size < max_size)
+ max_size = capa.vector.max_size;
+
+ odp_pool_param_init(&params);
+ params.type = ODP_POOL_VECTOR;
+ params.vector.num = 1;
+ params.vector.max_size = max_size;
+
+ pool = odp_pool_create("vector_pool_alloc", &params);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ /* Allocate the only vector from the pool */
+ pktv = odp_packet_vector_alloc(pool);
+ /* Check that the packet vector is valid */
+ CU_ASSERT_FATAL(odp_packet_vector_valid(pktv) == 1);
+ CU_ASSERT(odp_packet_vector_to_u64(pktv) !=
+ odp_packet_vector_to_u64(ODP_PACKET_VECTOR_INVALID));
+
+ /* Since the pool contains only one vector, further allocations must
+ * fail.
+ */
+ CU_ASSERT_FATAL(odp_packet_vector_alloc(pool) == ODP_PACKET_VECTOR_INVALID);
+
+ /* Free the vector back to the pool */
+ odp_packet_vector_free(pktv);
+
+ /* Check that the vector was returned to the pool */
+ pktv = odp_packet_vector_alloc(pool);
+ CU_ASSERT_FATAL(pktv != ODP_PACKET_VECTOR_INVALID);
+ CU_ASSERT(odp_packet_vector_size(pktv) == 0);
+
+ /* Free packet vector using odp_event_free() */
+ pkt = odp_packet_alloc(default_pool, default_param.pkt.len);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ CU_ASSERT(odp_packet_vector_tbl(pktv, &pkts_tbl) == 0);
+ pkts_tbl[0] = pkt;
+ odp_packet_vector_size_set(pktv, 1);
+
+ odp_event_free(odp_packet_vector_to_event(pktv));
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+static void packet_vector_basic_test(void)
+{
+ odp_packet_t *pkt_tbl;
+ odp_pool_capability_t capa;
+ uint32_t i, num;
+ uint32_t max_size = PKT_VEC_PACKET_NUM;
+
+ CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
+ if (capa.vector.max_size < max_size)
+ max_size = capa.vector.max_size;
+
+ /* Check that the default packet vector is valid */
+ CU_ASSERT(odp_packet_vector_valid(pktv_default) == 1);
+
+ /* Make sure the default packet vector comes from the default vector pool */
+ CU_ASSERT(odp_packet_vector_pool(pktv_default) == vector_default_pool);
+
+ /* Get packet vector table */
+ num = odp_packet_vector_tbl(pktv_default, &pkt_tbl);
+ /* Make sure the vector is initially empty */
+ CU_ASSERT(num == 0);
+
+ /* Fill the vector with the preallocated packets */
+ for (i = 0; i < max_size; i++)
+ pkt_tbl[i] = pkt_vec[i];
+
+ /* Set the number of packets stored in the vector */
+ odp_packet_vector_size_set(pktv_default, max_size);
+
+ /* Get number of packets in vector */
+ num = odp_packet_vector_size(pktv_default);
+ CU_ASSERT(num == max_size);
+
+ CU_ASSERT(odp_packet_vector_valid(pktv_default) == 1);
+}
+
+static int packet_vector_suite_init(void)
+{
+ uint32_t num_pkt = PKT_VEC_PACKET_NUM;
+ uint32_t num = PACKET_POOL_NUM;
+ odp_pool_param_t params;
+ uint32_t i, ret, len;
+
+ memset(&pool_capa, 0, sizeof(odp_pool_capability_t));
+
+ if (odp_pool_capability(&pool_capa) < 0) {
+ ODPH_ERR("pool_capability failed\n");
+ return -1;
+ }
+
+ if (pool_capa.pkt.max_num != 0 && pool_capa.pkt.max_num < num)
+ num = pool_capa.pkt.max_num;
+
+ /* Creating default packet pool */
+ odp_pool_param_init(&params);
+ params.type = ODP_POOL_PACKET;
+ params.pkt.len = pool_capa.pkt.min_seg_len;
+ params.pkt.num = num;
+
+ memcpy(&default_param, &params, sizeof(odp_pool_param_t));
+
+ default_pool = odp_pool_create("default_pool", &params);
+ if (default_pool == ODP_POOL_INVALID) {
+ ODPH_ERR("default pool create failed\n");
+ return -1;
+ }
+
+ /* Allocating ipv4-udp packets */
+ len = sizeof(test_packet_ipv4_udp);
+ ret = odp_packet_alloc_multi(default_pool, len, pkt_vec, num_pkt);
+ if (ret != num_pkt) {
+ ODPH_ERR("packet allocation failed\n");
+ if (ret > 0)
+ odp_packet_free_multi(pkt_vec, ret);
+ goto err;
+ }
+
+ for (i = 0; i < num_pkt; i++) {
+ ret = odp_packet_copy_from_mem(pkt_vec[i], 0, len,
+ test_packet_ipv4_udp);
+ if (ret != 0) {
+ ODPH_ERR("packet preparation failed\n");
+ goto err1;
+ }
+ }
+
+ /* Creating the vector pool */
+ odp_pool_param_init(&params);
+ params.type = ODP_POOL_VECTOR;
+ params.vector.num = PKT_VEC_NUM;
+ params.vector.max_size = pool_capa.vector.max_size < PKT_VEC_SIZE ?
+ pool_capa.vector.max_size : PKT_VEC_SIZE;
+
+ vector_default_pool = odp_pool_create("vector_default_pool", &params);
+
+ if (vector_default_pool == ODP_POOL_INVALID) {
+ ODPH_ERR("default vector pool create failed\n");
+ goto err1;
+ }
+
+ /* Allocating a default vector */
+ pktv_default = odp_packet_vector_alloc(vector_default_pool);
+ if (pktv_default == ODP_PACKET_VECTOR_INVALID) {
+ ODPH_ERR("default vector packet allocation failed\n");
+ goto err2;
+ }
+ return 0;
+err2:
+ odp_pool_destroy(vector_default_pool);
+err1:
+ odp_packet_free_multi(pkt_vec, PKT_VEC_PACKET_NUM);
+err:
+ odp_pool_destroy(default_pool);
+ return -1;
+}
+
+static int packet_vector_suite_term(void)
+{
+ odp_packet_free_multi(pkt_vec, PKT_VEC_PACKET_NUM);
+
+ odp_pool_destroy(default_pool);
+
+ odp_packet_vector_free(pktv_default);
+ odp_pool_destroy(vector_default_pool);
+ return 0;
+}
static void packet_test_max_pools(void)
{
odp_pool_param_t param;
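[Editor's note, not part of the patch: the packet vector test suite added above exercises the odp_packet_vector API end to end. The sketch below condenses the producer-side pattern under a few assumptions: a packet pool and a vector pool already exist (as set up in packet_vector_suite_init()), num does not exceed the vector pool's max_size, and the 128-byte packet length is a placeholder. Error handling is trimmed for brevity.]

static int vector_usage_sketch(odp_pool_t pkt_pool, odp_pool_t vec_pool,
			       uint32_t num)
{
	odp_packet_vector_t pktv;
	odp_packet_t *tbl;
	uint32_t i, n;

	/* A freshly allocated vector starts empty (size == 0) */
	pktv = odp_packet_vector_alloc(vec_pool);
	if (pktv == ODP_PACKET_VECTOR_INVALID)
		return -1;

	/* Get a handle to the vector's internal packet table and fill it */
	odp_packet_vector_tbl(pktv, &tbl);
	for (i = 0; i < num; i++)
		tbl[i] = odp_packet_alloc(pkt_pool, 128);

	/* Publish how many handles the vector now carries */
	odp_packet_vector_size_set(pktv, num);

	/* A consumer reads the table back the same way */
	n = odp_packet_vector_tbl(pktv, &tbl);
	odp_packet_free_multi(tbl, n);

	/* Freeing the vector does not free the packets it referenced */
	odp_packet_vector_free(pktv);
	return 0;
}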
@@ -2789,6 +3126,50 @@ static void parse_eth_ipv4_udp(void)
odp_packet_free_multi(pkt, num_pkt);
}
+/* Ethernet SNAP/IPv4/UDP */
+static void parse_eth_snap_ipv4_udp(void)
+{
+ odp_packet_parse_param_t parse;
+ int i;
+ odp_packet_chksum_status_t chksum_status;
+ int num_pkt = PARSE_TEST_NUM_PKT;
+ odp_packet_t pkt[num_pkt];
+
+ parse_test_alloc(pkt, test_packet_snap_ipv4_udp,
+ sizeof(test_packet_snap_ipv4_udp), num_pkt);
+
+ for (i = 0; i < num_pkt; i++) {
+ chksum_status = odp_packet_l3_chksum_status(pkt[i]);
+ CU_ASSERT(chksum_status == ODP_PACKET_CHKSUM_UNKNOWN);
+ chksum_status = odp_packet_l4_chksum_status(pkt[i]);
+ CU_ASSERT(chksum_status == ODP_PACKET_CHKSUM_UNKNOWN);
+ }
+
+ parse.proto = ODP_PROTO_ETH;
+ parse.last_layer = ODP_PROTO_LAYER_ALL;
+ parse.chksums = parse_test.all_chksums;
+
+ CU_ASSERT(odp_packet_parse(pkt[0], 0, &parse) == 0);
+ CU_ASSERT(odp_packet_parse_multi(&pkt[1], parse_test.offset_zero,
+ num_pkt - 1, &parse) == (num_pkt - 1));
+
+ for (i = 0; i < num_pkt; i++) {
+ CU_ASSERT(odp_packet_has_eth(pkt[i]));
+ CU_ASSERT(odp_packet_has_ipv4(pkt[i]));
+ CU_ASSERT(odp_packet_has_udp(pkt[i]));
+ CU_ASSERT(!odp_packet_has_ipv6(pkt[i]));
+ CU_ASSERT(!odp_packet_has_tcp(pkt[i]));
+ CU_ASSERT_EQUAL(odp_packet_l2_type(pkt[i]),
+ ODP_PROTO_L2_TYPE_ETH);
+ CU_ASSERT_EQUAL(odp_packet_l3_type(pkt[i]),
+ ODP_PROTO_L3_TYPE_IPV4);
+ CU_ASSERT_EQUAL(odp_packet_l4_type(pkt[i]),
+ ODP_PROTO_L4_TYPE_UDP);
+ }
+
+ odp_packet_free_multi(pkt, num_pkt);
+}
+
/* IPv4/UDP */
static void parse_ipv4_udp(void)
{
@@ -3519,55 +3900,55 @@ static void parse_result(void)
CU_ASSERT(result[i].flag.all != 0);
CU_ASSERT(result[i].flag.has_error ==
- odp_packet_has_error(pkt[i]));
+ !!odp_packet_has_error(pkt[i]));
CU_ASSERT(result[i].flag.has_l2_error ==
- odp_packet_has_l2_error(pkt[i]));
+ !!odp_packet_has_l2_error(pkt[i]));
CU_ASSERT(result[i].flag.has_l3_error ==
- odp_packet_has_l3_error(pkt[i]));
+ !!odp_packet_has_l3_error(pkt[i]));
CU_ASSERT(result[i].flag.has_l4_error ==
- odp_packet_has_l4_error(pkt[i]));
+ !!odp_packet_has_l4_error(pkt[i]));
CU_ASSERT(result[i].flag.has_l2 ==
- odp_packet_has_l2(pkt[i]));
+ !!odp_packet_has_l2(pkt[i]));
CU_ASSERT(result[i].flag.has_l3 ==
- odp_packet_has_l3(pkt[i]));
+ !!odp_packet_has_l3(pkt[i]));
CU_ASSERT(result[i].flag.has_l4 ==
- odp_packet_has_l4(pkt[i]));
+ !!odp_packet_has_l4(pkt[i]));
CU_ASSERT(result[i].flag.has_eth ==
- odp_packet_has_eth(pkt[i]));
+ !!odp_packet_has_eth(pkt[i]));
CU_ASSERT(result[i].flag.has_eth_bcast ==
- odp_packet_has_eth_bcast(pkt[i]));
+ !!odp_packet_has_eth_bcast(pkt[i]));
CU_ASSERT(result[i].flag.has_eth_mcast ==
- odp_packet_has_eth_mcast(pkt[i]));
+ !!odp_packet_has_eth_mcast(pkt[i]));
CU_ASSERT(result[i].flag.has_jumbo ==
- odp_packet_has_jumbo(pkt[i]));
+ !!odp_packet_has_jumbo(pkt[i]));
CU_ASSERT(result[i].flag.has_vlan ==
- odp_packet_has_vlan(pkt[i]));
+ !!odp_packet_has_vlan(pkt[i]));
CU_ASSERT(result[i].flag.has_vlan_qinq ==
- odp_packet_has_vlan_qinq(pkt[i]));
+ !!odp_packet_has_vlan_qinq(pkt[i]));
CU_ASSERT(result[i].flag.has_arp ==
- odp_packet_has_arp(pkt[i]));
+ !!odp_packet_has_arp(pkt[i]));
CU_ASSERT(result[i].flag.has_ipv4 ==
- odp_packet_has_ipv4(pkt[i]));
+ !!odp_packet_has_ipv4(pkt[i]));
CU_ASSERT(result[i].flag.has_ipv6 ==
- odp_packet_has_ipv6(pkt[i]));
+ !!odp_packet_has_ipv6(pkt[i]));
CU_ASSERT(result[i].flag.has_ip_bcast ==
- odp_packet_has_ip_bcast(pkt[i]));
+ !!odp_packet_has_ip_bcast(pkt[i]));
CU_ASSERT(result[i].flag.has_ip_mcast ==
- odp_packet_has_ip_mcast(pkt[i]));
+ !!odp_packet_has_ip_mcast(pkt[i]));
CU_ASSERT(result[i].flag.has_ipfrag ==
- odp_packet_has_ipfrag(pkt[i]));
+ !!odp_packet_has_ipfrag(pkt[i]));
CU_ASSERT(result[i].flag.has_ipopt ==
- odp_packet_has_ipopt(pkt[i]));
+ !!odp_packet_has_ipopt(pkt[i]));
CU_ASSERT(result[i].flag.has_ipsec ==
- odp_packet_has_ipsec(pkt[i]));
+ !!odp_packet_has_ipsec(pkt[i]));
CU_ASSERT(result[i].flag.has_udp ==
- odp_packet_has_udp(pkt[i]));
+ !!odp_packet_has_udp(pkt[i]));
CU_ASSERT(result[i].flag.has_tcp ==
- odp_packet_has_tcp(pkt[i]));
+ !!odp_packet_has_tcp(pkt[i]));
CU_ASSERT(result[i].flag.has_sctp ==
- odp_packet_has_sctp(pkt[i]));
+ !!odp_packet_has_sctp(pkt[i]));
CU_ASSERT(result[i].flag.has_icmp ==
- odp_packet_has_icmp(pkt[i]));
+ !!odp_packet_has_icmp(pkt[i]));
CU_ASSERT(result[i].packet_len == odp_packet_len(pkt[i]));
CU_ASSERT(result[i].l2_offset == odp_packet_l2_offset(pkt[i]));
@@ -3622,8 +4003,18 @@ odp_testinfo_t packet_suite[] = {
ODP_TEST_INFO_NULL,
};
+odp_testinfo_t packet_vector_parse_suite[] = {
+ ODP_TEST_INFO(packet_vector_test_debug),
+ ODP_TEST_INFO(packet_vector_basic_test),
+ ODP_TEST_INFO(packet_vector_test_alloc_free),
+ ODP_TEST_INFO(packet_vector_test_tbl),
+ ODP_TEST_INFO(packet_vector_test_event_conversion),
+ ODP_TEST_INFO_NULL,
+};
+
odp_testinfo_t packet_parse_suite[] = {
ODP_TEST_INFO(parse_eth_ipv4_udp),
+ ODP_TEST_INFO(parse_eth_snap_ipv4_udp),
ODP_TEST_INFO(parse_ipv4_udp),
ODP_TEST_INFO(parse_eth_ipv4_tcp),
ODP_TEST_INFO(parse_eth_ipv6_udp),
@@ -3660,6 +4051,11 @@ odp_suiteinfo_t packet_suites[] = {
.init_func = packet_parse_suite_init,
.term_func = packet_parse_suite_term,
},
+ { .name = "packet vector tests",
+ .testinfo_tbl = packet_vector_parse_suite,
+ .init_func = packet_vector_suite_init,
+ .term_func = packet_vector_suite_term,
+ },
ODP_SUITE_INFO_NULL,
};
diff --git a/test/validation/api/pktio/pktio.c b/test/validation/api/pktio/pktio.c
index 55138a402..185545fe9 100644
--- a/test/validation/api/pktio/pktio.c
+++ b/test/validation/api/pktio/pktio.c
@@ -1,5 +1,6 @@
/* Copyright (c) 2014-2018, Linaro Limited
* Copyright (c) 2020, Nokia
+ * Copyright (c) 2020, Marvell
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -13,7 +14,7 @@
#include <stdlib.h>
#include "parser.h"
-#define PKT_BUF_NUM 32
+#define PKT_BUF_NUM 128
#define PKT_BUF_SIZE (9 * 1024)
#define PKT_LEN_NORMAL 64
#define PKT_LEN_MAX (PKT_BUF_SIZE - ODPH_ETHHDR_LEN - \
@@ -24,6 +25,8 @@
#define TEST_SEQ_INVALID ((uint32_t)~0)
#define TEST_SEQ_MAGIC 0x92749451
#define TX_BATCH_LEN 4
+#define PKTV_TX_BATCH_LEN 32
+#define PKTV_DEFAULT_SIZE 8
#define MAX_QUEUES 128
#define PKTIO_TS_INTERVAL (50 * ODP_TIME_MSEC_IN_NS)
@@ -94,6 +97,9 @@ static uint32_t packet_len = PKT_LEN_NORMAL;
/** default packet pool */
odp_pool_t default_pkt_pool = ODP_POOL_INVALID;
+/** default packet vector pool */
+odp_pool_t default_pktv_pool = ODP_POOL_INVALID;
+
/** sequence number of IP packets */
odp_atomic_u32_t ip_seq;
@@ -102,6 +108,8 @@ pkt_segmented_e pool_segmentation = PKT_POOL_UNSEGMENTED;
odp_pool_t pool[MAX_NUM_IFACES] = {ODP_POOL_INVALID, ODP_POOL_INVALID};
+odp_pool_t pktv_pool[MAX_NUM_IFACES] = {ODP_POOL_INVALID, ODP_POOL_INVALID};
+
static inline void _pktio_wait_linkup(odp_pktio_t pktio)
{
/* wait 1 second for link up */
@@ -201,25 +209,24 @@ static uint32_t pktio_pkt_seq_hdr(odp_packet_t pkt, size_t l4_hdr_len)
pkt_tail_t tail;
if (pkt == ODP_PACKET_INVALID) {
- fprintf(stderr, "error: pkt invalid\n");
+ ODPH_ERR("pkt invalid\n");
return TEST_SEQ_INVALID;
}
off = odp_packet_l4_offset(pkt);
if (off == ODP_PACKET_OFFSET_INVALID) {
- fprintf(stderr, "error: offset invalid\n");
+ ODPH_ERR("offset invalid\n");
return TEST_SEQ_INVALID;
}
off += l4_hdr_len;
if (odp_packet_copy_to_mem(pkt, off, sizeof(head), &head) != 0) {
- fprintf(stderr, "error: header copy failed\n");
+ ODPH_ERR("header copy failed\n");
return TEST_SEQ_INVALID;
}
if (head.magic != TEST_SEQ_MAGIC) {
- fprintf(stderr, "error: header magic invalid 0x%" PRIx32 "\n",
- head.magic);
+ ODPH_ERR("header magic invalid 0x%" PRIx32 "\n", head.magic);
odp_packet_print(pkt);
return TEST_SEQ_INVALID;
}
@@ -228,7 +235,7 @@ static uint32_t pktio_pkt_seq_hdr(odp_packet_t pkt, size_t l4_hdr_len)
off = packet_len - sizeof(tail);
if (odp_packet_copy_to_mem(pkt, off, sizeof(tail),
&tail) != 0) {
- fprintf(stderr, "error: header copy failed\n");
+ ODPH_ERR("header copy failed\n");
return TEST_SEQ_INVALID;
}
@@ -236,15 +243,11 @@ static uint32_t pktio_pkt_seq_hdr(odp_packet_t pkt, size_t l4_hdr_len)
seq = head.seq;
CU_ASSERT(seq != TEST_SEQ_INVALID);
} else {
- fprintf(stderr,
- "error: tail magic invalid 0x%" PRIx32 "\n",
- tail.magic);
+ ODPH_ERR("tail magic invalid 0x%" PRIx32 "\n", tail.magic);
}
} else {
- fprintf(stderr,
- "error: packet length invalid: "
- "%" PRIu32 "(%" PRIu32 ")\n",
- odp_packet_len(pkt), packet_len);
+ ODPH_ERR("packet length invalid: %" PRIu32 "(%" PRIu32 ")\n",
+ odp_packet_len(pkt), packet_len);
}
return seq;
@@ -408,6 +411,35 @@ static int default_pool_create(void)
return 0;
}
+static int default_pktv_pool_create(void)
+{
+ char pool_name[ODP_POOL_NAME_LEN];
+ odp_pool_capability_t pool_capa;
+ odp_pool_param_t params;
+
+ if (odp_pool_capability(&pool_capa) != 0)
+ return -1;
+
+ if (pool_capa.vector.max_num < PKT_BUF_NUM)
+ return -1;
+
+ if (default_pktv_pool != ODP_POOL_INVALID)
+ return -1;
+
+ odp_pool_param_init(&params);
+ params.type = ODP_POOL_VECTOR;
+ params.vector.num = PKT_BUF_NUM;
+ params.vector.max_size = pool_capa.vector.max_size;
+
+ snprintf(pool_name, sizeof(pool_name),
+ "pktv_pool_default_%d", pool_segmentation);
+ default_pktv_pool = odp_pool_create(pool_name, &params);
+ if (default_pktv_pool == ODP_POOL_INVALID)
+ return -1;
+
+ return 0;
+}
+
static odp_pktio_t create_pktio(int iface_idx, odp_pktin_mode_t imode,
odp_pktout_mode_t omode)
{
@@ -442,6 +474,59 @@ static odp_pktio_t create_pktio(int iface_idx, odp_pktin_mode_t imode,
return pktio;
}
+static odp_pktio_t create_pktv_pktio(int iface_idx, odp_pktin_mode_t imode,
+ odp_pktout_mode_t omode, odp_schedule_sync_t sync_mode)
+{
+ const char *iface = iface_name[iface_idx];
+ odp_pktout_queue_param_t pktout_param;
+ odp_pktin_queue_param_t pktin_param;
+ odp_pktio_param_t pktio_param;
+ odp_pktio_capability_t capa;
+ odp_pktio_t pktio;
+
+ odp_pktio_param_init(&pktio_param);
+
+ pktio_param.in_mode = imode;
+ pktio_param.out_mode = omode;
+
+ pktio = odp_pktio_open(iface, pool[iface_idx], &pktio_param);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+
+ CU_ASSERT(odp_pktio_capability(pktio, &capa) == 0);
+ if (!capa.vector.supported) {
+ printf("Vector mode is not supported. Test skipped.\n");
+ odp_pktio_close(pktio);
+ return ODP_PKTIO_INVALID;
+ }
+
+ odp_pktin_queue_param_init(&pktin_param);
+
+ if (imode == ODP_PKTIN_MODE_SCHED) {
+ pktin_param.queue_param.sched.prio = ODP_SCHED_PRIO_DEFAULT;
+ pktin_param.queue_param.sched.sync = sync_mode;
+ pktin_param.queue_param.sched.group = ODP_SCHED_GROUP_ALL;
+ }
+
+ pktin_param.hash_enable = 0;
+ pktin_param.num_queues = 1;
+ pktin_param.op_mode = ODP_PKTIO_OP_MT_UNSAFE;
+ pktin_param.vector.enable = 1;
+ pktin_param.vector.pool = pktv_pool[iface_idx];
+ pktin_param.vector.max_size = capa.vector.max_size < PKTV_DEFAULT_SIZE ?
+ capa.vector.max_size : PKTV_DEFAULT_SIZE;
+ pktin_param.vector.max_tmo_ns = capa.vector.min_tmo_ns;
+ CU_ASSERT(odp_pktin_queue_config(pktio, &pktin_param) == 0);
+
+ odp_pktout_queue_param_init(&pktout_param);
+ pktout_param.op_mode = ODP_PKTIO_OP_MT_UNSAFE;
+ pktout_param.num_queues = 1;
+ CU_ASSERT(odp_pktout_queue_config(pktio, &pktout_param) == 0);
+
+ if (wait_for_network)
+ odp_time_wait_ns(ODP_TIME_SEC_IN_NS / 4);
+
+ return pktio;
+}
+
static int flush_input_queue(odp_pktio_t pktio, odp_pktin_mode_t imode)
{
odp_event_t ev;
@@ -547,7 +632,7 @@ static int create_packets(odp_packet_t pkt_tbl[], uint32_t pkt_seq[], int num,
}
static int get_packets(pktio_info_t *pktio_rx, odp_packet_t pkt_tbl[],
- int num, txrx_mode_e mode)
+ int num, txrx_mode_e mode, odp_bool_t vector_mode)
{
odp_event_t evt_tbl[num];
int num_evts = 0;
@@ -575,7 +660,7 @@ static int get_packets(pktio_info_t *pktio_rx, odp_packet_t pkt_tbl[],
num_evts = odp_schedule_multi(NULL, ODP_SCHED_NO_WAIT,
evt_tbl, num);
} else {
- odp_event_t evt_tmp;
+ odp_event_t evt_tmp = ODP_EVENT_INVALID;
if (pktio_rx->in_mode == ODP_PKTIN_MODE_QUEUE)
evt_tmp = odp_queue_deq(pktio_rx->inq);
@@ -588,10 +673,33 @@ static int get_packets(pktio_info_t *pktio_rx, odp_packet_t pkt_tbl[],
/* convert events to packets, discarding any non-packet events */
for (i = 0; i < num_evts; ++i) {
- if (odp_event_type(evt_tbl[i]) == ODP_EVENT_PACKET)
+ if (!vector_mode && odp_event_type(evt_tbl[i]) == ODP_EVENT_PACKET) {
pkt_tbl[num_pkts++] = odp_packet_from_event(evt_tbl[i]);
- else
+ } else if (vector_mode && odp_event_type(evt_tbl[i]) == ODP_EVENT_PACKET_VECTOR &&
+ num_pkts < num) {
+ odp_packet_vector_t pktv;
+ odp_packet_t *pkts;
+ int pktv_len;
+
+ pktv = odp_packet_vector_from_event(evt_tbl[i]);
+ pktv_len = odp_packet_vector_tbl(pktv, &pkts);
+
+ /* Make sure too many packets are not received */
+ if (num_pkts + pktv_len > num) {
+ int new_pkts = num - num_pkts;
+
+ memcpy(&pkt_tbl[num_pkts], pkts, new_pkts * sizeof(odp_packet_t));
+ odp_packet_free_multi(&pkts[new_pkts], pktv_len - new_pkts);
+ num_pkts += new_pkts;
+
+ } else {
+ memcpy(&pkt_tbl[num_pkts], pkts, pktv_len * sizeof(odp_packet_t));
+ num_pkts += pktv_len;
+ }
+ odp_packet_vector_free(pktv);
+ } else {
odp_event_free(evt_tbl[i]);
+ }
}
return num_pkts;
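[Editor's note, not part of the patch: the vector branch added to get_packets() above follows a common consumer-side pattern, sketched here in isolation. The helper name and the caller-provided capacity are illustrative only.]

static int unpack_vector_event(odp_event_t ev, odp_packet_t out[], int max)
{
	odp_packet_vector_t pktv;
	odp_packet_t *pkts;
	int i, n;

	if (odp_event_type(ev) != ODP_EVENT_PACKET_VECTOR)
		return 0;

	pktv = odp_packet_vector_from_event(ev);
	n = odp_packet_vector_tbl(pktv, &pkts);

	/* Copy out as many handles as the caller has room for */
	for (i = 0; i < n && i < max; i++)
		out[i] = pkts[i];

	/* Packets that do not fit would otherwise leak, so free them */
	if (n > max)
		odp_packet_free_multi(&pkts[max], n - max);

	/* The vector itself is no longer needed once unpacked */
	odp_packet_vector_free(pktv);

	return i;
}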
@@ -599,7 +707,7 @@ static int get_packets(pktio_info_t *pktio_rx, odp_packet_t pkt_tbl[],
static int wait_for_packets_hdr(pktio_info_t *pktio_rx, odp_packet_t pkt_tbl[],
uint32_t seq_tbl[], int num, txrx_mode_e mode,
- uint64_t ns, size_t l4_hdr_len)
+ uint64_t ns, size_t l4_hdr_len, odp_bool_t vector_mode)
{
odp_time_t wait_time, end, start;
int num_rx = 0;
@@ -611,7 +719,7 @@ static int wait_for_packets_hdr(pktio_info_t *pktio_rx, odp_packet_t pkt_tbl[],
end = odp_time_sum(start, wait_time);
while (num_rx < num && odp_time_cmp(end, odp_time_local()) > 0) {
- int n = get_packets(pktio_rx, pkt_tmp, num - num_rx, mode);
+ int n = get_packets(pktio_rx, pkt_tmp, num - num_rx, mode, vector_mode);
if (n < 0)
break;
@@ -633,10 +741,10 @@ static int wait_for_packets_hdr(pktio_info_t *pktio_rx, odp_packet_t pkt_tbl[],
static int wait_for_packets(pktio_info_t *pktio_rx, odp_packet_t pkt_tbl[],
uint32_t seq_tbl[], int num, txrx_mode_e mode,
- uint64_t ns)
+ uint64_t ns, odp_bool_t vector_mode)
{
return wait_for_packets_hdr(pktio_rx, pkt_tbl, seq_tbl, num, mode, ns,
- ODPH_UDPHDR_LEN);
+ ODPH_UDPHDR_LEN, vector_mode);
}
static int recv_packets_tmo(odp_pktio_t pktio, odp_packet_t pkt_tbl[],
@@ -790,7 +898,8 @@ static void check_parser_capa(odp_pktio_t pktio, int *l2, int *l3, int *l4)
static void pktio_txrx_multi(pktio_info_t *pktio_info_a,
pktio_info_t *pktio_info_b,
- int num_pkts, txrx_mode_e mode)
+ int num_pkts, txrx_mode_e mode,
+ odp_bool_t vector_mode)
{
odp_packet_t tx_pkt[num_pkts];
odp_packet_t rx_pkt[num_pkts];
@@ -848,13 +957,11 @@ static void pktio_txrx_multi(pktio_info_t *pktio_info_a,
}
/* and wait for them to arrive back */
- num_rx = wait_for_packets(pktio_info_b, rx_pkt, tx_seq,
- num_pkts, mode, ODP_TIME_SEC_IN_NS);
+ num_rx = wait_for_packets(pktio_info_b, rx_pkt, tx_seq, num_pkts, mode,
+ ODP_TIME_SEC_IN_NS, vector_mode);
CU_ASSERT(num_rx == num_pkts);
- if (num_rx != num_pkts) {
- fprintf(stderr, "error: received %i, out of %i packets\n",
- num_rx, num_pkts);
- }
+ if (num_rx != num_pkts)
+ ODPH_ERR("received %i, out of %i packets\n", num_rx, num_pkts);
for (i = 0; i < num_rx; ++i) {
odp_packet_data_range_t range;
@@ -903,7 +1010,8 @@ static void pktio_txrx_multi(pktio_info_t *pktio_info_a,
}
static void test_txrx(odp_pktin_mode_t in_mode, int num_pkts,
- txrx_mode_e mode)
+ txrx_mode_e mode, odp_schedule_sync_t sync_mode,
+ odp_bool_t vector_mode)
{
int ret, i, if_b;
pktio_info_t pktios[MAX_NUM_IFACES];
@@ -921,7 +1029,10 @@ static void test_txrx(odp_pktin_mode_t in_mode, int num_pkts,
io = &pktios[i];
io->name = iface_name[i];
- io->id = create_pktio(i, in_mode, out_mode);
+ if (vector_mode)
+ io->id = create_pktv_pktio(i, in_mode, out_mode, sync_mode);
+ else
+ io->id = create_pktio(i, in_mode, out_mode);
if (io->id == ODP_PKTIO_INVALID) {
CU_FAIL("failed to open iface");
return;
@@ -956,7 +1067,7 @@ static void test_txrx(odp_pktin_mode_t in_mode, int num_pkts,
/* if we have two interfaces then send through one and receive on
* another but if there's only one assume it's a loopback */
if_b = (num_ifaces == 1) ? 0 : 1;
- pktio_txrx_multi(&pktios[0], &pktios[if_b], num_pkts, mode);
+ pktio_txrx_multi(&pktios[0], &pktios[if_b], num_pkts, mode, vector_mode);
for (i = 0; i < num_ifaces; ++i) {
ret = odp_pktio_stop(pktios[i].id);
@@ -969,54 +1080,54 @@ static void test_txrx(odp_pktin_mode_t in_mode, int num_pkts,
static void pktio_test_plain_queue(void)
{
- test_txrx(ODP_PKTIN_MODE_QUEUE, 1, TXRX_MODE_SINGLE);
- test_txrx(ODP_PKTIN_MODE_QUEUE, TX_BATCH_LEN, TXRX_MODE_SINGLE);
+ test_txrx(ODP_PKTIN_MODE_QUEUE, 1, TXRX_MODE_SINGLE, 0, false);
+ test_txrx(ODP_PKTIN_MODE_QUEUE, TX_BATCH_LEN, TXRX_MODE_SINGLE, 0, false);
}
static void pktio_test_plain_multi(void)
{
- test_txrx(ODP_PKTIN_MODE_QUEUE, TX_BATCH_LEN, TXRX_MODE_MULTI);
- test_txrx(ODP_PKTIN_MODE_QUEUE, 1, TXRX_MODE_MULTI);
+ test_txrx(ODP_PKTIN_MODE_QUEUE, TX_BATCH_LEN, TXRX_MODE_MULTI, 0, false);
+ test_txrx(ODP_PKTIN_MODE_QUEUE, 1, TXRX_MODE_MULTI, 0, false);
}
static void pktio_test_plain_multi_event(void)
{
- test_txrx(ODP_PKTIN_MODE_QUEUE, 1, TXRX_MODE_MULTI_EVENT);
- test_txrx(ODP_PKTIN_MODE_QUEUE, TX_BATCH_LEN, TXRX_MODE_MULTI_EVENT);
+ test_txrx(ODP_PKTIN_MODE_QUEUE, 1, TXRX_MODE_MULTI_EVENT, 0, false);
+ test_txrx(ODP_PKTIN_MODE_QUEUE, TX_BATCH_LEN, TXRX_MODE_MULTI_EVENT, 0, false);
}
static void pktio_test_sched_queue(void)
{
- test_txrx(ODP_PKTIN_MODE_SCHED, 1, TXRX_MODE_SINGLE);
- test_txrx(ODP_PKTIN_MODE_SCHED, TX_BATCH_LEN, TXRX_MODE_SINGLE);
+ test_txrx(ODP_PKTIN_MODE_SCHED, 1, TXRX_MODE_SINGLE, 0, false);
+ test_txrx(ODP_PKTIN_MODE_SCHED, TX_BATCH_LEN, TXRX_MODE_SINGLE, 0, false);
}
static void pktio_test_sched_multi(void)
{
- test_txrx(ODP_PKTIN_MODE_SCHED, TX_BATCH_LEN, TXRX_MODE_MULTI);
- test_txrx(ODP_PKTIN_MODE_SCHED, 1, TXRX_MODE_MULTI);
+ test_txrx(ODP_PKTIN_MODE_SCHED, TX_BATCH_LEN, TXRX_MODE_MULTI, 0, false);
+ test_txrx(ODP_PKTIN_MODE_SCHED, 1, TXRX_MODE_MULTI, 0, false);
}
static void pktio_test_sched_multi_event(void)
{
- test_txrx(ODP_PKTIN_MODE_SCHED, 1, TXRX_MODE_MULTI_EVENT);
- test_txrx(ODP_PKTIN_MODE_SCHED, TX_BATCH_LEN, TXRX_MODE_MULTI_EVENT);
+ test_txrx(ODP_PKTIN_MODE_SCHED, 1, TXRX_MODE_MULTI_EVENT, 0, false);
+ test_txrx(ODP_PKTIN_MODE_SCHED, TX_BATCH_LEN, TXRX_MODE_MULTI_EVENT, 0, false);
}
static void pktio_test_recv(void)
{
- test_txrx(ODP_PKTIN_MODE_DIRECT, 1, TXRX_MODE_SINGLE);
+ test_txrx(ODP_PKTIN_MODE_DIRECT, 1, TXRX_MODE_SINGLE, 0, false);
}
static void pktio_test_recv_multi(void)
{
- test_txrx(ODP_PKTIN_MODE_DIRECT, TX_BATCH_LEN, TXRX_MODE_MULTI);
+ test_txrx(ODP_PKTIN_MODE_DIRECT, TX_BATCH_LEN, TXRX_MODE_MULTI, 0, false);
}
static void pktio_test_recv_multi_event(void)
{
- test_txrx(ODP_PKTIN_MODE_DIRECT, 1, TXRX_MODE_MULTI_EVENT);
- test_txrx(ODP_PKTIN_MODE_DIRECT, TX_BATCH_LEN, TXRX_MODE_MULTI_EVENT);
+ test_txrx(ODP_PKTIN_MODE_DIRECT, 1, TXRX_MODE_MULTI_EVENT, 0, false);
+ test_txrx(ODP_PKTIN_MODE_DIRECT, TX_BATCH_LEN, TXRX_MODE_MULTI_EVENT, 0, false);
}
static void pktio_test_recv_queue(void)
@@ -1713,26 +1824,26 @@ static void pktio_test_pktout_queue_config(void)
#ifdef DEBUG_STATS
static void _print_pktio_stats(odp_pktio_stats_t *s, const char *name)
{
- fprintf(stderr, "\n%s:\n"
- " in_octets %" PRIu64 "\n"
- " in_ucast_pkts %" PRIu64 "\n"
- " in_discards %" PRIu64 "\n"
- " in_errors %" PRIu64 "\n"
- " in_unknown_protos %" PRIu64 "\n"
- " out_octets %" PRIu64 "\n"
- " out_ucast_pkts %" PRIu64 "\n"
- " out_discards %" PRIu64 "\n"
- " out_errors %" PRIu64 "\n",
- name,
- s->in_octets,
- s->in_ucast_pkts,
- s->in_discards,
- s->in_errors,
- s->in_unknown_protos,
- s->out_octets,
- s->out_ucast_pkts,
- s->out_discards,
- s->out_errors);
+ ODPH_ERR("\n%s:\n"
+ " in_octets %" PRIu64 "\n"
+ " in_ucast_pkts %" PRIu64 "\n"
+ " in_discards %" PRIu64 "\n"
+ " in_errors %" PRIu64 "\n"
+ " in_unknown_protos %" PRIu64 "\n"
+ " out_octets %" PRIu64 "\n"
+ " out_ucast_pkts %" PRIu64 "\n"
+ " out_discards %" PRIu64 "\n"
+ " out_errors %" PRIu64 "\n",
+ name,
+ s->in_octets,
+ s->in_ucast_pkts,
+ s->in_discards,
+ s->in_errors,
+ s->in_unknown_protos,
+ s->out_octets,
+ s->out_ucast_pkts,
+ s->out_discards,
+ s->out_errors);
}
#endif
@@ -2144,7 +2255,7 @@ static void pktio_test_pktin_ts(void)
CU_ASSERT_FATAL(odp_pktout_send(pktout_queue,
&pkt_tbl[i], 1) == 1);
ret = wait_for_packets(&pktio_rx_info, &pkt_tbl[i], &pkt_seq[i],
- 1, TXRX_MODE_SINGLE, ODP_TIME_SEC_IN_NS);
+ 1, TXRX_MODE_SINGLE, ODP_TIME_SEC_IN_NS, false);
if (ret != 1)
break;
odp_time_wait_ns(PKTIO_TS_INTERVAL);
@@ -2254,7 +2365,8 @@ static void pktio_test_pktout_ts(void)
CU_ASSERT_FATAL(odp_pktout_send(pktout_queue,
&pkt_tbl[i], 1) == 1);
ret = wait_for_packets(&pktio_rx_info, &pkt_tbl[i], &pkt_seq[i],
- 1, TXRX_MODE_SINGLE, ODP_TIME_SEC_IN_NS);
+ 1, TXRX_MODE_SINGLE, ODP_TIME_SEC_IN_NS,
+ false);
if (ret != 1)
break;
@@ -2340,7 +2452,7 @@ static void pktio_test_chksum(void (*config_fn)(odp_pktio_t, odp_pktio_t),
send_packets(pktout_queue, pkt_tbl, TX_BATCH_LEN);
num_rx = wait_for_packets(&pktio_rx_info, pkt_tbl, pkt_seq,
TX_BATCH_LEN, TXRX_MODE_MULTI,
- ODP_TIME_SEC_IN_NS);
+ ODP_TIME_SEC_IN_NS, false);
CU_ASSERT(num_rx == TX_BATCH_LEN);
for (i = 0; i < num_rx; i++) {
test_fn(pkt_tbl[i]);
@@ -2415,7 +2527,7 @@ static void pktio_test_chksum_sctp(void (*config_fn)(odp_pktio_t, odp_pktio_t),
send_packets(pktout_queue, pkt_tbl, TX_BATCH_LEN);
num_rx = wait_for_packets_hdr(&pktio_rx_info, pkt_tbl, pkt_seq,
TX_BATCH_LEN, TXRX_MODE_MULTI,
- ODP_TIME_SEC_IN_NS, ODPH_SCTPHDR_LEN);
+ ODP_TIME_SEC_IN_NS, ODPH_SCTPHDR_LEN, false);
CU_ASSERT(num_rx == TX_BATCH_LEN);
for (i = 0; i < num_rx; i++) {
test_fn(pkt_tbl[i]);
@@ -2976,14 +3088,184 @@ static int create_pool(const char *iface, int num)
pool[num] = odp_pool_create(pool_name, &params);
if (ODP_POOL_INVALID == pool[num]) {
- fprintf(stderr, "%s: failed to create pool: %d",
- __func__, odp_errno());
+ ODPH_ERR("failed to create pool: %d", odp_errno());
return -1;
}
return 0;
}
+static int create_pktv_pool(const char *iface, int num)
+{
+ char pool_name[ODP_POOL_NAME_LEN];
+ odp_pool_capability_t pool_capa;
+ odp_pool_param_t params;
+
+ if (odp_pool_capability(&pool_capa) != 0)
+ return -1;
+
+ if (pool_capa.vector.max_num < PKT_BUF_NUM)
+ return -1;
+
+ odp_pool_param_init(&params);
+ set_pool_len(&params, &pool_capa);
+ params.type = ODP_POOL_VECTOR;
+ params.vector.num = PKT_BUF_NUM;
+ params.vector.max_size = pool_capa.vector.max_size;
+
+ snprintf(pool_name, sizeof(pool_name), "pktv_pool_%s_%d",
+ iface, pool_segmentation);
+
+ pktv_pool[num] = odp_pool_create(pool_name, &params);
+ if (ODP_POOL_INVALID == pktv_pool[num]) {
+ ODPH_ERR("failed to create pool: %d", odp_errno());
+ return -1;
+ }
+
+ return 0;
+}
+
+static int pktio_check_pktv(odp_pktin_mode_t in_mode)
+{
+ odp_pktio_t pktio;
+ odp_pktio_capability_t capa;
+ odp_pktio_param_t pktio_param;
+ int ret;
+
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = in_mode;
+
+ pktio = odp_pktio_open(iface_name[0], pool[0], &pktio_param);
+ if (pktio == ODP_PKTIO_INVALID)
+ return ODP_TEST_INACTIVE;
+
+ ret = odp_pktio_capability(pktio, &capa);
+ (void)odp_pktio_close(pktio);
+
+ if (ret < 0 || !capa.vector.supported)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+static int pktio_check_pktv_queue(void)
+{
+ return pktio_check_pktv(ODP_PKTIN_MODE_QUEUE);
+}
+
+static int pktio_check_pktv_sched(void)
+{
+ return pktio_check_pktv(ODP_PKTIN_MODE_SCHED);
+}
+
+static void pktio_test_pktv_recv_plain(void)
+{
+ test_txrx(ODP_PKTIN_MODE_QUEUE, PKTV_TX_BATCH_LEN, TXRX_MODE_MULTI_EVENT, 0, true);
+}
+
+static void pktio_test_pktv_recv_parallel(void)
+{
+ test_txrx(ODP_PKTIN_MODE_SCHED, PKTV_TX_BATCH_LEN, TXRX_MODE_MULTI_EVENT,
+ ODP_SCHED_SYNC_PARALLEL, true);
+}
+
+static void pktio_test_pktv_recv_ordered(void)
+{
+ test_txrx(ODP_PKTIN_MODE_SCHED, PKTV_TX_BATCH_LEN, TXRX_MODE_MULTI_EVENT,
+ ODP_SCHED_SYNC_ORDERED, true);
+}
+
+static void pktio_test_pktv_recv_atomic(void)
+{
+ test_txrx(ODP_PKTIN_MODE_SCHED, PKTV_TX_BATCH_LEN, TXRX_MODE_MULTI_EVENT,
+ ODP_SCHED_SYNC_ATOMIC, true);
+}
+
+static void pktio_test_pktv_pktin_queue_config(odp_pktin_mode_t in_mode)
+{
+ odp_pktin_queue_param_t queue_param;
+ odp_pktio_capability_t capa;
+ odp_pktio_t pktio;
+ int num_queues;
+ int i;
+
+ pktio = create_pktio(0, in_mode, ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio, &capa) == 0 &&
+ capa.max_input_queues > 0);
+ num_queues = capa.max_input_queues;
+
+ odp_pktin_queue_param_init(&queue_param);
+ queue_param.hash_enable = (num_queues > 1) ? 1 : 0;
+ queue_param.hash_proto.proto.ipv4_udp = 1;
+ queue_param.num_queues = num_queues;
+ queue_param.vector.enable = 1;
+ queue_param.vector.pool = default_pktv_pool;
+ queue_param.vector.max_size = capa.vector.min_size;
+ queue_param.vector.max_tmo_ns = capa.vector.min_tmo_ns;
+ CU_ASSERT(odp_pktin_queue_config(pktio, &queue_param) == 0);
+
+ queue_param.vector.max_size = capa.vector.max_size;
+ CU_ASSERT(odp_pktin_queue_config(pktio, &queue_param) == 0);
+
+ if (capa.vector.max_size != capa.vector.min_size) {
+ queue_param.vector.max_size = capa.vector.max_size - capa.vector.min_size;
+ CU_ASSERT(odp_pktin_queue_config(pktio, &queue_param) == 0);
+ }
+
+ queue_param.vector.max_size = capa.vector.min_size - 1;
+ CU_ASSERT(odp_pktin_queue_config(pktio, &queue_param) != 0);
+
+ queue_param.vector.max_size = capa.vector.max_size + 1;
+ CU_ASSERT(odp_pktin_queue_config(pktio, &queue_param) != 0);
+
+ CU_ASSERT_FATAL(odp_pktio_close(pktio) == 0);
+
+ for (i = 0; i < num_ifaces; i++) {
+ pktio = create_pktio(i, in_mode, ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio, &capa) == 0);
+
+ if (!capa.vector.supported) {
+ printf("Vector mode is not supported. Test skipped.\n");
+ CU_ASSERT_FATAL(odp_pktio_close(pktio) == 0);
+ return;
+ }
+
+ queue_param.vector.enable = 1;
+ queue_param.vector.pool = pktv_pool[i];
+ queue_param.vector.max_size = capa.vector.min_size;
+ CU_ASSERT(odp_pktin_queue_config(pktio, &queue_param) == 0);
+
+ queue_param.vector.max_size = capa.vector.max_size;
+ CU_ASSERT(odp_pktin_queue_config(pktio, &queue_param) == 0);
+
+ if (capa.vector.max_size != capa.vector.min_size) {
+ queue_param.vector.max_size = capa.vector.max_size - capa.vector.min_size;
+ CU_ASSERT(odp_pktin_queue_config(pktio, &queue_param) == 0);
+ }
+
+ queue_param.vector.max_size = capa.vector.min_size - 1;
+ CU_ASSERT(odp_pktin_queue_config(pktio, &queue_param) != 0);
+
+ queue_param.vector.max_size = capa.vector.max_size + 1;
+ CU_ASSERT(odp_pktin_queue_config(pktio, &queue_param) != 0);
+
+ CU_ASSERT_FATAL(odp_pktio_close(pktio) == 0);
+ }
+}
+
+static void pktio_test_pktv_pktin_queue_config_queue(void)
+{
+ pktio_test_pktv_pktin_queue_config(ODP_PKTIN_MODE_QUEUE);
+}
+
+static void pktio_test_pktv_pktin_queue_config_sched(void)
+{
+ pktio_test_pktv_pktin_queue_config(ODP_PKTIN_MODE_SCHED);
+}
+
static int pktio_suite_init(void)
{
int i;
@@ -3011,10 +3293,18 @@ static int pktio_suite_init(void)
for (i = 0; i < num_ifaces; i++) {
if (create_pool(iface_name[i], i) != 0)
return -1;
+
+ if (create_pktv_pool(iface_name[i], i) != 0)
+ return -1;
}
if (default_pool_create() != 0) {
- fprintf(stderr, "error: failed to create default pool\n");
+ ODPH_ERR("failed to create default pool\n");
+ return -1;
+ }
+
+ if (default_pktv_pool_create() != 0) {
+ ODPH_ERR("failed to create default pktv pool\n");
return -1;
}
@@ -3033,6 +3323,12 @@ static int pktio_suite_init_segmented(void)
return pktio_suite_init();
}
+static int pktv_suite_init(void)
+{
+ pool_segmentation = PKT_POOL_UNSEGMENTED;
+ return pktio_suite_init();
+}
+
static int pktio_suite_term(void)
{
char pool_name[ODP_POOL_NAME_LEN];
@@ -3048,24 +3344,48 @@ static int pktio_suite_term(void)
continue;
if (odp_pool_destroy(pool) != 0) {
- fprintf(stderr, "error: failed to destroy pool %s\n",
- pool_name);
+ ODPH_ERR("failed to destroy pool %s\n", pool_name);
+ ret = -1;
+ }
+ }
+
+ for (i = 0; i < num_ifaces; ++i) {
+ snprintf(pool_name, sizeof(pool_name),
+ "pktv_pool_%s_%d", iface_name[i], pool_segmentation);
+ pool = odp_pool_lookup(pool_name);
+ if (pool == ODP_POOL_INVALID)
+ continue;
+
+ if (odp_pool_destroy(pool) != 0) {
+ ODPH_ERR("failed to destroy pool %s\n", pool_name);
ret = -1;
}
}
if (odp_pool_destroy(default_pkt_pool) != 0) {
- fprintf(stderr, "error: failed to destroy default pool\n");
+ ODPH_ERR("failed to destroy default pool\n");
ret = -1;
}
default_pkt_pool = ODP_POOL_INVALID;
+ if (odp_pool_destroy(default_pktv_pool) != 0) {
+ ODPH_ERR("failed to destroy default pktv pool\n");
+ ret = -1;
+ }
+ default_pktv_pool = ODP_POOL_INVALID;
+
if (odp_cunit_print_inactive())
ret = -1;
return ret;
}
+static int pktv_suite_term(void)
+{
+ pool_segmentation = PKT_POOL_UNSEGMENTED;
+ return pktio_suite_term();
+}
+
odp_testinfo_t pktio_suite_unsegmented[] = {
ODP_TEST_INFO(pktio_test_open),
ODP_TEST_INFO(pktio_test_lookup),
@@ -3143,12 +3463,23 @@ odp_testinfo_t pktio_suite_segmented[] = {
ODP_TEST_INFO_NULL
};
+odp_testinfo_t pktv_suite[] = {
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_pktv_pktin_queue_config_queue, pktio_check_pktv_queue),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_pktv_pktin_queue_config_sched, pktio_check_pktv_sched),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_pktv_recv_plain, pktio_check_pktv_queue),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_pktv_recv_parallel, pktio_check_pktv_sched),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_pktv_recv_ordered, pktio_check_pktv_sched),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_pktv_recv_atomic, pktio_check_pktv_sched),
+ ODP_TEST_INFO_NULL
+};
+
odp_suiteinfo_t pktio_suites[] = {
{"Packet I/O Unsegmented", pktio_suite_init_unsegmented,
pktio_suite_term, pktio_suite_unsegmented},
{"Packet I/O Segmented", pktio_suite_init_segmented,
pktio_suite_term, pktio_suite_segmented},
{"Packet parser", parser_suite_init, parser_suite_term, parser_suite},
+ {"Packet vector", pktv_suite_init, pktv_suite_term, pktv_suite},
ODP_SUITE_INFO_NULL
};
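[Editor's note, not part of the patch: create_pktv_pktio() above shows the full receive-side vector setup. A condensed sketch of just the pktin vector configuration follows; the helper name and the default size of 8 are placeholders, and it assumes the pktio handle is already open with a vector pool at hand.]

static int enable_pktin_vectors(odp_pktio_t pktio, odp_pool_t vec_pool)
{
	odp_pktio_capability_t capa;
	odp_pktin_queue_param_t param;

	if (odp_pktio_capability(pktio, &capa) != 0 || !capa.vector.supported)
		return -1;

	odp_pktin_queue_param_init(&param);
	param.num_queues = 1;
	param.vector.enable = 1;
	param.vector.pool = vec_pool;
	/* max_size must fall within [capa.vector.min_size, capa.vector.max_size] */
	param.vector.max_size = capa.vector.max_size < 8 ? capa.vector.max_size : 8;
	/* max_tmo_ns limits how long the implementation may wait to fill a vector */
	param.vector.max_tmo_ns = capa.vector.min_tmo_ns;

	return odp_pktin_queue_config(pktio, &param);
}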
diff --git a/test/validation/api/pool/pool.c b/test/validation/api/pool/pool.c
index 651a00162..78037c65c 100644
--- a/test/validation/api/pool/pool.c
+++ b/test/validation/api/pool/pool.c
@@ -1,4 +1,6 @@
/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2020, Marvell
+ * Copyright (c) 2020, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -10,8 +12,11 @@
#define BUF_SIZE 1500
#define BUF_NUM 1000
#define TMO_NUM 1000
+#define VEC_NUM 1000
+#define VEC_LEN 32
#define PKT_LEN 400
#define PKT_NUM 500
+#define CACHE_SIZE 32
#define MAX_NUM_DEFAULT (10 * 1024 * 1024)
typedef struct {
@@ -75,6 +80,27 @@ static void pool_test_create_destroy_timeout(void)
pool_create_destroy(&param);
}
+static void pool_test_create_destroy_vector(void)
+{
+ odp_pool_param_t param;
+ odp_pool_capability_t capa;
+ uint32_t max_num = VEC_NUM;
+
+ CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
+
+ CU_ASSERT_FATAL(capa.vector.max_pools > 0);
+
+ if (capa.vector.max_num && capa.vector.max_num < max_num)
+ max_num = capa.vector.max_num;
+
+ odp_pool_param_init(&param);
+ param.type = ODP_POOL_VECTOR;
+ param.vector.num = max_num;
+ param.vector.max_size = capa.vector.max_size < VEC_LEN ? capa.vector.max_size : VEC_LEN;
+
+ pool_create_destroy(&param);
+}
+
static void pool_test_lookup_info_print(void)
{
odp_pool_t pool;
@@ -156,6 +182,61 @@ static void pool_test_alloc_buffer_max_cache(void)
alloc_buffer(global_pool_capa.buf.max_cache_size);
}
+static void alloc_packet_vector(uint32_t cache_size)
+{
+ odp_pool_t pool;
+ odp_pool_param_t param;
+ odp_pool_capability_t capa;
+ uint32_t i, num;
+ odp_packet_vector_t pkt_vec[VEC_NUM];
+ uint32_t max_num = VEC_NUM;
+
+ CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
+
+ if (capa.vector.max_num && capa.vector.max_num < max_num)
+ max_num = capa.vector.max_num;
+
+ odp_pool_param_init(&param);
+ param.type = ODP_POOL_VECTOR;
+ param.vector.num = max_num;
+ param.vector.max_size = capa.vector.max_size < VEC_LEN ? capa.vector.max_size : VEC_LEN;
+ param.vector.cache_size = cache_size;
+
+ pool = odp_pool_create(NULL, &param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ num = 0;
+ for (i = 0; i < max_num; i++) {
+ pkt_vec[num] = odp_packet_vector_alloc(pool);
+ CU_ASSERT(pkt_vec[num] != ODP_PACKET_VECTOR_INVALID);
+ CU_ASSERT(odp_packet_vector_valid(pkt_vec[num]) == 1);
+ CU_ASSERT(odp_event_is_valid(odp_packet_vector_to_event(pkt_vec[num])) == 1);
+
+ if (pkt_vec[num] != ODP_PACKET_VECTOR_INVALID)
+ num++;
+ }
+
+ for (i = 0; i < num; i++)
+ odp_packet_vector_free(pkt_vec[i]);
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+static void pool_test_alloc_packet_vector(void)
+{
+ alloc_packet_vector(default_pool_param.vector.cache_size);
+}
+
+static void pool_test_alloc_packet_vector_min_cache(void)
+{
+ alloc_packet_vector(global_pool_capa.vector.min_cache_size);
+}
+
+static void pool_test_alloc_packet_vector_max_cache(void)
+{
+ alloc_packet_vector(global_pool_capa.vector.max_cache_size);
+}
+
static void alloc_packet(uint32_t cache_size)
{
odp_pool_t pool;
@@ -434,8 +515,11 @@ static void pool_test_buf_max_num(void)
for (i = 0; i < max_num; i++) {
buf[num] = odp_buffer_alloc(pool);
- if (buf[num] != ODP_BUFFER_INVALID)
+ if (buf[num] != ODP_BUFFER_INVALID) {
+ CU_ASSERT(odp_buffer_is_valid(buf[num]) == 1);
+ CU_ASSERT(odp_event_is_valid(odp_buffer_to_event(buf[num])) == 1);
num++;
+ }
}
CU_ASSERT(num == max_num);
@@ -488,8 +572,11 @@ static void pool_test_pkt_max_num(void)
for (i = 0; i < max_num; i++) {
pkt[num] = odp_packet_alloc(pool, len);
- if (pkt[num] != ODP_PACKET_INVALID)
+ if (pkt[num] != ODP_PACKET_INVALID) {
+ CU_ASSERT(odp_packet_is_valid(pkt[num]) == 1);
+ CU_ASSERT(odp_event_is_valid(odp_packet_to_event(pkt[num])) == 1);
num++;
+ }
}
CU_ASSERT(num == max_num);
@@ -501,6 +588,54 @@ static void pool_test_pkt_max_num(void)
CU_ASSERT(odp_pool_destroy(pool) == 0);
}
+static void pool_test_packet_vector_max_num(void)
+{
+ odp_pool_t pool;
+ odp_pool_param_t param;
+ odp_pool_capability_t capa;
+ uint32_t num, i;
+ odp_shm_t shm;
+ odp_packet_vector_t *pktv;
+ uint32_t max_num = VEC_NUM;
+
+ CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
+
+ if (capa.vector.max_num)
+ max_num = capa.vector.max_num;
+
+ odp_pool_param_init(&param);
+
+ param.type = ODP_POOL_VECTOR;
+ param.vector.num = max_num;
+ param.vector.max_size = 1;
+
+ pool = odp_pool_create("test_packet_vector_max_num", &param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ shm = odp_shm_reserve("test_max_num_shm", max_num * sizeof(odp_packet_vector_t),
+ sizeof(odp_packet_vector_t), 0);
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+
+ pktv = odp_shm_addr(shm);
+ CU_ASSERT_FATAL(pktv != NULL);
+
+ num = 0;
+ for (i = 0; i < max_num; i++) {
+ pktv[num] = odp_packet_vector_alloc(pool);
+
+ if (pktv[num] != ODP_PACKET_VECTOR_INVALID)
+ num++;
+ }
+
+ CU_ASSERT(num == max_num);
+
+ for (i = 0; i < num; i++)
+ odp_packet_vector_free(pktv[i]);
+
+ CU_ASSERT(odp_shm_free(shm) == 0);
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
static void pool_test_pkt_seg_len(void)
{
uint32_t len = 1500;
@@ -576,8 +711,10 @@ static void pool_test_tmo_max_num(void)
for (i = 0; i < max_num; i++) {
tmo[num] = odp_timeout_alloc(pool);
- if (tmo[num] != ODP_TIMEOUT_INVALID)
+ if (tmo[num] != ODP_TIMEOUT_INVALID) {
+ CU_ASSERT(odp_event_is_valid(odp_timeout_to_event(tmo[num])) == 1);
num++;
+ }
}
CU_ASSERT(num == max_num);
@@ -679,6 +816,311 @@ static void pool_test_create_after_fork(void)
CU_ASSERT(!odp_shm_free(shm));
}
+static void pool_test_pool_index(void)
+{
+ uint32_t max_pools = global_pool_capa.pkt.max_pools;
+ uint32_t i, num_pools;
+ unsigned int max_index = odp_pool_max_index();
+ odp_packet_t pool_lookup[max_index + 1];
+ odp_packet_t pkt;
+ odp_pool_t pool[max_pools];
+ odp_pool_param_t param;
+ int pool_index;
+
+ CU_ASSERT_FATAL(max_pools > 0);
+
+ /* Pool max index should match to pool capability */
+ CU_ASSERT_FATAL(max_index >= global_pool_capa.max_pools - 1);
+ CU_ASSERT_FATAL(max_index >= global_pool_capa.pkt.max_pools - 1);
+
+ odp_pool_param_init(&param);
+ param.type = ODP_POOL_PACKET;
+ param.pkt.len = PKT_LEN;
+ param.pkt.num = 1;
+ param.pkt.max_num = 1;
+
+ for (i = 0; i < max_pools; i++) {
+ pool[i] = odp_pool_create(NULL, &param);
+
+ if (pool[i] == ODP_POOL_INVALID)
+ break;
+ }
+
+ /* Ensure the maximum number of pools was created */
+ num_pools = i;
+ CU_ASSERT(num_pools == max_pools);
+
+ for (i = 0; i < num_pools; i++) {
+ pkt = odp_packet_alloc(pool[i], PKT_LEN);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ /* Only one packet should be possible from each pool */
+ CU_ASSERT_FATAL(odp_packet_alloc(pool[i], PKT_LEN) == ODP_PACKET_INVALID);
+
+ /* Check pool index validity */
+ pool_index = odp_pool_index(pool[i]);
+ CU_ASSERT_FATAL(pool_index >= 0);
+ CU_ASSERT_FATAL((unsigned int)pool_index <= odp_pool_max_index());
+
+ /* Store packet handle in pool lookup table */
+ pool_lookup[pool_index] = pkt;
+ }
+
+ for (i = 0; i < num_pools; i++) {
+ pool_index = odp_pool_index(pool[i]);
+
+ /* Free the packet using pool lookup */
+ odp_packet_free(pool_lookup[pool_index]);
+
+ /* Now packet allocation from the pool should be possible */
+ pkt = odp_packet_alloc(pool[i], PKT_LEN);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ odp_packet_free(pkt);
+
+ /* Destroy the pool */
+ CU_ASSERT(odp_pool_destroy(pool[i]) == 0);
+ }
+}
+
+static int pool_check_buffer_pool_statistics(void)
+{
+ if (global_pool_capa.buf.stats.all == 0)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+static int pool_check_packet_pool_statistics(void)
+{
+ if (global_pool_capa.pkt.stats.all == 0)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+static int pool_check_packet_vector_pool_statistics(void)
+{
+ if (global_pool_capa.vector.stats.all == 0)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+static int pool_check_timeout_pool_statistics(void)
+{
+ if (global_pool_capa.tmo.stats.all == 0)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+static void pool_test_pool_statistics(int pool_type)
+{
+ odp_pool_stats_t stats;
+ odp_pool_param_t param;
+ odp_pool_stats_opt_t supported;
+ uint32_t i, j, num_pool, num_obj, cache_size;
+ uint32_t max_pools = 2;
+
+ odp_pool_param_init(&param);
+
+ if (pool_type == ODP_POOL_BUFFER) {
+ max_pools = global_pool_capa.buf.max_pools < max_pools ?
+ global_pool_capa.buf.max_pools : max_pools;
+ num_obj = BUF_NUM;
+ supported.all = global_pool_capa.buf.stats.all;
+ param.type = ODP_POOL_BUFFER;
+ cache_size = CACHE_SIZE > global_pool_capa.buf.max_cache_size ?
+ global_pool_capa.buf.max_cache_size : CACHE_SIZE;
+ param.buf.cache_size = cache_size;
+ param.buf.size = BUF_SIZE;
+ param.buf.num = num_obj;
+ } else if (pool_type == ODP_POOL_PACKET) {
+ max_pools = global_pool_capa.pkt.max_pools < max_pools ?
+ global_pool_capa.pkt.max_pools : max_pools;
+ num_obj = PKT_NUM;
+ supported.all = global_pool_capa.pkt.stats.all;
+ param.type = ODP_POOL_PACKET;
+ cache_size = CACHE_SIZE > global_pool_capa.pkt.max_cache_size ?
+ global_pool_capa.pkt.max_cache_size : CACHE_SIZE;
+ param.pkt.cache_size = cache_size;
+ param.pkt.len = PKT_LEN;
+ param.pkt.num = num_obj;
+ param.pkt.max_num = num_obj;
+ } else if (pool_type == ODP_POOL_VECTOR) {
+ max_pools = global_pool_capa.vector.max_pools < max_pools ?
+ global_pool_capa.vector.max_pools : max_pools;
+ num_obj = VEC_NUM;
+ if (global_pool_capa.vector.max_num && global_pool_capa.vector.max_num < num_obj)
+ num_obj = global_pool_capa.vector.max_num;
+ supported.all = global_pool_capa.vector.stats.all;
+ param.type = ODP_POOL_VECTOR;
+ cache_size = CACHE_SIZE > global_pool_capa.vector.max_cache_size ?
+ global_pool_capa.vector.max_cache_size : CACHE_SIZE;
+ param.vector.cache_size = cache_size;
+ param.vector.num = num_obj;
+ param.vector.max_size = global_pool_capa.vector.max_size < VEC_LEN ?
+ global_pool_capa.vector.max_size : VEC_LEN;
+ } else {
+ max_pools = global_pool_capa.tmo.max_pools < max_pools ?
+ global_pool_capa.tmo.max_pools : max_pools;
+ num_obj = TMO_NUM;
+ supported.all = global_pool_capa.tmo.stats.all;
+ param.type = ODP_POOL_TIMEOUT;
+ cache_size = CACHE_SIZE > global_pool_capa.tmo.max_cache_size ?
+ global_pool_capa.tmo.max_cache_size : CACHE_SIZE;
+ param.tmo.cache_size = cache_size;
+ param.tmo.num = num_obj;
+ }
+
+ param.stats.all = supported.all;
+
+ CU_ASSERT_FATAL(max_pools != 0);
+
+ /* Extra alloc rounds for testing odp_pool_stats_t.alloc_fails */
+ uint32_t num_alloc_rounds = num_obj + 100;
+ odp_event_t event[max_pools][num_alloc_rounds];
+ uint32_t num_event[max_pools];
+ odp_pool_t pool[max_pools];
+
+ for (i = 0; i < max_pools; i++) {
+ pool[i] = odp_pool_create(NULL, &param);
+
+ if (pool[i] == ODP_POOL_INVALID)
+ break;
+ }
+
+ num_pool = i;
+ CU_ASSERT(num_pool == max_pools);
+
+ for (i = 0; i < num_pool; i++) {
+ uint32_t num_events = 0;
+ uint32_t num_fails = 0;
+
+ CU_ASSERT_FATAL(odp_pool_stats_reset(pool[i]) == 0);
+ CU_ASSERT_FATAL(odp_pool_stats(pool[i], &stats) == 0);
+
+ CU_ASSERT(stats.available <= num_obj);
+ CU_ASSERT(stats.alloc_ops == 0);
+ CU_ASSERT(stats.alloc_fails == 0);
+ CU_ASSERT(stats.free_ops == 0);
+ CU_ASSERT(stats.total_ops == 0);
+ CU_ASSERT(stats.cache_available <= num_obj);
+ CU_ASSERT(stats.cache_alloc_ops == 0);
+ CU_ASSERT(stats.cache_free_ops == 0);
+
+ /* Allocate the events */
+ for (j = 0; j < num_alloc_rounds; j++) {
+ odp_event_t new_event = ODP_EVENT_INVALID;
+
+ if (pool_type == ODP_POOL_BUFFER) {
+ odp_buffer_t buf = odp_buffer_alloc(pool[i]);
+
+ if (buf != ODP_BUFFER_INVALID)
+ new_event = odp_buffer_to_event(buf);
+ } else if (pool_type == ODP_POOL_PACKET) {
+ odp_packet_t pkt = odp_packet_alloc(pool[i], PKT_LEN);
+
+ if (pkt != ODP_PACKET_INVALID)
+ new_event = odp_packet_to_event(pkt);
+ } else if (pool_type == ODP_POOL_VECTOR) {
+ odp_packet_vector_t pktv = odp_packet_vector_alloc(pool[i]);
+
+ if (pktv != ODP_PACKET_VECTOR_INVALID)
+ new_event = odp_packet_vector_to_event(pktv);
+ } else {
+ odp_timeout_t tmo = odp_timeout_alloc(pool[i]);
+
+ if (tmo != ODP_TIMEOUT_INVALID)
+ new_event = odp_timeout_to_event(tmo);
+ }
+
+ if (new_event != ODP_EVENT_INVALID)
+ event[i][num_events++] = new_event;
+ else
+ num_fails++;
+
+ CU_ASSERT_FATAL(odp_pool_stats(pool[i], &stats) == 0);
+ CU_ASSERT(stats.available <= num_obj - num_events);
+ CU_ASSERT(stats.cache_available <= num_obj - num_events);
+ }
+
+ CU_ASSERT(num_events == num_obj);
+ num_event[i] = num_events;
+
+ /* All events are allocated, available count in pool and pool
+ * local caches should be zero. */
+ CU_ASSERT_FATAL(odp_pool_stats(pool[i], &stats) == 0);
+ CU_ASSERT(stats.available == 0);
+ CU_ASSERT(stats.cache_available == 0);
+ if (supported.bit.alloc_ops)
+ CU_ASSERT(stats.alloc_ops > 0 && stats.alloc_ops <= num_obj + 1);
+ if (supported.bit.alloc_fails)
+ CU_ASSERT(stats.alloc_fails == num_fails);
+ if (supported.bit.total_ops)
+ CU_ASSERT(stats.total_ops > 0 && stats.total_ops <= num_obj + 1);
+ CU_ASSERT(stats.free_ops == 0);
+ CU_ASSERT(stats.cache_free_ops == 0);
+ }
+
+ for (i = 0; i < num_pool; i++) {
+ odp_event_free_multi(event[i], num_event[i]);
+
+ CU_ASSERT_FATAL(odp_pool_stats(pool[i], &stats) == 0);
+
+ if (supported.bit.available && supported.bit.cache_available)
+ CU_ASSERT(stats.available + stats.cache_available == num_obj);
+ if (supported.bit.free_ops)
+ CU_ASSERT(stats.free_ops > 0);
+ if (supported.bit.total_ops)
+ CU_ASSERT(stats.total_ops > 0);
+
+ if (i == 0) {
+ printf("\nPool Statistics\n---------------\n");
+ printf(" available: %" PRIu64 "\n", stats.available);
+ printf(" alloc_ops: %" PRIu64 "\n", stats.alloc_ops);
+ printf(" alloc_fails: %" PRIu64 "\n", stats.alloc_fails);
+ printf(" free_ops: %" PRIu64 "\n", stats.free_ops);
+ printf(" total_ops: %" PRIu64 "\n", stats.total_ops);
+ printf(" cache_available: %" PRIu64 "\n", stats.cache_available);
+ printf(" cache_alloc_ops: %" PRIu64 "\n", stats.cache_alloc_ops);
+ printf(" cache_free_ops: %" PRIu64 "\n", stats.cache_free_ops);
+ }
+
+ CU_ASSERT_FATAL(odp_pool_stats_reset(pool[i]) == 0);
+ CU_ASSERT_FATAL(odp_pool_stats(pool[i], &stats) == 0);
+
+ CU_ASSERT(stats.alloc_ops == 0);
+ CU_ASSERT(stats.alloc_fails == 0);
+ CU_ASSERT(stats.free_ops == 0);
+ CU_ASSERT(stats.total_ops == 0);
+ CU_ASSERT(stats.cache_alloc_ops == 0);
+ CU_ASSERT(stats.cache_free_ops == 0);
+
+ CU_ASSERT(odp_pool_destroy(pool[i]) == 0);
+ }
+}
+
+static void pool_test_buffer_pool_statistics(void)
+{
+ pool_test_pool_statistics(ODP_POOL_BUFFER);
+}
+
+static void pool_test_packet_pool_statistics(void)
+{
+ pool_test_pool_statistics(ODP_POOL_PACKET);
+}
+
+static void pool_test_packet_vector_pool_statistics(void)
+{
+ pool_test_pool_statistics(ODP_POOL_VECTOR);
+}
+
+static void pool_test_timeout_pool_statistics(void)
+{
+ pool_test_pool_statistics(ODP_POOL_TIMEOUT);
+}
+
static int pool_suite_init(void)
{
memset(&global_pool_capa, 0, sizeof(odp_pool_capability_t));
@@ -698,9 +1140,13 @@ odp_testinfo_t pool_suite[] = {
ODP_TEST_INFO(pool_test_create_destroy_buffer),
ODP_TEST_INFO(pool_test_create_destroy_packet),
ODP_TEST_INFO(pool_test_create_destroy_timeout),
+ ODP_TEST_INFO(pool_test_create_destroy_vector),
ODP_TEST_INFO(pool_test_alloc_buffer),
ODP_TEST_INFO(pool_test_alloc_buffer_min_cache),
ODP_TEST_INFO(pool_test_alloc_buffer_max_cache),
+ ODP_TEST_INFO(pool_test_alloc_packet_vector),
+ ODP_TEST_INFO(pool_test_alloc_packet_vector_min_cache),
+ ODP_TEST_INFO(pool_test_alloc_packet_vector_max_cache),
ODP_TEST_INFO(pool_test_alloc_packet),
ODP_TEST_INFO(pool_test_alloc_packet_min_cache),
ODP_TEST_INFO(pool_test_alloc_packet_max_cache),
@@ -713,9 +1159,19 @@ odp_testinfo_t pool_suite[] = {
ODP_TEST_INFO(pool_test_info_data_range),
ODP_TEST_INFO(pool_test_buf_max_num),
ODP_TEST_INFO(pool_test_pkt_max_num),
+ ODP_TEST_INFO(pool_test_packet_vector_max_num),
ODP_TEST_INFO(pool_test_pkt_seg_len),
ODP_TEST_INFO(pool_test_tmo_max_num),
ODP_TEST_INFO(pool_test_create_after_fork),
+ ODP_TEST_INFO(pool_test_pool_index),
+ ODP_TEST_INFO_CONDITIONAL(pool_test_buffer_pool_statistics,
+ pool_check_buffer_pool_statistics),
+ ODP_TEST_INFO_CONDITIONAL(pool_test_packet_pool_statistics,
+ pool_check_packet_pool_statistics),
+ ODP_TEST_INFO_CONDITIONAL(pool_test_packet_vector_pool_statistics,
+ pool_check_packet_vector_pool_statistics),
+ ODP_TEST_INFO_CONDITIONAL(pool_test_timeout_pool_statistics,
+ pool_check_timeout_pool_statistics),
ODP_TEST_INFO_NULL,
};
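
For reference, the counters exercised by the new pool statistics tests are read and
reset with the same two calls used above. A minimal sketch, assuming the ODP API
headers are available and that the relevant counters were enabled in
odp_pool_param_t when the pool was created (that step is outside this hunk); the
helper name is illustrative only:

#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <odp_api.h>

/* Print a few pool counters and restart them from zero. Whether a given
 * counter is meaningful depends on the capability bits checked in the
 * tests above. */
static void dump_and_reset_pool_stats(odp_pool_t pool)
{
	odp_pool_stats_t stats;

	memset(&stats, 0, sizeof(stats));

	if (odp_pool_stats(pool, &stats) == 0) {
		printf("available:   %" PRIu64 "\n", stats.available);
		printf("alloc_ops:   %" PRIu64 "\n", stats.alloc_ops);
		printf("alloc_fails: %" PRIu64 "\n", stats.alloc_fails);
		printf("free_ops:    %" PRIu64 "\n", stats.free_ops);
		printf("total_ops:   %" PRIu64 "\n", stats.total_ops);
	}

	(void)odp_pool_stats_reset(pool);
}
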
diff --git a/test/validation/api/scheduler/scheduler.c b/test/validation/api/scheduler/scheduler.c
index ca8af7920..6b8e30559 100644
--- a/test/validation/api/scheduler/scheduler.c
+++ b/test/validation/api/scheduler/scheduler.c
@@ -746,6 +746,116 @@ static void scheduler_test_order_ignore(void)
CU_ASSERT_FATAL(odp_pool_destroy(pool) == 0);
}
+static void scheduler_test_create_group(void)
+{
+ odp_thrmask_t mask;
+ odp_schedule_group_t group;
+ int thr_id;
+ odp_pool_t pool;
+ odp_pool_param_t pool_params;
+ odp_queue_t queue, from;
+ odp_queue_param_t qp;
+ odp_buffer_t buf;
+ odp_event_t ev;
+ uint64_t wait_time;
+
+ thr_id = odp_thread_id();
+ odp_thrmask_zero(&mask);
+ odp_thrmask_set(&mask, thr_id);
+
+ group = odp_schedule_group_create("create_group", &mask);
+ CU_ASSERT_FATAL(group != ODP_SCHED_GROUP_INVALID);
+
+ odp_pool_param_init(&pool_params);
+ pool_params.buf.size = 100;
+ pool_params.buf.num = 2;
+ pool_params.type = ODP_POOL_BUFFER;
+
+ pool = odp_pool_create("create_group", &pool_params);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ odp_queue_param_init(&qp);
+ qp.type = ODP_QUEUE_TYPE_SCHED;
+ qp.sched.prio = odp_schedule_default_prio();
+ qp.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+ qp.sched.group = group;
+
+ queue = odp_queue_create("create_group", &qp);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ buf = odp_buffer_alloc(pool);
+ CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+
+ ev = odp_buffer_to_event(buf);
+
+ CU_ASSERT_FATAL(odp_queue_enq(queue, ev) == 0);
+
+ wait_time = odp_schedule_wait_time(100 * ODP_TIME_MSEC_IN_NS);
+ ev = odp_schedule(&from, wait_time);
+
+ CU_ASSERT(ev != ODP_EVENT_INVALID);
+ CU_ASSERT(from == queue);
+
+ if (ev != ODP_EVENT_INVALID)
+ odp_event_free(ev);
+
+ /* Free schedule context */
+ drain_queues();
+
+ CU_ASSERT_FATAL(odp_queue_destroy(queue) == 0);
+ CU_ASSERT_FATAL(odp_pool_destroy(pool) == 0);
+ CU_ASSERT_FATAL(odp_schedule_group_destroy(group) == 0);
+
+ /* Run scheduler after the group has been destroyed */
+ CU_ASSERT_FATAL(odp_schedule(NULL, wait_time) == ODP_EVENT_INVALID);
+}
+
+static void scheduler_test_create_max_groups(void)
+{
+ odp_thrmask_t mask;
+ int thr_id;
+ uint32_t i;
+ odp_queue_param_t queue_param;
+ odp_schedule_capability_t sched_capa;
+
+ CU_ASSERT_FATAL(!odp_schedule_capability(&sched_capa));
+ uint32_t max_groups = sched_capa.max_groups;
+ odp_schedule_group_t group[max_groups];
+ odp_queue_t queue[max_groups];
+
+ CU_ASSERT_FATAL(max_groups > 0);
+ CU_ASSERT_FATAL(sched_capa.max_queues >= sched_capa.max_groups);
+
+ thr_id = odp_thread_id();
+ odp_thrmask_zero(&mask);
+ odp_thrmask_set(&mask, thr_id);
+
+ odp_queue_param_init(&queue_param);
+ queue_param.type = ODP_QUEUE_TYPE_SCHED;
+ queue_param.sched.prio = odp_schedule_default_prio();
+ queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+
+ for (i = 0; i < max_groups; i++) {
+ group[i] = odp_schedule_group_create("max_groups", &mask);
+ if (group[i] == ODP_SCHED_GROUP_INVALID) {
+ ODPH_ERR("schedule group create %u failed\n", i);
+ break;
+ }
+
+ queue_param.sched.group = group[i];
+ queue[i] = odp_queue_create("max_groups", &queue_param);
+ CU_ASSERT_FATAL(queue[i] != ODP_QUEUE_INVALID);
+ }
+
+ CU_ASSERT(i == max_groups);
+ max_groups = i;
+
+ for (i = 0; i < max_groups; i++) {
+ CU_ASSERT_FATAL(odp_queue_destroy(queue[i]) == 0);
+ CU_ASSERT_FATAL(odp_schedule_group_destroy(group[i]) == 0);
+ }
+}
+
static void scheduler_test_groups(void)
{
odp_pool_t p;
@@ -1271,13 +1381,16 @@ static int schedule_common_(void *arg)
}
}
- for (j = 0; j < num; j++)
+ for (j = 0; j < num; j++) {
+ CU_ASSERT(odp_event_is_valid(events[j]) == 1);
odp_event_free(events[j]);
+ }
} else {
ev = odp_schedule(&from, ODP_SCHED_NO_WAIT);
if (ev == ODP_EVENT_INVALID)
continue;
+ CU_ASSERT(odp_event_is_valid(ev) == 1);
buf = odp_buffer_from_event(ev);
num = 1;
if (sync == ODP_SCHED_SYNC_ORDERED) {
@@ -2746,6 +2859,8 @@ odp_testinfo_t scheduler_suite[] = {
ODP_TEST_INFO(scheduler_test_queue_size),
ODP_TEST_INFO(scheduler_test_full_queues),
ODP_TEST_INFO(scheduler_test_order_ignore),
+ ODP_TEST_INFO(scheduler_test_create_group),
+ ODP_TEST_INFO(scheduler_test_create_max_groups),
ODP_TEST_INFO(scheduler_test_groups),
ODP_TEST_INFO(scheduler_test_pause_resume),
ODP_TEST_INFO(scheduler_test_pause_enqueue),
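
The two new scheduler tests above follow the same pattern for a thread-private
schedule group: build a thread mask containing only the calling thread, create a
group from it, attach a scheduled queue to that group, and tear both down
afterwards. A minimal sketch of that pattern, using only calls that appear in the
hunk (the function and object names are illustrative):

/* Create a scheduled queue that only the calling thread can receive from.
 * Returns 0 on success; on failure nothing is left allocated. */
static int create_private_sched_queue(odp_schedule_group_t *group,
				      odp_queue_t *queue)
{
	odp_thrmask_t mask;
	odp_queue_param_t qp;

	odp_thrmask_zero(&mask);
	odp_thrmask_set(&mask, odp_thread_id());

	*group = odp_schedule_group_create("private_group", &mask);
	if (*group == ODP_SCHED_GROUP_INVALID)
		return -1;

	odp_queue_param_init(&qp);
	qp.type = ODP_QUEUE_TYPE_SCHED;
	qp.sched.prio = odp_schedule_default_prio();
	qp.sched.sync = ODP_SCHED_SYNC_ATOMIC;
	qp.sched.group = *group;

	*queue = odp_queue_create("private_queue", &qp);
	if (*queue == ODP_QUEUE_INVALID) {
		(void)odp_schedule_group_destroy(*group);
		return -1;
	}

	return 0;
}

Cleanup mirrors the tests above: destroy the queue first, then the group, and only
run the scheduler again once the group no longer exists.
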
diff --git a/test/validation/api/timer/timer.c b/test/validation/api/timer/timer.c
index 09f8fc467..8943d4d97 100644
--- a/test/validation/api/timer/timer.c
+++ b/test/validation/api/timer/timer.c
@@ -41,12 +41,19 @@ struct test_timer {
uint64_t tick; /* Expiration tick or TICK_INVALID */
};
+struct thread_args {
+ pthrd_arg thrdarg;
+ odp_queue_type_t queue_type;
+};
+
typedef struct {
/* Default resolution / timeout parameters */
struct {
uint64_t res_ns;
uint64_t min_tmo;
uint64_t max_tmo;
+ odp_bool_t queue_type_sched;
+ odp_bool_t queue_type_plain;
} param;
/* Timeout pool handle used by all threads */
@@ -152,6 +159,8 @@ static int timer_global_init(odp_instance_t *inst)
global_mem->param.res_ns = res_ns;
global_mem->param.min_tmo = min_tmo;
global_mem->param.max_tmo = max_tmo;
+ global_mem->param.queue_type_plain = capa.queue_type_plain;
+ global_mem->param.queue_type_sched = capa.queue_type_sched;
return 0;
}
@@ -179,6 +188,24 @@ static int timer_global_term(odp_instance_t inst)
return 0;
}
+static int
+check_sched_queue_support(void)
+{
+ if (global_mem->param.queue_type_sched)
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
+}
+
+static int
+check_plain_queue_support(void)
+{
+ if (global_mem->param.queue_type_plain)
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
+}
+
static void timer_test_capa(void)
{
odp_timer_capability_t capa;
@@ -323,12 +350,26 @@ static void timer_test_timeout_pool_free(void)
static void timer_pool_create_destroy(void)
{
odp_timer_pool_param_t tparam;
+ odp_queue_param_t queue_param;
+ odp_timer_capability_t capa;
odp_timer_pool_info_t info;
odp_timer_pool_t tp[2];
odp_timer_t tim;
odp_queue_t queue;
+ int ret;
+
+ memset(&capa, 0, sizeof(capa));
+ ret = odp_timer_capability(ODP_CLOCK_CPU, &capa);
+ CU_ASSERT_FATAL(ret == 0);
- queue = odp_queue_create("timer_queue", NULL);
+ odp_queue_param_init(&queue_param);
+ if (capa.queue_type_plain) {
+ queue_param.type = ODP_QUEUE_TYPE_PLAIN;
+ } else if (capa.queue_type_sched) {
+ queue_param.type = ODP_QUEUE_TYPE_SCHED;
+ queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+ }
+ queue = odp_queue_create("timer_queue", &queue_param);
CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
memset(&tparam, 0, sizeof(odp_timer_pool_param_t));
@@ -391,6 +432,7 @@ static void timer_pool_max_res(void)
{
odp_timer_capability_t capa;
odp_timer_pool_param_t tp_param;
+ odp_queue_param_t queue_param;
odp_timer_pool_t tp;
odp_timer_t timer;
odp_pool_param_t pool_param;
@@ -411,7 +453,14 @@ static void timer_pool_max_res(void)
pool = odp_pool_create("timeout_pool", &pool_param);
CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
- queue = odp_queue_create("timer_queue", NULL);
+ odp_queue_param_init(&queue_param);
+ if (capa.queue_type_plain) {
+ queue_param.type = ODP_QUEUE_TYPE_PLAIN;
+ } else if (capa.queue_type_sched) {
+ queue_param.type = ODP_QUEUE_TYPE_SCHED;
+ queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+ }
+ queue = odp_queue_create("timer_queue", &queue_param);
CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
/* Highest resolution: first in nsec, then in hz */
@@ -824,6 +873,8 @@ static void timer_test_cancel(void)
odp_pool_t pool;
odp_pool_param_t params;
odp_timer_pool_param_t tparam;
+ odp_queue_param_t queue_param;
+ odp_timer_capability_t capa;
odp_timer_pool_t tp;
odp_queue_t queue;
odp_timer_t tim;
@@ -831,6 +882,11 @@ static void timer_test_cancel(void)
odp_timeout_t tmo;
odp_timer_set_t rc;
uint64_t tick;
+ int ret;
+
+ memset(&capa, 0, sizeof(capa));
+ ret = odp_timer_capability(ODP_CLOCK_CPU, &capa);
+ CU_ASSERT_FATAL(ret == 0);
odp_pool_param_init(&params);
params.type = ODP_POOL_TIMEOUT;
@@ -855,7 +911,15 @@ static void timer_test_cancel(void)
/* Start all created timer pools */
odp_timer_pool_start();
- queue = odp_queue_create("timer_queue", NULL);
+ odp_queue_param_init(&queue_param);
+ if (capa.queue_type_plain) {
+ queue_param.type = ODP_QUEUE_TYPE_PLAIN;
+ } else if (capa.queue_type_sched) {
+ queue_param.type = ODP_QUEUE_TYPE_SCHED;
+ queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+ }
+
+ queue = odp_queue_create("timer_queue", &queue_param);
if (queue == ODP_QUEUE_INVALID)
CU_FAIL_FATAL("Queue create failed");
@@ -1165,10 +1229,10 @@ static void handle_tmo(odp_event_t ev, bool stale, uint64_t prev_tick)
if (ttp->tim != tim)
CU_FAIL("odp_timeout_timer() wrong timer");
- if (!odp_timeout_fresh(tmo))
- CU_FAIL("Wrong status (stale) for fresh timeout");
-
if (!stale) {
+ if (!odp_timeout_fresh(tmo))
+ CU_FAIL("Wrong status (stale) for fresh timeout");
+
/* tmo tick cannot be smaller than pre-calculated tick */
if (tick < ttp->tick) {
ODPH_DBG("Too small tick: pre-calculated %" PRIu64 " "
@@ -1194,7 +1258,7 @@ static void handle_tmo(odp_event_t ev, bool stale, uint64_t prev_tick)
/* Worker thread entrypoint which performs timer alloc/set/cancel/free
* tests */
-static int worker_entrypoint(void *arg ODP_UNUSED)
+static int worker_entrypoint(void *arg)
{
int thr = odp_thread_id();
uint32_t i, allocated;
@@ -1218,8 +1282,31 @@ static int worker_entrypoint(void *arg ODP_UNUSED)
odp_pool_t tbp = global_mem->tbp;
uint32_t num_timers = global_mem->timers_per_thread;
uint64_t min_tmo = global_mem->param.min_tmo;
+ odp_queue_param_t queue_param;
+ odp_queue_type_t queue_type = ODP_QUEUE_TYPE_PLAIN;
+ odp_thrmask_t thr_mask;
+ odp_schedule_group_t group;
+ struct thread_args *thr_args = arg;
+ uint64_t sched_tmo;
+
+ odp_queue_param_init(&queue_param);
+ if (thr_args->queue_type == ODP_QUEUE_TYPE_PLAIN) {
+ queue_param.type = ODP_QUEUE_TYPE_PLAIN;
+ queue_type = ODP_QUEUE_TYPE_PLAIN;
+ } else {
+ odp_thrmask_zero(&thr_mask);
+ odp_thrmask_set(&thr_mask, odp_thread_id());
+ group = odp_schedule_group_create(NULL, &thr_mask);
+ if (group == ODP_SCHED_GROUP_INVALID)
+ CU_FAIL_FATAL("Schedule group create failed");
- queue = odp_queue_create("timer_queue", NULL);
+ queue_param.type = ODP_QUEUE_TYPE_SCHED;
+ queue_param.sched.sync = ODP_SCHED_SYNC_PARALLEL;
+ queue_type = ODP_QUEUE_TYPE_SCHED;
+ queue_param.sched.group = group;
+ }
+
+ queue = odp_queue_create("timer_queue", &queue_param);
if (queue == ODP_QUEUE_INVALID)
CU_FAIL_FATAL("Queue create failed");
@@ -1279,7 +1366,10 @@ static int worker_entrypoint(void *arg ODP_UNUSED)
prev_tick = odp_timer_current_tick(tp);
for (ms = 0; ms < 7 * RANGE_MS / 10 && allocated > 0; ms++) {
- while ((ev = odp_queue_deq(queue)) != ODP_EVENT_INVALID) {
+ while ((ev = queue_type == ODP_QUEUE_TYPE_PLAIN ?
+ odp_queue_deq(queue) :
+ odp_schedule(NULL, ODP_SCHED_NO_WAIT))
+ != ODP_EVENT_INVALID) {
/* Subtract one from prev_tick to allow for timeouts
* to be delivered a tick late */
handle_tmo(ev, false, prev_tick - 1);
@@ -1289,6 +1379,9 @@ static int worker_entrypoint(void *arg ODP_UNUSED)
i = rand_r(&seed) % allocated;
if (tt[i].ev == ODP_EVENT_INVALID &&
(rand_r(&seed) % 2 == 0)) {
+ if (odp_timer_current_tick(tp) >= tt[i].tick)
+ /* Timer just expired. */
+ goto sleep;
/* Timer active, cancel it */
rc = odp_timer_cancel(tt[i].tim, &tt[i].ev);
if (rc != 0) {
@@ -1307,6 +1400,9 @@ static int worker_entrypoint(void *arg ODP_UNUSED)
if (tt[i].ev != ODP_EVENT_INVALID)
/* Timer inactive => set */
nset++;
+ else if (odp_timer_current_tick(tp) >= tt[i].tick)
+ /* Timer just expired. */
+ goto sleep;
else
/* Timer active => reset */
nreset++;
@@ -1334,6 +1430,7 @@ static int worker_entrypoint(void *arg ODP_UNUSED)
CU_FAIL("Failed to set timer: bad return code");
}
}
+sleep:
ts.tv_sec = 0;
ts.tv_nsec = 1000000; /* 1ms */
if (nanosleep(&ts, NULL) < 0)
@@ -1368,8 +1465,12 @@ static int worker_entrypoint(void *arg ODP_UNUSED)
if (nanosleep(&ts, NULL) < 0)
CU_FAIL_FATAL("nanosleep failed");
+ sched_tmo = odp_schedule_wait_time(ODP_TIME_MSEC_IN_NS * RANGE_MS);
while (nstale != 0) {
- ev = odp_queue_deq(queue);
+ if (queue_type == ODP_QUEUE_TYPE_PLAIN)
+ ev = odp_queue_deq(queue);
+ else
+ ev = odp_schedule(NULL, sched_tmo);
if (ev != ODP_EVENT_INVALID) {
handle_tmo(ev, true, 0/*Don't care for stale tmo's*/);
nstale--;
@@ -1385,7 +1486,10 @@ static int worker_entrypoint(void *arg ODP_UNUSED)
}
/* Check if there are any more (unexpected) events */
- ev = odp_queue_deq(queue);
+ if (queue_type == ODP_QUEUE_TYPE_PLAIN)
+ ev = odp_queue_deq(queue);
+ else
+ ev = odp_schedule(NULL, sched_tmo);
if (ev != ODP_EVENT_INVALID)
CU_FAIL("Unexpected event received");
@@ -1401,7 +1505,7 @@ static int worker_entrypoint(void *arg ODP_UNUSED)
return CU_get_number_of_failures();
}
-static void timer_test_all(void)
+static void timer_test_all(odp_queue_type_t queue_type)
{
int rc;
odp_pool_param_t params;
@@ -1411,23 +1515,30 @@ static void timer_test_all(void)
uint64_t ns, tick, ns2;
uint64_t res_ns, min_tmo, max_tmo;
uint32_t timers_allocated;
- pthrd_arg thrdarg;
+ struct thread_args thr_args;
odp_pool_capability_t pool_capa;
odp_timer_capability_t timer_capa;
+ odp_schedule_capability_t sched_capa;
odp_pool_t tbp;
odp_timer_pool_t tp;
uint32_t num_timers;
+ uint32_t num_workers;
int timers_per_thread;
+ CU_ASSERT_FATAL(odp_schedule_capability(&sched_capa) == 0);
/* Reserve at least one core for running other processes so the timer
* test hopefully can run undisturbed and thus get better timing
* results. */
- int num_workers = odp_cpumask_default_worker(&unused, 0);
+ num_workers = odp_cpumask_default_worker(&unused, 0);
/* force to max CPU count */
if (num_workers > MAX_WORKERS)
num_workers = MAX_WORKERS;
+ if (queue_type == ODP_QUEUE_TYPE_SCHED &&
+ num_workers > sched_capa.max_groups)
+ num_workers = sched_capa.max_groups;
+
/* On a single-CPU machine run at least one thread */
if (num_workers < 1)
num_workers = 1;
@@ -1527,12 +1638,13 @@ static void timer_test_all(void)
odp_atomic_init_u32(&global_mem->timers_allocated, 0);
/* Create and start worker threads */
- thrdarg.testcase = 0;
- thrdarg.numthrds = num_workers;
- odp_cunit_thread_create(worker_entrypoint, &thrdarg);
+ thr_args.thrdarg.testcase = 0;
+ thr_args.thrdarg.numthrds = num_workers;
+ thr_args.queue_type = queue_type;
+ odp_cunit_thread_create(worker_entrypoint, &thr_args.thrdarg);
/* Wait for worker threads to exit */
- odp_cunit_thread_exit(&thrdarg);
+ odp_cunit_thread_exit(&thr_args.thrdarg);
ODPH_DBG("Number of timeouts delivered/received too late: "
"%" PRIu32 "\n",
odp_atomic_load_u32(&global_mem->ndelivtoolate));
@@ -1551,8 +1663,16 @@ static void timer_test_all(void)
/* Destroy timeout pool, all timeouts must have been freed */
rc = odp_pool_destroy(tbp);
CU_ASSERT(rc == 0);
+}
- CU_PASS("ODP timer test");
+static void timer_test_plain_all(void)
+{
+ timer_test_all(ODP_QUEUE_TYPE_PLAIN);
+}
+
+static void timer_test_sched_all(void)
+{
+ timer_test_all(ODP_QUEUE_TYPE_SCHED);
}
odp_testinfo_t timer_suite[] = {
@@ -1561,26 +1681,47 @@ odp_testinfo_t timer_suite[] = {
ODP_TEST_INFO(timer_test_timeout_pool_free),
ODP_TEST_INFO(timer_pool_create_destroy),
ODP_TEST_INFO(timer_pool_max_res),
- ODP_TEST_INFO(timer_test_tmo_event_plain),
- ODP_TEST_INFO(timer_test_tmo_event_sched),
- ODP_TEST_INFO(timer_test_buf_event_plain),
- ODP_TEST_INFO(timer_test_buf_event_sched),
- ODP_TEST_INFO(timer_test_pkt_event_plain),
- ODP_TEST_INFO(timer_test_pkt_event_sched),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_tmo_event_plain,
+ check_plain_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_tmo_event_sched,
+ check_sched_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_buf_event_plain,
+ check_plain_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_buf_event_sched,
+ check_sched_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_pkt_event_plain,
+ check_plain_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_pkt_event_sched,
+ check_sched_queue_support),
ODP_TEST_INFO(timer_test_cancel),
- ODP_TEST_INFO(timer_test_max_res_min_tmo_plain),
- ODP_TEST_INFO(timer_test_max_res_min_tmo_sched),
- ODP_TEST_INFO(timer_test_max_res_max_tmo_plain),
- ODP_TEST_INFO(timer_test_max_res_max_tmo_sched),
- ODP_TEST_INFO(timer_test_max_tmo_min_tmo_plain),
- ODP_TEST_INFO(timer_test_max_tmo_min_tmo_sched),
- ODP_TEST_INFO(timer_test_max_tmo_max_tmo_plain),
- ODP_TEST_INFO(timer_test_max_tmo_max_tmo_sched),
- ODP_TEST_INFO(timer_test_plain_queue),
- ODP_TEST_INFO(timer_test_sched_queue),
- ODP_TEST_INFO(timer_test_plain_queue_priv),
- ODP_TEST_INFO(timer_test_sched_queue_priv),
- ODP_TEST_INFO(timer_test_all),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_max_res_min_tmo_plain,
+ check_plain_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_max_res_min_tmo_sched,
+ check_sched_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_max_res_max_tmo_plain,
+ check_plain_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_max_res_max_tmo_sched,
+ check_sched_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_max_tmo_min_tmo_plain,
+ check_plain_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_max_tmo_min_tmo_sched,
+ check_sched_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_max_tmo_max_tmo_plain,
+ check_plain_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_max_tmo_max_tmo_sched,
+ check_sched_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_plain_queue,
+ check_plain_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_sched_queue,
+ check_sched_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_plain_queue_priv,
+ check_plain_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_sched_queue_priv,
+ check_sched_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_plain_all,
+ check_plain_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_sched_all,
+ check_sched_queue_support),
ODP_TEST_INFO_NULL,
};
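
The conditional registrations above gate each plain- or sched-queue variant on the
queue_type_plain / queue_type_sched capability flags, and the reworked test bodies
pick the completion queue type the same way. A minimal sketch of that selection
(hypothetical helper name; ODP API headers and <string.h> assumed):

/* Choose a timeout completion queue type that the timer implementation
 * supports. Prefers a plain queue and falls back to a scheduled queue. */
static int pick_timer_queue_type(odp_queue_param_t *param)
{
	odp_timer_capability_t capa;

	memset(&capa, 0, sizeof(capa));
	if (odp_timer_capability(ODP_CLOCK_CPU, &capa))
		return -1;

	odp_queue_param_init(param);

	if (capa.queue_type_plain) {
		param->type = ODP_QUEUE_TYPE_PLAIN;
	} else if (capa.queue_type_sched) {
		param->type = ODP_QUEUE_TYPE_SCHED;
		param->sched.sync = ODP_SCHED_SYNC_ATOMIC;
	} else {
		return -1;
	}

	return 0;
}
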
diff --git a/test/validation/api/traffic_mngr/traffic_mngr.c b/test/validation/api/traffic_mngr/traffic_mngr.c
index 8234c1748..1029a128b 100644
--- a/test/validation/api/traffic_mngr/traffic_mngr.c
+++ b/test/validation/api/traffic_mngr/traffic_mngr.c
@@ -2701,7 +2701,7 @@ static int test_sched_queue_priority(const char *shaper_name,
for (priority = NUM_PRIORITIES - 1; 0 <= priority; priority--)
pkts_sent += send_pkts(tm_queues[priority], num_pkts);
- busy_wait(1000000); /* wait 1 millisecond */
+ busy_wait(100 * ODP_TIME_MSEC_IN_NS);
/* Disable the shaper, so as to get the pkts out quicker. */
set_shaper(node_name, shaper_name, 0, 0);
@@ -2807,7 +2807,7 @@ static int test_sched_node_priority(const char *shaper_name,
}
}
- busy_wait(1000000); /* wait 1 millisecond */
+ busy_wait(100 * ODP_TIME_MSEC_IN_NS);
/* Disable the shaper, so as to get the pkts out quicker. */
set_shaper(node_name, shaper_name, 0, 0);