aboutsummaryrefslogtreecommitdiff
path: root/test
diff options
context:
space:
mode:
Diffstat (limited to 'test')
-rw-r--r--test/common/odp_cunit_common.c38
-rw-r--r--test/performance/.gitignore1
-rw-r--r--test/performance/Makefile.am4
-rw-r--r--test/performance/odp_atomic_perf.c1184
-rw-r--r--test/performance/odp_mem_perf.c13
-rwxr-xr-xtest/performance/odp_sched_latency_run.sh8
-rwxr-xr-xtest/performance/odp_scheduling_run.sh17
-rw-r--r--test/validation/api/atomic/atomic.c258
-rw-r--r--test/validation/api/chksum/chksum.c112
-rw-r--r--test/validation/api/ipsec/ipsec.c57
-rw-r--r--test/validation/api/ipsec/ipsec.h4
-rw-r--r--test/validation/api/ipsec/ipsec_test_out.c106
-rw-r--r--test/validation/api/pktio/lso.c1
-rw-r--r--test/validation/api/pktio/pktio.c10
-rw-r--r--test/validation/api/queue/queue.c2
-rw-r--r--test/validation/api/timer/timer.c21
16 files changed, 1773 insertions, 63 deletions
diff --git a/test/common/odp_cunit_common.c b/test/common/odp_cunit_common.c
index 25895d628..62418c356 100644
--- a/test/common/odp_cunit_common.c
+++ b/test/common/odp_cunit_common.c
@@ -1,5 +1,6 @@
/* Copyright (c) 2014-2018, Linaro Limited
* Copyright (c) 2019, Nokia
+ * Copyright (c) 2021, Marvell
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -273,6 +274,39 @@ static int default_term_func(void)
return odp_cunit_print_inactive();
}
+/* Per-test setup hook: when ODP_CUNIT_FAIL_IMMEDIATE is set in the
+ * environment, abort the whole run as soon as any previous test has
+ * recorded a failure. Runs suite/all-tests completion handlers first so
+ * results are still reported. */
+static void _cunit_test_setup_func(void)
+{
+	CU_AllTestsCompleteMessageHandler all_test_comp_handler;
+	CU_SuiteCompleteMessageHandler suite_comp_handler;
+	CU_pFailureRecord failrec;
+	CU_pSuite suite;
+
+	if (!getenv("ODP_CUNIT_FAIL_IMMEDIATE"))
+		return;
+
+	if (CU_get_number_of_failure_records() == 0)
+		return;
+
+	/* User wants the suite to fail immediately once a test hits an error */
+	suite = CU_get_current_suite();
+	failrec = CU_get_failure_list();
+
+	printf("Force aborting as a previous test failed\n");
+
+	/* Call the cleanup function before aborting. A suite may have been
+	 * registered without a cleanup function, so guard against NULL. */
+	if (suite && suite->pCleanupFunc)
+		suite->pCleanupFunc();
+
+	suite_comp_handler = CU_get_suite_complete_handler();
+	if (suite_comp_handler)
+		suite_comp_handler(suite, failrec);
+
+	all_test_comp_handler = CU_get_all_test_complete_handler();
+	if (all_test_comp_handler)
+		all_test_comp_handler(failrec);
+
+	exit(EXIT_FAILURE);
+}
+
/*
* Register suites and tests with CUnit.
*
@@ -292,7 +326,9 @@ static int cunit_register_suites(odp_suiteinfo_t testsuites[])
if (sinfo->term_func)
term_func = sinfo->term_func;
- suite = CU_add_suite(sinfo->name, _cunit_suite_init, term_func);
+ suite = CU_add_suite_with_setup_and_teardown(sinfo->name, _cunit_suite_init,
+ term_func, _cunit_test_setup_func,
+ NULL);
if (!suite)
return CU_get_error();
diff --git a/test/performance/.gitignore b/test/performance/.gitignore
index 80396e5d9..0e6d9ef57 100644
--- a/test/performance/.gitignore
+++ b/test/performance/.gitignore
@@ -1,6 +1,7 @@
*.log
*.trs
odp_atomic
+odp_atomic_perf
odp_bench_packet
odp_cpu_bench
odp_crypto
diff --git a/test/performance/Makefile.am b/test/performance/Makefile.am
index 624795f8b..7566ab8a8 100644
--- a/test/performance/Makefile.am
+++ b/test/performance/Makefile.am
@@ -2,7 +2,8 @@ include $(top_srcdir)/test/Makefile.inc
TESTS_ENVIRONMENT += TEST_DIR=${builddir}
-EXECUTABLES = odp_bench_packet \
+EXECUTABLES = odp_atomic_perf \
+ odp_bench_packet \
odp_cpu_bench \
odp_crypto \
odp_ipsec \
@@ -39,6 +40,7 @@ endif
bin_PROGRAMS = $(EXECUTABLES) $(COMPILE_ONLY)
+odp_atomic_perf_SOURCES = odp_atomic_perf.c
odp_bench_packet_SOURCES = odp_bench_packet.c
odp_cpu_bench_SOURCES = odp_cpu_bench.c
odp_crypto_SOURCES = odp_crypto.c
diff --git a/test/performance/odp_atomic_perf.c b/test/performance/odp_atomic_perf.c
new file mode 100644
index 000000000..2ed88a5e8
--- /dev/null
+++ b/test/performance/odp_atomic_perf.c
@@ -0,0 +1,1184 @@
+/* Copyright (c) 2021, Nokia
+ *
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <getopt.h>
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+/* Default number of test rounds */
+#define NUM_ROUNDS 1000000u
+
+/* Initial value for atomic variables */
+#define INIT_VAL 1234567
+
+/* Max number of workers if num_cpu=0 */
+#define DEFAULT_MAX_WORKERS 10
+
+/* Convenience initializer for one test_case_t table entry */
+#define TEST_INFO(name, test, validate, op_type) \
+	{name, test, validate, op_type}
+
+/* Test function template */
+typedef void (*test_fn_t)(void *val, void *out, uint32_t num_round);
+/* Test result validation function template */
+typedef int (*validate_fn_t)(void *val, void *out, uint32_t num_round,
+			     uint32_t num_worker, int private);
+
+/* Word size of the atomic variable under test */
+typedef enum {
+	OP_32BIT,
+	OP_64BIT
+} op_bit_t;
+
+/* Command line options */
+typedef struct test_options_t {
+	uint32_t num_cpu;	/* Number of workers, 0 = default CPU count */
+	uint32_t num_round;	/* Test rounds per worker */
+	int private;		/* Nonzero: per-thread atomics instead of one shared */
+
+} test_options_t;
+
+/* Cache aligned atomics for private mode operation */
+typedef struct ODP_ALIGNED_CACHE test_atomic_t {
+	union {
+		odp_atomic_u32_t u32;
+		odp_atomic_u64_t u64;
+	};
+} test_atomic_t;
+
+typedef struct test_global_t test_global_t;
+
+/* Worker thread context */
+typedef struct test_thread_ctx_t {
+	test_global_t *global;
+	test_fn_t func;		/* Benchmark kernel to run */
+	uint64_t nsec;		/* Measured kernel duration in nanoseconds */
+	uint32_t idx;		/* Worker index */
+	op_bit_t type;		/* 32 or 64 bit operations */
+
+} test_thread_ctx_t;
+
+/* Global data */
+struct test_global_t {
+	test_options_t test_options;
+	odp_barrier_t barrier;	/* Synchronizes worker start for timing */
+	union {			/* Shared atomic variable (non-private mode) */
+		odp_atomic_u32_t atomic_u32;
+		odp_atomic_u64_t atomic_u64;
+	};
+	odp_cpumask_t cpumask;
+	odph_thread_t thread_tbl[ODP_THREAD_COUNT_MAX];
+	test_thread_ctx_t thread_ctx[ODP_THREAD_COUNT_MAX];
+	test_atomic_t atomic_private[ODP_THREAD_COUNT_MAX];
+	union {			/* Per-worker accumulated result values */
+		uint32_t u32;
+		uint64_t u64;
+	} output[ODP_THREAD_COUNT_MAX];
+};
+
+/* One benchmark case: kernel, matching validator and operand size */
+typedef struct {
+	const char *name;
+	test_fn_t test_fn;
+	validate_fn_t validate_fn;
+	op_bit_t type;
+} test_case_t;
+
+static test_global_t *test_global;
+
+/* Benchmark kernels: odp_atomic_load. Summing the loaded values keeps the
+ * compiler from optimizing the loads away; the sum is also validated later. */
+static inline void test_atomic_load_u32(void *val, void *out, uint32_t num_round)
+{
+	odp_atomic_u32_t *atomic_val = val;
+	uint32_t *result = out;
+	uint32_t ret = 0;
+
+	for (uint32_t i = 0; i < num_round; i++)
+		ret += odp_atomic_load_u32(atomic_val);
+
+	*result = ret;
+}
+
+static inline void test_atomic_load_u64(void *val, void *out, uint32_t num_round)
+{
+	odp_atomic_u64_t *atomic_val = val;
+	uint64_t *result = out;
+	uint64_t ret = 0;
+
+	for (uint32_t i = 0; i < num_round; i++)
+		ret += odp_atomic_load_u64(atomic_val);
+
+	*result = ret;
+}
+
+/* After a load test: variable must still hold INIT_VAL and the accumulated
+ * sum must equal INIT_VAL * num_round (modulo 32 bit wrap around) */
+static inline int validate_atomic_init_val_u32(void *val, void *out, uint32_t num_round,
+					       uint32_t num_worker ODP_UNUSED,
+					       int private ODP_UNUSED)
+{
+	odp_atomic_u32_t *atomic_val = val;
+	uint32_t *result = out;
+
+	return (odp_atomic_load_u32(atomic_val) != INIT_VAL) ||
+	       (*result != (uint32_t)INIT_VAL * num_round);
+}
+
+/* After a load test: variable must still hold INIT_VAL and the accumulated
+ * sum must equal INIT_VAL * num_round. Note: 'out' and 'num_round' are used
+ * here, so they must not be marked ODP_UNUSED (unlike the u32 variant they
+ * were mistakenly annotated). */
+static inline int validate_atomic_init_val_u64(void *val, void *out,
+					       uint32_t num_round,
+					       uint32_t num_worker ODP_UNUSED, int private ODP_UNUSED)
+{
+	odp_atomic_u64_t *atomic_val = val;
+	uint64_t *result = out;
+
+	return (odp_atomic_load_u64(atomic_val) != INIT_VAL) ||
+	       (*result != (uint64_t)INIT_VAL * num_round);
+}
+
+/* Benchmark kernels: odp_atomic_store with an increasing value each round */
+static inline void test_atomic_store_u32(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+	odp_atomic_u32_t *atomic_val = val;
+	uint32_t new_val = INIT_VAL + 1;
+
+	for (uint32_t i = 0; i < num_round; i++)
+		odp_atomic_store_u32(atomic_val, new_val++);
+}
+
+static inline void test_atomic_store_u64(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+	odp_atomic_u64_t *atomic_val = val;
+	uint64_t new_val = INIT_VAL + 1;
+
+	for (uint32_t i = 0; i < num_round; i++)
+		odp_atomic_store_u64(atomic_val, new_val++);
+}
+
+/* After store/xchg tests: every worker stores the same sequence, so the final
+ * value must be INIT_VAL + num_round in both shared and private mode */
+static inline int validate_atomic_num_round_u32(void *val, void *out ODP_UNUSED, uint32_t num_round,
+						uint32_t worker ODP_UNUSED, int private ODP_UNUSED)
+{
+	odp_atomic_u32_t *atomic_val = val;
+
+	return odp_atomic_load_u32(atomic_val) != ((uint32_t)INIT_VAL + num_round);
+}
+
+static inline int validate_atomic_num_round_u64(void *val, void *out ODP_UNUSED, uint32_t num_round,
+						uint32_t worker ODP_UNUSED, int private ODP_UNUSED)
+{
+	odp_atomic_u64_t *atomic_val = val;
+
+	return odp_atomic_load_u64(atomic_val) != ((uint64_t)INIT_VAL + num_round);
+}
+
+/* Benchmark kernels: odp_atomic_(fetch_)add by one per round */
+static inline void test_atomic_fetch_add_u32(void *val, void *out, uint32_t num_round)
+{
+	odp_atomic_u32_t *atomic_val = val;
+	uint32_t *result = out;
+	uint32_t ret = 0;
+
+	for (uint32_t i = 0; i < num_round; i++)
+		ret += odp_atomic_fetch_add_u32(atomic_val, 1);
+
+	*result = ret;
+}
+
+static inline void test_atomic_fetch_add_u64(void *val, void *out, uint32_t num_round)
+{
+	odp_atomic_u64_t *atomic_val = val;
+	uint64_t *result = out;
+	uint64_t ret = 0;
+
+	for (uint32_t i = 0; i < num_round; i++)
+		ret += odp_atomic_fetch_add_u64(atomic_val, 1);
+
+	*result = ret;
+}
+
+/* After add/inc tests: private mode adds num_round, shared mode adds
+ * num_worker * num_round in total */
+static inline int validate_atomic_add_round_u32(void *val, void *out ODP_UNUSED, uint32_t num_round,
+						uint32_t num_worker, int private)
+{
+	odp_atomic_u32_t *atomic_val = val;
+
+	if (private)
+		return odp_atomic_load_u32(atomic_val) != ((uint32_t)INIT_VAL + num_round);
+
+	return odp_atomic_load_u32(atomic_val) != (INIT_VAL + (num_worker * num_round));
+}
+
+static inline int validate_atomic_add_round_u64(void *val, void *out ODP_UNUSED, uint32_t num_round,
+						uint32_t num_worker, int private)
+{
+	odp_atomic_u64_t *atomic_val = val;
+
+	if (private)
+		return odp_atomic_load_u64(atomic_val) != ((uint64_t)INIT_VAL + num_round);
+
+	return odp_atomic_load_u64(atomic_val) != (INIT_VAL + ((uint64_t)num_worker * num_round));
+}
+
+static inline void test_atomic_add_u32(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+	odp_atomic_u32_t *atomic_val = val;
+
+	for (uint32_t i = 0; i < num_round; i++)
+		odp_atomic_add_u32(atomic_val, 1);
+}
+
+static inline void test_atomic_add_u64(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+	odp_atomic_u64_t *atomic_val = val;
+
+	for (uint32_t i = 0; i < num_round; i++)
+		odp_atomic_add_u64(atomic_val, 1);
+}
+
+/* Benchmark kernels: odp_atomic_(fetch_)sub by one per round */
+static inline void test_atomic_fetch_sub_u32(void *val, void *out, uint32_t num_round)
+{
+	odp_atomic_u32_t *atomic_val = val;
+	uint32_t *result = out;
+	uint32_t ret = 0;
+
+	for (uint32_t i = 0; i < num_round; i++)
+		ret += odp_atomic_fetch_sub_u32(atomic_val, 1);
+
+	*result = ret;
+}
+
+static inline void test_atomic_fetch_sub_u64(void *val, void *out, uint32_t num_round)
+{
+	odp_atomic_u64_t *atomic_val = val;
+	uint64_t *result = out;
+	uint64_t ret = 0;
+
+	for (uint32_t i = 0; i < num_round; i++)
+		ret += odp_atomic_fetch_sub_u64(atomic_val, 1);
+
+	*result = ret;
+}
+
+/* After sub/dec tests: private mode subtracts num_round, shared mode
+ * subtracts num_worker * num_round in total */
+static inline int validate_atomic_sub_round_u32(void *val, void *out ODP_UNUSED, uint32_t num_round,
+						uint32_t num_worker, int private)
+{
+	odp_atomic_u32_t *atomic_val = val;
+
+	if (private)
+		return odp_atomic_load_u32(atomic_val) != ((uint32_t)INIT_VAL - num_round);
+
+	return odp_atomic_load_u32(atomic_val) != ((uint32_t)INIT_VAL - (num_worker * num_round));
+}
+
+static inline int validate_atomic_sub_round_u64(void *val, void *out ODP_UNUSED, uint32_t num_round,
+						uint32_t num_worker, int private)
+{
+	odp_atomic_u64_t *atomic_val = val;
+
+	if (private)
+		return odp_atomic_load_u64(atomic_val) != ((uint64_t)INIT_VAL - num_round);
+
+	return odp_atomic_load_u64(atomic_val) != ((uint64_t)INIT_VAL -
+						  ((uint64_t)num_worker * num_round));
+}
+
+static inline void test_atomic_sub_u32(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+	odp_atomic_u32_t *atomic_val = val;
+
+	for (uint32_t i = 0; i < num_round; i++)
+		odp_atomic_sub_u32(atomic_val, 1);
+}
+
+static inline void test_atomic_sub_u64(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+	odp_atomic_u64_t *atomic_val = val;
+
+	for (uint32_t i = 0; i < num_round; i++)
+		odp_atomic_sub_u64(atomic_val, 1);
+}
+
+/* Benchmark kernels: odp_atomic_(fetch_)inc/dec. Validated with the same
+ * validators as the add/sub tests since the net effect is identical. */
+static inline void test_atomic_fetch_inc_u32(void *val, void *out, uint32_t num_round)
+{
+	odp_atomic_u32_t *atomic_val = val;
+	uint32_t *result = out;
+	uint32_t ret = 0;
+
+	for (uint32_t i = 0; i < num_round; i++)
+		ret += odp_atomic_fetch_inc_u32(atomic_val);
+
+	*result = ret;
+}
+
+static inline void test_atomic_fetch_inc_u64(void *val, void *out, uint32_t num_round)
+{
+	odp_atomic_u64_t *atomic_val = val;
+	uint64_t *result = out;
+	uint64_t ret = 0;
+
+	for (uint32_t i = 0; i < num_round; i++)
+		ret += odp_atomic_fetch_inc_u64(atomic_val);
+
+	*result = ret;
+}
+
+static inline void test_atomic_inc_u32(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+	odp_atomic_u32_t *atomic_val = val;
+
+	for (uint32_t i = 0; i < num_round; i++)
+		odp_atomic_inc_u32(atomic_val);
+}
+
+static inline void test_atomic_inc_u64(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+	odp_atomic_u64_t *atomic_val = val;
+
+	for (uint32_t i = 0; i < num_round; i++)
+		odp_atomic_inc_u64(atomic_val);
+}
+
+static inline void test_atomic_fetch_dec_u32(void *val, void *out, uint32_t num_round)
+{
+	odp_atomic_u32_t *atomic_val = val;
+	uint32_t *result = out;
+	uint32_t ret = 0;
+
+	for (uint32_t i = 0; i < num_round; i++)
+		ret += odp_atomic_fetch_dec_u32(atomic_val);
+
+	*result = ret;
+}
+
+static inline void test_atomic_fetch_dec_u64(void *val, void *out, uint32_t num_round)
+{
+	odp_atomic_u64_t *atomic_val = val;
+	uint64_t *result = out;
+	uint64_t ret = 0;
+
+	for (uint32_t i = 0; i < num_round; i++)
+		ret += odp_atomic_fetch_dec_u64(atomic_val);
+
+	*result = ret;
+}
+
+static inline void test_atomic_dec_u32(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+	odp_atomic_u32_t *atomic_val = val;
+
+	for (uint32_t i = 0; i < num_round; i++)
+		odp_atomic_dec_u32(atomic_val);
+}
+
+static inline void test_atomic_dec_u64(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+	odp_atomic_u64_t *atomic_val = val;
+
+	for (uint32_t i = 0; i < num_round; i++)
+		odp_atomic_dec_u64(atomic_val);
+}
+
+/* Benchmark kernels: odp_atomic_max/min with a monotonically changing
+ * candidate value, so every round updates the variable */
+static inline void test_atomic_max_u32(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+	odp_atomic_u32_t *atomic_val = val;
+	uint32_t new_max = INIT_VAL + 1;
+
+	for (uint32_t i = 0; i < num_round; i++)
+		odp_atomic_max_u32(atomic_val, new_max++);
+}
+
+static inline void test_atomic_max_u64(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+	odp_atomic_u64_t *atomic_val = val;
+	uint64_t new_max = INIT_VAL + 1;
+
+	for (uint32_t i = 0; i < num_round; i++)
+		odp_atomic_max_u64(atomic_val, new_max++);
+}
+
+/* Expected max is INIT_VAL + num_round; the UINT32_MAX/UINT64_MAX case
+ * presumably accounts for candidate wrap around with very large round
+ * counts — TODO confirm */
+static inline int validate_atomic_max_u32(void *val, void *out ODP_UNUSED, uint32_t num_round,
+					  uint32_t num_worker ODP_UNUSED, int private ODP_UNUSED)
+{
+	uint32_t result = odp_atomic_load_u32((odp_atomic_u32_t *)val);
+
+	return (result != ((uint32_t)INIT_VAL + num_round)) && (result != UINT32_MAX);
+}
+
+static inline int validate_atomic_max_u64(void *val, void *out ODP_UNUSED, uint32_t num_round,
+					  uint32_t num_worker ODP_UNUSED, int private ODP_UNUSED)
+{
+	uint64_t result = odp_atomic_load_u64((odp_atomic_u64_t *)val);
+
+	return (result != ((uint64_t)INIT_VAL + num_round)) && (result != UINT64_MAX);
+}
+
+static inline void test_atomic_min_u32(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+	odp_atomic_u32_t *atomic_val = val;
+	uint32_t new_min = INIT_VAL - 1;
+
+	for (uint32_t i = 0; i < num_round; i++)
+		odp_atomic_min_u32(atomic_val, new_min--);
+}
+
+static inline void test_atomic_min_u64(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+	odp_atomic_u64_t *atomic_val = val;
+	uint64_t new_min = INIT_VAL - 1;
+
+	for (uint32_t i = 0; i < num_round; i++)
+		odp_atomic_min_u64(atomic_val, new_min--);
+}
+
+static inline int validate_atomic_min_u32(void *val, void *out ODP_UNUSED, uint32_t num_round,
+					  uint32_t num_worker ODP_UNUSED, int private ODP_UNUSED)
+{
+	uint32_t result = odp_atomic_load_u32((odp_atomic_u32_t *)val);
+
+	return result != ((uint32_t)INIT_VAL - num_round) && result != 0;
+}
+
+static inline int validate_atomic_min_u64(void *val, void *out ODP_UNUSED, uint32_t num_round,
+					  uint32_t num_worker ODP_UNUSED, int private ODP_UNUSED)
+{
+	uint64_t result = odp_atomic_load_u64((odp_atomic_u64_t *)val);
+
+	return result != ((uint64_t)INIT_VAL - num_round) && result != 0;
+}
+
+/* Benchmark kernels: odp_atomic_cas. On success the local expected value is
+ * advanced; on failure odp_atomic_cas_*() writes the current value into
+ * old_val, so the next round retries from the observed value. */
+static inline void test_atomic_cas_u32(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+	odp_atomic_u32_t *atomic_val = val;
+	uint32_t new_val = INIT_VAL + 1;
+	uint32_t old_val = INIT_VAL;
+
+	for (uint32_t i = 0; i < num_round; i++) {
+		if (odp_atomic_cas_u32(atomic_val, &old_val, new_val))
+			old_val = new_val++;
+	}
+}
+
+static inline void test_atomic_cas_u64(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+	odp_atomic_u64_t *atomic_val = val;
+	uint64_t new_val = INIT_VAL + 1;
+	uint64_t old_val = INIT_VAL;
+
+	for (uint32_t i = 0; i < num_round; i++) {
+		if (odp_atomic_cas_u64(atomic_val, &old_val, new_val))
+			old_val = new_val++;
+	}
+}
+
+/* Private mode: every CAS succeeds, so the final value is exactly
+ * INIT_VAL + num_round. Shared mode: some CAS operations fail, so the
+ * final value may be anything up to that bound. */
+static inline int validate_atomic_cas_u32(void *val, void *out ODP_UNUSED, uint32_t num_round,
+					  uint32_t num_worker ODP_UNUSED, int private)
+{
+	uint32_t result = odp_atomic_load_u32((odp_atomic_u32_t *)val);
+
+	if (private)
+		return result != ((uint32_t)INIT_VAL + num_round);
+
+	return result > ((uint32_t)INIT_VAL + num_round);
+}
+
+static inline int validate_atomic_cas_u64(void *val, void *out ODP_UNUSED, uint32_t num_round,
+					  uint32_t num_worker ODP_UNUSED, int private)
+{
+	uint64_t result = odp_atomic_load_u64((odp_atomic_u64_t *)val);
+
+	if (private)
+		return result != ((uint64_t)INIT_VAL + num_round);
+
+	return result > ((uint64_t)INIT_VAL + num_round);
+}
+
+/* Benchmark kernels: odp_atomic_xchg. The sum of returned (previous) values
+ * keeps the exchanges from being optimized away. */
+static inline void test_atomic_xchg_u32(void *val, void *out, uint32_t num_round)
+{
+	odp_atomic_u32_t *atomic_val = val;
+	uint32_t new_val = INIT_VAL + 1;
+	uint32_t *result = out;
+	uint32_t ret = 0;
+
+	for (uint32_t i = 0; i < num_round; i++)
+		ret += odp_atomic_xchg_u32(atomic_val, new_val++);
+
+	*result = ret;
+}
+
+static inline void test_atomic_xchg_u64(void *val, void *out, uint32_t num_round)
+{
+	odp_atomic_u64_t *atomic_val = val;
+	uint64_t new_val = INIT_VAL + 1;
+	uint64_t *result = out;
+	uint64_t ret = 0;
+
+	for (uint32_t i = 0; i < num_round; i++)
+		ret += odp_atomic_xchg_u64(atomic_val, new_val++);
+
+	*result = ret;
+}
+
+/* Benchmark kernels: acquire/release ordered variants of load, store,
+ * add and sub. Same loop shapes as the relaxed variants above so results
+ * are directly comparable. */
+static inline void test_atomic_load_acq_u32(void *val, void *out, uint32_t num_round)
+{
+	odp_atomic_u32_t *atomic_val = val;
+	uint32_t *result = out;
+	uint32_t ret = 0;
+
+	for (uint32_t i = 0; i < num_round; i++)
+		ret += odp_atomic_load_acq_u32(atomic_val);
+
+	*result = ret;
+}
+
+static inline void test_atomic_load_acq_u64(void *val, void *out, uint32_t num_round)
+{
+	odp_atomic_u64_t *atomic_val = val;
+	uint64_t *result = out;
+	uint64_t ret = 0;
+
+	for (uint32_t i = 0; i < num_round; i++)
+		ret += odp_atomic_load_acq_u64(atomic_val);
+
+	*result = ret;
+}
+
+static inline void test_atomic_store_rel_u32(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+	odp_atomic_u32_t *atomic_val = val;
+	uint32_t new_val = INIT_VAL + 1;
+
+	for (uint32_t i = 0; i < num_round; i++)
+		odp_atomic_store_rel_u32(atomic_val, new_val++);
+}
+
+static inline void test_atomic_store_rel_u64(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+	odp_atomic_u64_t *atomic_val = val;
+	uint64_t new_val = INIT_VAL + 1;
+
+	for (uint32_t i = 0; i < num_round; i++)
+		odp_atomic_store_rel_u64(atomic_val, new_val++);
+}
+
+static inline void test_atomic_add_rel_u32(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+	odp_atomic_u32_t *atomic_val = val;
+
+	for (uint32_t i = 0; i < num_round; i++)
+		odp_atomic_add_rel_u32(atomic_val, 1);
+}
+
+static inline void test_atomic_add_rel_u64(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+	odp_atomic_u64_t *atomic_val = val;
+
+	for (uint32_t i = 0; i < num_round; i++)
+		odp_atomic_add_rel_u64(atomic_val, 1);
+}
+
+static inline void test_atomic_sub_rel_u32(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+	odp_atomic_u32_t *atomic_val = val;
+
+	for (uint32_t i = 0; i < num_round; i++)
+		odp_atomic_sub_rel_u32(atomic_val, 1);
+}
+
+static inline void test_atomic_sub_rel_u64(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+	odp_atomic_u64_t *atomic_val = val;
+
+	for (uint32_t i = 0; i < num_round; i++)
+		odp_atomic_sub_rel_u64(atomic_val, 1);
+}
+
+/* Benchmark kernels: acquire/release/acq_rel ordered CAS variants. Same
+ * retry pattern as the relaxed CAS kernels; validated with the same CAS
+ * validators. */
+static inline void test_atomic_cas_acq_u32(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+	odp_atomic_u32_t *atomic_val = val;
+	uint32_t new_val = INIT_VAL + 1;
+	uint32_t old_val = INIT_VAL;
+
+	for (uint32_t i = 0; i < num_round; i++) {
+		if (odp_atomic_cas_acq_u32(atomic_val, &old_val, new_val))
+			old_val = new_val++;
+	}
+}
+
+static inline void test_atomic_cas_acq_u64(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+	odp_atomic_u64_t *atomic_val = val;
+	uint64_t new_val = INIT_VAL + 1;
+	uint64_t old_val = INIT_VAL;
+
+	for (uint32_t i = 0; i < num_round; i++) {
+		if (odp_atomic_cas_acq_u64(atomic_val, &old_val, new_val))
+			old_val = new_val++;
+	}
+}
+
+static inline void test_atomic_cas_rel_u32(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+	odp_atomic_u32_t *atomic_val = val;
+	uint32_t new_val = INIT_VAL + 1;
+	uint32_t old_val = INIT_VAL;
+
+	for (uint32_t i = 0; i < num_round; i++) {
+		if (odp_atomic_cas_rel_u32(atomic_val, &old_val, new_val))
+			old_val = new_val++;
+	}
+}
+
+static inline void test_atomic_cas_rel_u64(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+	odp_atomic_u64_t *atomic_val = val;
+	uint64_t new_val = INIT_VAL + 1;
+	uint64_t old_val = INIT_VAL;
+
+	for (uint32_t i = 0; i < num_round; i++) {
+		if (odp_atomic_cas_rel_u64(atomic_val, &old_val, new_val))
+			old_val = new_val++;
+	}
+}
+
+static inline void test_atomic_cas_acq_rel_u32(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+	odp_atomic_u32_t *atomic_val = val;
+	uint32_t new_val = INIT_VAL + 1;
+	uint32_t old_val = INIT_VAL;
+
+	for (uint32_t i = 0; i < num_round; i++) {
+		if (odp_atomic_cas_acq_rel_u32(atomic_val, &old_val, new_val))
+			old_val = new_val++;
+	}
+}
+
+static inline void test_atomic_cas_acq_rel_u64(void *val, void *out ODP_UNUSED, uint32_t num_round)
+{
+	odp_atomic_u64_t *atomic_val = val;
+	uint64_t new_val = INIT_VAL + 1;
+	uint64_t old_val = INIT_VAL;
+
+	for (uint32_t i = 0; i < num_round; i++) {
+		if (odp_atomic_cas_acq_rel_u64(atomic_val, &old_val, new_val))
+			old_val = new_val++;
+	}
+}
+
+/* Print command line help text */
+static void print_usage(void)
+{
+	printf("\n"
+	       "Atomic operations performance test\n"
+	       "\n"
+	       "Usage: odp_atomic_perf [options]\n"
+	       "\n"
+	       "  -c, --num_cpu          Number of CPUs (worker threads). 0: all available CPUs (or max %d) (default)\n"
+	       "  -r, --num_round        Number of rounds (default %u)\n"
+	       "  -p, --private          0: The same atomic variable is shared between threads (default)\n"
+	       "                         1: Atomic variables are private to each thread\n"
+	       "  -h, --help             This help\n"
+	       "\n", DEFAULT_MAX_WORKERS, NUM_ROUNDS);
+}
+
+/* Print the selected test configuration and which 64 bit atomic operations
+ * the implementation reports as lock-free */
+static void print_info(test_options_t *test_options)
+{
+	odp_atomic_op_t atomic_ops;
+
+	printf("\nAtomic operations performance test configuration:\n");
+	printf("  num cpu         %u\n", test_options->num_cpu);
+	printf("  num rounds      %u\n", test_options->num_round);
+	printf("  private         %i\n", test_options->private);
+	printf("\n");
+
+	/* Query per-operation lock-free status from the implementation */
+	atomic_ops.all_bits = 0;
+	odp_atomic_lock_free_u64(&atomic_ops);
+
+	printf("\nAtomic operations lock-free:\n");
+	printf("  odp_atomic_load_u64:      %" PRIu32 "\n", atomic_ops.op.load);
+	printf("  odp_atomic_store_u64:     %" PRIu32 "\n", atomic_ops.op.store);
+	printf("  odp_atomic_fetch_add_u64: %" PRIu32 "\n", atomic_ops.op.fetch_add);
+	printf("  odp_atomic_add_u64:       %" PRIu32 "\n", atomic_ops.op.add);
+	printf("  odp_atomic_fetch_sub_u64: %" PRIu32 "\n", atomic_ops.op.fetch_sub);
+	printf("  odp_atomic_sub_u64:       %" PRIu32 "\n", atomic_ops.op.sub);
+	printf("  odp_atomic_fetch_inc_u64: %" PRIu32 "\n", atomic_ops.op.fetch_inc);
+	printf("  odp_atomic_inc_u64:       %" PRIu32 "\n", atomic_ops.op.inc);
+	printf("  odp_atomic_fetch_dec_u64: %" PRIu32 "\n", atomic_ops.op.fetch_dec);
+	printf("  odp_atomic_dec_u64:       %" PRIu32 "\n", atomic_ops.op.dec);
+	printf("  odp_atomic_min_u64:       %" PRIu32 "\n", atomic_ops.op.min);
+	printf("  odp_atomic_max_u64:       %" PRIu32 "\n", atomic_ops.op.max);
+	printf("  odp_atomic_cas_u64:       %" PRIu32 "\n", atomic_ops.op.cas);
+	printf("  odp_atomic_xchg_u64:      %" PRIu32 "\n", atomic_ops.op.xchg);
+	printf("\n\n");
+}
+
+/* Parse command line options into test_options. Returns 0 on success,
+ * -1 on error or when help was requested. */
+static int parse_options(int argc, char *argv[], test_options_t *test_options)
+{
+	int opt;
+	int long_index;
+	int ret = 0;
+
+	static const struct option longopts[] = {
+		{"num_cpu", required_argument, NULL, 'c'},
+		{"num_round", required_argument, NULL, 'r'},
+		{"private", required_argument, NULL, 'p'},
+		{"help", no_argument, NULL, 'h'},
+		{NULL, 0, NULL, 0}
+	};
+
+	static const char *shortopts = "+c:r:p:h";
+
+	/* Defaults before parsing */
+	memset(test_options, 0, sizeof(test_options_t));
+	test_options->num_cpu = 0;
+	test_options->num_round = NUM_ROUNDS;
+	test_options->private = 0;
+
+	while (1) {
+		opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
+
+		if (opt == -1)
+			break;
+
+		switch (opt) {
+		case 'c':
+			/* NOTE(review): atoi/atol do no error checking; a
+			 * negative value wraps into a huge unsigned count and
+			 * is only caught by later range checks — consider
+			 * strtoul with validation */
+			test_options->num_cpu = atoi(optarg);
+			break;
+		case 'r':
+			test_options->num_round = atol(optarg);
+			break;
+		case 'p':
+			test_options->private = atoi(optarg);
+			break;
+		case 'h':
+			/* fall through */
+		default:
+			print_usage();
+			ret = -1;
+			break;
+		}
+	}
+
+	/* num_round is unsigned: this rejects only zero */
+	if (test_options->num_round < 1) {
+		ODPH_ERR("Invalid number of test rounds: %" PRIu32 "\n", test_options->num_round);
+		return -1;
+	}
+
+	return ret;
+}
+
+/* Resolve the worker CPU mask and count. num_cpu == 0 selects all available
+ * worker CPUs, capped to DEFAULT_MAX_WORKERS. Also initializes the start
+ * barrier for the resolved worker count. Returns 0 on success. */
+static int set_num_cpu(test_global_t *global)
+{
+	int ret, max_num;
+	test_options_t *test_options = &global->test_options;
+	int num_cpu = test_options->num_cpu;
+
+	/* One thread used for the main thread */
+	if (num_cpu > ODP_THREAD_COUNT_MAX - 1) {
+		ODPH_ERR("Too many workers. Maximum is %i.\n", ODP_THREAD_COUNT_MAX - 1);
+		return -1;
+	}
+
+	max_num = num_cpu;
+	if (num_cpu == 0) {
+		max_num = ODP_THREAD_COUNT_MAX - 1;
+		if (max_num > DEFAULT_MAX_WORKERS)
+			max_num = DEFAULT_MAX_WORKERS;
+	}
+
+	ret = odp_cpumask_default_worker(&global->cpumask, max_num);
+
+	if (num_cpu && ret != num_cpu) {
+		ODPH_ERR("Too many workers. Max supported %i.\n", ret);
+		return -1;
+	}
+
+	/* Zero: all available workers */
+	if (num_cpu == 0) {
+		if (ret > max_num) {
+			ODPH_ERR("Too many cpus from odp_cpumask_default_worker(): %i\n", ret);
+			return -1;
+		}
+
+		num_cpu = ret;
+		test_options->num_cpu = num_cpu;
+	}
+
+	odp_barrier_init(&global->barrier, num_cpu);
+
+	return 0;
+}
+
+/* Reset the shared atomic, all private atomics and all output slots to a
+ * known state before running one test case. Returns 0 on success, -1 on an
+ * unknown operand type. */
+static int init_test(test_global_t *global, const char *name, op_bit_t type)
+{
+	printf("TEST: %s\n", name);
+
+	if (type == OP_32BIT)
+		odp_atomic_init_u32(&global->atomic_u32, INIT_VAL);
+	else if (type == OP_64BIT)
+		odp_atomic_init_u64(&global->atomic_u64, INIT_VAL);
+	else
+		return -1;
+
+	for (int i = 0; i < ODP_THREAD_COUNT_MAX; i++) {
+		if (type == OP_32BIT) {
+			global->output[i].u32 = 0;
+			odp_atomic_init_u32(&global->atomic_private[i].u32, INIT_VAL);
+		} else {
+			global->output[i].u64 = 0;
+			odp_atomic_init_u64(&global->atomic_private[i].u64, INIT_VAL);
+		}
+	}
+	return 0;
+}
+
+/* Worker thread entry point: select the shared or private atomic variable,
+ * synchronize on the barrier so all workers start together, time the
+ * benchmark kernel, and store the duration and output for later validation. */
+static int run_test(void *arg)
+{
+	uint64_t nsec;
+	odp_time_t t1, t2;
+	test_thread_ctx_t *thread_ctx = arg;
+	test_global_t *global = thread_ctx->global;
+	test_options_t *test_options = &global->test_options;
+	uint32_t num_round = test_options->num_round;
+	uint32_t idx = thread_ctx->idx;
+	test_fn_t test_func = thread_ctx->func;
+	op_bit_t type = thread_ctx->type;
+	void *val;
+	void *out;
+	uint32_t out_u32 = 0;
+	uint64_t out_u64 = 0;
+
+	if (type == OP_32BIT) {
+		val = &global->atomic_u32;
+		out = &out_u32;
+	} else {
+		val = &global->atomic_u64;
+		out = &out_u64;
+	}
+
+	/* Private mode: each worker operates on its own cache aligned atomic */
+	if (global->test_options.private) {
+		if (type == OP_32BIT)
+			val = &global->atomic_private[idx].u32;
+		else
+			val = &global->atomic_private[idx].u64;
+	}
+
+	/* Start all workers at the same time */
+	odp_barrier_wait(&global->barrier);
+
+	t1 = odp_time_local();
+
+	test_func(val, out, num_round);
+
+	t2 = odp_time_local();
+
+	nsec = odp_time_diff_ns(t2, t1);
+
+	/* Update stats */
+	thread_ctx->nsec = nsec;
+	if (type == OP_32BIT)
+		global->output[idx].u32 = out_u32;
+	else
+		global->output[idx].u64 = out_u64;
+
+	return 0;
+}
+
+/* Create num_cpu worker threads, each running run_test() with the given
+ * benchmark kernel and operand type. Returns 0 on success, -1 if not all
+ * threads could be created. */
+static int start_workers(test_global_t *global, odp_instance_t instance,
+			 test_fn_t func, op_bit_t type)
+{
+	odph_thread_common_param_t param;
+	int i, ret;
+	test_options_t *test_options = &global->test_options;
+	int num_cpu = test_options->num_cpu;
+	odph_thread_param_t thr_param[num_cpu];
+
+	memset(&param, 0, sizeof(odph_thread_common_param_t));
+	param.instance = instance;
+	param.cpumask = &global->cpumask;
+
+	memset(thr_param, 0, sizeof(thr_param));
+	for (i = 0; i < num_cpu; i++) {
+		test_thread_ctx_t *thread_ctx = &global->thread_ctx[i];
+
+		thread_ctx->global = global;
+		thread_ctx->idx = i;
+		thread_ctx->func = func;
+		thread_ctx->type = type;
+
+		thr_param[i].thr_type = ODP_THREAD_WORKER;
+		thr_param[i].start = run_test;
+		thr_param[i].arg = thread_ctx;
+	}
+
+	ret = odph_thread_create(global->thread_tbl, &param, thr_param, num_cpu);
+	if (ret != num_cpu) {
+		ODPH_ERR("Failed to create all threads %i\n", ret);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Run the test case's validator for every worker, against either the shared
+ * atomic or each worker's private one. Returns 0 when all workers pass,
+ * -1 on the first failure. */
+static int validate_results(test_global_t *global, validate_fn_t validate, op_bit_t type)
+{
+	int i;
+	test_options_t *test_options = &global->test_options;
+	uint32_t num_round = test_options->num_round;
+	int num_cpu = test_options->num_cpu;
+	int private = global->test_options.private;
+	void *val;
+	void *out;
+
+	for (i = 0; i < num_cpu; i++) {
+		if (type == OP_32BIT) {
+			out = &global->output[i].u32;
+			val = &global->atomic_u32;
+			if (private)
+				val = &global->atomic_private[i].u32;
+		} else {
+			out = &global->output[i].u64;
+			val = &global->atomic_u64;
+			if (private)
+				val = &global->atomic_private[i].u64;
+		}
+
+		if (validate(val, out, num_round, num_cpu, private))
+			return -1;
+	}
+	return 0;
+}
+
+/* Print per-thread and average throughput (millions of operations per
+ * second) for the last test case. Workers with a zero duration (unused
+ * slots) are skipped. */
+static void print_stat(test_global_t *global)
+{
+	int i, num;
+	double nsec_ave;
+	test_options_t *test_options = &global->test_options;
+	int num_cpu = test_options->num_cpu;
+	uint32_t num_round = test_options->num_round;
+	uint64_t nsec_sum = 0;
+
+	for (i = 0; i < ODP_THREAD_COUNT_MAX; i++)
+		nsec_sum += global->thread_ctx[i].nsec;
+
+	if (nsec_sum == 0) {
+		printf("No results.\n");
+		return;
+	}
+
+	/* Average in floating point: plain nsec_sum / num_cpu would do
+	 * integer division and truncate before the conversions below */
+	nsec_ave = (double)nsec_sum / num_cpu;
+	num = 0;
+
+	printf("---------------------------------------------\n");
+	printf("Per thread results (Millions of ops per sec):\n");
+	printf("---------------------------------------------\n");
+	printf("          1        2        3        4        5        6        7        8        9       10");
+
+	for (i = 0; i < ODP_THREAD_COUNT_MAX; i++) {
+		if (global->thread_ctx[i].nsec) {
+			if ((num % 10) == 0)
+				printf("\n   ");
+
+			/* ops / (nsec / 1000) == millions of ops per second */
+			printf("%8.2f ", num_round / (global->thread_ctx[i].nsec / 1000.0));
+			num++;
+		}
+	}
+	printf("\n\n");
+
+	printf("Average results over %i threads:\n", num_cpu);
+	printf("---------------------------------------\n");
+	printf("  duration:           %8.2f  sec\n", nsec_ave / ODP_TIME_SEC_IN_NS);
+	printf("  operations per cpu: %8.2fM  ops/sec\n", num_round / (nsec_ave / 1000.0));
+	printf("  total operations:   %8.2fM  ops/sec\n",
+	       (num_cpu * num_round) / (nsec_ave / 1000.0));
+	printf("\n\n");
+}
+
+/**
+ * Test functions
+ */
+static test_case_t test_suite[] = {
+ TEST_INFO("odp_atomic_load_u32", test_atomic_load_u32,
+ validate_atomic_init_val_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_store_u32", test_atomic_store_u32,
+ validate_atomic_num_round_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_fetch_add_u32", test_atomic_fetch_add_u32,
+ validate_atomic_add_round_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_add_u32", test_atomic_add_u32,
+ validate_atomic_add_round_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_fetch_sub_u32", test_atomic_fetch_sub_u32,
+ validate_atomic_sub_round_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_sub_u32", test_atomic_sub_u32,
+ validate_atomic_sub_round_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_fetch_inc_u32", test_atomic_fetch_inc_u32,
+ validate_atomic_add_round_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_inc_u32", test_atomic_inc_u32,
+ validate_atomic_add_round_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_fetch_dec_u32", test_atomic_fetch_dec_u32,
+ validate_atomic_sub_round_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_dec_u32", test_atomic_dec_u32,
+ validate_atomic_sub_round_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_max_u32", test_atomic_max_u32,
+ validate_atomic_max_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_min_u32", test_atomic_min_u32,
+ validate_atomic_min_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_cas_u32", test_atomic_cas_u32,
+ validate_atomic_cas_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_xchg_u32", test_atomic_xchg_u32,
+ validate_atomic_num_round_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_load_acq_u32", test_atomic_load_acq_u32,
+ validate_atomic_init_val_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_store_rel_u32", test_atomic_store_rel_u32,
+ validate_atomic_num_round_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_add_rel_u32", test_atomic_add_rel_u32,
+ validate_atomic_add_round_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_sub_rel_u32", test_atomic_sub_rel_u32,
+ validate_atomic_sub_round_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_cas_acq_u32", test_atomic_cas_acq_u32,
+ validate_atomic_cas_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_cas_rel_u32", test_atomic_cas_rel_u32,
+ validate_atomic_cas_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_cas_acq_rel_u32", test_atomic_cas_acq_rel_u32,
+ validate_atomic_cas_u32, OP_32BIT),
+ TEST_INFO("odp_atomic_load_u64", test_atomic_load_u64,
+ validate_atomic_init_val_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_store_u64", test_atomic_store_u64,
+ validate_atomic_num_round_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_fetch_add_u64", test_atomic_fetch_add_u64,
+ validate_atomic_add_round_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_add_u64", test_atomic_add_u64,
+ validate_atomic_add_round_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_fetch_sub_u64", test_atomic_fetch_sub_u64,
+ validate_atomic_sub_round_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_sub_u64", test_atomic_sub_u64,
+ validate_atomic_sub_round_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_fetch_inc_u64", test_atomic_fetch_inc_u64,
+ validate_atomic_add_round_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_inc_u64", test_atomic_inc_u64,
+ validate_atomic_add_round_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_fetch_dec_u64", test_atomic_fetch_dec_u64,
+ validate_atomic_sub_round_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_dec_u64", test_atomic_dec_u64,
+ validate_atomic_sub_round_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_max_u64", test_atomic_max_u64,
+ validate_atomic_max_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_min_u64", test_atomic_min_u64,
+ validate_atomic_min_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_cas_u64", test_atomic_cas_u64,
+ validate_atomic_cas_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_xchg_u64", test_atomic_xchg_u64,
+ validate_atomic_num_round_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_load_acq_u64", test_atomic_load_acq_u64,
+ validate_atomic_init_val_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_store_rel_u64", test_atomic_store_rel_u64,
+ validate_atomic_num_round_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_add_rel_u64", test_atomic_add_rel_u64,
+ validate_atomic_add_round_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_sub_rel_u64", test_atomic_sub_rel_u64,
+ validate_atomic_sub_round_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_cas_acq_u64", test_atomic_cas_acq_u64,
+ validate_atomic_cas_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_cas_rel_u64", test_atomic_cas_rel_u64,
+ validate_atomic_cas_u64, OP_64BIT),
+ TEST_INFO("odp_atomic_cas_acq_rel_u64", test_atomic_cas_acq_rel_u64,
+ validate_atomic_cas_u64, OP_64BIT)
+};
+
+int main(int argc, char **argv)
+{
+ odp_instance_t instance;
+ odp_init_t init;
+ odp_shm_t shm;
+ test_options_t test_options;
+ int num_tests, i;
+
+ if (parse_options(argc, argv, &test_options))
+ exit(EXIT_FAILURE);
+
+ /* List features not to be used */
+ odp_init_param_init(&init);
+ init.not_used.feat.cls = 1;
+ init.not_used.feat.compress = 1;
+ init.not_used.feat.crypto = 1;
+ init.not_used.feat.ipsec = 1;
+ init.not_used.feat.schedule = 1;
+ init.not_used.feat.stash = 1;
+ init.not_used.feat.timer = 1;
+ init.not_used.feat.tm = 1;
+
+ /* Init ODP before calling anything else */
+ if (odp_init_global(&instance, &init, NULL)) {
+ ODPH_ERR("Global init failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Init this thread */
+ if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
+ ODPH_ERR("Local init failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Reserve memory for global data from shared mem */
+ shm = odp_shm_reserve("test_global", sizeof(test_global_t),
+ ODP_CACHE_LINE_SIZE, 0);
+
+ if (shm == ODP_SHM_INVALID) {
+ ODPH_ERR("Shared memory reserve failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ test_global = odp_shm_addr(shm);
+ if (test_global == NULL) {
+ ODPH_ERR("Shared memory alloc failed.\n");
+ exit(EXIT_FAILURE);
+ }
+ memset(test_global, 0, sizeof(test_global_t));
+ test_global->test_options = test_options;
+
+ odp_sys_info_print();
+
+ if (set_num_cpu(test_global))
+ exit(EXIT_FAILURE);
+
+ print_info(&test_global->test_options);
+
+ /* Loop all test cases */
+ num_tests = sizeof(test_suite) / sizeof(test_suite[0]);
+
+ for (i = 0; i < num_tests; i++) {
+ /* Initialize test variables */
+ if (init_test(test_global, test_suite[i].name, test_suite[i].type)) {
+ ODPH_ERR("Failed to initialize atomics.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Start workers */
+ if (start_workers(test_global, instance, test_suite[i].test_fn, test_suite[i].type))
+ exit(EXIT_FAILURE);
+
+ /* Wait workers to exit */
+ odph_thread_join(test_global->thread_tbl, test_global->test_options.num_cpu);
+
+ print_stat(test_global);
+
+ /* Validate test results */
+ if (validate_results(test_global, test_suite[i].validate_fn, test_suite[i].type)) {
+ ODPH_ERR("Test %s result validation failed.\n", test_suite[i].name);
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ if (odp_shm_free(shm)) {
+ ODPH_ERR("Shm free failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (odp_term_local()) {
+ ODPH_ERR("Local terminate failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (odp_term_global(instance)) {
+ ODPH_ERR("Global terminate failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ return 0;
+}
diff --git a/test/performance/odp_mem_perf.c b/test/performance/odp_mem_perf.c
index a833a04e8..0470d337a 100644
--- a/test/performance/odp_mem_perf.c
+++ b/test/performance/odp_mem_perf.c
@@ -135,7 +135,7 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
static int set_num_cpu(test_global_t *global)
{
- int ret;
+ int ret, max_num;
test_options_t *test_options = &global->test_options;
int num_cpu = test_options->num_cpu;
@@ -145,7 +145,11 @@ static int set_num_cpu(test_global_t *global)
return -1;
}
- ret = odp_cpumask_default_worker(&global->cpumask, num_cpu);
+ max_num = num_cpu;
+ if (num_cpu == 0)
+ max_num = ODP_THREAD_COUNT_MAX - 1;
+
+ ret = odp_cpumask_default_worker(&global->cpumask, max_num);
if (num_cpu && ret != num_cpu) {
ODPH_ERR("Too many workers. Max supported %i.\n", ret);
@@ -154,6 +158,11 @@ static int set_num_cpu(test_global_t *global)
/* Zero: all available workers */
if (num_cpu == 0) {
+ if (ret > max_num) {
+ ODPH_ERR("Too many cpus from odp_cpumask_default_worker(): %i\n", ret);
+ return -1;
+ }
+
num_cpu = ret;
test_options->num_cpu = num_cpu;
}
diff --git a/test/performance/odp_sched_latency_run.sh b/test/performance/odp_sched_latency_run.sh
index bcccd77a9..372fdb166 100755
--- a/test/performance/odp_sched_latency_run.sh
+++ b/test/performance/odp_sched_latency_run.sh
@@ -14,9 +14,13 @@ ALL=0
run()
{
echo odp_sched_latency_run starts requesting $1 worker threads
- echo ===============================================
+ echo =========================================================
- $TEST_DIR/odp_sched_latency${EXEEXT} -c $1 || exit $?
+ if [ $(nproc) -lt $1 ]; then
+ echo "Not enough CPU cores. Skipping test."
+ else
+ $TEST_DIR/odp_sched_latency${EXEEXT} -c $1 || exit $?
+ fi
}
run 1
diff --git a/test/performance/odp_scheduling_run.sh b/test/performance/odp_scheduling_run.sh
index 082dc4521..2b4281ee9 100755
--- a/test/performance/odp_scheduling_run.sh
+++ b/test/performance/odp_scheduling_run.sh
@@ -14,14 +14,17 @@ ALL=0
run()
{
echo odp_scheduling_run starts requesting $1 worker threads
- echo ===============================================
+ echo ======================================================
- $TEST_DIR/odp_scheduling${EXEEXT} -c $1
-
- RET_VAL=$?
- if [ $RET_VAL -ne 0 ]; then
- echo odp_scheduling FAILED
- exit $RET_VAL
+ if [ $(nproc) -lt $1 ]; then
+ echo "Not enough CPU cores. Skipping test."
+ else
+ $TEST_DIR/odp_scheduling${EXEEXT} -c $1
+ RET_VAL=$?
+ if [ $RET_VAL -ne 0 ]; then
+ echo odp_scheduling FAILED
+ exit $RET_VAL
+ fi
fi
}
diff --git a/test/validation/api/atomic/atomic.c b/test/validation/api/atomic/atomic.c
index 36484295f..907624bb0 100644
--- a/test/validation/api/atomic/atomic.c
+++ b/test/validation/api/atomic/atomic.c
@@ -28,11 +28,13 @@
#define CHECK_MAX_MIN (1 << 0)
#define CHECK_XCHG (1 << 2)
+#define CHECK_CAS_128 (1 << 4)
typedef __volatile uint32_t volatile_u32_t;
typedef __volatile uint64_t volatile_u64_t;
typedef struct {
+ odp_atomic_u128_t a128u;
odp_atomic_u64_t a64u;
odp_atomic_u64_t a64u_min;
odp_atomic_u64_t a64u_max;
@@ -489,6 +491,90 @@ static void test_atomic_non_relaxed_64(void)
}
}
+static void test_atomic_relaxed_128(void)
+{
+ int i, ret;
+ odp_u128_t old, new;
+ odp_atomic_u128_t *a128u = &global_mem->a128u;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++) {
+ old = odp_atomic_load_u128(a128u);
+
+ do {
+ new.u64[0] = old.u64[0] + 2;
+ new.u64[1] = old.u64[1] + 1;
+
+ ret = odp_atomic_cas_u128(a128u, &old, new);
+
+ } while (ret == 0);
+ }
+}
+
+static void test_atomic_non_relaxed_128_acq(void)
+{
+ int i, ret;
+ odp_u128_t old, new;
+ odp_atomic_u128_t *a128u = &global_mem->a128u;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++) {
+ old = odp_atomic_load_u128(a128u);
+
+ do {
+ new.u64[0] = old.u64[0] + 2;
+ new.u64[1] = old.u64[1] + 1;
+
+ ret = odp_atomic_cas_acq_u128(a128u, &old, new);
+
+ } while (ret == 0);
+ }
+}
+
+static void test_atomic_non_relaxed_128_rel(void)
+{
+ int i, ret;
+ odp_u128_t old, new;
+ odp_atomic_u128_t *a128u = &global_mem->a128u;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++) {
+ old = odp_atomic_load_u128(a128u);
+
+ do {
+ new.u64[0] = old.u64[0] + 2;
+ new.u64[1] = old.u64[1] + 1;
+
+ ret = odp_atomic_cas_rel_u128(a128u, &old, new);
+
+ } while (ret == 0);
+ }
+}
+
+static void test_atomic_non_relaxed_128_acq_rel(void)
+{
+ int i, ret;
+ odp_u128_t old, new;
+ odp_atomic_u128_t *a128u = &global_mem->a128u;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ for (i = 0; i < CNT; i++) {
+ old = odp_atomic_load_u128(a128u);
+
+ do {
+ new.u64[0] = old.u64[0] + 2;
+ new.u64[1] = old.u64[1] + 1;
+
+ ret = odp_atomic_cas_acq_rel_u128(a128u, &old, new);
+
+ } while (ret == 0);
+ }
+}
+
static void test_atomic_inc_dec_32(void)
{
test_atomic_inc_32();
@@ -561,6 +647,14 @@ static void test_atomic_cas_inc_dec_64(void)
test_atomic_cas_dec_64();
}
+static void test_atomic_cas_inc_128(void)
+{
+ test_atomic_relaxed_128();
+ test_atomic_non_relaxed_128_acq();
+ test_atomic_non_relaxed_128_rel();
+ test_atomic_non_relaxed_128_acq_rel();
+}
+
static void test_atomic_init(void)
{
odp_atomic_init_u32(&global_mem->a32u, 0);
@@ -571,6 +665,12 @@ static void test_atomic_init(void)
odp_atomic_init_u64(&global_mem->a64u_max, 0);
odp_atomic_init_u32(&global_mem->a32u_xchg, 0);
odp_atomic_init_u64(&global_mem->a64u_xchg, 0);
+
+ odp_u128_t a128u_tmp;
+
+ a128u_tmp.u64[0] = 0;
+ a128u_tmp.u64[1] = 0;
+ odp_atomic_init_u128(&global_mem->a128u, a128u_tmp);
}
static void test_atomic_store(void)
@@ -583,6 +683,12 @@ static void test_atomic_store(void)
odp_atomic_store_u64(&global_mem->a64u_max, U64_INIT_VAL);
odp_atomic_store_u32(&global_mem->a32u_xchg, U32_INIT_VAL);
odp_atomic_store_u64(&global_mem->a64u_xchg, U64_INIT_VAL);
+
+ odp_u128_t a128u_tmp;
+
+ a128u_tmp.u64[0] = U64_INIT_VAL;
+ a128u_tmp.u64[1] = U64_INIT_VAL;
+ odp_atomic_store_u128(&global_mem->a128u, a128u_tmp);
}
static void test_atomic_validate(int check)
@@ -590,6 +696,20 @@ static void test_atomic_validate(int check)
CU_ASSERT(U32_INIT_VAL == odp_atomic_load_u32(&global_mem->a32u));
CU_ASSERT(U64_INIT_VAL == odp_atomic_load_u64(&global_mem->a64u));
+ odp_u128_t a128u_tmp;
+
+ a128u_tmp = odp_atomic_load_u128(&global_mem->a128u);
+
+ if (check & CHECK_CAS_128) {
+ uint64_t iterations = 0;
+
+ iterations = a128u_tmp.u64[0] - a128u_tmp.u64[1];
+ CU_ASSERT(iterations == 4 * CNT * global_mem->g_num_threads);
+ } else {
+ CU_ASSERT(U64_INIT_VAL == a128u_tmp.u64[0]);
+ CU_ASSERT(U64_INIT_VAL == a128u_tmp.u64[1]);
+ }
+
if (check & CHECK_MAX_MIN) {
CU_ASSERT(odp_atomic_load_u32(&global_mem->a32u_max) >
odp_atomic_load_u32(&global_mem->a32u_min));
@@ -763,6 +883,7 @@ static int test_atomic_cas_inc_dec_thread(void *arg UNUSED)
per_thread_mem = thread_init();
test_atomic_cas_inc_dec_32();
test_atomic_cas_inc_dec_64();
+ test_atomic_cas_inc_128();
thread_finalize(per_thread_mem);
@@ -807,51 +928,9 @@ static void test_atomic_functional(int func_ptr(void *), int check)
test_atomic_validate(check);
}
-static void atomic_test_atomic_inc_dec(void)
-{
- test_atomic_functional(test_atomic_inc_dec_thread, 0);
-}
-
-static void atomic_test_atomic_add_sub(void)
-{
- test_atomic_functional(test_atomic_add_sub_thread, 0);
-}
-
-static void atomic_test_atomic_fetch_inc_dec(void)
-{
- test_atomic_functional(test_atomic_fetch_inc_dec_thread, 0);
-}
-
-static void atomic_test_atomic_fetch_add_sub(void)
-{
- test_atomic_functional(test_atomic_fetch_add_sub_thread, 0);
-}
-
-static void atomic_test_atomic_max_min(void)
-{
- test_atomic_functional(test_atomic_max_min_thread, CHECK_MAX_MIN);
-}
-
-static void atomic_test_atomic_cas_inc_dec(void)
-{
- test_atomic_functional(test_atomic_cas_inc_dec_thread, 0);
-}
-
-static void atomic_test_atomic_xchg(void)
-{
- test_atomic_functional(test_atomic_xchg_thread, CHECK_XCHG);
-}
-
-static void atomic_test_atomic_non_relaxed(void)
-{
- test_atomic_functional(test_atomic_non_relaxed_thread,
- CHECK_MAX_MIN | CHECK_XCHG);
-}
-
-static void atomic_test_atomic_op_lock_free(void)
+static void test_atomic_op_lock_free_set(void)
{
odp_atomic_op_t atomic_op;
- int ret_null, ret;
memset(&atomic_op, 0xff, sizeof(odp_atomic_op_t));
atomic_op.all_bits = 0;
@@ -897,6 +976,12 @@ static void atomic_test_atomic_op_lock_free(void)
CU_ASSERT(atomic_op.all_bits != 0);
atomic_op.op.dec = 0;
CU_ASSERT(atomic_op.all_bits == 0);
+}
+
+static void test_atomic_op_lock_free_64(void)
+{
+ odp_atomic_op_t atomic_op;
+ int ret_null, ret;
memset(&atomic_op, 0xff, sizeof(odp_atomic_op_t));
ret = odp_atomic_lock_free_u64(&atomic_op);
@@ -954,6 +1039,93 @@ static void atomic_test_atomic_op_lock_free(void)
}
}
+static void test_atomic_op_lock_free_128(void)
+{
+ odp_atomic_op_t atomic_op;
+ int ret_null, ret;
+
+ memset(&atomic_op, 0xff, sizeof(odp_atomic_op_t));
+ ret = odp_atomic_lock_free_u128(&atomic_op);
+ ret_null = odp_atomic_lock_free_u128(NULL);
+
+ CU_ASSERT(ret == ret_null);
+
+	/* The init operation is not atomic by the spec, so
+	 * odp_atomic_lock_free_u128() zeros the op.init flag but never sets it. */
+
+ if (ret == 0) {
+ /* none are lock free */
+ CU_ASSERT(atomic_op.all_bits == 0);
+ CU_ASSERT(atomic_op.op.init == 0);
+ CU_ASSERT(atomic_op.op.load == 0);
+ CU_ASSERT(atomic_op.op.store == 0);
+ CU_ASSERT(atomic_op.op.cas == 0);
+ }
+
+ if (ret == 1) {
+ /* some are lock free */
+ CU_ASSERT(atomic_op.all_bits != 0);
+ CU_ASSERT(atomic_op.op.init == 0);
+ }
+
+ if (ret == 2) {
+ /* all are lock free */
+ CU_ASSERT(atomic_op.all_bits != 0);
+ CU_ASSERT(atomic_op.op.init == 0);
+ CU_ASSERT(atomic_op.op.load == 1);
+ CU_ASSERT(atomic_op.op.store == 1);
+ CU_ASSERT(atomic_op.op.cas == 1);
+ }
+}
+
+static void atomic_test_atomic_inc_dec(void)
+{
+ test_atomic_functional(test_atomic_inc_dec_thread, 0);
+}
+
+static void atomic_test_atomic_add_sub(void)
+{
+ test_atomic_functional(test_atomic_add_sub_thread, 0);
+}
+
+static void atomic_test_atomic_fetch_inc_dec(void)
+{
+ test_atomic_functional(test_atomic_fetch_inc_dec_thread, 0);
+}
+
+static void atomic_test_atomic_fetch_add_sub(void)
+{
+ test_atomic_functional(test_atomic_fetch_add_sub_thread, 0);
+}
+
+static void atomic_test_atomic_max_min(void)
+{
+ test_atomic_functional(test_atomic_max_min_thread, CHECK_MAX_MIN);
+}
+
+static void atomic_test_atomic_cas_inc_dec(void)
+{
+ test_atomic_functional(test_atomic_cas_inc_dec_thread, CHECK_CAS_128);
+}
+
+static void atomic_test_atomic_xchg(void)
+{
+ test_atomic_functional(test_atomic_xchg_thread, CHECK_XCHG);
+}
+
+static void atomic_test_atomic_non_relaxed(void)
+{
+ test_atomic_functional(test_atomic_non_relaxed_thread,
+ CHECK_MAX_MIN | CHECK_XCHG);
+}
+
+static void atomic_test_atomic_op_lock_free(void)
+{
+ test_atomic_op_lock_free_set();
+ test_atomic_op_lock_free_64();
+ test_atomic_op_lock_free_128();
+}
+
odp_testinfo_t atomic_suite_atomic[] = {
ODP_TEST_INFO(atomic_test_atomic_inc_dec),
ODP_TEST_INFO(atomic_test_atomic_add_sub),
diff --git a/test/validation/api/chksum/chksum.c b/test/validation/api/chksum/chksum.c
index ce905c04b..86306ab0b 100644
--- a/test/validation/api/chksum/chksum.c
+++ b/test/validation/api/chksum/chksum.c
@@ -313,10 +313,122 @@ static void chksum_ones_complement_udp_long(void)
CU_ASSERT(res == UDP_LONG_CHKSUM);
}
+static uint16_t chksum_rfc1071(const void *p, uint32_t len)
+{
+ uint32_t sum = 0;
+ const uint16_t *data = p;
+
+ while (len > 1) {
+ sum += *data++;
+ len -= 2;
+ }
+
+ /* Add left-over byte, if any */
+ if (len > 0) {
+ uint16_t left_over = 0;
+
+ *(uint8_t *)&left_over = *(const uint8_t *)data;
+ sum += left_over;
+ }
+
+ /* Fold 32-bit sum to 16 bits */
+ while (sum >> 16)
+ sum = (sum & 0xffff) + (sum >> 16);
+
+ return sum;
+}
+
+/*
+ * 64-bit KISS RNGs
+ * George Marsaglia
+ * https://www.thecodingforums.com/threads/64-bit-kiss-rngs.673657
+ */
+
+static unsigned long long x = 1234567890987654321ULL, c = 123456123456123456ULL,
+ y = 362436362436362436ULL, z = 1066149217761810ULL, t;
+
+#define MWC (t = (x << 58) + c, c = (x >> 6), x += t, c += (x < t), x)
+#define XSH (y ^= (y << 13), y ^= (y >> 17), y ^= (y << 43))
+#define CNG (z = 6906969069LL * z + 1234567)
+#define KISS (MWC + XSH + CNG)
+
+/*
+ * Test with pseudorandom data and different data lengths and alignments.
+ */
+static void chksum_ones_complement_pseudorandom(void)
+{
+ const int size = 32 * 1024;
+ const unsigned long page = 4096;
+ /* Allocate some extra pages for alignment and length. */
+ uint8_t *buf = (uint8_t *)malloc(size + page * 4);
+ uint8_t *data = (uint8_t *)(((uintptr_t)buf + (page - 1)) & ~(page - 1));
+
+ for (int i = 0; i < (size + (int)page * 3) / 8; i++)
+ ((uint64_t *)(uintptr_t)data)[i] = KISS;
+
+ /* Test data lengths from 1 to more than 9000 bytes. */
+ for (int len = 1; len < 10000; len++) {
+ /*
+ * To avoid spending too much time on long data, the number of
+ * rounds goes down as data length goes up.
+ */
+ int rounds = 1000000000 / (len * len + 1000000);
+
+ for (int i = 0; i < rounds; i++) {
+ /* Align p to two bytes. */
+ uint8_t *p = data + (KISS & (size - 1) & ~1UL);
+ /*
+ * Generate some fresh random bits at the start of the
+ * data to be checksummed.
+ */
+ uint64_t rnd = KISS;
+
+ memcpy(p, &rnd, sizeof(rnd));
+ CU_ASSERT(chksum_rfc1071(p, len) ==
+ odp_chksum_ones_comp16(p, len));
+ }
+ }
+
+ free(buf);
+}
+
+/*
+ * Test with very long data with most of the bits set. The idea is to
+ * maximize the number of carries.
+ */
+static void chksum_ones_complement_very_long(void)
+{
+ const int size = 64 * 1024;
+ const unsigned long page = 4096;
+ /* Allocate two extra pages for alignment. */
+ uint8_t *buf = (uint8_t *)malloc(size + page * 2);
+ uint8_t *data = (uint8_t *)(((uintptr_t)buf + (page - 1)) & ~(page - 1));
+
+ /* Start with all bits set. */
+ memset(data, 0xff, size + page);
+
+ for (int i = 0; i < 100; i++) {
+ for (int len = size - 8; len <= size; len++) {
+ /* Alignment 0, 2, 4, 6, 8. */
+ for (int a = 0; a <= 8; a += 2)
+ CU_ASSERT(chksum_rfc1071(data + a, len) ==
+ odp_chksum_ones_comp16(data + a, len));
+ }
+
+ /* Turn off some random bits in the data. */
+ uint64_t rnd = KISS;
+ ((uint8_t *)data)[rnd & (size - 1)] &= (rnd >> 32) & 0xff;
+ }
+
+ free(buf);
+}
+
odp_testinfo_t chksum_suite[] = {
ODP_TEST_INFO(chksum_ones_complement_ip),
ODP_TEST_INFO(chksum_ones_complement_udp),
ODP_TEST_INFO(chksum_ones_complement_udp_long),
+ ODP_TEST_INFO(chksum_ones_complement_pseudorandom),
+ ODP_TEST_INFO(chksum_ones_complement_very_long),
ODP_TEST_INFO_NULL,
};
diff --git a/test/validation/api/ipsec/ipsec.c b/test/validation/api/ipsec/ipsec.c
index 43322e36c..1458e9953 100644
--- a/test/validation/api/ipsec/ipsec.c
+++ b/test/validation/api/ipsec/ipsec.c
@@ -261,6 +261,18 @@ int ipsec_check_esp_chacha20_poly1305(void)
ODP_AUTH_ALG_CHACHA20_POLY1305, 0);
}
+int ipsec_check_test_sa_update_seq_num(void)
+{
+ odp_ipsec_capability_t capa;
+
+ odp_ipsec_capability(&capa);
+
+ if (!capa.test.sa_operations.seq_num)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
void ipsec_sa_param_fill(odp_ipsec_sa_param_t *param,
odp_bool_t in,
odp_bool_t ah,
@@ -734,6 +746,48 @@ static int ipsec_send_out_one(const ipsec_test_part *part,
return num_out;
}
+int ipsec_test_sa_update_seq_num(odp_ipsec_sa_t sa, uint32_t seq_num)
+{
+ odp_ipsec_test_sa_operation_t sa_op;
+ odp_ipsec_test_sa_param_t sa_param;
+
+ sa_op = ODP_IPSEC_TEST_SA_UPDATE_SEQ_NUM;
+ sa_param.seq_num = seq_num;
+
+ return odp_ipsec_test_sa_update(sa, sa_op, &sa_param);
+}
+
+static void ipsec_pkt_seq_num_check(odp_packet_t pkt, uint32_t seq_num)
+{
+ uint32_t l3_off = odp_packet_l3_offset(pkt);
+ uint32_t l4_off;
+ odph_ipv4hdr_t ip;
+
+ CU_ASSERT_NOT_EQUAL_FATAL(ODP_PACKET_OFFSET_INVALID, l3_off);
+ CU_ASSERT_EQUAL_FATAL(0, odp_packet_copy_to_mem(pkt, l3_off, sizeof(ip),
+ &ip));
+
+ if (ODPH_IPV4HDR_VER(ip.ver_ihl) == ODPH_IPV4) {
+ l4_off = l3_off + (ODPH_IPV4HDR_IHL(ip.ver_ihl) * 4);
+
+ if (ip.proto == ODPH_IPPROTO_ESP) {
+ odph_esphdr_t esp;
+
+ odp_packet_copy_to_mem(pkt, l4_off, sizeof(esp), &esp);
+ CU_ASSERT_EQUAL(odp_be_to_cpu_32(esp.seq_no), seq_num);
+ } else if (ip.proto == ODPH_IPPROTO_AH) {
+ odph_ahhdr_t ah;
+
+ odp_packet_copy_to_mem(pkt, l4_off, sizeof(ah), &ah);
+ CU_ASSERT_EQUAL(odp_be_to_cpu_32(ah.seq_no), seq_num);
+ } else {
+ CU_FAIL("Unexpected IP Proto");
+ }
+ } else {
+ CU_FAIL("Unexpected IP Version");
+ }
+}
+
static void ipsec_pkt_proto_err_set(odp_packet_t pkt)
{
uint32_t l3_off = odp_packet_l3_offset(pkt);
@@ -898,6 +952,9 @@ void ipsec_check_out_in_one(const ipsec_test_part *part,
CU_ASSERT_FATAL(odp_packet_len(pkto[i]) <=
sizeof(pkt_in.data));
+ if (part->flags.test_sa_seq_num)
+ ipsec_pkt_seq_num_check(pkto[i], part->out[i].seq_num);
+
if (part->flags.stats == IPSEC_TEST_STATS_PROTO_ERR)
ipsec_pkt_proto_err_set(pkto[i]);
diff --git a/test/validation/api/ipsec/ipsec.h b/test/validation/api/ipsec/ipsec.h
index a9213b420..3bbcb7b64 100644
--- a/test/validation/api/ipsec/ipsec.h
+++ b/test/validation/api/ipsec/ipsec.h
@@ -59,6 +59,7 @@ typedef struct {
odp_bool_t lookup;
odp_bool_t ah;
odp_bool_t inline_hdr_in_packet;
+ odp_bool_t test_sa_seq_num;
enum ipsec_test_stats stats;
} ipsec_test_flags;
@@ -73,6 +74,7 @@ typedef struct {
const ipsec_test_packet *pkt_res;
odp_proto_l3_type_t l3_type;
odp_proto_l4_type_t l4_type;
+ uint32_t seq_num;
} out[1];
struct {
odp_ipsec_op_status_t status;
@@ -101,6 +103,7 @@ void ipsec_check_out_one(const ipsec_test_part *part, odp_ipsec_sa_t sa);
void ipsec_check_out_in_one(const ipsec_test_part *part,
odp_ipsec_sa_t sa,
odp_ipsec_sa_t sa_in);
+int ipsec_test_sa_update_seq_num(odp_ipsec_sa_t sa, uint32_t seq_num);
int ipsec_check(odp_bool_t ah,
odp_cipher_alg_t cipher,
@@ -128,5 +131,6 @@ int ipsec_check_esp_null_aes_gmac_128(void);
int ipsec_check_esp_null_aes_gmac_192(void);
int ipsec_check_esp_null_aes_gmac_256(void);
int ipsec_check_esp_chacha20_poly1305(void);
+int ipsec_check_test_sa_update_seq_num(void);
#endif
diff --git a/test/validation/api/ipsec/ipsec_test_out.c b/test/validation/api/ipsec/ipsec_test_out.c
index b4065d667..6f285d59a 100644
--- a/test/validation/api/ipsec/ipsec_test_out.c
+++ b/test/validation/api/ipsec/ipsec_test_out.c
@@ -460,6 +460,21 @@ static void test_out_in_common(ipsec_test_flags *flags,
test_ipsec_stats_zero_assert(&stats);
}
+ if (flags->test_sa_seq_num) {
+ int rc;
+
+ test.out[0].seq_num = 0x1235;
+ rc = ipsec_test_sa_update_seq_num(sa_out, test.out[0].seq_num);
+
+ /* Skip further checks related to this specific test if the
+ * SA update call was not successful.
+ */
+ if (rc < 0) {
+ printf("\t >> skipped");
+ test.flags.test_sa_seq_num = false;
+ }
+ }
+
ipsec_check_out_in_one(&test, sa_out, sa_in);
if (flags->stats == IPSEC_TEST_STATS_SUCCESS) {
@@ -1284,6 +1299,7 @@ static void test_sa_info(void)
param_in.inbound.antireplay_ws = 32;
sa_in = odp_ipsec_sa_create(&param_in);
+ CU_ASSERT_FATAL(sa_in != ODP_IPSEC_SA_INVALID);
memset(&info_out, 0, sizeof(info_out));
CU_ASSERT_EQUAL_FATAL(0, odp_ipsec_sa_info(sa_out, &info_out));
@@ -1362,6 +1378,46 @@ static void test_sa_info(void)
ipsec_sa_destroy(sa_out);
ipsec_sa_destroy(sa_in);
+
+ /*
+ * Additional check for SA lookup parameters. Let's use transport
+ * mode SA and ODP_IPSEC_DSTADD_SPI lookup mode.
+ */
+ ipsec_sa_param_fill(&param_in,
+ true, false, 123, NULL,
+ ODP_CIPHER_ALG_AES_CBC, &key_a5_128,
+ ODP_AUTH_ALG_SHA1_HMAC, &key_5a_160,
+ NULL, NULL);
+ param_in.inbound.lookup_mode = ODP_IPSEC_LOOKUP_DSTADDR_SPI;
+ param_in.inbound.lookup_param.ip_version = ODP_IPSEC_IPV4;
+ param_in.inbound.lookup_param.dst_addr = &dst;
+ sa_in = odp_ipsec_sa_create(&param_in);
+ CU_ASSERT_FATAL(sa_in != ODP_IPSEC_SA_INVALID);
+
+ memset(&info_in, 0, sizeof(info_in));
+ CU_ASSERT_FATAL(odp_ipsec_sa_info(sa_in, &info_in) == 0);
+
+ CU_ASSERT(info_in.param.inbound.lookup_mode ==
+ ODP_IPSEC_LOOKUP_DSTADDR_SPI);
+ CU_ASSERT_FATAL(info_in.param.inbound.lookup_param.dst_addr ==
+ &info_in.inbound.lookup_param.dst_addr);
+ CU_ASSERT(!memcmp(info_in.param.inbound.lookup_param.dst_addr,
+ &dst,
+ ODP_IPV4_ADDR_SIZE));
+ ipsec_sa_destroy(sa_in);
+}
+
+static void test_test_sa_update_seq_num(void)
+{
+ ipsec_test_flags flags;
+
+ memset(&flags, 0, sizeof(flags));
+ flags.display_algo = true;
+ flags.test_sa_seq_num = true;
+
+ test_esp_out_in_all(&flags);
+
+ printf("\n ");
}
static void ipsec_test_capability(void)
@@ -1371,6 +1427,53 @@ static void ipsec_test_capability(void)
CU_ASSERT(odp_ipsec_capability(&capa) == 0);
}
+static void ipsec_test_default_values(void)
+{
+ odp_ipsec_config_t config;
+ odp_ipsec_sa_param_t sa_param;
+
+ memset(&config, 0x55, sizeof(config));
+ memset(&sa_param, 0x55, sizeof(sa_param));
+
+ odp_ipsec_config_init(&config);
+ CU_ASSERT(config.inbound.lookup.min_spi == 0);
+ CU_ASSERT(config.inbound.lookup.max_spi == UINT32_MAX);
+ CU_ASSERT(config.inbound.lookup.spi_overlap == 0);
+ CU_ASSERT(config.inbound.retain_outer == ODP_PROTO_LAYER_NONE);
+ CU_ASSERT(config.inbound.parse_level == ODP_PROTO_LAYER_NONE);
+ CU_ASSERT(config.inbound.chksums.all_chksum == 0);
+ CU_ASSERT(config.outbound.all_chksum == 0);
+ CU_ASSERT(!config.stats_en);
+
+ odp_ipsec_sa_param_init(&sa_param);
+ CU_ASSERT(sa_param.proto == ODP_IPSEC_ESP);
+ CU_ASSERT(sa_param.crypto.cipher_alg == ODP_CIPHER_ALG_NULL);
+ CU_ASSERT(sa_param.crypto.auth_alg == ODP_AUTH_ALG_NULL);
+ CU_ASSERT(sa_param.opt.esn == 0);
+ CU_ASSERT(sa_param.opt.udp_encap == 0);
+ CU_ASSERT(sa_param.opt.copy_dscp == 0);
+ CU_ASSERT(sa_param.opt.copy_flabel == 0);
+ CU_ASSERT(sa_param.opt.copy_df == 0);
+ CU_ASSERT(sa_param.opt.dec_ttl == 0);
+ CU_ASSERT(sa_param.lifetime.soft_limit.bytes == 0);
+ CU_ASSERT(sa_param.lifetime.soft_limit.packets == 0);
+ CU_ASSERT(sa_param.lifetime.hard_limit.bytes == 0);
+ CU_ASSERT(sa_param.lifetime.hard_limit.packets == 0);
+ CU_ASSERT(sa_param.context == NULL);
+ CU_ASSERT(sa_param.context_len == 0);
+ CU_ASSERT(sa_param.inbound.lookup_mode == ODP_IPSEC_LOOKUP_DISABLED);
+ CU_ASSERT(sa_param.inbound.antireplay_ws == 0);
+ CU_ASSERT(sa_param.inbound.pipeline == ODP_IPSEC_PIPELINE_NONE);
+ CU_ASSERT(sa_param.outbound.tunnel.type == ODP_IPSEC_TUNNEL_IPV4);
+ CU_ASSERT(sa_param.outbound.tunnel.ipv4.dscp == 0);
+ CU_ASSERT(sa_param.outbound.tunnel.ipv4.df == 0);
+ CU_ASSERT(sa_param.outbound.tunnel.ipv4.ttl == 255);
+ CU_ASSERT(sa_param.outbound.tunnel.ipv6.flabel == 0);
+ CU_ASSERT(sa_param.outbound.tunnel.ipv6.dscp == 0);
+ CU_ASSERT(sa_param.outbound.tunnel.ipv6.hlimit == 255);
+ CU_ASSERT(sa_param.outbound.frag_mode == ODP_IPSEC_FRAG_DISABLED);
+}
+
static void test_ipsec_stats(void)
{
ipsec_test_flags flags;
@@ -1394,6 +1497,7 @@ static void test_ipsec_stats(void)
odp_testinfo_t ipsec_out_suite[] = {
ODP_TEST_INFO(ipsec_test_capability),
+ ODP_TEST_INFO(ipsec_test_default_values),
ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_ah_sha256,
ipsec_check_ah_sha256),
ODP_TEST_INFO_CONDITIONAL(test_out_ipv4_ah_sha256_tun_ipv4,
@@ -1444,6 +1548,8 @@ odp_testinfo_t ipsec_out_suite[] = {
ipsec_check_esp_null_sha256),
ODP_TEST_INFO_CONDITIONAL(test_sa_info,
ipsec_check_esp_aes_cbc_128_sha1),
+ ODP_TEST_INFO_CONDITIONAL(test_test_sa_update_seq_num,
+ ipsec_check_test_sa_update_seq_num),
ODP_TEST_INFO(test_esp_out_in_all_basic),
ODP_TEST_INFO_CONDITIONAL(test_esp_out_in_all_hdr_in_packet,
is_out_mode_inline),
diff --git a/test/validation/api/pktio/lso.c b/test/validation/api/pktio/lso.c
index 5d0596861..e3ef57bf5 100644
--- a/test/validation/api/pktio/lso.c
+++ b/test/validation/api/pktio/lso.c
@@ -772,6 +772,7 @@ static void lso_send_ipv4(int use_opt)
ODPH_DBG(" LSO segment[%i] payload: %u bytes\n", i, payload_len);
+ CU_ASSERT(odp_packet_has_ipv4(packet[i]));
CU_ASSERT(odp_packet_has_ipfrag(packet[i]));
CU_ASSERT(odp_packet_has_error(packet[i]) == 0);
CU_ASSERT(payload_len <= IPV4_MAX_PAYLOAD);
diff --git a/test/validation/api/pktio/pktio.c b/test/validation/api/pktio/pktio.c
index 47320e2e8..3f8df07f3 100644
--- a/test/validation/api/pktio/pktio.c
+++ b/test/validation/api/pktio/pktio.c
@@ -1135,7 +1135,7 @@ static void pktio_test_recv_multi_event(void)
static void pktio_test_recv_queue(void)
{
odp_pktio_t pktio_tx, pktio_rx;
- odp_pktio_t pktio[MAX_NUM_IFACES];
+ odp_pktio_t pktio[MAX_NUM_IFACES] = {0};
odp_pktio_capability_t capa;
odp_pktin_queue_param_t in_queue_param;
odp_pktout_queue_param_t out_queue_param;
@@ -1247,7 +1247,7 @@ static void pktio_test_recv_queue(void)
static void test_recv_tmo(recv_tmo_mode_e mode)
{
odp_pktio_t pktio_tx, pktio_rx;
- odp_pktio_t pktio[MAX_NUM_IFACES];
+ odp_pktio_t pktio[MAX_NUM_IFACES] = {0};
odp_pktio_capability_t capa;
odp_pktin_queue_param_t in_queue_param;
odp_pktout_queue_t pktout_queue;
@@ -2302,7 +2302,7 @@ static int pktio_check_pktin_ts(void)
static void pktio_test_pktin_ts(void)
{
odp_pktio_t pktio_tx, pktio_rx;
- odp_pktio_t pktio[MAX_NUM_IFACES];
+ odp_pktio_t pktio[MAX_NUM_IFACES] = {0};
pktio_info_t pktio_rx_info;
odp_pktio_capability_t capa;
odp_pktio_config_t config;
@@ -2417,7 +2417,7 @@ static int pktio_check_pktout_ts(void)
static void pktio_test_pktout_ts(void)
{
odp_packet_t pkt_tbl[TX_BATCH_LEN];
- odp_pktio_t pktio[MAX_NUM_IFACES];
+ odp_pktio_t pktio[MAX_NUM_IFACES] = {0};
odp_pktout_queue_t pktout_queue;
odp_pktio_t pktio_tx, pktio_rx;
uint32_t pkt_seq[TX_BATCH_LEN];
@@ -3381,7 +3381,7 @@ static void pktio_test_pktv_pktin_queue_config_sched(void)
static void pktio_test_recv_maxlen_set(void)
{
odp_pktio_t pktio_tx, pktio_rx;
- odp_pktio_t pktio[MAX_NUM_IFACES];
+ odp_pktio_t pktio[MAX_NUM_IFACES] = {0};
pktio_info_t pktio_rx_info;
odp_pktio_capability_t capa;
odp_pktio_config_t config;
diff --git a/test/validation/api/queue/queue.c b/test/validation/api/queue/queue.c
index 256194f81..b5d594a9a 100644
--- a/test/validation/api/queue/queue.c
+++ b/test/validation/api/queue/queue.c
@@ -759,6 +759,8 @@ static void queue_test_info(void)
CU_ASSERT(info.param.sched.lock_count == lock_count);
odp_queue_print(q_order);
+ odp_queue_print_all();
+
CU_ASSERT(odp_queue_destroy(q_plain) == 0);
CU_ASSERT(odp_queue_destroy(q_order) == 0);
}
diff --git a/test/validation/api/timer/timer.c b/test/validation/api/timer/timer.c
index 8943d4d97..f2cc93cb8 100644
--- a/test/validation/api/timer/timer.c
+++ b/test/validation/api/timer/timer.c
@@ -534,6 +534,8 @@ static void timer_test_event_type(odp_queue_type_t queue_type,
odp_time_t t1, t2;
uint64_t period_ns, period_tick, duration_ns;
int i, ret, num_tmo;
+ const char *user_ctx = "User context";
+ int test_print = 0;
int num = 5;
odp_timer_t timer[num];
@@ -562,6 +564,7 @@ static void timer_test_event_type(odp_queue_type_t queue_type,
} else if (event_type == ODP_EVENT_TIMEOUT) {
pool_param.type = ODP_POOL_TIMEOUT;
pool_param.tmo.num = num;
+ test_print = 1;
} else {
CU_FAIL("Bad event_type");
return;
@@ -589,7 +592,8 @@ static void timer_test_event_type(odp_queue_type_t queue_type,
ODPH_DBG(" max_tmo %" PRIu64 "\n", timer_param.max_tmo);
ODPH_DBG(" period_ns %" PRIu64 "\n", period_ns);
ODPH_DBG(" period_tick %" PRIu64 "\n", period_tick);
- ODPH_DBG(" duration_ns %" PRIu64 "\n\n", duration_ns);
+ ODPH_DBG(" duration_ns %" PRIu64 "\n", duration_ns);
+ ODPH_DBG(" user_ptr %p\n\n", user_ctx);
for (i = 0; i < num; i++) {
if (event_type == ODP_EVENT_BUFFER) {
@@ -605,7 +609,7 @@ static void timer_test_event_type(odp_queue_type_t queue_type,
CU_ASSERT(ev != ODP_EVENT_INVALID);
- timer[i] = odp_timer_alloc(timer_pool, queue, NULL);
+ timer[i] = odp_timer_alloc(timer_pool, queue, user_ctx);
CU_ASSERT_FATAL(timer[i] != ODP_TIMER_INVALID);
ret = odp_timer_set_rel(timer[i], (i + 1) * period_tick, &ev);
@@ -620,6 +624,12 @@ static void timer_test_event_type(odp_queue_type_t queue_type,
CU_ASSERT(ret == ODP_TIMER_SUCCESS);
}
+ if (test_print) {
+ printf("\n");
+ odp_timer_pool_print(timer_pool);
+ odp_timer_print(timer[0]);
+ }
+
ev = ODP_EVENT_INVALID;
num_tmo = 0;
t1 = odp_time_local();
@@ -642,6 +652,13 @@ static void timer_test_event_type(odp_queue_type_t queue_type,
CU_ASSERT(odp_event_type(ev) == event_type);
+ if (test_print) {
+ test_print = 0;
+ tmo = odp_timeout_from_event(ev);
+ odp_timeout_print(tmo);
+ printf("\n");
+ }
+
odp_event_free(ev);
num_tmo++;