Diffstat (limited to 'test')
-rw-r--r--  test/README | 6
-rw-r--r--  test/common/mask_common.c | 25
-rw-r--r--  test/common/mask_common.h | 6
-rw-r--r--  test/common/odp_cunit_common.c | 31
-rw-r--r--  test/common/odp_cunit_common.h | 63
-rw-r--r--  test/common/packet_common.c | 6
-rw-r--r--  test/common/packet_common.h | 6
-rw-r--r--  test/common/test_common_macros.h | 6
-rw-r--r--  test/common/test_packet_custom.h | 6
-rw-r--r--  test/common/test_packet_ipsec.h | 8
-rw-r--r--  test/common/test_packet_ipv4.h | 8
-rw-r--r--  test/common/test_packet_ipv4_with_crc.h | 6
-rw-r--r--  test/common/test_packet_ipv6.h | 8
-rw-r--r--  test/m4/configure.m4 | 1
-rw-r--r--  test/miscellaneous/.gitignore | 1
-rw-r--r--  test/miscellaneous/Makefile.am | 37
-rw-r--r--  test/miscellaneous/odp_api_headers.c | 7
-rw-r--r--  test/miscellaneous/odp_dyn_workers.c | 1357
-rwxr-xr-x  test/miscellaneous/odp_dyn_workers_run.sh | 23
-rw-r--r--  test/performance/.gitignore | 2
-rw-r--r--  test/performance/Makefile.am | 11
-rw-r--r--  test/performance/dummy_crc.h | 40
-rw-r--r--  test/performance/odp_atomic_perf.c | 7
-rw-r--r--  test/performance/odp_bench_buffer.c | 8
-rw-r--r--  test/performance/odp_bench_misc.c | 6
-rw-r--r--  test/performance/odp_bench_packet.c | 8
-rw-r--r--  test/performance/odp_bench_pktio_sp.c | 4
-rw-r--r--  test/performance/odp_bench_timer.c | 6
-rw-r--r--  test/performance/odp_cpu_bench.c | 10
-rwxr-xr-x  test/performance/odp_cpu_bench_run.sh | 5
-rw-r--r--  test/performance/odp_crc.c | 7
-rw-r--r--  test/performance/odp_crypto.c | 8
-rwxr-xr-x  test/performance/odp_crypto_run.sh | 5
-rwxr-xr-x  test/performance/odp_dma_perf_run.sh | 5
-rwxr-xr-x  test/performance/odp_dmafwd_run.sh | 1
-rw-r--r--  test/performance/odp_ipsec.c | 10
-rwxr-xr-x  test/performance/odp_ipsec_run.sh | 5
-rw-r--r--  test/performance/odp_l2fwd.c | 763
-rwxr-xr-x  test/performance/odp_l2fwd_run.sh | 35
-rw-r--r--  test/performance/odp_lock_perf.c | 7
-rw-r--r--  test/performance/odp_mem_perf.c | 7
-rw-r--r--  test/performance/odp_packet_gen.c | 325
-rwxr-xr-x  test/performance/odp_packet_gen_run.sh | 6
-rw-r--r--  test/performance/odp_pktio_ordered.c | 22
-rwxr-xr-x  test/performance/odp_pktio_ordered_run.sh | 17
-rw-r--r--  test/performance/odp_pktio_perf.c | 6
-rw-r--r--  test/performance/odp_pool_latency.c | 81
-rw-r--r--  test/performance/odp_pool_perf.c | 9
-rw-r--r--  test/performance/odp_queue_perf.c | 8
-rw-r--r--  test/performance/odp_random.c | 18
-rw-r--r--  test/performance/odp_sched_latency.c | 8
-rwxr-xr-x  test/performance/odp_sched_latency_run.sh | 6
-rw-r--r--  test/performance/odp_sched_perf.c | 124
-rwxr-xr-x  test/performance/odp_sched_perf_run.sh | 62
-rw-r--r--  test/performance/odp_sched_pktio.c | 6
-rwxr-xr-x  test/performance/odp_sched_pktio_run.sh | 24
-rw-r--r--  test/performance/odp_scheduling.c | 1042
-rwxr-xr-x  test/performance/odp_scheduling_run.sh | 37
-rw-r--r--  test/performance/odp_stress.c | 131
-rw-r--r--  test/performance/odp_timer_accuracy.c | 1438
-rwxr-xr-x  test/performance/odp_timer_accuracy_run.sh | 17
-rw-r--r--  test/performance/odp_timer_perf.c | 6
-rwxr-xr-x  test/performance/odp_timer_perf_run.sh | 6
-rw-r--r--  test/validation/api/Makefile.am | 2
-rw-r--r--  test/validation/api/README | 7
-rw-r--r--  test/validation/api/atomic/atomic.c | 8
-rw-r--r--  test/validation/api/barrier/barrier.c | 10
-rw-r--r--  test/validation/api/buffer/buffer.c | 44
-rw-r--r--  test/validation/api/chksum/chksum.c | 6
-rw-r--r--  test/validation/api/classification/classification.c | 6
-rw-r--r--  test/validation/api/classification/classification.h | 6
-rw-r--r--  test/validation/api/classification/odp_classification_basic.c | 18
-rw-r--r--  test/validation/api/classification/odp_classification_common.c | 8
-rw-r--r--  test/validation/api/classification/odp_classification_test_pmr.c | 8
-rw-r--r--  test/validation/api/classification/odp_classification_tests.c | 10
-rw-r--r--  test/validation/api/classification/odp_classification_testsuites.h | 6
-rw-r--r--  test/validation/api/comp/comp.c | 6
-rw-r--r--  test/validation/api/comp/test_vectors.h | 6
-rw-r--r--  test/validation/api/cpu/.gitignore | 1
-rw-r--r--  test/validation/api/cpu/Makefile.am | 4
-rw-r--r--  test/validation/api/cpu/cpu.c | 461
-rw-r--r--  test/validation/api/cpumask/cpumask.c | 8
-rw-r--r--  test/validation/api/crypto/crypto_op_test.c | 12
-rw-r--r--  test/validation/api/crypto/crypto_op_test.h | 7
-rw-r--r--  test/validation/api/crypto/odp_crypto_test_inp.c | 52
-rw-r--r--  test/validation/api/crypto/test_vector_defs.h | 8
-rw-r--r--  test/validation/api/crypto/test_vectors.h | 8
-rw-r--r--  test/validation/api/crypto/test_vectors_len.h | 7
-rw-r--r--  test/validation/api/crypto/util.c | 140
-rw-r--r--  test/validation/api/crypto/util.h | 8
-rw-r--r--  test/validation/api/dma/dma.c | 45
-rw-r--r--  test/validation/api/errno/errno.c | 10
-rw-r--r--  test/validation/api/event/event.c | 8
-rw-r--r--  test/validation/api/hash/hash.c | 8
-rw-r--r--  test/validation/api/ipsec/ipsec.c | 197
-rw-r--r--  test/validation/api/ipsec/ipsec.h | 10
-rw-r--r--  test/validation/api/ipsec/ipsec_test_in.c | 130
-rw-r--r--  test/validation/api/ipsec/ipsec_test_out.c | 211
-rw-r--r--  test/validation/api/ipsec/reass_test_vectors.c | 6
-rw-r--r--  test/validation/api/ipsec/reass_test_vectors.h | 6
-rw-r--r--  test/validation/api/ipsec/test_vectors.h | 8
-rw-r--r--  test/validation/api/lock/lock.c | 8
-rw-r--r--  test/validation/api/ml/ml.c | 1
-rw-r--r--  test/validation/api/packet/packet.c | 213
-rw-r--r--  test/validation/api/pktio/lso.c | 12
-rw-r--r--  test/validation/api/pktio/lso.h | 6
-rw-r--r--  test/validation/api/pktio/parser.c | 6
-rw-r--r--  test/validation/api/pktio/parser.h | 6
-rw-r--r--  test/validation/api/pktio/pktio.c | 91
-rw-r--r--  test/validation/api/pool/pool.c | 68
-rw-r--r--  test/validation/api/queue/queue.c | 41
-rw-r--r--  test/validation/api/random/random.c | 8
-rw-r--r--  test/validation/api/scheduler/scheduler.c | 45
-rw-r--r--  test/validation/api/scheduler/scheduler_no_predef_groups.c | 8
-rw-r--r--  test/validation/api/shmem/shmem.c | 59
-rw-r--r--  test/validation/api/stash/stash.c | 31
-rw-r--r--  test/validation/api/std/std.c | 6
-rw-r--r--  test/validation/api/system/system.c | 366
-rw-r--r--  test/validation/api/thread/thread.c | 14
-rw-r--r--  test/validation/api/time/time.c | 199
-rw-r--r--  test/validation/api/timer/timer.c | 36
-rw-r--r--  test/validation/api/traffic_mngr/traffic_mngr.c | 153
122 files changed, 5984 insertions, 2869 deletions
diff --git a/test/README b/test/README
index 4ef634d53..eebde9a47 100644
--- a/test/README
+++ b/test/README
@@ -1,7 +1,5 @@
-Copyright (c) 2014-2018, Linaro Limited
-All rights reserved.
-
-SPDX-License-Identifier: BSD-3-Clause
+SPDX-License-Identifier: BSD-3-Clause
+Copyright (c) 2014-2018 Linaro Limited
Files in test/validation directory are intended to be terse
checks that help ensure that the ODP implementations all perform identically
diff --git a/test/common/mask_common.c b/test/common/mask_common.c
index 65c9c6629..130429c61 100644
--- a/test/common/mask_common.c
+++ b/test/common/mask_common.c
@@ -1,7 +1,5 @@
-/* Copyright (c) 2015-2018, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Linaro Limited
*/
#include <odp_api.h>
@@ -123,8 +121,8 @@ MASK_TESTFUNC(to_from_str)
/* check that returned size matches original (with NULL): */
CU_ASSERT(str_sz == (int32_t)stringlen(buf_in) + 1);
- /* check that returned string matches original (with NULL): */
- CU_ASSERT_NSTRING_EQUAL(buf_out, buf_in, stringlen(buf_in) + 1);
+ /* check that returned string matches original: */
+ CU_ASSERT(!strcmp(buf_out, buf_in));
/* check that no extra buffer writes occurred: */
CU_ASSERT(buf_out[stringlen(buf_in) + 2] == FILLING_PATTERN);
@@ -150,8 +148,7 @@ MASK_TESTFUNC(to_from_str)
stringlen(TEST_MASK_0) + 1);
CU_ASSERT(str_sz == (int32_t)stringlen(TEST_MASK_0) + 1);
- CU_ASSERT_NSTRING_EQUAL(buf_out, TEST_MASK_0,
- stringlen(TEST_MASK_0) + 1);
+ CU_ASSERT(!strcmp(buf_out, TEST_MASK_0));
free(buf_out);
free(buf_in);
@@ -167,7 +164,7 @@ MASK_TESTFUNC(equal)
_odp_mask_from_str(&mask2, TEST_MASK_0);
_odp_mask_from_str(&mask3, TEST_MASK_NONE);
CU_ASSERT(_odp_mask_equal(&mask1, &mask2));
- CU_ASSERT_FALSE(_odp_mask_equal(&mask1, &mask3));
+ CU_ASSERT(!_odp_mask_equal(&mask1, &mask3));
if (mask_capacity() < 4)
return;
@@ -176,7 +173,7 @@ MASK_TESTFUNC(equal)
_odp_mask_from_str(&mask2, TEST_MASK_0_2);
_odp_mask_from_str(&mask3, TEST_MASK_1_2);
CU_ASSERT(_odp_mask_equal(&mask1, &mask2));
- CU_ASSERT_FALSE(_odp_mask_equal(&mask1, &mask3));
+ CU_ASSERT(!_odp_mask_equal(&mask1, &mask3));
if (mask_capacity() < 8)
return;
@@ -185,7 +182,7 @@ MASK_TESTFUNC(equal)
_odp_mask_from_str(&mask2, TEST_MASK_0_2_4_6);
_odp_mask_from_str(&mask3, TEST_MASK_1_2_4_6);
CU_ASSERT(_odp_mask_equal(&mask1, &mask2));
- CU_ASSERT_FALSE(_odp_mask_equal(&mask1, &mask3));
+ CU_ASSERT(!_odp_mask_equal(&mask1, &mask3));
}
MASK_TESTFUNC(zero)
@@ -256,16 +253,16 @@ MASK_TESTFUNC(isset)
CU_ASSERT(_odp_mask_isset(&mask1, 0));
_odp_mask_from_str(&mask1, TEST_MASK_NONE);
- CU_ASSERT_FALSE(_odp_mask_isset(&mask1, 0));
+ CU_ASSERT(!_odp_mask_isset(&mask1, 0));
if (mask_capacity() < 4)
return;
_odp_mask_from_str(&mask1, TEST_MASK_0_2);
CU_ASSERT(_odp_mask_isset(&mask1, 0));
- CU_ASSERT_FALSE(_odp_mask_isset(&mask1, 1));
+ CU_ASSERT(!_odp_mask_isset(&mask1, 1));
CU_ASSERT(_odp_mask_isset(&mask1, 2));
- CU_ASSERT_FALSE(_odp_mask_isset(&mask1, 3));
+ CU_ASSERT(!_odp_mask_isset(&mask1, 3));
}
MASK_TESTFUNC(count)
diff --git a/test/common/mask_common.h b/test/common/mask_common.h
index 60c2390b8..a40dd7c04 100644
--- a/test/common/mask_common.h
+++ b/test/common/mask_common.h
@@ -1,7 +1,5 @@
-/* Copyright (c) 2015-2018, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Linaro Limited
*/
#ifndef ODP_MASK_COMMON_H_
diff --git a/test/common/odp_cunit_common.c b/test/common/odp_cunit_common.c
index 651ae791e..e4e678a54 100644
--- a/test/common/odp_cunit_common.c
+++ b/test/common/odp_cunit_common.c
@@ -1,9 +1,7 @@
-/* Copyright (c) 2014-2018, Linaro Limited
- * Copyright (c) 2019-2022, Nokia
- * Copyright (c) 2021, Marvell
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2014-2018 Linaro Limited
+ * Copyright (c) 2019-2024 Nokia
+ * Copyright (c) 2021 Marvell
*/
#ifndef _GNU_SOURCE
@@ -107,10 +105,8 @@ void odp_cu_assert(CU_BOOL value, unsigned int line,
if (idx < MAX_FAILURES) {
assertion_failure_t *a = &thr_global->failure[idx];
- strncpy(a->cond, condition, sizeof(a->cond));
- strncpy(a->file, file, sizeof(a->file));
- a->cond[sizeof(a->cond) - 1] = 0;
- a->file[sizeof(a->file) - 1] = 0;
+ odph_strcpy(a->cond, condition, sizeof(a->cond));
+ odph_strcpy(a->file, file, sizeof(a->file));
a->line = line;
a->fatal = fatal;
}
@@ -244,14 +240,25 @@ int odp_cunit_thread_create(int num, int func_ptr(void *), void *const arg[], in
int odp_cunit_thread_join(int num)
{
+ odph_thread_join_result_t res[num];
+
/* Wait for threads to exit */
- if (odph_thread_join(thread_tbl, num) != num) {
- fprintf(stderr, "error: odph_thread_join() failed.\n");
+ if (odph_thread_join_result(thread_tbl, res, num) != num) {
+ fprintf(stderr, "error: odph_thread_join_result() failed.\n");
return -1;
}
+
threads_running = 0;
thread_func = 0;
+ for (int i = 0; i < num; i++) {
+ if (res[i].is_sig || res[i].ret != 0) {
+ fprintf(stderr, "error: worker thread failure%s: %d.\n", res[i].is_sig ?
+ " (signaled)" : "", res[i].ret);
+ return -1;
+ }
+ }
+
handle_postponed_asserts();
return 0;
diff --git a/test/common/odp_cunit_common.h b/test/common/odp_cunit_common.h
index 63e95d5fb..8a5053589 100644
--- a/test/common/odp_cunit_common.h
+++ b/test/common/odp_cunit_common.h
@@ -1,8 +1,6 @@
-/* Copyright (c) 2014-2018, Linaro Limited
- * Copyright (c) 2020-2022, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2014-2018 Linaro Limited
+ * Copyright (c) 2020-2022 Nokia
*/
/**
@@ -154,48 +152,33 @@ static inline void odp_cu_assert_fatal(CU_BOOL value, unsigned int line,
#define CU_FAIL_FATAL(msg) \
{ odp_cu_assert_fatal(CU_FALSE, __LINE__, ("CU_FAIL_FATAL(" #msg ")"), __FILE__); }
+#undef CU_ASSERT_TRUE
+#undef CU_ASSERT_TRUE_FATAL
+#undef CU_ASSERT_FALSE
+#undef CU_ASSERT_FALSE_FATAL
#undef CU_ASSERT_EQUAL
-#define CU_ASSERT_EQUAL(actual, expected) \
- { odp_cu_assert(((actual) == (expected)), __LINE__, \
- ("CU_ASSERT_EQUAL(" #actual "," #expected ")"), \
- __FILE__, CU_FALSE); }
-
#undef CU_ASSERT_EQUAL_FATAL
-#define CU_ASSERT_EQUAL_FATAL(actual, expected) \
- { odp_cu_assert_fatal(((actual) == (expected)), __LINE__, \
- ("CU_ASSERT_EQUAL_FATAL(" #actual "," #expected ")"), \
- __FILE__); }
-
#undef CU_ASSERT_NOT_EQUAL
-#define CU_ASSERT_NOT_EQUAL(actual, expected) \
- { odp_cu_assert(((actual) != (expected)), __LINE__, \
- ("CU_ASSERT_NOT_EQUAL(" #actual "," #expected ")"), \
- __FILE__, CU_FALSE); }
-
#undef CU_ASSERT_NOT_EQUAL_FATAL
-#define CU_ASSERT_NOT_EQUAL_FATAL(actual, expected) \
- { odp_cu_assert_fatal(((actual) != (expected)), __LINE__, \
- ("CU_ASSERT_NOT_EQUAL_FATAL(" #actual "," #expected ")"), \
- __FILE__); }
-
+#undef CU_ASSERT_PTR_EQUAL
+#undef CU_ASSERT_PTR_EQUAL_FATAL
+#undef CU_ASSERT_PTR_NOT_EQUAL
+#undef CU_ASSERT_PTR_NOT_EQUAL_FATAL
#undef CU_ASSERT_PTR_NULL
-#define CU_ASSERT_PTR_NULL(value) \
- { odp_cu_assert((NULL == (const void *)(value)), __LINE__, \
- ("CU_ASSERT_PTR_NULL(" #value ")"), __FILE__, CU_FALSE); }
-
#undef CU_ASSERT_PTR_NULL_FATAL
-#define CU_ASSERT_PTR_NULL_FATAL(value) \
- { odp_cu_assert_fatal((NULL == (const void *)(value)), __LINE__, \
- ("CU_ASSERT_PTR_NULL_FATAL(" #value ")"), __FILE__); }
-
#undef CU_ASSERT_PTR_NOT_NULL
-#define CU_ASSERT_PTR_NOT_NULL(value) \
- { odp_cu_assert((NULL != (const void *)(value)), __LINE__, \
- ("CU_ASSERT_PTR_NOT_NULL_FATAL(" #value ")"), __FILE__, CU_FALSE); }
-
#undef CU_ASSERT_PTR_NOT_NULL_FATAL
-#define CU_ASSERT_PTR_NOT_NULL_FATAL(value) \
- { odp_cu_assert_fatal((NULL != (const void *)(value)), __LINE__, \
- ("CU_ASSERT_PTR_NOT_NULL_FATAL(" #value ")"), __FILE__); }
+#undef CU_ASSERT_STRING_EQUAL
+#undef CU_ASSERT_STRING_EQUAL_FATAL
+#undef CU_ASSERT_STRING_NOT_EQUAL
+#undef CU_ASSERT_STRING_NOT_EQUAL_FATAL
+#undef CU_ASSERT_NSTRING_EQUAL
+#undef CU_ASSERT_NSTRING_EQUAL_FATAL
+#undef CU_ASSERT_NSTRING_NOT_EQUAL
+#undef CU_ASSERT_NSTRING_NOT_EQUAL_FATAL
+#undef CU_ASSERT_DOUBLE_EQUAL
+#undef CU_ASSERT_DOUBLE_EQUAL_FATAL
+#undef CU_ASSERT_DOUBLE_NOT_EQUAL
+#undef CU_ASSERT_DOUBLE_NOT_EQUAL_FATAL
#endif /* ODP_CUNICT_COMMON_H */
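Note: the header change above now simply undefines CUnit's specialized assertion macros (previously a subset of them was redefined locally), so validation code is expected to use the generic CU_ASSERT()/CU_ASSERT_FATAL() wrappers instead; the mask_common.c hunks earlier in this diff show exactly that conversion. A minimal sketch of the resulting style (illustration only, not taken from the patch; the variables are hypothetical):

  #include <string.h>
  #include <odp_cunit_common.h>

  static void example_test(void)
  {
          const char *str = "0x1"; /* hypothetical value under test */
          int ret = 0;             /* hypothetical return code */

          /* was: CU_ASSERT_EQUAL(ret, 0); */
          CU_ASSERT(ret == 0);

          /* was: CU_ASSERT_PTR_NOT_NULL(str); */
          CU_ASSERT(str != NULL);

          /* was: CU_ASSERT_NSTRING_EQUAL(str, "0x1", strlen("0x1") + 1); */
          CU_ASSERT(!strcmp(str, "0x1"));
  }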
diff --git a/test/common/packet_common.c b/test/common/packet_common.c
index e0bca3147..a2b960af6 100644
--- a/test/common/packet_common.c
+++ b/test/common/packet_common.c
@@ -1,7 +1,5 @@
-/* Copyright (c) 2023, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Nokia
*/
#include <packet_common.h>
diff --git a/test/common/packet_common.h b/test/common/packet_common.h
index c7cd5e27f..2ffc5eab0 100644
--- a/test/common/packet_common.h
+++ b/test/common/packet_common.h
@@ -1,7 +1,5 @@
-/* Copyright (c) 2023, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Nokia
*/
#include <odp_api.h>
diff --git a/test/common/test_common_macros.h b/test/common/test_common_macros.h
index 344ac8159..405f626e9 100644
--- a/test/common/test_common_macros.h
+++ b/test/common/test_common_macros.h
@@ -1,7 +1,5 @@
-/* Copyright (c) 2021, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2021 Nokia
*/
#ifndef TEST_COMMON_MACROS_H_
diff --git a/test/common/test_packet_custom.h b/test/common/test_packet_custom.h
index 7ff652bd8..5045a14aa 100644
--- a/test/common/test_packet_custom.h
+++ b/test/common/test_packet_custom.h
@@ -1,7 +1,5 @@
-/* Copyright (c) 2020-2021, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2020-2021 Nokia
*/
#ifndef TEST_PACKET_CUSTOM_H_
diff --git a/test/common/test_packet_ipsec.h b/test/common/test_packet_ipsec.h
index 918870c99..9fb6ae2c2 100644
--- a/test/common/test_packet_ipsec.h
+++ b/test/common/test_packet_ipsec.h
@@ -1,8 +1,6 @@
-/* Copyright (c) 2017-2018, Linaro Limited
- * Copyright (c) 2021, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2017-2018 Linaro Limited
+ * Copyright (c) 2021 Nokia
*/
#ifndef TEST_PACKET_IPSEC_H_
diff --git a/test/common/test_packet_ipv4.h b/test/common/test_packet_ipv4.h
index 8dd98d60d..c9c0ef09f 100644
--- a/test/common/test_packet_ipv4.h
+++ b/test/common/test_packet_ipv4.h
@@ -1,8 +1,6 @@
-/* Copyright (c) 2017-2018, Linaro Limited
- * Copyright (c) 2021-2022, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2017-2018 Linaro Limited
+ * Copyright (c) 2021-2022 Nokia
*/
#ifndef TEST_PACKET_IPV4_H_
diff --git a/test/common/test_packet_ipv4_with_crc.h b/test/common/test_packet_ipv4_with_crc.h
index f10c405e1..f0763823b 100644
--- a/test/common/test_packet_ipv4_with_crc.h
+++ b/test/common/test_packet_ipv4_with_crc.h
@@ -1,7 +1,5 @@
-/* Copyright (c) 2021, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2021 Nokia
*/
#ifndef TEST_PACKET_IPV4_WITH_CRC_H_
diff --git a/test/common/test_packet_ipv6.h b/test/common/test_packet_ipv6.h
index 8703aab34..427f7fbdd 100644
--- a/test/common/test_packet_ipv6.h
+++ b/test/common/test_packet_ipv6.h
@@ -1,8 +1,6 @@
-/* Copyright (c) 2017-2018, Linaro Limited
- * Copyright (c) 2021, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2017-2018 Linaro Limited
+ * Copyright (c) 2021 Nokia
*/
#ifndef TEST_PACKET_IPV6_H_
diff --git a/test/m4/configure.m4 b/test/m4/configure.m4
index ea05e954f..97d824c86 100644
--- a/test/m4/configure.m4
+++ b/test/m4/configure.m4
@@ -25,6 +25,7 @@ AC_CONFIG_FILES([test/common/Makefile
test/validation/api/chksum/Makefile
test/validation/api/classification/Makefile
test/validation/api/comp/Makefile
+ test/validation/api/cpu/Makefile
test/validation/api/cpumask/Makefile
test/validation/api/crypto/Makefile
test/validation/api/dma/Makefile
diff --git a/test/miscellaneous/.gitignore b/test/miscellaneous/.gitignore
index 6069e336d..74fb9b3ca 100644
--- a/test/miscellaneous/.gitignore
+++ b/test/miscellaneous/.gitignore
@@ -1,3 +1,4 @@
+odp_dyn_workers
odp_api_from_cpp
odp_api_headers
*.trs
diff --git a/test/miscellaneous/Makefile.am b/test/miscellaneous/Makefile.am
index 95514edcb..449ee7b24 100644
--- a/test/miscellaneous/Makefile.am
+++ b/test/miscellaneous/Makefile.am
@@ -1,12 +1,23 @@
include $(top_srcdir)/test/Makefile.inc
+bin_PROGRAMS = odp_dyn_workers
+
if test_cpp
-bin_PROGRAMS = odp_api_from_cpp
-TESTS = odp_api_from_cpp
+bin_PROGRAMS += odp_api_from_cpp
endif
+odp_dyn_workers_CFLAGS = $(AM_CFLAGS) -Wno-format-nonliteral
+odp_dyn_workers_SOURCES = odp_dyn_workers.c
odp_api_from_cpp_SOURCES = odp_api_from_cpp.cpp
+TESTSCRIPTS = odp_dyn_workers_run.sh
+
+TESTS = $(TESTSCRIPTS)
+
+if test_cpp
+TESTS += odp_api_from_cpp
+endif
+
noinst_PROGRAMS = odp_api_headers
odp_api_headers_CFLAGS = $(AM_CFLAGS) -Wconversion
odp_api_headers_SOURCES = odp_api_headers.c
@@ -43,3 +54,25 @@ endif
endif
DISTCLEANFILES = $(PROGRAM_shared) $(PROGRAM_static)
+
+dist_check_SCRIPTS = $(TESTSCRIPTS)
+
+# If building out-of-tree, make check will not copy the scripts and data to the
+# $(builddir) assuming that all commands are run locally. However this prevents
+# running tests on a remote target using LOG_COMPILER.
+# So copy all script and data files explicitly here.
+all-local:
+ if [ "x$(srcdir)" != "x$(builddir)" ]; then \
+ for f in $(dist_check_SCRIPTS) $(dist_check_DATA); do \
+ if [ -e $(srcdir)/$$f ]; then \
+ mkdir -p $(builddir)/$$(dirname $$f); \
+ cp -f $(srcdir)/$$f $(builddir)/$$f; \
+ fi \
+ done \
+ fi
+clean-local:
+ if [ "x$(srcdir)" != "x$(builddir)" ]; then \
+ for f in $(dist_check_SCRIPTS) $(dist_check_DATA); do \
+ rm -f $(builddir)/$$f; \
+ done \
+ fi
diff --git a/test/miscellaneous/odp_api_headers.c b/test/miscellaneous/odp_api_headers.c
index 0dd6b0a2e..3884ab307 100644
--- a/test/miscellaneous/odp_api_headers.c
+++ b/test/miscellaneous/odp_api_headers.c
@@ -1,8 +1,5 @@
-/* Copyright (c) 2022, Nokia
- *
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 Nokia
*/
#include <odp_api.h>
diff --git a/test/miscellaneous/odp_dyn_workers.c b/test/miscellaneous/odp_dyn_workers.c
new file mode 100644
index 000000000..14cbdaa0d
--- /dev/null
+++ b/test/miscellaneous/odp_dyn_workers.c
@@ -0,0 +1,1357 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2024 Nokia
+ */
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+
+#include <errno.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <signal.h>
+#include <string.h>
+#include <stdarg.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <syslog.h>
+#include <sys/prctl.h>
+#include <sys/socket.h>
+#include <sys/wait.h>
+#include <time.h>
+#include <unistd.h>
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+#define S_(x) #x
+#define S(x) S_(x)
+#define MAX_PROGS 8
+#define CMD_DELIMITER ","
+#define PROG_NAME "odp_dyn_workers"
+
+#define FOREACH_CMD(CMD) \
+ CMD(ADD_WORKER) \
+ CMD(REM_WORKER)
+
+#define GENERATE_ENUM(ENUM) ENUM,
+#define GENERATE_STRING(STRING) #STRING,
+#define ADDITION 'a'
+#define REMOVAL 'r'
+#define DELAY 'd'
+#define IDX_DELIMITER ":"
+#define MAX_NIBBLE 15
+#define MAX_WORKERS MAX_NIBBLE
+#define MAX_PATTERN_LEN 32U
+#define ENV_PREFIX "ODP"
+#define ENV_DELIMITER "="
+#define UNKNOWN_CMD MAX_NIBBLE
+#define EXIT_PROG (UNKNOWN_CMD - 1U)
+#define DELAY_PROG (EXIT_PROG - 1U)
+
+ODP_STATIC_ASSERT(MAX_WORKERS <= MAX_NIBBLE, "Too many workers");
+
+enum {
+ FOREACH_CMD(GENERATE_ENUM)
+};
+
+typedef enum {
+ PRS_OK,
+ PRS_NOK,
+ PRS_TERM
+} parse_result_t;
+
+enum {
+ PARENT,
+ CHILD
+};
+
+typedef enum {
+ DOWN,
+ UP
+} state_t;
+
+enum {
+ CONN_ERR = -1,
+ PEER_ERR,
+ CMD_NOK,
+ CMD_SUMMARY,
+ CMD_OK
+};
+
+static const char *const cmdstrs[] = {
+ FOREACH_CMD(GENERATE_STRING)
+};
+
+ODP_STATIC_ASSERT(ODPH_ARRAY_SIZE(cmdstrs) < DELAY_PROG, "Too many commands");
+
+typedef struct {
+ uint64_t thread_id;
+ uint64_t num_handled;
+ uint64_t enq_errs;
+ uint64_t runtime;
+} summary_t;
+
+typedef struct prog_t {
+ summary_t summary;
+ char *env;
+ char *cpumask;
+ pid_t pid;
+ int socket;
+ state_t state;
+} prog_t;
+
+typedef struct {
+ uint64_t val1;
+ uint8_t val2;
+ uint8_t op;
+} pattern_t;
+
+typedef struct {
+ pattern_t pattern[MAX_PATTERN_LEN];
+ prog_t progs[MAX_PROGS];
+ uint32_t num_p_elems;
+ uint32_t num_progs;
+ uint32_t max_cmd_len;
+ odp_bool_t is_running;
+} global_config_t;
+
+typedef struct worker_config_s worker_config_t;
+
+typedef struct worker_config_s {
+ odph_thread_t thread;
+ odp_barrier_t barrier;
+ summary_t summary;
+ odp_ticketlock_t lock;
+ odp_schedule_group_t grp;
+ odp_queue_t queue;
+ worker_config_t *configs;
+ odp_atomic_u32_t is_running;
+ uint8_t idx;
+} worker_config_t;
+
+typedef struct {
+ worker_config_t worker_config[MAX_WORKERS];
+ odp_instance_t instance;
+ odp_cpumask_t cpumask;
+ odp_pool_t pool;
+ summary_t *pending_summary;
+ uint32_t num_workers;
+ int socket;
+} prog_config_t;
+
+typedef struct {
+ struct {
+ uint16_t is_active;
+ uint16_t cpu;
+ uint32_t thread_id;
+ } workers[MAX_WORKERS];
+} result_t;
+
+typedef odp_bool_t (*input_fn_t)(global_config_t *config, uint8_t *cmd, uint32_t *prog_idx,
+ uint32_t *worker_idx);
+typedef odp_bool_t (*cmd_fn_t)(prog_config_t *config, uint8_t aux);
+
+static global_config_t conf;
+static prog_config_t *prog_conf;
+
+static void terminate(int signal ODP_UNUSED)
+{
+ conf.is_running = false;
+}
+
+static odp_bool_t setup_signals(void)
+{
+ struct sigaction action = { .sa_handler = terminate };
+
+ if (sigemptyset(&action.sa_mask) == -1 || sigaddset(&action.sa_mask, SIGINT) == -1 ||
+ sigaddset(&action.sa_mask, SIGTERM) == -1 ||
+ sigaddset(&action.sa_mask, SIGHUP) == -1 || sigaction(SIGINT, &action, NULL) == -1 ||
+ sigaction(SIGTERM, &action, NULL) == -1 || sigaction(SIGHUP, &action, NULL) == -1)
+ return false;
+
+ return true;
+}
+
+static void init_options(global_config_t *config)
+{
+ uint32_t max_len = 0U, str_len;
+
+ memset(config, 0, sizeof(*config));
+
+ for (uint32_t i = 0U; i < ODPH_ARRAY_SIZE(cmdstrs); ++i) {
+ str_len = strlen(cmdstrs[i]);
+
+ if (str_len > max_len)
+ max_len = str_len;
+ }
+
+ config->max_cmd_len = max_len;
+}
+
+static void parse_masks(global_config_t *config, const char *optarg)
+{
+ char *tmp_str = strdup(optarg), *tmp;
+ prog_t *prog;
+
+ if (tmp_str == NULL)
+ return;
+
+ tmp = strtok(tmp_str, CMD_DELIMITER);
+
+ while (tmp && config->num_progs < MAX_PROGS) {
+ prog = &config->progs[config->num_progs];
+ prog->cpumask = strdup(tmp);
+
+ if (prog->cpumask != NULL)
+ ++config->num_progs;
+
+ tmp = strtok(NULL, CMD_DELIMITER);
+ }
+
+ free(tmp_str);
+}
+
+static void parse_pattern(global_config_t *config, const char *optarg)
+{
+ char *tmp_str = strdup(optarg), *tmp, op;
+ uint8_t num_elems = 0U;
+ pattern_t *pattern;
+ uint64_t val1;
+ uint32_t val2;
+ int ret;
+
+ if (tmp_str == NULL)
+ return;
+
+ tmp = strtok(tmp_str, CMD_DELIMITER);
+
+ while (tmp && num_elems < MAX_PATTERN_LEN) {
+ pattern = &config->pattern[num_elems];
+ /* Use invalid values to prevent correct values by chance. */
+ val1 = -1;
+ val2 = -1;
+ ret = sscanf(tmp, "%c%" PRIu64 IDX_DELIMITER "%u", &op, &val1, &val2);
+
+ if ((ret == 2 || ret == 3) && (op == ADDITION || op == REMOVAL || op == DELAY)) {
+ pattern->val1 = val1;
+ pattern->val2 = val2;
+ pattern->op = op;
+ ++num_elems;
+ }
+
+ tmp = strtok(NULL, CMD_DELIMITER);
+ }
+
+ free(tmp_str);
+ config->num_p_elems = num_elems;
+}
+
+static void print_usage(void)
+{
+ printf("\n"
+ "Simple interactive ODP dynamic worker tester. Can be used to verify ability of\n"
+ "an implementation to dynamically add and remove workers from one ODP application\n"
+ "to another. Acts as a frontend and forks ODP applications based on\n"
+ "configuration.\n"
+ "\n"
+ "Usage: " PROG_NAME " OPTIONS\n"
+ "\n"
+ " E.g. ODP0=MY_ENV=MY_VAL ODP1=MY_ENV=MY_VAL " PROG_NAME " -c 0x80,0x80\n"
+ " ...\n"
+ " > %s 0 0\n"
+ " > %s 0 0\n"
+ " > %s 1 0\n"
+ " > %s 1 0\n"
+ " ...\n"
+ " " PROG_NAME " -c 0x80,0x80 -p %c0%s0%s%c1000000000%s%c0%s0\n"
+ "\n"
+ "Mandatory OPTIONS:\n"
+ "\n"
+ " -c, --cpumasks CPU masks for to-be-created ODP processes, comma-separated, no\n"
+ " spaces. CPU mask format should be as expected by\n"
+ " 'odp_cpumask_from_str()'. Parsed amount of CPU masks will be\n"
+ " the number of ODP processes to be created. Theoretical maximum\n"
+ " number of CPU mask entries (and to-be-created ODP processes) is\n"
+ " %u. Theoretical maximum number of workers per ODP process is\n"
+ " %u. These might be further limited by the implementation.\n\n"
+ " A single environment variable can be passed to the processes.\n"
+ " The format should be: 'ODP<x>=<name>=<value>', where <x> is\n"
+ " process index, starting from 0.\n"
+ "\n"
+ "Optional OPTIONS:\n"
+ "\n"
+ " -p, --pattern Non-interactive mode with a pattern of worker additions,\n"
+ " removals and delays, delimited by '%s', no spaces. Additions\n"
+ " are indicated with '%c' prefix, removals with '%c' prefix, both\n"
+ " followed by process index, starting from 0 and worker thread\n"
+ " index within given cpumask delimited by '%s', and delays with\n"
+ " '%c' prefix, followed by a delay in nanoseconds. Process\n"
+ " indexes are based on the parsed process count of '--cpumasks'\n"
+ " option. Additions and removals should be equal in the aggregate\n"
+ " and removals should never outnumber additions at any instant.\n"
+ " Maximum pattern length is %u.\n"
+ " -h, --help This help.\n"
+ "\n", cmdstrs[ADD_WORKER], cmdstrs[REM_WORKER], cmdstrs[ADD_WORKER],
+ cmdstrs[REM_WORKER], ADDITION, IDX_DELIMITER, CMD_DELIMITER, DELAY, CMD_DELIMITER,
+ REMOVAL, IDX_DELIMITER, MAX_PROGS, MAX_WORKERS, CMD_DELIMITER, ADDITION, REMOVAL,
+ IDX_DELIMITER, DELAY, MAX_PATTERN_LEN);
+}
+
+static parse_result_t check_options(const global_config_t *config)
+{
+ const pattern_t *pattern;
+ int64_t num_tot = 0U;
+
+ if (config->num_progs == 0U || config->num_progs > MAX_PROGS) {
+ printf("Invalid number of CPU masks: %u\n", config->num_progs);
+ return PRS_NOK;
+ }
+
+ for (uint32_t i = 0U; i < config->num_p_elems; ++i) {
+ pattern = &config->pattern[i];
+
+ if (pattern->op != DELAY) {
+ if (pattern->val1 >= config->num_progs) {
+ ODPH_ERR("Invalid pattern, invalid process index: %" PRIu64 "\n",
+ pattern->val1);
+ return PRS_NOK;
+ }
+
+ if (pattern->val2 > MAX_WORKERS - 1) {
+ ODPH_ERR("Invalid pattern, invalid worker index: %u\n",
+ pattern->val2);
+ return PRS_NOK;
+ }
+ }
+
+ if (pattern->op == ADDITION)
+ ++num_tot;
+ else if (pattern->op == REMOVAL)
+ --num_tot;
+
+ if (num_tot < 0) {
+ ODPH_ERR("Invalid pattern, removals exceed additions instantaneously\n");
+ return PRS_NOK;
+ }
+ }
+
+ if (num_tot > 0) {
+ ODPH_ERR("Invalid pattern, more additions than removals\n");
+ return PRS_NOK;
+ }
+
+ return PRS_OK;
+}
+
+static parse_result_t parse_options(int argc, char **argv, global_config_t *config)
+{
+ int opt, long_index;
+
+ static const struct option longopts[] = {
+ { "cpumasks", required_argument, NULL, 'c' },
+ { "pattern", required_argument, NULL, 'p' },
+ { "help", no_argument, NULL, 'h' },
+ { NULL, 0, NULL, 0 }
+ };
+
+ static const char *shortopts = "c:p:h";
+
+ init_options(config);
+
+ while (1) {
+ opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
+
+ if (opt == -1)
+ break;
+
+ switch (opt) {
+ case 'c':
+ parse_masks(config, optarg);
+ break;
+ case 'p':
+ parse_pattern(config, optarg);
+ break;
+ case 'h':
+ print_usage();
+ return PRS_TERM;
+ case '?':
+ default:
+ print_usage();
+ return PRS_NOK;
+ }
+ }
+
+ return check_options(config);
+}
+
+static odp_bool_t setup_pkill(pid_t ppid)
+{
+ return prctl(PR_SET_PDEATHSIG, SIGKILL) != -1 && getppid() == ppid;
+}
+
+ODP_PRINTF_FORMAT(2, 3)
+int log_fn(odp_log_level_t level, const char *fmt, ...);
+
+int log_fn(odp_log_level_t level, const char *fmt, ...)
+{
+ int pri;
+ va_list args;
+
+ switch (level) {
+ case ODP_LOG_DBG:
+ case ODP_LOG_PRINT:
+ pri = LOG_INFO;
+ break;
+ case ODP_LOG_WARN:
+ pri = LOG_WARNING;
+ break;
+ case ODP_LOG_ERR:
+ case ODP_LOG_UNIMPLEMENTED:
+ case ODP_LOG_ABORT:
+ pri = LOG_ERR;
+ break;
+ default:
+ pri = LOG_INFO;
+ break;
+ }
+
+ va_start(args, fmt);
+ vsyslog(pri, fmt, args);
+ va_end(args);
+
+ /* Just return something that's not considered an error. */
+ return 0;
+}
+
+static odp_bool_t disable_stream(int fd, odp_bool_t read)
+{
+ const int null = open("/dev/null", read ? O_RDONLY : O_WRONLY);
+ odp_bool_t ret = false;
+
+ if (null == -1)
+ return ret;
+
+ ret = dup2(null, fd) != -1;
+ close(null);
+
+ return ret;
+}
+
+static odp_bool_t set_odp_env(char *env)
+{
+ char *tmp_str = strdup(env), *tmp;
+ int ret;
+ odp_bool_t func_ret = false;
+
+ if (tmp_str == NULL)
+ return func_ret;
+
+ tmp = strtok(tmp_str, ENV_DELIMITER);
+
+ if (tmp != NULL) {
+ ret = setenv(tmp, strstr(env, ENV_DELIMITER) + 1U, 1);
+
+ if (ret == -1)
+ perror("setenv");
+
+ func_ret = ret != -1;
+ }
+
+ free(tmp_str);
+
+ return func_ret;
+}
+
+static odp_bool_t setup_prog_config(prog_config_t *config, odp_instance_t odp_instance,
+ char *cpumask, int socket)
+{
+ worker_config_t *worker_config;
+ odp_pool_param_t param;
+ odp_pool_t pool;
+
+ memset(config, 0, sizeof(*config));
+
+ for (uint32_t i = 0U; i < MAX_WORKERS; ++i) {
+ worker_config = &config->worker_config[i];
+ worker_config->thread.cpu = -1;
+ odp_ticketlock_init(&worker_config->lock);
+ worker_config->queue = ODP_QUEUE_INVALID;
+ odp_atomic_init_u32(&worker_config->is_running, 0U);
+ }
+
+ config->instance = odp_instance;
+ odp_cpumask_from_str(&config->cpumask, cpumask);
+ odp_pool_param_init(&param);
+ param.type = ODP_POOL_BUFFER;
+ param.buf.num = 1U;
+ param.buf.size = ODP_CACHE_LINE_SIZE;
+ pool = odp_pool_create(NULL, &param);
+
+ if (pool == ODP_POOL_INVALID) {
+ log_fn(ODP_LOG_ERR, "Error creating process buffer pool\n");
+ return false;
+ }
+
+ config->pool = pool;
+ config->socket = socket;
+
+ return true;
+}
+
+static inline void decode_cmd(uint8_t data, uint8_t *cmd, uint8_t *aux)
+{
+ /* Actual command will be in the high nibble and worker index in the low nibble. */
+ *cmd = data >> 4U;
+ *aux = data & 0xF;
+}
+
+static void build_result(const prog_config_t *config, result_t *result)
+{
+ uint32_t num = 0U;
+ const worker_config_t *worker_config;
+
+ for (uint32_t i = 0U; i < MAX_WORKERS; ++i) {
+ worker_config = &config->worker_config[i];
+
+ if (worker_config->thread.cpu != -1) {
+ result->workers[num].is_active = 1;
+ result->workers[num].thread_id = worker_config->summary.thread_id;
+ result->workers[num].cpu = worker_config->thread.cpu;
+ ++num;
+ }
+ }
+}
+
+static void run_command(cmd_fn_t cmd_fn, uint8_t aux, prog_config_t *config, int socket)
+{
+ const odp_bool_t is_ok = cmd_fn(config, aux);
+ const summary_t *summary = config->pending_summary;
+ uint8_t rep = !is_ok ? CMD_NOK : summary != NULL ? CMD_SUMMARY : CMD_OK;
+ result_t result;
+
+ (void)TEMP_FAILURE_RETRY(send(socket, &rep, sizeof(rep), MSG_NOSIGNAL));
+
+ /* Same machine, no internet in-between, just send the structs as is. */
+ if (rep == CMD_OK) {
+ memset(&result, 0, sizeof(result));
+ build_result(config, &result);
+ (void)TEMP_FAILURE_RETRY(send(socket, (const void *)&result, sizeof(result),
+ MSG_NOSIGNAL));
+ } else if (rep == CMD_SUMMARY) {
+ (void)TEMP_FAILURE_RETRY(send(socket, (const void *)summary, sizeof(*summary),
+ MSG_NOSIGNAL));
+ config->pending_summary = NULL;
+ }
+}
+
+static odp_bool_t setup_worker_config(worker_config_t *config)
+{
+ odp_thrmask_t tmask;
+ odp_schedule_group_t grp;
+ odp_queue_param_t queue_param;
+ odp_queue_t queue;
+
+ odp_thrmask_zero(&tmask);
+ grp = odp_schedule_group_create(NULL, &tmask);
+
+ if (grp == ODP_SCHED_GROUP_INVALID) {
+ log_fn(ODP_LOG_ERR, "Error creating scheduler group\n");
+ return false;
+ }
+
+ config->grp = grp;
+ odp_queue_param_init(&queue_param);
+ queue_param.type = ODP_QUEUE_TYPE_SCHED;
+ queue_param.sched.group = config->grp;
+ queue = odp_queue_create(NULL, &queue_param);
+
+ if (queue == ODP_QUEUE_INVALID) {
+ log_fn(ODP_LOG_ERR, "Error creating queue\n");
+ (void)odp_schedule_group_destroy(config->grp);
+ return false;
+ }
+
+ odp_ticketlock_lock(&config->lock);
+ config->queue = queue;
+ odp_ticketlock_unlock(&config->lock);
+
+ return true;
+}
+
+static inline int get_cpu(odp_cpumask_t *mask, int idx)
+{
+ int cpu = odp_cpumask_first(mask);
+
+ while (idx--) {
+ cpu = odp_cpumask_next(mask, cpu);
+
+ if (cpu < 0)
+ break;
+ }
+
+ return cpu;
+}
+
+static odp_bool_t signal_ready(int socket)
+{
+ uint8_t cmd = CMD_OK;
+ ssize_t ret;
+
+ ret = TEMP_FAILURE_RETRY(send(socket, &cmd, sizeof(cmd), MSG_NOSIGNAL));
+
+ if (ret != 1) {
+ log_fn(ODP_LOG_ERR, "Error signaling process readiness: %s\n", strerror(errno));
+ return false;
+ }
+
+ return true;
+}
+
+static void enq_to_next_queue(worker_config_t *config, int idx, odp_event_t ev, summary_t *summary)
+{
+ worker_config_t *worker_config;
+ int ret;
+
+ for (uint32_t i = 1U; i <= MAX_WORKERS; ++i) {
+ worker_config = &config[(idx + i) % MAX_WORKERS];
+ odp_ticketlock_lock(&worker_config->lock);
+
+ if (worker_config->queue == ODP_QUEUE_INVALID) {
+ odp_ticketlock_unlock(&worker_config->lock);
+ continue;
+ }
+
+ ret = odp_queue_enq(worker_config->queue, ev);
+ ++summary->num_handled;
+
+ if (ret < 0)
+ ++summary->enq_errs;
+
+ odp_ticketlock_unlock(&worker_config->lock);
+ return;
+ }
+
+ odp_event_free(ev);
+}
+
+static int run_worker(void *args)
+{
+ odp_time_t tm;
+ odp_thrmask_t tmask;
+ const int thread_id = odp_thread_id();
+ worker_config_t *config = args;
+ odp_event_t ev;
+ worker_config_t *configs = config->configs;
+ summary_t *summary = &config->summary;
+ const uint8_t idx = config->idx;
+
+ summary->thread_id = thread_id;
+ tm = odp_time_local_strict();
+ odp_thrmask_zero(&tmask);
+ odp_thrmask_set(&tmask, thread_id);
+
+ if (odp_schedule_group_join(config->grp, &tmask) < 0)
+ /* Log but still continue. */
+ log_fn(ODP_LOG_ERR, "Error joining scheduler group\n");
+
+ odp_barrier_wait(&config->barrier);
+
+ while (odp_atomic_load_u32(&config->is_running)) {
+ ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+
+ if (ev == ODP_EVENT_INVALID)
+ continue;
+
+ enq_to_next_queue(configs, idx, ev, summary);
+ }
+
+ while (true) {
+ ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+
+ if (ev == ODP_EVENT_INVALID)
+ break;
+
+ enq_to_next_queue(configs, idx, ev, summary);
+ }
+
+ summary->runtime = odp_time_diff_ns(odp_time_local_strict(), tm);
+
+ return 0;
+}
+
+static void shutdown_worker(worker_config_t *config)
+{
+ odp_queue_t queue;
+
+ odp_ticketlock_lock(&config->lock);
+ queue = config->queue;
+ config->queue = ODP_QUEUE_INVALID;
+ odp_ticketlock_unlock(&config->lock);
+
+ odp_atomic_store_u32(&config->is_running, 0U);
+ (void)odph_thread_join(&config->thread, 1);
+ (void)odp_queue_destroy(queue);
+ (void)odp_schedule_group_destroy(config->grp);
+}
+
+static odp_bool_t bootstrap_scheduling(worker_config_t *config, odp_pool_t pool)
+{
+ odp_buffer_t buf = odp_buffer_alloc(pool);
+
+ if (buf == ODP_BUFFER_INVALID)
+ /* Event still in circulation. */
+ return true;
+
+ if (odp_queue_enq(config->queue, odp_buffer_to_event(buf)) < 0) {
+ log_fn(ODP_LOG_ERR, "Error enqueueing bootstrap event\n");
+ odp_buffer_free(buf);
+ shutdown_worker(config);
+ return false;
+ }
+
+ return true;
+}
+
+static odp_bool_t add_worker(prog_config_t *config, uint8_t idx)
+{
+ worker_config_t *worker_config;
+ odph_thread_common_param_t thr_common;
+ int set_cpu;
+ odp_cpumask_t cpumask;
+ odph_thread_param_t thr_param;
+
+ if (config->num_workers == MAX_WORKERS) {
+ log_fn(ODP_LOG_WARN, "Maximum number of workers already created\n");
+ return false;
+ }
+
+ if (idx >= MAX_WORKERS) {
+ log_fn(ODP_LOG_ERR, "Worker index out of bounds: %u\n", idx);
+ return false;
+ }
+
+ worker_config = &config->worker_config[idx];
+
+ if (worker_config->thread.cpu != -1) {
+ log_fn(ODP_LOG_WARN, "Worker already created: %u\n", idx);
+ return false;
+ }
+
+ set_cpu = get_cpu(&config->cpumask, idx);
+
+ if (set_cpu < 0) {
+ log_fn(ODP_LOG_ERR, "No CPU found for index: %u\n", idx);
+ return false;
+ }
+
+ memset(&worker_config->summary, 0, sizeof(worker_config->summary));
+
+ if (!setup_worker_config(worker_config))
+ return false;
+
+ worker_config->configs = config->worker_config;
+ worker_config->idx = idx;
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = config->instance;
+
+ odp_cpumask_zero(&cpumask);
+ odp_cpumask_set(&cpumask, set_cpu);
+ thr_common.cpumask = &cpumask;
+ odph_thread_param_init(&thr_param);
+ thr_param.start = run_worker;
+ thr_param.thr_type = ODP_THREAD_WORKER;
+ thr_param.arg = worker_config;
+ odp_atomic_store_u32(&worker_config->is_running, 1U);
+ /* Control thread + worker thread = barrier count */
+ odp_barrier_init(&worker_config->barrier, 2);
+
+ if (odph_thread_create(&worker_config->thread, &thr_common, &thr_param, 1) != 1) {
+ log_fn(ODP_LOG_ERR, "Error creating worker\n");
+ (void)odp_queue_destroy(worker_config->queue);
+ (void)odp_schedule_group_destroy(worker_config->grp);
+ return false;
+ }
+
+ odp_barrier_wait(&worker_config->barrier);
+ ++config->num_workers;
+
+ if (config->num_workers == 1U && !bootstrap_scheduling(worker_config, config->pool))
+ return false;
+
+ return true;
+}
+
+static odp_bool_t remove_worker(prog_config_t *config, uint8_t idx)
+{
+ worker_config_t *worker_config;
+
+ if (config->num_workers == 0U) {
+ log_fn(ODP_LOG_WARN, "No more workers to remove\n");
+ return false;
+ }
+
+ if (idx >= MAX_WORKERS) {
+ log_fn(ODP_LOG_ERR, "Worker index out of bounds: %u\n", idx);
+ return false;
+ }
+
+ worker_config = &config->worker_config[idx];
+
+ if (worker_config->thread.cpu == -1) {
+ log_fn(ODP_LOG_WARN, "Worker already removed: %u\n", idx);
+ return false;
+ }
+
+ shutdown_worker(worker_config);
+ --config->num_workers;
+ worker_config->thread.cpu = -1;
+ config->pending_summary = &worker_config->summary;
+
+ return true;
+}
+
+static odp_bool_t do_exit(prog_config_t *config, uint8_t aux ODP_UNUSED)
+{
+ for (uint32_t i = 0U; i < MAX_WORKERS; ++i)
+ remove_worker(config, i);
+
+ return true;
+}
+
+static void run_prog(prog_config_t *config)
+{
+ odp_bool_t is_running = true;
+ int socket = config->socket;
+ ssize_t ret;
+ uint8_t data, cmd, aux;
+
+ while (is_running) {
+ ret = TEMP_FAILURE_RETRY(recv(socket, &data, sizeof(data), 0));
+
+ if (ret != 1)
+ continue;
+
+ decode_cmd(data, &cmd, &aux);
+
+ switch (cmd) {
+ case ADD_WORKER:
+ run_command(add_worker, aux, config, socket);
+ break;
+ case REM_WORKER:
+ run_command(remove_worker, aux, config, socket);
+ break;
+ case EXIT_PROG:
+ run_command(do_exit, aux, config, socket);
+ is_running = false;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void teardown_prog(prog_config_t *config)
+{
+ (void)odp_pool_destroy(config->pool);
+}
+
+static void run_odp(char *cpumask, int socket)
+{
+ odp_instance_t odp_instance;
+ odp_init_t param;
+ odp_shm_t shm_cfg = ODP_SHM_INVALID;
+
+ odp_init_param_init(&param);
+ param.log_fn = log_fn;
+
+ if (odp_init_global(&odp_instance, &param, NULL)) {
+ log_fn(ODP_LOG_ERR, "ODP global init failed\n");
+ return;
+ }
+
+ if (odp_init_local(odp_instance, ODP_THREAD_CONTROL)) {
+ log_fn(ODP_LOG_ERR, "ODP local init failed\n");
+ return;
+ }
+
+ shm_cfg = odp_shm_reserve(NULL, sizeof(prog_config_t), ODP_CACHE_LINE_SIZE, 0U);
+
+ if (shm_cfg == ODP_SHM_INVALID) {
+ log_fn(ODP_LOG_ERR, "Error reserving shared memory\n");
+ return;
+ }
+
+ prog_conf = odp_shm_addr(shm_cfg);
+
+ if (prog_conf == NULL) {
+ log_fn(ODP_LOG_ERR, "Error resolving shared memory address\n");
+ return;
+ }
+
+ if (odp_schedule_config(NULL) < 0) {
+ log_fn(ODP_LOG_ERR, "Error configuring scheduler\n");
+ return;
+ }
+
+ if (!setup_prog_config(prog_conf, odp_instance, cpumask, socket))
+ return;
+
+ if (!signal_ready(prog_conf->socket))
+ return;
+
+ run_prog(prog_conf);
+ teardown_prog(prog_conf);
+ (void)odp_shm_free(shm_cfg);
+
+ if (odp_term_local()) {
+ log_fn(ODP_LOG_ERR, "ODP local terminate failed\n");
+ return;
+ }
+
+ if (odp_term_global(odp_instance)) {
+ log_fn(ODP_LOG_ERR, "ODP global terminate failed\n");
+ return;
+ }
+}
+
+static odp_bool_t wait_process_ready(int socket)
+{
+ uint8_t data;
+ ssize_t ret;
+
+ ret = TEMP_FAILURE_RETRY(recv(socket, &data, sizeof(data), 0));
+
+ if (ret <= 0) {
+ if (ret < 0)
+ perror("recv");
+
+ return false;
+ }
+
+ return true;
+}
+
+static inline odp_bool_t is_interactive(const global_config_t *config)
+{
+ return config->num_p_elems == 0U;
+}
+
+static void print_cli_usage(void)
+{
+ printf("\nValid commands are:\n\n");
+
+ for (uint32_t i = 0U; i < ODPH_ARRAY_SIZE(cmdstrs); ++i)
+ printf(" %s <process index> <worker index>\n", cmdstrs[i]);
+
+ printf("\n");
+}
+
+static char *get_format_str(uint32_t max_cmd_len)
+{
+ const int cmd_len = snprintf(NULL, 0U, "%u", max_cmd_len);
+ uint32_t str_len;
+
+ if (cmd_len <= 0)
+ return NULL;
+
+ str_len = strlen("%s %u %u") + cmd_len + 1U;
+
+ char fmt[str_len];
+
+ snprintf(fmt, str_len, "%%%ds %%u %%u", max_cmd_len);
+
+ return strdup(fmt);
+}
+
+static uint8_t map_str_to_command(const char *cmdstr, uint32_t len)
+{
+ for (uint32_t i = 0U; i < ODPH_ARRAY_SIZE(cmdstrs); ++i)
+ if (strncmp(cmdstr, cmdstrs[i], len) == 0)
+ return i;
+
+ return UNKNOWN_CMD;
+}
+
+static odp_bool_t get_stdin_command(global_config_t *config, uint8_t *cmd, uint32_t *prog_idx,
+ uint32_t *worker_idx)
+{
+ char *input, cmdstr[config->max_cmd_len + 1U], *fmt;
+ size_t size;
+ ssize_t ret;
+
+ input = NULL;
+ memset(cmdstr, 0, sizeof(cmdstr));
+ printf("> ");
+ ret = getline(&input, &size, stdin);
+
+ if (ret == -1)
+ return false;
+
+ fmt = get_format_str(config->max_cmd_len);
+
+ if (fmt == NULL) {
+ printf("Unable to parse command\n");
+ return false;
+ }
+
+ ret = sscanf(input, fmt, cmdstr, prog_idx, worker_idx);
+ free(input);
+ free(fmt);
+
+ if (ret == EOF)
+ return false;
+
+ if (ret != 3) {
+ printf("Unable to parse command\n");
+ return false;
+ }
+
+ *cmd = map_str_to_command(cmdstr, config->max_cmd_len);
+ return true;
+}
+
+static uint8_t map_char_to_command(char cmdchar)
+{
+ switch (cmdchar) {
+ case ADDITION:
+ return ADD_WORKER;
+ case REMOVAL:
+ return REM_WORKER;
+ case DELAY:
+ return DELAY_PROG;
+ default:
+ return UNKNOWN_CMD;
+ }
+}
+
+static odp_bool_t get_pattern_command(global_config_t *config, uint8_t *cmd, uint32_t *prog_idx,
+ uint32_t *worker_idx)
+{
+ static uint32_t i;
+ const pattern_t *pattern;
+ struct timespec ts;
+
+ if (i == config->num_p_elems) {
+ config->is_running = false;
+ return false;
+ }
+
+ pattern = &config->pattern[i++];
+ *cmd = map_char_to_command(pattern->op);
+
+ if (*cmd == DELAY_PROG) {
+ ts.tv_sec = pattern->val1 / ODP_TIME_SEC_IN_NS;
+ ts.tv_nsec = pattern->val1 % ODP_TIME_SEC_IN_NS;
+ nanosleep(&ts, NULL);
+ return false;
+ }
+
+ *prog_idx = pattern->val1;
+ *worker_idx = pattern->val2;
+
+ return true;
+}
+
+static inline uint8_t encode_cmd(uint8_t cmd, uint8_t worker_idx)
+{
+ /* Actual command will be in the high nibble and worker index in the low nibble. */
+ cmd <<= 4U;
+ cmd |= worker_idx;
+
+ return cmd;
+}
+
+static odp_bool_t is_peer_down(int error)
+{
+ return error == ECONNRESET || error == EPIPE || error == ETIMEDOUT;
+}
+
+static int send_command(int socket, uint8_t cmd)
+{
+ uint8_t data;
+ ssize_t ret;
+ odp_bool_t is_down;
+
+ ret = TEMP_FAILURE_RETRY(send(socket, &cmd, sizeof(cmd), MSG_NOSIGNAL));
+
+ if (ret != 1) {
+ is_down = is_peer_down(errno);
+ perror("send");
+ return is_down ? PEER_ERR : CONN_ERR;
+ }
+
+ ret = TEMP_FAILURE_RETRY(recv(socket, &data, sizeof(data), 0));
+
+ if (ret <= 0) {
+ is_down = ret == 0 || is_peer_down(errno);
+
+ if (ret < 0)
+ perror("recv");
+
+ return is_down ? PEER_ERR : CONN_ERR;
+ }
+
+ return data;
+}
+
+static odp_bool_t recv_summary(int socket, summary_t *summary)
+{
+ const ssize_t size = sizeof(*summary),
+ ret = TEMP_FAILURE_RETRY(recv(socket, summary, size, 0));
+
+ return ret == size;
+}
+
+static void dump_summary(pid_t pid, const summary_t *summary)
+{
+ printf("\nremoved worker summary:\n"
+ " ODP process ID: %d\n"
+ " thread ID: %" PRIu64 "\n"
+ " events handled: %" PRIu64 "\n"
+ " enqueue errors: %" PRIu64 "\n"
+ " runtime: %" PRIu64 " (ns)\n\n", pid, summary->thread_id,
+ summary->num_handled, summary->enq_errs, summary->runtime);
+}
+
+static odp_bool_t check_summary(const summary_t *summary)
+{
+ if (summary->num_handled == 0U) {
+ printf("Summary check failure: no events handled\n");
+ return false;
+ }
+
+ if (summary->enq_errs > 0U) {
+ printf("Summary check failure: enqueue errors\n");
+ return false;
+ }
+
+ if (summary->runtime == 0U) {
+ printf("Summary check failure: no run time recorded\n");
+ return false;
+ }
+
+ return true;
+}
+
+static void dump_result(int socket, pid_t pid)
+{
+ result_t result;
+ const ssize_t size = sizeof(result),
+ ret = TEMP_FAILURE_RETRY(recv(socket, &result, size, 0));
+
+ if (ret != size)
+ return;
+
+ printf("\nODP process %d:\n"
+ "|\n", pid);
+
+ for (uint32_t i = 0U; i < MAX_WORKERS; i++)
+ if (result.workers[i].is_active)
+ printf("|--- Worker thread ID %u on CPU %u\n",
+ result.workers[i].thread_id, result.workers[i].cpu);
+
+ printf("\n");
+}
+
+static odp_bool_t run_global(global_config_t *config)
+{
+ input_fn_t input_fn;
+ uint32_t prog_idx, worker_idx;
+ uint8_t cmd;
+ prog_t *prog;
+ ssize_t ret;
+ odp_bool_t is_recv, func_ret = true;
+
+ print_cli_usage();
+ input_fn = is_interactive(config) ? get_stdin_command : get_pattern_command;
+ config->is_running = true;
+
+ while (config->is_running) {
+ if (!input_fn(config, &cmd, &prog_idx, &worker_idx))
+ continue;
+
+ if (cmd == UNKNOWN_CMD) {
+ printf("Unrecognized command\n");
+ continue;
+ }
+
+ if (prog_idx >= config->num_progs) {
+ printf("Invalid process index: %u\n", prog_idx);
+ continue;
+ }
+
+ prog = &config->progs[prog_idx];
+
+ if (prog->state == DOWN) {
+ printf("ODP process index %u has already exited\n", prog_idx);
+ continue;
+ }
+
+ ret = send_command(prog->socket, encode_cmd(cmd, worker_idx));
+
+ if (ret == CONN_ERR) {
+ printf("Fatal connection error, aborting\n");
+ abort();
+ }
+
+ if (ret == PEER_ERR) {
+ printf("ODP process index %u has exited\n", prog_idx);
+ prog->state = DOWN;
+ continue;
+ }
+
+ if (ret == CMD_NOK) {
+ printf("ODP process index %u was unable to execute the command\n",
+ prog_idx);
+ continue;
+ }
+
+ if (ret == CMD_SUMMARY) {
+ is_recv = recv_summary(prog->socket, &prog->summary);
+
+ if (is_recv)
+ dump_summary(prog->pid, &prog->summary);
+
+ if (!is_interactive(config) &&
+ !(is_recv && check_summary(&prog->summary))) {
+ config->is_running = false;
+ func_ret = false;
+ }
+
+ continue;
+ }
+
+ if (ret == CMD_OK)
+ dump_result(prog->socket, prog->pid);
+ }
+
+ for (uint32_t i = 0U; i < config->num_progs; ++i) {
+ prog = &config->progs[i];
+
+ if (prog->state == UP) {
+ for (uint32_t j = 0U; j < MAX_WORKERS; ++j) {
+ ret = send_command(prog->socket, encode_cmd(REM_WORKER, j));
+
+ if (ret == CONN_ERR || ret == PEER_ERR)
+ break;
+
+ if (ret != CMD_SUMMARY)
+ continue;
+
+ if (recv_summary(prog->socket, &prog->summary))
+ dump_summary(prog->pid, &prog->summary);
+ }
+
+ (void)send_command(prog->socket, encode_cmd(EXIT_PROG, 0));
+ (void)TEMP_FAILURE_RETRY(waitpid(prog->pid, NULL, 0));
+ }
+ }
+
+ return func_ret;
+}
+
+static void teardown_global(const global_config_t *config)
+{
+ const prog_t *prog;
+
+ for (uint32_t i = 0U; i < config->num_progs; ++i) {
+ prog = &config->progs[i];
+ close(prog->socket);
+ }
+}
+
+int main(int argc, char **argv)
+{
+ parse_result_t res;
+ int ret, func_ret = EXIT_SUCCESS;
+ prog_t *prog;
+ pid_t pid, ppid;
+ const size_t envsize = strlen(ENV_PREFIX S(MAX_PROGS)) + 1U;
+ char *env, prog_env[envsize];
+
+ if (!setup_signals()) {
+ printf("Error setting up signals, exiting\n");
+ return EXIT_FAILURE;
+ }
+
+ res = parse_options(argc, argv, &conf);
+
+ if (res == PRS_NOK)
+ return EXIT_FAILURE;
+
+ if (res == PRS_TERM)
+ return EXIT_SUCCESS;
+
+ printf("*** ODP dynamic worker tester ***\n\n");
+
+ for (uint32_t i = 0U; i < conf.num_progs; ++i) {
+ int sockets[2U];
+
+ ret = socketpair(AF_UNIX, SOCK_STREAM, 0, sockets);
+
+ if (ret == -1) {
+ perror("socketpair");
+ return EXIT_FAILURE;
+ }
+
+ prog = &conf.progs[i];
+ snprintf(prog_env, envsize, "%s%u", ENV_PREFIX, i);
+ env = getenv(prog_env);
+
+ if (env != NULL)
+ prog->env = strdup(env);
+
+ prog->socket = sockets[PARENT];
+ ppid = getpid();
+ pid = fork();
+
+ if (pid == -1) {
+ perror("fork");
+ return EXIT_FAILURE;
+ }
+
+ if (pid == 0) {
+ close(sockets[PARENT]);
+
+ if (!setup_pkill(ppid)) {
+ log_fn(ODP_LOG_ERR, "Error setting up pdeath signal, exiting\n");
+ return EXIT_FAILURE;
+ }
+
+ if (!disable_stream(STDIN_FILENO, true) ||
+ !disable_stream(STDERR_FILENO, false) ||
+ !disable_stream(STDOUT_FILENO, false)) {
+ log_fn(ODP_LOG_ERR, "Error disabling streams, exiting\n");
+ return EXIT_FAILURE;
+ }
+
+ if (prog->env != NULL && !set_odp_env(prog->env)) {
+ log_fn(ODP_LOG_ERR, "Error setting up environment, exiting\n");
+ return EXIT_FAILURE;
+ }
+
+ run_odp(prog->cpumask, sockets[CHILD]);
+ goto exit;
+ } else {
+ close(sockets[CHILD]);
+ prog->pid = pid;
+
+ if (!wait_process_ready(prog->socket)) {
+ printf("Error launching process: %d, exiting\n", prog->pid);
+ return EXIT_FAILURE;
+ }
+
+ prog->state = UP;
+ printf("Created ODP process, pid: %d, CPU mask: %s, process index: %u\n",
+ prog->pid, prog->cpumask, i);
+ }
+ }
+
+ func_ret = run_global(&conf) ? EXIT_SUCCESS : EXIT_FAILURE;
+ teardown_global(&conf);
+
+exit:
+ return func_ret;
+}
diff --git a/test/miscellaneous/odp_dyn_workers_run.sh b/test/miscellaneous/odp_dyn_workers_run.sh
new file mode 100755
index 000000000..188713d77
--- /dev/null
+++ b/test/miscellaneous/odp_dyn_workers_run.sh
@@ -0,0 +1,23 @@
+#!/bin/sh
+#
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2024 Nokia
+#
+
+MAX_CPUS=$(nproc)
+# Frontend and control threads on one core and to-be-swapped worker thread on another core,
+# otherwise weird issues might occur
+REQ_CPUS=2
+TEST_DIR="${TEST_DIR:-$(dirname $0)}"
+BIN=odp_dyn_workers
+DEL=100000000
+
+export ODP0="ODP_PLATFORM_PARAMS=-m 256 --file-prefix odp0 --proc-type auto --no-pci"
+export ODP1="ODP_PLATFORM_PARAMS=-m 256 --file-prefix odp1 --proc-type auto --no-pci"
+
+if [ ${MAX_CPUS} -lt ${REQ_CPUS} ]; then
+ echo "Not enough CPUs (requested ${REQ_CPUS}, available ${MAX_CPUS}). Skipping test."
+ exit 77
+fi
+
+taskset -c 0 ${TEST_DIR}/${BIN}${EXEEXT} -c 0x2,0x2 -p a0:0,d${DEL},r0:0,d${DEL},a1:0,d${DEL},r1:0
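For reference, with DEL=100000000 (100 ms) the pattern above reads: add worker 0 to ODP process 0, wait 100 ms, remove it, wait 100 ms, then add worker 0 to ODP process 1, wait 100 ms and remove it — following the '-p, --pattern' syntax described in the program's usage text.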
diff --git a/test/performance/.gitignore b/test/performance/.gitignore
index 46d9e9c2c..d5ab7df24 100644
--- a/test/performance/.gitignore
+++ b/test/performance/.gitignore
@@ -27,7 +27,7 @@ odp_random
odp_sched_latency
odp_sched_perf
odp_sched_pktio
-odp_scheduling
odp_stash_perf
odp_stress
+odp_timer_accuracy
odp_timer_perf
diff --git a/test/performance/Makefile.am b/test/performance/Makefile.am
index 356e98a2d..8142d5db9 100644
--- a/test/performance/Makefile.am
+++ b/test/performance/Makefile.am
@@ -30,7 +30,7 @@ COMPILE_ONLY = odp_cpu_bench \
odp_sched_latency \
odp_sched_perf \
odp_sched_pktio \
- odp_scheduling \
+ odp_timer_accuracy \
odp_timer_perf
if LIBCONFIG
@@ -46,7 +46,7 @@ TESTSCRIPTS = odp_cpu_bench_run.sh \
odp_sched_latency_run.sh \
odp_sched_perf_run.sh \
odp_sched_pktio_run.sh \
- odp_scheduling_run.sh \
+ odp_timer_accuracy_run.sh \
odp_timer_perf_run.sh
if ODP_PKTIO_PCAP
@@ -80,7 +80,6 @@ odp_packet_gen_SOURCES = odp_packet_gen.c
odp_pktio_ordered_SOURCES = odp_pktio_ordered.c dummy_crc.h
odp_sched_latency_SOURCES = odp_sched_latency.c
odp_sched_pktio_SOURCES = odp_sched_pktio.c
-odp_scheduling_SOURCES = odp_scheduling.c
odp_pktio_perf_SOURCES = odp_pktio_perf.c
odp_pool_latency_SOURCES = odp_pool_latency.c
odp_pool_perf_SOURCES = odp_pool_perf.c
@@ -88,6 +87,7 @@ odp_queue_perf_SOURCES = odp_queue_perf.c
odp_random_SOURCES = odp_random.c
odp_sched_perf_SOURCES = odp_sched_perf.c
odp_stress_SOURCES = odp_stress.c
+odp_timer_accuracy_SOURCES = odp_timer_accuracy.c
odp_timer_perf_SOURCES = odp_timer_perf.c
if LIBCONFIG
@@ -95,11 +95,6 @@ odp_ipsecfwd_SOURCES = odp_ipsecfwd.c
AM_CFLAGS += $(LIBCONFIG_CFLAGS)
endif
-# l2fwd test depends on generator example
-EXTRA_odp_l2fwd_DEPENDENCIES = $(top_builddir)/example/generator/odp_generator$(EXEEXT)
-$(top_builddir)/example/generator/odp_generator$(EXEEXT):
- $(MAKE) -C $(top_builddir)/example/generator odp_generator$(EXEEXT)
-
dist_check_SCRIPTS = $(TESTSCRIPTS)
dist_check_DATA = udp64.pcap
diff --git a/test/performance/dummy_crc.h b/test/performance/dummy_crc.h
index 01e6c2433..8491b8fdc 100644
--- a/test/performance/dummy_crc.h
+++ b/test/performance/dummy_crc.h
@@ -1,40 +1,8 @@
-/* Copyright (c) 2016-2018, Linaro Limited
- * All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016-2018 Linaro Limited
*
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright(c) 2010-2014 Intel Corporation
+ * - lib/hash/rte_crc_sw.h
*/
/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
diff --git a/test/performance/odp_atomic_perf.c b/test/performance/odp_atomic_perf.c
index e665081a2..af0a37921 100644
--- a/test/performance/odp_atomic_perf.c
+++ b/test/performance/odp_atomic_perf.c
@@ -1,8 +1,5 @@
-/* Copyright (c) 2021, Nokia
- *
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2021 Nokia
*/
/**
diff --git a/test/performance/odp_bench_buffer.c b/test/performance/odp_bench_buffer.c
index ce14ec8b3..838617f78 100644
--- a/test/performance/odp_bench_buffer.c
+++ b/test/performance/odp_bench_buffer.c
@@ -1,8 +1,6 @@
-/* Copyright (c) 2017-2018, Linaro Limited
- * Copyright (c) 2022-2023, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2017-2018 Linaro Limited
+ * Copyright (c) 2022-2023 Nokia
*/
/**
diff --git a/test/performance/odp_bench_misc.c b/test/performance/odp_bench_misc.c
index 61afdc398..a0e9476e6 100644
--- a/test/performance/odp_bench_misc.c
+++ b/test/performance/odp_bench_misc.c
@@ -1,7 +1,5 @@
-/* Copyright (c) 2022-2023, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022-2023 Nokia
*/
/**
diff --git a/test/performance/odp_bench_packet.c b/test/performance/odp_bench_packet.c
index cb9e3ca03..a8494bd28 100644
--- a/test/performance/odp_bench_packet.c
+++ b/test/performance/odp_bench_packet.c
@@ -1,8 +1,6 @@
-/* Copyright (c) 2017-2018, Linaro Limited
- * Copyright (c) 2022-2023, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2017-2018 Linaro Limited
+ * Copyright (c) 2022-2023 Nokia
*/
/**
diff --git a/test/performance/odp_bench_pktio_sp.c b/test/performance/odp_bench_pktio_sp.c
index 017e7565f..179db129d 100644
--- a/test/performance/odp_bench_pktio_sp.c
+++ b/test/performance/odp_bench_pktio_sp.c
@@ -824,7 +824,7 @@ static int parse_interface(appl_args_t *appl_args, const char *optarg)
ODPH_ERR("Unable to store interface name (MAX_NAME_LEN=%d)\n", MAX_NAME_LEN);
return -1;
}
- strncpy(appl_args->opt.name, optarg, MAX_NAME_LEN);
+ odph_strcpy(appl_args->opt.name, optarg, MAX_NAME_LEN);
return 0;
}
@@ -849,7 +849,7 @@ static int parse_args(int argc, char *argv[])
static const char *shortopts = "i:m:o:p:q:r:s:t:h";
- strncpy(gbl_args->opt.name, "loop", MAX_NAME_LEN);
+ odph_strcpy(gbl_args->opt.name, "loop", MAX_NAME_LEN);
gbl_args->opt.rounds = ROUNDS;
gbl_args->opt.in_mode = ODP_PKTIN_MODE_DIRECT;
gbl_args->opt.out_mode = ODP_PKTOUT_MODE_DIRECT;
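The switch away from strncpy() is about guaranteed termination: strncpy() leaves the destination without a trailing NUL whenever the source fills the buffer. A minimal sketch of the difference, assuming odph_strcpy(dst, src, size) is strlcpy-like (copies at most size - 1 characters and always terminates), which is inferred from the calls above; the buffer size is hypothetical:

#include <string.h>
#include <odp/helper/odph_api.h>

static void copy_name(char dst[16], const char *src)
{
	strncpy(dst, src, 16);     /* may leave dst unterminated          */
	odph_strcpy(dst, src, 16); /* always writes a terminating NUL     */
}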
diff --git a/test/performance/odp_bench_timer.c b/test/performance/odp_bench_timer.c
index 65c7a9168..ad80367d1 100644
--- a/test/performance/odp_bench_timer.c
+++ b/test/performance/odp_bench_timer.c
@@ -1,7 +1,5 @@
-/* Copyright (c) 2023, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Nokia
*/
/**
diff --git a/test/performance/odp_cpu_bench.c b/test/performance/odp_cpu_bench.c
index 39eff620d..674015d8a 100644
--- a/test/performance/odp_cpu_bench.c
+++ b/test/performance/odp_cpu_bench.c
@@ -1,7 +1,5 @@
-/* Copyright (c) 2018, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018 Linaro Limited
*/
/**
@@ -192,7 +190,7 @@ static void sig_handler(int signo ODP_UNUSED)
static inline void init_packet(odp_packet_t pkt, uint32_t seq, uint16_t group)
{
- uint32_t *payload;
+ odp_una_u32_t *payload;
test_hdr_t *hdr;
odp_packet_parse_param_t param;
@@ -224,7 +222,7 @@ static inline odp_queue_t work_on_event(odp_event_t event)
odph_udphdr_t *udp_hdr;
test_hdr_t *hdr;
lookup_entry_t *lookup_entry;
- uint32_t *payload;
+ odp_una_u32_t *payload;
uint32_t crc;
uint32_t pkt_len;
uint8_t *data;
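The payload pointer type changes because packet payload at an arbitrary offset is not guaranteed to be naturally aligned; odp_una_u32_t marks the access as potentially unaligned so the compiler generates safe loads instead of assuming 4-byte alignment. A minimal sketch (the buffer and offset are hypothetical, only the type is from this patch):

#include <stdint.h>
#include <odp_api.h>

static uint32_t read_u32(const uint8_t *data, uint32_t offset)
{
	const odp_una_u32_t *p = (const odp_una_u32_t *)(data + offset);

	return *p;
}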
diff --git a/test/performance/odp_cpu_bench_run.sh b/test/performance/odp_cpu_bench_run.sh
index c33e0b38e..15be2e729 100755
--- a/test/performance/odp_cpu_bench_run.sh
+++ b/test/performance/odp_cpu_bench_run.sh
@@ -1,9 +1,8 @@
#!/bin/sh
#
-# Copyright (c) 2022, Nokia
-# All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2022 Nokia
#
-# SPDX-License-Identifier: BSD-3-Clause
TEST_DIR="${TEST_DIR:-$(dirname $0)}"
diff --git a/test/performance/odp_crc.c b/test/performance/odp_crc.c
index 89e2e971f..1b631c691 100644
--- a/test/performance/odp_crc.c
+++ b/test/performance/odp_crc.c
@@ -1,8 +1,5 @@
-/* Copyright (c) 2021, Nokia
- *
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2021 Nokia
*/
/**
diff --git a/test/performance/odp_crypto.c b/test/performance/odp_crypto.c
index a644da5e1..380e798c9 100644
--- a/test/performance/odp_crypto.c
+++ b/test/performance/odp_crypto.c
@@ -1,8 +1,6 @@
-/* Copyright (c) 2015-2018, Linaro Limited
- * Copyright (c) 2023, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Linaro Limited
+ * Copyright (c) 2023 Nokia
*/
/**
diff --git a/test/performance/odp_crypto_run.sh b/test/performance/odp_crypto_run.sh
index f50311ae0..fcb7435fd 100755
--- a/test/performance/odp_crypto_run.sh
+++ b/test/performance/odp_crypto_run.sh
@@ -1,9 +1,8 @@
#!/bin/sh
#
-# Copyright (c) 2022, Nokia
-# All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2022 Nokia
#
-# SPDX-License-Identifier: BSD-3-Clause
TEST_DIR="${TEST_DIR:-$(dirname $0)}"
diff --git a/test/performance/odp_dma_perf_run.sh b/test/performance/odp_dma_perf_run.sh
index 31948e40a..fb7b2bb34 100755
--- a/test/performance/odp_dma_perf_run.sh
+++ b/test/performance/odp_dma_perf_run.sh
@@ -1,9 +1,8 @@
#!/bin/sh
#
-# Copyright (c) 2022-2023, Nokia
-# All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2022-2023 Nokia
#
-# SPDX-License-Identifier: BSD-3-Clause
TEST_DIR="${TEST_DIR:-$(dirname $0)}"
BIN_NAME=odp_dma_perf
diff --git a/test/performance/odp_dmafwd_run.sh b/test/performance/odp_dmafwd_run.sh
index ebb9b153a..38fcc8dc2 100755
--- a/test/performance/odp_dmafwd_run.sh
+++ b/test/performance/odp_dmafwd_run.sh
@@ -2,6 +2,7 @@
#
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2023 Nokia
+#
TEST_DIR="${TEST_DIR:-$PWD}"
TEST_SRC_DIR=$(dirname $0)
diff --git a/test/performance/odp_ipsec.c b/test/performance/odp_ipsec.c
index 3ea93ec96..58be03dad 100644
--- a/test/performance/odp_ipsec.c
+++ b/test/performance/odp_ipsec.c
@@ -1,9 +1,7 @@
-/* Copyright (c) 2018, Linaro Limited
- * Copyright (c) 2022, Marvell
- * Copyright (c) 2022, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018 Linaro Limited
+ * Copyright (c) 2022 Marvell
+ * Copyright (c) 2022 Nokia
*/
/**
diff --git a/test/performance/odp_ipsec_run.sh b/test/performance/odp_ipsec_run.sh
index 2ddb48d07..f050cb8e0 100755
--- a/test/performance/odp_ipsec_run.sh
+++ b/test/performance/odp_ipsec_run.sh
@@ -1,9 +1,8 @@
#!/bin/sh
#
-# Copyright (c) 2022, Nokia
-# All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2022 Nokia
#
-# SPDX-License-Identifier: BSD-3-Clause
TEST_DIR="${TEST_DIR:-$(dirname $0)}"
diff --git a/test/performance/odp_l2fwd.c b/test/performance/odp_l2fwd.c
index b993de4cb..5f3efd464 100644
--- a/test/performance/odp_l2fwd.c
+++ b/test/performance/odp_l2fwd.c
@@ -1,15 +1,19 @@
-/* Copyright (c) 2014-2018, Linaro Limited
- * Copyright (c) 2019-2024, Nokia
- * Copyright (c) 2020-2021, Marvell
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2014-2018 Linaro Limited
+ * Copyright (c) 2019-2024 Nokia
+ * Copyright (c) 2020-2021 Marvell
*/
/**
* @example odp_l2fwd.c
*
- * L2 forwarding example application
+ * This L2 forwarding application can be used as an example as well as a
+ * performance test for different ODP packet I/O modes (direct, queue or
+ * scheduled).
+ *
+ * Note that this example is tuned for performance. As a result, when using
+ * scheduled packet input mode with direct or queued output mode and multiple
+ * output queues, packet order is not guaranteed. To maintain packet order,
+ * use a single worker thread or output interfaces with one output queue.
*
* @cond _ODP_HIDE_FROM_DOXYGEN_
*/
@@ -91,6 +95,9 @@ typedef struct {
/* Some extra features (e.g. error checks) have been enabled */
uint8_t extra_feat;
+ /* Has some state that needs to be maintained across tx and/or rx */
+ uint8_t has_state;
+
/* Prefetch packet data */
uint8_t prefetch;
@@ -133,6 +140,9 @@ typedef struct {
int rx_queues; /* RX queues per interface */
int pool_per_if; /* Create pool per interface */
uint32_t num_pkt; /* Number of packets per pool */
+ int flow_control; /* Flow control mode */
+ bool pause_rx; /* Reception of pause frames enabled */
+ bool pause_tx; /* Transmission of pause frames enabled */
bool vector_mode; /* Vector mode enabled */
uint32_t num_vec; /* Number of vectors per pool */
uint64_t vec_tmo_ns; /* Vector formation timeout in ns */
@@ -144,7 +154,17 @@ typedef struct {
int flow_aware; /* Flow aware scheduling enabled */
uint8_t input_ts; /* Packet input timestamping enabled */
int mtu; /* Interface MTU */
+ int num_om; /* Number of output port mappings */
int num_prio;
+
+ struct {
+ odp_packet_tx_compl_mode_t mode;
+ uint32_t nth;
+ uint32_t thr_compl_id;
+ uint32_t tot_compl_id;
+ } tx_compl;
+
+ char *output_map[MAX_PKTIOS]; /* Destination port mappings for interfaces */
odp_schedule_prio_t prio[MAX_PKTIOS]; /* Priority of input queues of an interface */
} appl_args_t;
@@ -158,6 +178,10 @@ typedef union ODP_ALIGNED_CACHE {
uint64_t rx_drops;
/* Packets dropped due to transmit error */
uint64_t tx_drops;
+ /* Number of transmit completion start misses (previous incomplete) */
+ uint64_t tx_c_misses;
+ /* Number of transmit completion start failures */
+ uint64_t tx_c_fails;
/* Number of failed packet copies */
uint64_t copy_fails;
/* Dummy sum of packet data */
@@ -167,9 +191,37 @@ typedef union ODP_ALIGNED_CACHE {
uint8_t padding[ODP_CACHE_LINE_SIZE];
} stats_t;
+/* Transmit completion specific state data */
+typedef struct {
+ /* Options that are passed to transmit completion requests */
+ odp_packet_tx_compl_opt_t opt;
+ /* Thread specific initial value for transmit completion IDs */
+ uint32_t init;
+ /* Thread specific maximum value for transmit completion IDs */
+ uint32_t max;
+ /* Next free completion ID to be used for a transmit completion request */
+ uint32_t free_head;
+ /* Next completion ID to be polled for transmit completion readiness */
+ uint32_t poll_head;
+ /* Number of active requests */
+ uint32_t num_act;
+ /* Maximum number of active requests */
+ uint32_t max_act;
+ /* Transmit completion request interval for packets */
+ int interval;
+ /* Next packet in a send burst for which to request transmit completion */
+ int next_req;
+} tx_compl_t;
+
+/* Thread specific state data */
+typedef struct {
+ tx_compl_t tx_compl;
+} state_t;
+
/* Thread specific data */
typedef struct thread_args_t {
stats_t stats;
+ state_t state;
struct {
odp_pktin_queue_t pktin;
@@ -217,6 +269,7 @@ typedef struct {
odp_pktout_queue_t pktout[MAX_QUEUES];
odp_queue_t rx_q[MAX_QUEUES];
odp_queue_t tx_q[MAX_QUEUES];
+ odp_queue_t compl_q;
int num_rx_thr;
int num_tx_thr;
int num_rx_queue;
@@ -251,6 +304,16 @@ static void sig_handler(int signo ODP_UNUSED)
odp_atomic_store_u32(&gbl_args->exit_threads, 1);
}
+static int setup_sig_handler(void)
+{
+ struct sigaction action = { .sa_handler = sig_handler };
+
+ if (sigemptyset(&action.sa_mask) || sigaction(SIGINT, &action, NULL))
+ return -1;
+
+ return 0;
+}
+
/*
* Drop packets which input parsing marked as containing errors.
*
@@ -471,16 +534,122 @@ static inline int process_extra_features(const appl_args_t *appl_args, odp_packe
return pkts;
}
+static inline void handle_tx_event_compl(tx_compl_t *tx_c, odp_packet_t pkts[], int num,
+ int tx_idx, stats_t *stats)
+{
+ odp_packet_t pkt;
+ int next_req = tx_c->next_req;
+ const int interval = tx_c->interval;
+
+ tx_c->opt.queue = gbl_args->pktios[tx_idx].compl_q;
+
+ while (next_req <= num) {
+ pkt = pkts[next_req - 1];
+
+ if (odp_packet_tx_compl_request(pkt, &tx_c->opt) < 0) {
+ stats->s.tx_c_fails++;
+ /* Missed one, try requesting for the first packet of next burst. */
+ next_req = num + 1;
+ break;
+ }
+
+ next_req += interval;
+ }
+
+ tx_c->next_req = next_req - num;
+}
+
+static inline void handle_tx_poll_compl(tx_compl_t *tx_c, odp_packet_t pkts[], int num, int tx_idx,
+ stats_t *stats)
+{
+ uint32_t num_act = tx_c->num_act, poll_head = tx_c->poll_head, free_head = tx_c->free_head;
+ const uint32_t max = tx_c->max, init = tx_c->init, max_act = tx_c->max_act;
+ odp_pktio_t pktio = gbl_args->pktios[tx_idx].pktio;
+ int next_req = tx_c->next_req;
+ odp_packet_t pkt;
+ const int interval = tx_c->interval;
+
+ while (num_act > 0) {
+ if (odp_packet_tx_compl_done(pktio, poll_head) < 1)
+ break;
+
+ --num_act;
+
+ if (++poll_head > max)
+ poll_head = init;
+ }
+
+ while (next_req <= num) {
+ pkt = pkts[next_req - 1];
+
+ if (num_act == max_act) {
+ stats->s.tx_c_misses++;
+ /* Missed one, try requesting for the first packet of next burst. */
+ next_req = num + 1;
+ break;
+ }
+
+ tx_c->opt.compl_id = free_head;
+
+ if (odp_packet_tx_compl_request(pkt, &tx_c->opt) < 0) {
+ stats->s.tx_c_fails++;
+ /* Missed one, try requesting for the first packet of next burst. */
+ next_req = num + 1;
+ break;
+ }
+
+ if (++free_head > max)
+ free_head = init;
+
+ ++num_act;
+ next_req += interval;
+ }
+
+ tx_c->free_head = free_head;
+ tx_c->poll_head = poll_head;
+ tx_c->num_act = num_act;
+ tx_c->next_req = next_req - num;
+}
+
+static inline void handle_tx_state(state_t *state, odp_packet_t pkts[], int num, int tx_idx,
+ stats_t *stats)
+{
+ tx_compl_t *tx_c = &state->tx_compl;
+
+ if (tx_c->opt.mode == ODP_PACKET_TX_COMPL_EVENT)
+ handle_tx_event_compl(tx_c, pkts, num, tx_idx, stats);
+ else if (tx_c->opt.mode == ODP_PACKET_TX_COMPL_POLL)
+ handle_tx_poll_compl(tx_c, pkts, num, tx_idx, stats);
+}
+
+static inline void handle_state_failure(state_t *state, odp_packet_t packet)
+{
+ if (odp_packet_has_tx_compl_request(packet) != 0) {
+ --state->tx_compl.num_act;
+ --state->tx_compl.free_head;
+
+ if (state->tx_compl.free_head == UINT32_MAX ||
+ state->tx_compl.free_head < state->tx_compl.init)
+ state->tx_compl.free_head = state->tx_compl.max;
+ }
+}
+
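Together these two helpers implement a small per-thread ring of poll-mode completion IDs: free_head hands out the next ID for a request, poll_head consumes finished ones in order, and both wrap back to init once they pass max. A stripped-down sketch of the same poll-mode flow for a single packet and a single ID, using only calls that appear in this patch (assumes the pktio was opened with config.tx_compl.mode_poll = 1 and a sufficient max_compl_id; error paths are simplified):

#include <odp_api.h>

#define COMPL_ID 0

static int send_and_wait(odp_pktio_t pktio, odp_pktout_queue_t pktout, odp_packet_t pkt)
{
	odp_packet_tx_compl_opt_t opt;
	int done;

	opt.mode     = ODP_PACKET_TX_COMPL_POLL;
	opt.compl_id = COMPL_ID;

	if (odp_packet_tx_compl_request(pkt, &opt) < 0)
		return -1;

	if (odp_pktout_send(pktout, &pkt, 1) != 1)
		return -1;

	/* Poll until the transmit of this packet has finished */
	do {
		done = odp_packet_tx_compl_done(pktio, COMPL_ID);
	} while (done == 0);

	return done < 0 ? -1 : 0;
}

If a packet is rejected at transmit, its completion request can never finish, which is why send_packets() below rolls the free_head and active count back through handle_state_failure().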
static inline void send_packets(odp_packet_t *pkt_tbl,
int pkts,
int use_event_queue,
+ int tx_idx,
odp_queue_t tx_queue,
odp_pktout_queue_t pktout_queue,
+ state_t *state,
stats_t *stats)
{
int sent;
unsigned int tx_drops;
int i;
+ odp_packet_t pkt;
+
+ if (odp_unlikely(state != NULL))
+ handle_tx_state(state, pkt_tbl, pkts, tx_idx, stats);
if (odp_unlikely(use_event_queue))
sent = event_queue_send(tx_queue, pkt_tbl, pkts);
@@ -494,13 +663,27 @@ static inline void send_packets(odp_packet_t *pkt_tbl,
stats->s.tx_drops += tx_drops;
/* Drop rejected packets */
- for (i = sent; i < pkts; i++)
- odp_packet_free(pkt_tbl[i]);
+ for (i = sent; i < pkts; i++) {
+ pkt = pkt_tbl[i];
+ handle_state_failure(state, pkt);
+ odp_packet_free(pkt);
+ }
}
stats->s.packets += pkts;
}
+static int handle_rx_state(state_t *state, odp_event_t evs[], int num)
+{
+ if (state->tx_compl.opt.mode != ODP_PACKET_TX_COMPL_EVENT ||
+ odp_event_type(evs[0]) != ODP_EVENT_PACKET_TX_COMPL)
+ return num;
+
+ odp_event_free_multi(evs, num);
+
+ return 0;
+}
+
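In event completion mode the notification arrives as a separate event on the interface's completion queue, so a scheduled worker can dequeue something other than a packet or a packet vector; handle_rx_state() above simply frees such events. The check it relies on, as an isolated sketch:

#include <odp_api.h>

/* Returns 1 if the event was a transmit completion and has been freed */
static int drop_tx_compl(odp_event_t ev)
{
	if (odp_event_type(ev) != ODP_EVENT_PACKET_TX_COMPL)
		return 0;

	odp_event_free(ev);
	return 1;
}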
/*
* Packet IO worker thread using scheduled queues and vector mode.
*
@@ -518,6 +701,7 @@ static int run_worker_sched_mode_vector(void *arg)
thread_args_t *thr_args = arg;
stats_t *stats = &thr_args->stats;
const appl_args_t *appl_args = &gbl_args->appl;
+ state_t *state = appl_args->has_state ? &thr_args->state : NULL;
int use_event_queue = gbl_args->appl.out_mode;
pktin_mode_t in_mode = gbl_args->appl.in_mode;
@@ -566,19 +750,23 @@ static int run_worker_sched_mode_vector(void *arg)
for (i = 0; i < events; i++) {
odp_packet_vector_t pkt_vec = ODP_PACKET_VECTOR_INVALID;
- odp_packet_t *pkt_tbl;
+ odp_packet_t *pkt_tbl = NULL;
odp_packet_t pkt;
int src_idx, dst_idx;
- int pkts;
+ int pkts = 0;
if (odp_event_type(ev_tbl[i]) == ODP_EVENT_PACKET) {
pkt = odp_packet_from_event(ev_tbl[i]);
pkt_tbl = &pkt;
pkts = 1;
- } else {
- ODPH_ASSERT(odp_event_type(ev_tbl[i]) == ODP_EVENT_PACKET_VECTOR);
+ } else if (odp_event_type(ev_tbl[i]) == ODP_EVENT_PACKET_VECTOR) {
pkt_vec = odp_packet_vector_from_event(ev_tbl[i]);
pkts = odp_packet_vector_tbl(pkt_vec, &pkt_tbl);
+ } else if (state != NULL) {
+ pkts = handle_rx_state(state, ev_tbl, events);
+
+ if (pkts <= 0)
+ continue;
}
prefetch_data(appl_args->prefetch, pkt_tbl, pkts);
@@ -597,11 +785,8 @@ static int run_worker_sched_mode_vector(void *arg)
dst_idx = gbl_args->dst_port_from_idx[src_idx];
fill_eth_addrs(pkt_tbl, pkts, dst_idx);
- send_packets(pkt_tbl, pkts,
- use_event_queue,
- tx_queue[dst_idx],
- pktout[dst_idx],
- stats);
+ send_packets(pkt_tbl, pkts, use_event_queue, dst_idx, tx_queue[dst_idx],
+ pktout[dst_idx], state, stats);
if (pkt_vec != ODP_PACKET_VECTOR_INVALID)
odp_packet_vector_free(pkt_vec);
@@ -668,6 +853,7 @@ static int run_worker_sched_mode(void *arg)
thread_args_t *thr_args = arg;
stats_t *stats = &thr_args->stats;
const appl_args_t *appl_args = &gbl_args->appl;
+ state_t *state = appl_args->has_state ? &thr_args->state : NULL;
int use_event_queue = gbl_args->appl.out_mode;
pktin_mode_t in_mode = gbl_args->appl.in_mode;
@@ -729,6 +915,13 @@ static int run_worker_sched_mode(void *arg)
if (pkts <= 0)
continue;
+ if (odp_unlikely(state != NULL)) {
+ pkts = handle_rx_state(state, ev_tbl, pkts);
+
+ if (pkts <= 0)
+ continue;
+ }
+
odp_packet_from_event_multi(pkt_tbl, ev_tbl, pkts);
prefetch_data(appl_args->prefetch, pkt_tbl, pkts);
@@ -744,11 +937,8 @@ static int run_worker_sched_mode(void *arg)
dst_idx = gbl_args->dst_port_from_idx[src_idx];
fill_eth_addrs(pkt_tbl, pkts, dst_idx);
- send_packets(pkt_tbl, pkts,
- use_event_queue,
- tx_queue[dst_idx],
- pktout[dst_idx],
- stats);
+ send_packets(pkt_tbl, pkts, use_event_queue, dst_idx, tx_queue[dst_idx],
+ pktout[dst_idx], state, stats);
}
/*
@@ -809,6 +999,7 @@ static int run_worker_plain_queue_mode(void *arg)
thread_args_t *thr_args = arg;
stats_t *stats = &thr_args->stats;
const appl_args_t *appl_args = &gbl_args->appl;
+ state_t *state = appl_args->has_state ? &thr_args->state : NULL;
int use_event_queue = gbl_args->appl.out_mode;
int i;
@@ -857,10 +1048,7 @@ static int run_worker_plain_queue_mode(void *arg)
fill_eth_addrs(pkt_tbl, pkts, dst_idx);
- send_packets(pkt_tbl, pkts,
- use_event_queue,
- tx_queue,
- pktout,
+ send_packets(pkt_tbl, pkts, use_event_queue, dst_idx, tx_queue, pktout, state,
stats);
}
@@ -910,6 +1098,7 @@ static int run_worker_direct_mode(void *arg)
thread_args_t *thr_args = arg;
stats_t *stats = &thr_args->stats;
const appl_args_t *appl_args = &gbl_args->appl;
+ state_t *state = appl_args->has_state ? &thr_args->state : NULL;
int use_event_queue = gbl_args->appl.out_mode;
thr = odp_thread_id();
@@ -953,10 +1142,7 @@ static int run_worker_direct_mode(void *arg)
fill_eth_addrs(pkt_tbl, pkts, dst_idx);
- send_packets(pkt_tbl, pkts,
- use_event_queue,
- tx_queue,
- pktout,
+ send_packets(pkt_tbl, pkts, use_event_queue, dst_idx, tx_queue, pktout, state,
stats);
}
@@ -1036,6 +1222,7 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx, odp_po
odp_pktio_config_t config;
odp_pktin_queue_param_t pktin_param;
odp_pktout_queue_param_t pktout_param;
+ odp_queue_param_t compl_queue;
odp_pktio_op_mode_t mode_rx;
odp_pktio_op_mode_t mode_tx;
pktin_mode_t in_mode = gbl_args->appl.in_mode;
@@ -1052,6 +1239,12 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx, odp_po
if (gbl_args->appl.out_mode != PKTOUT_DIRECT)
pktio_param.out_mode = ODP_PKTOUT_MODE_QUEUE;
+ if (num_rx == 0)
+ pktio_param.in_mode = ODP_PKTIN_MODE_DISABLED;
+
+ if (num_tx == 0)
+ pktio_param.out_mode = ODP_PKTOUT_MODE_DISABLED;
+
pktio = odp_pktio_open(dev, pool, &pktio_param);
if (pktio == ODP_PKTIO_INVALID) {
ODPH_ERR("Pktio open failed: %s\n", dev);
@@ -1063,9 +1256,6 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx, odp_po
return -1;
}
- if (gbl_args->appl.verbose)
- odp_pktio_print(pktio);
-
if (odp_pktio_capability(pktio, &pktio_capa)) {
ODPH_ERR("Pktio capability query failed: %s\n", dev);
return -1;
@@ -1091,9 +1281,48 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx, odp_po
config.pktout.bit.tcp_chksum_ena = 1;
}
+ if (gbl_args->appl.tx_compl.mode != ODP_PACKET_TX_COMPL_DISABLED) {
+ if (gbl_args->appl.tx_compl.mode == ODP_PACKET_TX_COMPL_EVENT &&
+ !(pktio_capa.tx_compl.mode_event && pktio_capa.tx_compl.queue_type_sched)) {
+ ODPH_ERR("Transmit event completion not supported: %s\n", dev);
+ return -1;
+ }
+
+ if (gbl_args->appl.tx_compl.mode == ODP_PACKET_TX_COMPL_POLL &&
+ !(pktio_capa.tx_compl.mode_poll &&
+ pktio_capa.tx_compl.max_compl_id >= gbl_args->appl.tx_compl.tot_compl_id)) {
+ ODPH_ERR("Transmit poll completion not supported: %s\n", dev);
+ return -1;
+ }
+
+ if (gbl_args->appl.tx_compl.mode == ODP_PACKET_TX_COMPL_EVENT)
+ config.tx_compl.mode_event = 1;
+
+ if (gbl_args->appl.tx_compl.mode == ODP_PACKET_TX_COMPL_POLL) {
+ config.tx_compl.mode_poll = 1;
+ config.tx_compl.max_compl_id = gbl_args->appl.tx_compl.tot_compl_id;
+ }
+ }
+
/* Provide hint to pktio that packet references are not used */
config.pktout.bit.no_packet_refs = 1;
+ if (gbl_args->appl.pause_rx) {
+ if (!pktio_capa.flow_control.pause_rx) {
+ ODPH_ERR("Reception of pause frames not supported: %s\n", dev);
+ return -1;
+ }
+ config.flow_control.pause_rx = ODP_PKTIO_LINK_PAUSE_ON;
+ }
+
+ if (gbl_args->appl.pause_tx) {
+ if (!pktio_capa.flow_control.pause_tx) {
+ ODPH_ERR("Transmission of pause frames not supported: %s\n", dev);
+ return -1;
+ }
+ config.flow_control.pause_tx = ODP_PKTIO_LINK_PAUSE_ON;
+ }
+
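Pause frame support follows the usual ODP pattern: query the capability, refuse if the feature is missing, otherwise set the corresponding config field before odp_pktio_config(). The same pattern in isolation, as a hedged sketch for the receive direction only (a real application would fold this into its existing config setup rather than reconfigure separately):

#include <odp_api.h>

static int enable_pause_rx(odp_pktio_t pktio)
{
	odp_pktio_capability_t capa;
	odp_pktio_config_t config;

	if (odp_pktio_capability(pktio, &capa) || !capa.flow_control.pause_rx)
		return -1;

	odp_pktio_config_init(&config);
	config.flow_control.pause_rx = ODP_PKTIO_LINK_PAUSE_ON;

	return odp_pktio_config(pktio, &config);
}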
odp_pktio_config(pktio, &config);
if (gbl_args->appl.promisc_mode && odp_pktio_promisc_mode(pktio) != 1) {
@@ -1169,6 +1398,20 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx, odp_po
pktin_param.queue_param.sched.prio = prio;
pktin_param.queue_param.sched.sync = sync_mode;
pktin_param.queue_param.sched.group = group;
+
+ if (gbl_args->appl.tx_compl.mode == ODP_PACKET_TX_COMPL_EVENT) {
+ odp_queue_param_init(&compl_queue);
+ compl_queue.type = ODP_QUEUE_TYPE_SCHED;
+ compl_queue.sched.prio = prio;
+ compl_queue.sched.sync = ODP_SCHED_SYNC_PARALLEL;
+ compl_queue.sched.group = group;
+ gbl_args->pktios[idx].compl_q = odp_queue_create(NULL, &compl_queue);
+
+ if (gbl_args->pktios[idx].compl_q == ODP_QUEUE_INVALID) {
+ ODPH_ERR("Creating completion queue failed: %s\n", dev);
+ return -1;
+ }
+ }
}
if (num_rx > (int)pktio_capa.max_input_queues) {
@@ -1205,37 +1448,45 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx, odp_po
return -1;
}
- if (odp_pktin_queue_config(pktio, &pktin_param)) {
+ if (num_rx > 0 && odp_pktin_queue_config(pktio, &pktin_param)) {
ODPH_ERR("Input queue config failed: %s\n", dev);
return -1;
}
- if (odp_pktout_queue_config(pktio, &pktout_param)) {
+ if (num_tx > 0 && odp_pktout_queue_config(pktio, &pktout_param)) {
ODPH_ERR("Output queue config failed: %s\n", dev);
return -1;
}
- if (gbl_args->appl.in_mode == DIRECT_RECV) {
- if (odp_pktin_queue(pktio, gbl_args->pktios[idx].pktin, num_rx) != num_rx) {
- ODPH_ERR("Pktin queue query failed: %s\n", dev);
- return -1;
- }
- } else {
- if (odp_pktin_event_queue(pktio, gbl_args->pktios[idx].rx_q, num_rx) != num_rx) {
- ODPH_ERR("Pktin event queue query failed: %s\n", dev);
- return -1;
+ if (num_rx > 0) {
+ if (gbl_args->appl.in_mode == DIRECT_RECV) {
+ if (odp_pktin_queue(pktio, gbl_args->pktios[idx].pktin, num_rx)
+ != num_rx) {
+ ODPH_ERR("Pktin queue query failed: %s\n", dev);
+ return -1;
+ }
+ } else {
+ if (odp_pktin_event_queue(pktio, gbl_args->pktios[idx].rx_q, num_rx)
+ != num_rx) {
+ ODPH_ERR("Pktin event queue query failed: %s\n", dev);
+ return -1;
+ }
}
}
- if (gbl_args->appl.out_mode == PKTOUT_DIRECT) {
- if (odp_pktout_queue(pktio, gbl_args->pktios[idx].pktout, num_tx) != num_tx) {
- ODPH_ERR("Pktout queue query failed: %s\n", dev);
- return -1;
- }
- } else {
- if (odp_pktout_event_queue(pktio, gbl_args->pktios[idx].tx_q, num_tx) != num_tx) {
- ODPH_ERR("Event queue query failed: %s\n", dev);
- return -1;
+ if (num_tx > 0) {
+ if (gbl_args->appl.out_mode == PKTOUT_DIRECT) {
+ if (odp_pktout_queue(pktio, gbl_args->pktios[idx].pktout, num_tx)
+ != num_tx) {
+ ODPH_ERR("Pktout queue query failed: %s\n", dev);
+ return -1;
+ }
+ } else {
+ if (odp_pktout_event_queue(pktio, gbl_args->pktios[idx].tx_q, num_tx)
+ != num_tx) {
+ ODPH_ERR("Event queue query failed: %s\n", dev);
+ return -1;
+ }
}
}
@@ -1250,6 +1501,9 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx, odp_po
"%02x:%02x:%02x:%02x:%02x:%02x\n", dev, info.drv_name, num_rx, num_tx,
addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+ if (gbl_args->appl.verbose)
+ odp_pktio_print(pktio);
+
gbl_args->pktios[idx].num_rx_queue = num_rx;
gbl_args->pktios[idx].num_tx_queue = num_tx;
gbl_args->pktios[idx].pktio = pktio;
@@ -1271,7 +1525,7 @@ static int print_speed_stats(int num_workers, stats_t **thr_stats,
uint64_t pkts = 0;
uint64_t pkts_prev = 0;
uint64_t pps;
- uint64_t rx_drops, tx_drops, copy_fails;
+ uint64_t rx_drops, tx_drops, tx_c_misses, tx_c_fails, copy_fails;
uint64_t maximum_pps = 0;
int i;
int elapsed = 0;
@@ -1289,6 +1543,8 @@ static int print_speed_stats(int num_workers, stats_t **thr_stats,
pkts = 0;
rx_drops = 0;
tx_drops = 0;
+ tx_c_misses = 0;
+ tx_c_fails = 0;
copy_fails = 0;
sleep(timeout);
@@ -1297,6 +1553,8 @@ static int print_speed_stats(int num_workers, stats_t **thr_stats,
pkts += thr_stats[i]->s.packets;
rx_drops += thr_stats[i]->s.rx_drops;
tx_drops += thr_stats[i]->s.tx_drops;
+ tx_c_misses += thr_stats[i]->s.tx_c_misses;
+ tx_c_fails += thr_stats[i]->s.tx_c_fails;
copy_fails += thr_stats[i]->s.copy_fails;
}
if (stats_enabled) {
@@ -1309,6 +1567,10 @@ static int print_speed_stats(int num_workers, stats_t **thr_stats,
if (gbl_args->appl.packet_copy)
printf("%" PRIu64 " copy fails, ", copy_fails);
+ if (gbl_args->appl.tx_compl.mode != ODP_PACKET_TX_COMPL_DISABLED)
+ printf("%" PRIu64 " tx compl misses, %" PRIu64 " tx compl fails, ",
+ tx_c_misses, tx_c_fails);
+
printf("%" PRIu64 " rx drops, %" PRIu64 " tx drops\n",
rx_drops, tx_drops);
@@ -1358,6 +1620,14 @@ static void print_port_mapping(void)
*/
static int find_dest_port(int port)
{
+ const char *output = gbl_args->appl.output_map[port];
+
+ /* Check output mappings first */
+ if (output != NULL)
+ for (int i = 0; i < gbl_args->appl.if_count; i++)
+ if (strcmp(output, gbl_args->appl.if_names[i]) == 0)
+ return i;
+
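The -O list is matched by position against the -i list: entry N names the destination interface for packets received on interface N. For example, '-i eth0,eth1 -O eth1,eth0' reproduces the default pairing explicitly, while '-i eth0,eth1 -O eth0,eth1' makes each interface loop its packets back, as the usage text later in this patch describes. When an entry does not match any interface name, the function falls through to the original even/odd pairing below.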
/* Even number of ports */
if (gbl_args->appl.if_count % 2 == 0)
return (port % 2 == 0) ? port + 1 : port - 1;
@@ -1522,6 +1792,21 @@ static void bind_queues(void)
printf("\n");
}
+static void init_state(const appl_args_t *args, state_t *state, int thr_idx)
+{
+ const uint32_t cnt = args->tx_compl.thr_compl_id + 1;
+
+ state->tx_compl.opt.mode = args->tx_compl.mode;
+ state->tx_compl.init = thr_idx * cnt;
+ state->tx_compl.max = state->tx_compl.init + cnt - 1;
+ state->tx_compl.free_head = state->tx_compl.init;
+ state->tx_compl.poll_head = state->tx_compl.init;
+ state->tx_compl.num_act = 0;
+ state->tx_compl.max_act = state->tx_compl.max - state->tx_compl.init + 1;
+ state->tx_compl.interval = args->tx_compl.nth;
+ state->tx_compl.next_req = state->tx_compl.interval;
+}
+
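Each worker thread gets a disjoint, contiguous block of completion IDs, so requesting and polling completions needs no synchronization between threads. As a worked example with hypothetical values: for '-C 1,n,3' every thread owns cnt = 4 IDs, so thread index 2 gets init = 8, max = 11 and max_act = 4, and tot_compl_id = 4 * cpu_count - 1 is what create_pktio() passes as max_compl_id when configuring the interface.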
static void init_port_lookup_tbl(void)
{
int rx_idx, if_count;
@@ -1560,88 +1845,119 @@ static void usage(char *progname)
" eth2 will send pkts to eth3 and vice versa\n"
"\n"
"Mandatory OPTIONS:\n"
- " -i, --interface <name> Eth interfaces (comma-separated, no spaces)\n"
- " Interface count min 1, max %i\n"
+ " -i, --interface <name> Eth interfaces (comma-separated, no spaces)\n"
+ " Interface count min 1, max %i\n"
"\n"
"Optional OPTIONS:\n"
- " -m, --mode <arg> Packet input mode\n"
- " 0: Direct mode: PKTIN_MODE_DIRECT (default)\n"
- " 1: Scheduler mode with parallel queues:\n"
- " PKTIN_MODE_SCHED + SCHED_SYNC_PARALLEL\n"
- " 2: Scheduler mode with atomic queues:\n"
- " PKTIN_MODE_SCHED + SCHED_SYNC_ATOMIC\n"
- " 3: Scheduler mode with ordered queues:\n"
- " PKTIN_MODE_SCHED + SCHED_SYNC_ORDERED\n"
- " 4: Plain queue mode: PKTIN_MODE_QUEUE\n"
- " -o, --out_mode <arg> Packet output mode\n"
- " 0: Direct mode: PKTOUT_MODE_DIRECT (default)\n"
- " 1: Queue mode: PKTOUT_MODE_QUEUE\n"
- " -c, --count <num> CPU count, 0=all available, default=1\n"
- " -t, --time <sec> Time in seconds to run.\n"
- " -a, --accuracy <sec> Time in seconds get print statistics\n"
- " (default is 1 second).\n"
- " -d, --dst_change <arg> 0: Don't change packets' dst eth addresses\n"
- " 1: Change packets' dst eth addresses (default)\n"
- " -s, --src_change <arg> 0: Don't change packets' src eth addresses\n"
- " 1: Change packets' src eth addresses (default)\n"
- " -r, --dst_addr <addr> Destination addresses (comma-separated, no spaces)\n"
- " Requires also the -d flag to be set\n"
- " -e, --error_check <arg> 0: Don't check packet errors (default)\n"
- " 1: Check packet errors\n"
- " -k, --chksum <arg> 0: Don't use checksum offload (default)\n"
- " 1: Use checksum offload\n",
+ " -m, --mode <arg> Packet input mode\n"
+ " 0: Direct mode: PKTIN_MODE_DIRECT (default)\n"
+ " 1: Scheduler mode with parallel queues:\n"
+ " PKTIN_MODE_SCHED + SCHED_SYNC_PARALLEL\n"
+ " 2: Scheduler mode with atomic queues:\n"
+ " PKTIN_MODE_SCHED + SCHED_SYNC_ATOMIC\n"
+ " 3: Scheduler mode with ordered queues:\n"
+ " PKTIN_MODE_SCHED + SCHED_SYNC_ORDERED\n"
+ " 4: Plain queue mode: PKTIN_MODE_QUEUE\n"
+ " -o, --out_mode <arg> Packet output mode\n"
+ " 0: Direct mode: PKTOUT_MODE_DIRECT (default)\n"
+ " 1: Queue mode: PKTOUT_MODE_QUEUE\n"
+ " -O, --output_map <list> List of destination ports for passed interfaces\n"
+ " (comma-separated, no spaces). Ordering follows\n"
+ " the '--interface' option, e.g. passing\n"
+ " '-i eth0,eth1' and '-O eth0,eth1' would result\n"
+ " in eth0 and eth1 looping packets back.\n"
+ " -c, --count <num> CPU count, 0=all available, default=1\n"
+ " -t, --time <sec> Time in seconds to run.\n"
+ " -a, --accuracy <sec> Time in seconds between statistics prints\n"
+ " (default is 1 second).\n"
+ " -d, --dst_change <arg> 0: Don't change packets' dst eth addresses\n"
+ " 1: Change packets' dst eth addresses (default)\n"
+ " -s, --src_change <arg> 0: Don't change packets' src eth addresses\n"
+ " 1: Change packets' src eth addresses (default)\n"
+ " -r, --dst_addr <addr> Destination addresses (comma-separated, no\n"
+ " spaces). Requires also the -d flag to be set\n"
+ " -e, --error_check <arg> 0: Don't check packet errors (default)\n"
+ " 1: Check packet errors\n"
+ " -k, --chksum <arg> 0: Don't use checksum offload (default)\n"
+ " 1: Use checksum offload\n",
NO_PATH(progname), NO_PATH(progname), MAX_PKTIOS);
- printf(" -g, --groups <num> Number of new groups to create (1 ... num). Interfaces\n"
- " are placed into the groups in round robin.\n"
- " 0: Use SCHED_GROUP_ALL (default)\n"
- " -1: Use SCHED_GROUP_WORKER\n"
- " -G, --group_mode <arg> Select how threads join new groups (when -g > 0)\n"
- " 0: All threads join all created groups (default)\n"
- " 1: All threads join first N created groups.\n"
- " N is number of interfaces (== active groups).\n"
- " 2: Each thread joins a part of the first N groups\n"
- " (in round robin).\n"
- " -I, --prio <prio list> Schedule priority of packet input queues.\n"
- " Comma separated list of priorities (no spaces). A value\n"
- " per interface. All queues of an interface have the same\n"
- " priority. Values must be between odp_schedule_min_prio\n"
- " and odp_schedule_max_prio. odp_schedule_default_prio is\n"
- " used by default.\n"
- " -b, --burst_rx <num> 0: Use max burst size (default)\n"
- " num: Max number of packets per receive call\n"
- " -q, --rx_queues <num> Number of RX queues per interface in scheduler mode\n"
- " 0: RX queue per worker CPU (default)\n"
- " -p, --packet_copy 0: Don't copy packet (default)\n"
- " 1: Create and send copy of the received packet.\n"
- " Free the original packet.\n"
- " -R, --data_rd <num> Number of packet data words (uint64_t) to read from\n"
- " every received packet. Number of words is rounded down\n"
- " to fit into the first segment of a packet. Default\n"
- " is 0.\n"
- " -y, --pool_per_if Create a packet (and packet vector) pool per interface.\n"
- " 0: Share a single pool between all interfaces (default)\n"
- " 1: Create a pool per interface\n"
- " -n, --num_pkt <num> Number of packets per pool. Default is 16k or\n"
- " the maximum capability. Use 0 for the default.\n"
- " -u, --vector_mode Enable vector mode.\n"
- " Supported only with scheduler packet input modes (1-3).\n"
- " -w, --num_vec <num> Number of vectors per pool.\n"
- " Default is num_pkts divided by vec_size.\n"
- " -x, --vec_size <num> Vector size (default %i).\n"
- " -z, --vec_tmo_ns <ns> Vector timeout in ns (default %llu ns).\n"
- " -M, --mtu <len> Interface MTU in bytes.\n"
- " -P, --promisc_mode Enable promiscuous mode.\n"
- " -l, --packet_len <len> Maximum length of packets supported (default %d).\n"
- " -L, --seg_len <len> Packet pool segment length\n"
- " (default equal to packet length).\n"
- " -F, --prefetch <num> Prefetch packet data in 64 byte multiples (default 1).\n"
- " -f, --flow_aware Enable flow aware scheduling.\n"
- " -T, --input_ts Enable packet input timestamping.\n"
- " -v, --verbose Verbose output.\n"
- " -V, --verbose_pkt Print debug information on every received packet.\n"
- " -h, --help Display help and exit.\n\n"
- "\n", DEFAULT_VEC_SIZE, DEFAULT_VEC_TMO, POOL_PKT_LEN);
+ printf(" -g, --groups <num> Number of new groups to create (1 ... num).\n"
+ " Interfaces are placed into the groups in round\n"
+ " robin.\n"
+ " 0: Use SCHED_GROUP_ALL (default)\n"
+ " -1: Use SCHED_GROUP_WORKER\n"
+ " -G, --group_mode <arg> Select how threads join new groups\n"
+ " (when -g > 0)\n"
+ " 0: All threads join all created groups\n"
+ " (default)\n"
+ " 1: All threads join first N created groups.\n"
+ " N is number of interfaces (== active\n"
+ " groups).\n"
+ " 2: Each thread joins a part of the first N\n"
+ " groups (in round robin).\n"
+ " -I, --prio <prio list> Schedule priority of packet input queues.\n"
+ " Comma separated list of priorities (no spaces).\n"
+ " A value per interface. All queues of an\n"
+ " interface have the same priority. Values must\n"
+ " be between odp_schedule_min_prio and\n"
+ " odp_schedule_max_prio.\n"
+ " odp_schedule_default_prio is used by default.\n"
+ " -b, --burst_rx <num> 0: Use max burst size (default)\n"
+ " num: Max number of packets per receive call\n"
+ " -q, --rx_queues <num> Number of RX queues per interface in scheduler\n"
+ " mode\n"
+ " 0: RX queue per worker CPU (default)\n"
+ " -p, --packet_copy 0: Don't copy packet (default)\n"
+ " 1: Create and send copy of the received packet.\n"
+ " Free the original packet.\n"
+ " -R, --data_rd <num> Number of packet data words (uint64_t) to read\n"
+ " from every received packet. Number of words is\n"
+ " rounded down to fit into the first segment of a\n"
+ " packet. Default is 0.\n"
+ " -y, --pool_per_if Create a packet (and packet vector) pool per\n"
+ " interface.\n"
+ " 0: Share a single pool between all interfaces\n"
+ " (default)\n"
+ " 1: Create a pool per interface\n"
+ " -n, --num_pkt <num> Number of packets per pool. Default is 16k or\n"
+ " the maximum capability. Use 0 for the default.\n"
+ " -u, --vector_mode Enable vector mode.\n"
+ " Supported only with scheduler packet input\n"
+ " modes (1-3).\n"
+ " -w, --num_vec <num> Number of vectors per pool.\n"
+ " Default is num_pkts divided by vec_size.\n"
+ " -x, --vec_size <num> Vector size (default %i).\n"
+ " -z, --vec_tmo_ns <ns> Vector timeout in ns (default %llu ns).\n"
+ " -M, --mtu <len> Interface MTU in bytes.\n"
+ " -P, --promisc_mode Enable promiscuous mode.\n"
+ " -l, --packet_len <len> Maximum length of packets supported\n"
+ " (default %d).\n"
+ " -L, --seg_len <len> Packet pool segment length\n"
+ " (default equal to packet length).\n"
+ " -F, --prefetch <num> Prefetch packet data in 64 byte multiples\n"
+ " (default 1).\n"
+ " -f, --flow_aware Enable flow aware scheduling.\n"
+ " -T, --input_ts Enable packet input timestamping.\n",
+ DEFAULT_VEC_SIZE, DEFAULT_VEC_TMO, POOL_PKT_LEN);
+
+ printf(" -C, --tx_compl <mode,n,max_id> Enable transmit completion requests for every\n"
+ " nth packet, using the given completion mode. For\n"
+ " poll completion, max_id sets the maximum completion\n"
+ " ID per worker thread (comma-separated, no spaces).\n"
+ " 0: Event completion mode\n"
+ " 1: Poll completion mode\n"
+ " -X, --flow_control <mode> Ethernet flow control mode.\n"
+ " 0: Flow control disabled (default)\n"
+ " 1: Enable reception of pause frames\n"
+ " 2: Enable transmission of pause frames\n"
+ " 3: Enable reception and transmission of pause\n"
+ " frames\n"
+ " -v, --verbose Verbose output.\n"
+ " -V, --verbose_pkt Print debug information on every received\n"
+ " packet.\n"
+ " -h, --help Display help and exit.\n\n"
+ "\n");
}
/*
@@ -1656,7 +1972,7 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
int opt;
int long_index;
char *token;
- char *tmp_str;
+ char *tmp_str, *tmp;
size_t str_len, len;
int i;
static const struct option longopts[] = {
@@ -1666,6 +1982,7 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
{"interface", required_argument, NULL, 'i'},
{"mode", required_argument, NULL, 'm'},
{"out_mode", required_argument, NULL, 'o'},
+ {"output_map", required_argument, NULL, 'O'},
{"dst_addr", required_argument, NULL, 'r'},
{"dst_change", required_argument, NULL, 'd'},
{"src_change", required_argument, NULL, 's'},
@@ -1691,14 +2008,16 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
{"prefetch", required_argument, NULL, 'F'},
{"flow_aware", no_argument, NULL, 'f'},
{"input_ts", no_argument, NULL, 'T'},
+ {"tx_compl", required_argument, NULL, 'C'},
+ {"flow_control", required_argument, NULL, 'X'},
{"verbose", no_argument, NULL, 'v'},
{"verbose_pkt", no_argument, NULL, 'V'},
{"help", no_argument, NULL, 'h'},
{NULL, 0, NULL, 0}
};
- static const char *shortopts = "+c:t:a:i:m:o:r:d:s:e:k:g:G:I:"
- "b:q:p:R:y:n:l:L:w:x:z:M:F:uPfTvVh";
+ static const char *shortopts = "+c:t:a:i:m:o:O:r:d:s:e:k:g:G:I:"
+ "b:q:p:R:y:n:l:L:w:x:X:z:M:F:uPfTC:vVh";
appl_args->time = 0; /* loop forever if time to run is 0 */
appl_args->accuracy = 1; /* get and print pps stats second */
@@ -1729,6 +2048,7 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
appl_args->num_prio = 0;
appl_args->prefetch = 1;
appl_args->data_rd = 0;
+ appl_args->flow_control = 0;
while (1) {
opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
@@ -1838,6 +2158,40 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
if (i != 0)
appl_args->out_mode = PKTOUT_QUEUE;
break;
+ case 'O':
+ if (strlen(optarg) == 0) {
+ ODPH_ERR("Bad output map string\n");
+ exit(EXIT_FAILURE);
+ }
+
+ tmp_str = strdup(optarg);
+
+ if (tmp_str == NULL) {
+ ODPH_ERR("Output map string duplication failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ token = strtok(tmp_str, ",");
+
+ while (token) {
+ if (appl_args->num_om >= MAX_PKTIOS) {
+ ODPH_ERR("Bad output map element count\n");
+ exit(EXIT_FAILURE);
+ }
+
+ appl_args->output_map[appl_args->num_om] = strdup(token);
+
+ if (appl_args->output_map[appl_args->num_om] == NULL) {
+ ODPH_ERR("Output map element duplication failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ appl_args->num_om++;
+ token = strtok(NULL, ",");
+ }
+
+ free(tmp_str);
+ break;
case 'd':
appl_args->dst_change = atoi(optarg);
break;
@@ -1930,6 +2284,13 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
case 'x':
appl_args->vec_size = atoi(optarg);
break;
+ case 'X':
+ appl_args->flow_control = atoi(optarg);
+ if (appl_args->flow_control == 1 || appl_args->flow_control == 3)
+ appl_args->pause_rx = true;
+ if (appl_args->flow_control == 2 || appl_args->flow_control == 3)
+ appl_args->pause_tx = true;
+ break;
case 'z':
appl_args->vec_tmo_ns = atoi(optarg);
break;
@@ -1942,6 +2303,56 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
case 'T':
appl_args->input_ts = 1;
break;
+ case 'C':
+ if (strlen(optarg) == 0) {
+ ODPH_ERR("Bad transmit completion parameter string\n");
+ exit(EXIT_FAILURE);
+ }
+
+ tmp_str = strdup(optarg);
+
+ if (tmp_str == NULL) {
+ ODPH_ERR("Transmit completion parameter string duplication"
+ " failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ tmp = strtok(tmp_str, ",");
+
+ if (tmp == NULL) {
+ ODPH_ERR("Invalid transmit completion parameter format\n");
+ exit(EXIT_FAILURE);
+ }
+
+ i = atoi(tmp);
+
+ if (i == 0)
+ appl_args->tx_compl.mode = ODP_PACKET_TX_COMPL_EVENT;
+ else if (i == 1)
+ appl_args->tx_compl.mode = ODP_PACKET_TX_COMPL_POLL;
+
+ tmp = strtok(NULL, ",");
+
+ if (tmp == NULL) {
+ ODPH_ERR("Invalid transmit completion parameter format\n");
+ exit(EXIT_FAILURE);
+ }
+
+ appl_args->tx_compl.nth = atoi(tmp);
+
+ if (appl_args->tx_compl.mode == ODP_PACKET_TX_COMPL_POLL) {
+ tmp = strtok(NULL, ",");
+
+ if (tmp == NULL) {
+ ODPH_ERR("Invalid transmit completion parameter format\n");
+ exit(EXIT_FAILURE);
+ }
+
+ appl_args->tx_compl.thr_compl_id = atoi(tmp);
+ }
+
+ free(tmp_str);
+ break;
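In short, the accepted forms are '-C 0,n' for event completion on every nth transmitted packet and '-C 1,n,max_id' for poll completion, where max_id is the highest completion ID a single worker thread may use; a mode value other than 0 or 1 leaves transmit completion disabled, since only those two values switch the mode.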
case 'v':
appl_args->verbose = 1;
break;
@@ -1962,6 +2373,11 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
exit(EXIT_FAILURE);
}
+ if (appl_args->num_om && appl_args->num_om != appl_args->if_count) {
+ ODPH_ERR("Different number of output mappings and pktio interfaces\n");
+ exit(EXIT_FAILURE);
+ }
+
if (appl_args->num_prio && appl_args->num_prio != appl_args->if_count) {
ODPH_ERR("Different number of priorities and pktio interfaces\n");
exit(EXIT_FAILURE);
@@ -1978,6 +2394,23 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
exit(EXIT_FAILURE);
}
+ if (appl_args->tx_compl.mode != ODP_PACKET_TX_COMPL_DISABLED &&
+ appl_args->tx_compl.nth == 0) {
+ ODPH_ERR("Invalid packet interval for transmit completion: %u\n",
+ appl_args->tx_compl.nth);
+ exit(EXIT_FAILURE);
+ }
+
+ if (appl_args->tx_compl.mode == ODP_PACKET_TX_COMPL_EVENT &&
+ (appl_args->in_mode == PLAIN_QUEUE || appl_args->in_mode == DIRECT_RECV)) {
+ ODPH_ERR("Transmit event completion mode not supported with plain queue or direct "
+ "input modes\n");
+ exit(EXIT_FAILURE);
+ }
+
+ appl_args->tx_compl.tot_compl_id = (appl_args->tx_compl.thr_compl_id + 1) *
+ appl_args->cpu_count - 1;
+
if (appl_args->burst_rx == 0)
appl_args->burst_rx = MAX_PKT_BURST;
@@ -1986,6 +2419,10 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
appl_args->packet_copy || appl_args->data_rd || appl_args->verbose_pkt)
appl_args->extra_feat = 1;
+ appl_args->has_state = 0;
+ if (appl_args->tx_compl.mode != ODP_PACKET_TX_COMPL_DISABLED)
+ appl_args->has_state = 1;
+
optind = 1; /* reset 'extern optind' from the getopt lib */
}
@@ -2020,6 +2457,15 @@ static void print_options(void)
else
printf("PKTOUT_DIRECT\n");
+ if (appl_args->num_om > 0) {
+ printf("Output mappings: ");
+
+ for (i = 0; i < appl_args->num_om; ++i)
+ printf(" %s", appl_args->output_map[i]);
+
+ printf("\n");
+ }
+
printf("MTU: ");
if (appl_args->mtu)
printf("%i bytes\n", appl_args->mtu);
@@ -2027,6 +2473,10 @@ static void print_options(void)
printf("interface default\n");
printf("Promisc mode: %s\n", appl_args->promisc_mode ?
"enabled" : "disabled");
+ if (appl_args->flow_control)
+ printf("Flow control: %s%s\n",
+ appl_args->pause_rx ? "rx " : "",
+ appl_args->pause_tx ? "tx" : "");
printf("Flow aware: %s\n", appl_args->flow_aware ?
"yes" : "no");
printf("Input TS: %s\n", appl_args->input_ts ? "yes" : "no");
@@ -2035,12 +2485,13 @@ static void print_options(void)
printf("Number of pools: %i\n", appl_args->pool_per_if ?
appl_args->if_count : 1);
- if (appl_args->extra_feat) {
- printf("Extra features: %s%s%s%s%s\n",
+ if (appl_args->extra_feat || appl_args->has_state) {
+ printf("Extra features: %s%s%s%s%s%s\n",
appl_args->error_check ? "error_check " : "",
appl_args->chksum ? "chksum " : "",
appl_args->packet_copy ? "packet_copy " : "",
appl_args->data_rd ? "data_rd" : "",
+ appl_args->tx_compl.mode != ODP_PACKET_TX_COMPL_DISABLED ? "tx_compl " : "",
appl_args->verbose_pkt ? "verbose_pkt" : "");
}
@@ -2082,7 +2533,11 @@ static void gbl_args_init(args_t *args)
for (queue = 0; queue < MAX_QUEUES; queue++)
args->pktios[pktio].rx_q[queue] = ODP_QUEUE_INVALID;
+
+ args->pktios[pktio].compl_q = ODP_QUEUE_INVALID;
}
+
+ args->appl.tx_compl.mode = ODP_PACKET_TX_COMPL_DISABLED;
}
static void create_groups(int num, odp_schedule_group_t *group)
@@ -2199,9 +2654,10 @@ int main(int argc, char *argv[])
init.mem_model = helper_options.mem_model;
- /* Signal handler has to be registered before global init in case ODP
- * implementation creates internal threads/processes. */
- signal(SIGINT, sig_handler);
+ if (setup_sig_handler()) {
+ ODPH_ERR("Signal handler setup failed\n");
+ exit(EXIT_FAILURE);
+ }
/* Init ODP before calling anything else */
if (odp_init_global(&instance, &init, NULL)) {
@@ -2518,6 +2974,7 @@ int main(int argc, char *argv[])
int num_join;
int mode = gbl_args->appl.group_mode;
+ init_state(&gbl_args->appl, &gbl_args->thread_args[i].state, i);
odph_thread_param_init(&thr_param[i]);
thr_param[i].start = thr_run_func;
thr_param[i].arg = &gbl_args->thread_args[i];
@@ -2584,11 +3041,20 @@ int main(int argc, char *argv[])
if (gbl_args->appl.in_mode != DIRECT_RECV)
odp_barrier_wait(&gbl_args->term_barrier);
+ odph_thread_join_result_t res[num_workers];
+
/* Master thread waits for other threads to exit */
- num_thr = odph_thread_join(gbl_args->thread_tbl, num_workers);
- if (num_thr != num_workers) {
- ODPH_ERR("Worker join failed: %i\n", num_thr);
- exit(EXIT_FAILURE);
+ if (odph_thread_join_result(gbl_args->thread_tbl, res, num_workers) != num_workers) {
+ ODPH_ERR("Worker join failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ for (i = 0; i < num_workers; i++) {
+ if (res[i].is_sig || res[i].ret != 0) {
+ ODPH_ERR("Worker thread failure%s: %d\n", res[i].is_sig ?
+ " (signaled)" : "", res[i].ret);
+ exit(EXIT_FAILURE);
+ }
}
for (i = 0; i < if_count; ++i) {
@@ -2599,6 +3065,9 @@ int main(int argc, char *argv[])
odp_pktio_extra_stats_print(pktio);
}
+ if (gbl_args->pktios[i].compl_q != ODP_QUEUE_INVALID)
+ (void)odp_queue_destroy(gbl_args->pktios[i].compl_q);
+
if (odp_pktio_close(pktio)) {
ODPH_ERR("Pktio close failed: %s\n", gbl_args->appl.if_names[i]);
exit(EXIT_FAILURE);
@@ -2607,6 +3076,10 @@ int main(int argc, char *argv[])
free(gbl_args->appl.if_names);
free(gbl_args->appl.if_str);
+
+ for (i = 0; i < gbl_args->appl.num_om; i++)
+ free(gbl_args->appl.output_map[i]);
+
gbl_args = NULL;
odp_mb_full();
diff --git a/test/performance/odp_l2fwd_run.sh b/test/performance/odp_l2fwd_run.sh
index cd750ca35..626b6da72 100755
--- a/test/performance/odp_l2fwd_run.sh
+++ b/test/performance/odp_l2fwd_run.sh
@@ -1,9 +1,7 @@
#!/bin/bash
#
-# Copyright (c) 2015-2018, Linaro Limited
-# All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2015-2018 Linaro Limited
#
# TEST_DIR is set by Makefile, when we add a rule to Makefile for odp_l2fwd_run
@@ -25,7 +23,7 @@ TEST_DIR="${TEST_DIR:-$PWD}"
# directory where test sources are, including scripts
TEST_SRC_DIR=$(dirname $0)
-PATH=$TEST_DIR:$TEST_DIR/../../example/generator:$PATH
+PATH=$TEST_DIR:$PATH
# exit codes expected by automake for skipped tests
TEST_SKIPPED=77
@@ -33,8 +31,6 @@ TEST_SKIPPED=77
VALIDATION_TESTDIR=platform/$ODP_PLATFORM/test/validation
PLATFORM_VALIDATION=${TEST_SRC_DIR}/../../$VALIDATION_TESTDIR
-FLOOD_MODE=0
-
# Use installed pktio env or for make check take it from platform directory
if [ -f "./pktio_env" ]; then
. ./pktio_env
@@ -60,40 +56,33 @@ run_l2fwd()
exit $TEST_SKIPPED
fi
- type odp_generator > /dev/null
+ type odp_packet_gen > /dev/null
if [ $? -ne 0 ]; then
- echo "odp_generator not installed. Aborting."
+ echo "odp_packet_gen not installed. Aborting."
cleanup_pktio_env
exit 1
fi
- export ODP_PLATFORM_PARAMS="-m 256 --file-prefix="gen" \
+ export ODP_PLATFORM_PARAMS="-m 512 --file-prefix="gen" \
--proc-type auto --no-pci \
--vdev net_pcap0,iface=$IF0"
- # Run generator with one worker
- (odp_generator${EXEEXT} --interval $FLOOD_MODE -I 0 \
- --srcip 192.168.0.1 --dstip 192.168.0.2 \
- -m u -w 1 2>&1 > /dev/null) \
+ # Run odp_packet_gen with one tx thread
+ (odp_packet_gen${EXEEXT} --gap 0 -i 0 \
+ --ipv4_src 192.168.0.1 --ipv4_dst 192.168.0.2 \
+ -r 0 -t 1 2>&1 > /dev/null) \
2>&1 > /dev/null &
GEN_PID=$!
- # this just turns off output buffering so that you still get periodic
- # output while piping to tee, as long as stdbuf is available.
- if [ "$(which stdbuf)" != "" ]; then
- STDBUF="stdbuf -o 0"
- else
- STDBUF=
- fi
LOG=odp_l2fwd_tmp.log
- export ODP_PLATFORM_PARAMS="-m 256 --file-prefix="l2fwd" \
+ export ODP_PLATFORM_PARAMS="-m 512 --file-prefix="l2fwd" \
--proc-type auto --no-pci --vdev net_pcap1,iface=$IF1 \
--vdev net_pcap2,iface=$IF2"
# Max 2 workers
- $STDBUF odp_l2fwd${EXEEXT} -i 0,1 -m 0 -t 5 -c 2 | tee $LOG
+ odp_l2fwd${EXEEXT} -i 0,1 -m 0 -t 5 -c 2 | tee $LOG
ret=${PIPESTATUS[0]}
kill -2 ${GEN_PID}
diff --git a/test/performance/odp_lock_perf.c b/test/performance/odp_lock_perf.c
index 0f78db3b8..43dea0728 100644
--- a/test/performance/odp_lock_perf.c
+++ b/test/performance/odp_lock_perf.c
@@ -1,8 +1,5 @@
-/* Copyright (c) 2021, Nokia
- *
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2021 Nokia
*/
/**
diff --git a/test/performance/odp_mem_perf.c b/test/performance/odp_mem_perf.c
index 241128b1f..5a7642a10 100644
--- a/test/performance/odp_mem_perf.c
+++ b/test/performance/odp_mem_perf.c
@@ -1,8 +1,5 @@
-/* Copyright (c) 2021, Nokia
- *
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2021 Nokia
*/
/**
diff --git a/test/performance/odp_packet_gen.c b/test/performance/odp_packet_gen.c
index c88535791..7954a08bb 100644
--- a/test/performance/odp_packet_gen.c
+++ b/test/performance/odp_packet_gen.c
@@ -1,7 +1,5 @@
-/* Copyright (c) 2020-2023, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2020-2024 Nokia
*/
/**
@@ -38,8 +36,8 @@
#define MAX_WORKERS (MAX_THREADS - 1)
-/* At least one control and two worker threads */
-ODP_STATIC_ASSERT(MAX_WORKERS >= 2, "Too few threads");
+/* At least one control and one worker thread */
+ODP_STATIC_ASSERT(MAX_WORKERS >= 1, "Too few threads");
/* Maximum number of packet IO interfaces */
#define MAX_PKTIOS 16
@@ -57,10 +55,12 @@ ODP_STATIC_ASSERT(MAX_WORKERS >= 2, "Too few threads");
/* Max retries to generate random data */
#define MAX_RAND_RETRIES 1000
-/* Used don't free */
+/* Use don't free */
#define TX_MODE_DF 0
/* Use static references */
#define TX_MODE_REF 1
+/* Use packet copy */
+#define TX_MODE_COPY 2
/* Minimum number of packets to receive in CI test */
#define MIN_RX_PACKETS_CI 800
@@ -68,6 +68,11 @@ ODP_STATIC_ASSERT(MAX_WORKERS >= 2, "Too few threads");
/* Identifier for payload-timestamped packets */
#define TS_MAGIC 0xff88ee99ddaaccbb
+enum {
+ L4_PROTO_UDP = 0,
+ L4_PROTO_TCP
+};
+
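With --proto the generator can build either UDP or TCP test packets; the enum above is only a selector. A hedged sketch of how such a selector typically maps to the L4 header length when sizing a packet, assuming the ODPH_UDPHDR_LEN and ODPH_TCPHDR_LEN helper macros (the function itself is illustrative, not the application's):

#include <odp/helper/odph_api.h>

static uint32_t l4_hdr_len(uint8_t l4_proto)
{
	/* 8 bytes for UDP, 20 bytes for a minimal TCP header */
	return l4_proto == L4_PROTO_TCP ? ODPH_TCPHDR_LEN : ODPH_UDPHDR_LEN;
}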
ODP_STATIC_ASSERT(MAX_PKTIOS <= UINT8_MAX, "Interface index must fit into uint8_t\n");
typedef struct test_options_t {
@@ -91,11 +96,12 @@ typedef struct test_options_t {
uint32_t num_vlan;
uint32_t ipv4_src;
uint32_t ipv4_dst;
- uint16_t udp_src;
- uint16_t udp_dst;
+ uint16_t src_port;
+ uint16_t dst_port;
uint32_t wait_sec;
uint32_t wait_start_sec;
uint32_t mtu;
+ uint8_t l4_proto;
int tx_mode;
odp_bool_t promisc_mode;
odp_bool_t calc_latency;
@@ -108,8 +114,8 @@ typedef struct test_options_t {
} vlan[MAX_VLANS];
struct {
- uint32_t udp_src;
- uint32_t udp_dst;
+ uint32_t src_port;
+ uint32_t dst_port;
} c_mode;
char pktio_name[MAX_PKTIOS][MAX_PKTIO_NAME + 1];
@@ -252,15 +258,18 @@ static void print_usage(void)
" num_tx * burst_size * bursts * (10^9 / gap)\n"
" -s, --ipv4_src IPv4 source address. Default: 192.168.0.1\n"
" -d, --ipv4_dst IPv4 destination address. Default: 192.168.0.2\n"
- " -o, --udp_src UDP source port. Default: 10000\n"
- " -p, --udp_dst UDP destination port. Default: 20000\n"
+ " -o, --src_port UDP/TCP source port. Default: 10000\n"
+ " -p, --dst_port UDP/TCP destination port. Default: 20000\n"
+ " -N, --proto L4 protocol. Default: 0\n"
+ " 0: UDP\n"
+ " 1: TCP\n"
" -P, --promisc_mode Enable promiscuous mode.\n"
" -a, --latency Calculate latency. Cannot be used with packet\n"
" references (see \"--tx_mode\").\n"
- " -c, --c_mode <counts> Counter mode for incrementing UDP port numbers.\n"
+ " -c, --c_mode <counts> Counter mode for incrementing UDP/TCP port numbers.\n"
" Specify the number of port numbers used starting from\n"
- " udp_src/udp_dst. Comma-separated (no spaces) list of\n"
- " count values: <udp_src count>,<udp_dst count>\n"
+ " src_port/dst_port. Comma-separated (no spaces) list of\n"
+ " count values: <src_port count>,<dst_port count>\n"
" Default value: 0,0\n"
" -C, --no_udp_checksum Do not calculate UDP checksum. Instead, set it to\n"
" zero in every packet.\n"
@@ -354,7 +363,7 @@ static int init_bins(test_global_t *global)
static int parse_options(int argc, char *argv[], test_global_t *global)
{
- int opt, i, len, str_len, long_index, udp_port;
+ int opt, i, len, str_len, long_index, port;
unsigned long int count;
uint32_t min_packets, num_tx_pkt, num_tx_alloc, pkt_len, val, bins;
char *name, *str, *end;
@@ -368,6 +377,7 @@ static int parse_options(int argc, char *argv[], test_global_t *global)
{"num_rx", required_argument, NULL, 'r'},
{"num_tx", required_argument, NULL, 't'},
{"num_pkt", required_argument, NULL, 'n'},
+ {"proto", required_argument, NULL, 'N'},
{"len", required_argument, NULL, 'l'},
{"len_range", required_argument, NULL, 'L'},
{"direct_rx", required_argument, NULL, 'D'},
@@ -378,8 +388,8 @@ static int parse_options(int argc, char *argv[], test_global_t *global)
{"vlan", required_argument, NULL, 'v'},
{"ipv4_src", required_argument, NULL, 's'},
{"ipv4_dst", required_argument, NULL, 'd'},
- {"udp_src", required_argument, NULL, 'o'},
- {"udp_dst", required_argument, NULL, 'p'},
+ {"src_port", required_argument, NULL, 'o'},
+ {"dst_port", required_argument, NULL, 'p'},
{"promisc_mode", no_argument, NULL, 'P'},
{"latency", no_argument, NULL, 'a'},
{"c_mode", required_argument, NULL, 'c'},
@@ -394,7 +404,7 @@ static int parse_options(int argc, char *argv[], test_global_t *global)
{NULL, 0, NULL, 0}
};
- static const char *shortopts = "+i:e:r:t:n:l:L:D:m:M:b:x:g:v:s:d:o:p:c:CAq:u:w:W:Pah";
+ static const char *shortopts = "+i:e:r:t:n:N:l:L:D:m:M:b:x:g:v:s:d:o:p:c:CAq:u:w:W:Pah";
test_options->num_pktio = 0;
test_options->num_rx = 1;
@@ -412,10 +422,10 @@ static int parse_options(int argc, char *argv[], test_global_t *global)
test_options->calc_latency = 0;
test_options->calc_cs = 1;
test_options->fill_pl = 1;
- strncpy(test_options->ipv4_src_s, "192.168.0.1",
- sizeof(test_options->ipv4_src_s) - 1);
- strncpy(test_options->ipv4_dst_s, "192.168.0.2",
- sizeof(test_options->ipv4_dst_s) - 1);
+ odph_strcpy(test_options->ipv4_src_s, "192.168.0.1",
+ sizeof(test_options->ipv4_src_s));
+ odph_strcpy(test_options->ipv4_dst_s, "192.168.0.2",
+ sizeof(test_options->ipv4_dst_s));
if (odph_ipv4_addr_parse(&test_options->ipv4_src, test_options->ipv4_src_s)) {
ODPH_ERR("Address parse failed\n");
return -1;
@@ -424,15 +434,16 @@ static int parse_options(int argc, char *argv[], test_global_t *global)
ODPH_ERR("Address parse failed\n");
return -1;
}
- test_options->udp_src = 10000;
- test_options->udp_dst = 20000;
- test_options->c_mode.udp_src = 0;
- test_options->c_mode.udp_dst = 0;
+ test_options->src_port = 10000;
+ test_options->dst_port = 20000;
+ test_options->c_mode.src_port = 0;
+ test_options->c_mode.dst_port = 0;
test_options->quit = 0;
test_options->update_msec = 0;
test_options->wait_sec = 0;
test_options->wait_start_sec = 0;
test_options->mtu = 0;
+ test_options->l4_proto = L4_PROTO_UDP;
for (i = 0; i < MAX_PKTIOS; i++) {
memcpy(global->pktio[i].eth_dst.addr, default_eth_dst, 6);
@@ -504,22 +515,22 @@ static int parse_options(int argc, char *argv[], test_global_t *global)
}
break;
case 'o':
- udp_port = atoi(optarg);
- if (udp_port < 0 || udp_port > UINT16_MAX) {
- ODPH_ERR("Error: Bad UDP source port: %d\n", udp_port);
+ port = atoi(optarg);
+ if (port < 0 || port > UINT16_MAX) {
+ ODPH_ERR("Error: Bad source port: %d\n", port);
ret = -1;
break;
}
- test_options->udp_src = udp_port;
+ test_options->src_port = port;
break;
case 'p':
- udp_port = atoi(optarg);
- if (udp_port < 0 || udp_port > UINT16_MAX) {
- ODPH_ERR("Error: Bad UDP destination port: %d\n", udp_port);
+ port = atoi(optarg);
+ if (port < 0 || port > UINT16_MAX) {
+ ODPH_ERR("Error: Bad destination port: %d\n", port);
ret = -1;
break;
}
- test_options->udp_dst = udp_port;
+ test_options->dst_port = port;
break;
case 'P':
test_options->promisc_mode = 1;
@@ -536,6 +547,9 @@ static int parse_options(int argc, char *argv[], test_global_t *global)
case 'n':
test_options->num_pkt = atoi(optarg);
break;
+ case 'N':
+ test_options->l4_proto = atoi(optarg);
+ break;
case 'l':
test_options->pkt_len = atoi(optarg);
break;
@@ -581,8 +595,8 @@ static int parse_options(int argc, char *argv[], test_global_t *global)
ODPH_ERR("Error: Bad IPv4 source address: %s\n", optarg);
ret = -1;
}
- strncpy(test_options->ipv4_src_s, optarg,
- sizeof(test_options->ipv4_src_s) - 1);
+ odph_strcpy(test_options->ipv4_src_s, optarg,
+ sizeof(test_options->ipv4_src_s));
break;
case 'd':
if (odph_ipv4_addr_parse(&test_options->ipv4_dst,
@@ -590,16 +604,16 @@ static int parse_options(int argc, char *argv[], test_global_t *global)
ODPH_ERR("Error: Bad IPv4 destination address: %s\n", optarg);
ret = -1;
}
- strncpy(test_options->ipv4_dst_s, optarg,
- sizeof(test_options->ipv4_dst_s) - 1);
+ odph_strcpy(test_options->ipv4_dst_s, optarg,
+ sizeof(test_options->ipv4_dst_s));
break;
case 'c':
count = strtoul(optarg, &end, 0);
- test_options->c_mode.udp_src = count;
+ test_options->c_mode.src_port = count;
end++;
count = strtoul(end, NULL, 0);
- test_options->c_mode.udp_dst = count;
+ test_options->c_mode.dst_port = count;
break;
case 'C':
test_options->calc_cs = 0;
@@ -637,8 +651,8 @@ static int parse_options(int argc, char *argv[], test_global_t *global)
return -1;
}
- if (test_options->num_rx < 1 || test_options->num_tx < 1) {
- ODPH_ERR("Error: At least one rx and tx thread needed.\n");
+ if (test_options->num_rx < 1 && test_options->num_tx < 1) {
+ ODPH_ERR("Error: At least one rx or tx thread needed.\n");
return -1;
}
@@ -684,6 +698,10 @@ static int parse_options(int argc, char *argv[], test_global_t *global)
ODPH_ERR("Error: Latency test is not supported with packet references (--tx_mode 1)\n");
return -1;
}
+ if (test_options->calc_latency && (test_options->num_rx < 1 || test_options->num_tx < 1)) {
+ ODPH_ERR("Error: Latency test requires both rx and tx threads\n");
+ return -1;
+ }
if (test_options->gap_nsec) {
double gap_hz = 1000000000.0 / test_options->gap_nsec;
@@ -702,17 +720,25 @@ static int parse_options(int argc, char *argv[], test_global_t *global)
ODPH_ERR("\nWARNING: Not enough packets for every packet length bin.\n\n");
}
- if (test_options->c_mode.udp_dst &&
- num_tx_pkt % test_options->c_mode.udp_dst)
- ODPH_ERR("\nWARNING: Transmit packet count is not evenly divisible by UDP destination port count.\n\n");
+ if (test_options->c_mode.dst_port && num_tx_pkt % test_options->c_mode.dst_port)
+ ODPH_ERR("\nWARNING: Transmit packet count is not evenly divisible by destination port count.\n\n");
+
+ if (test_options->c_mode.src_port && num_tx_pkt % test_options->c_mode.src_port)
+ ODPH_ERR("\nWARNING: Transmit packet count is not evenly divisible by source port count.\n\n");
- if (test_options->c_mode.udp_src &&
- num_tx_pkt % test_options->c_mode.udp_src)
- ODPH_ERR("\nWARNING: Transmit packet count is not evenly divisible by UDP source port count.\n\n");
+ if (test_options->l4_proto != L4_PROTO_TCP && test_options->l4_proto != L4_PROTO_UDP) {
+ ODPH_ERR("Error: Invalid L4 protocol: %" PRIu8 "\n", test_options->l4_proto);
+ return -1;
+ }
+ if (test_options->l4_proto == L4_PROTO_TCP && test_options->tx_mode != TX_MODE_COPY) {
+ ODPH_ERR("Error: TCP protocol supported only with copy transmit mode\n");
+ return -1;
+ }
- test_options->hdr_len = ODPH_ETHHDR_LEN +
- (test_options->num_vlan * ODPH_VLANHDR_LEN) +
- ODPH_IPV4HDR_LEN + ODPH_UDPHDR_LEN;
+ test_options->hdr_len = ODPH_ETHHDR_LEN + (test_options->num_vlan * ODPH_VLANHDR_LEN) +
+ ODPH_IPV4HDR_LEN;
+ test_options->hdr_len += test_options->l4_proto == L4_PROTO_UDP ?
+ ODPH_UDPHDR_LEN : ODPH_TCPHDR_LEN;
pkt_len = test_options->use_rand_pkt_len ?
test_options->rand_pkt_len_min : test_options->pkt_len;
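Note: assuming the standard header sizes (Ethernet 14 B, VLAN 4 B, IPv4 20 B, UDP 8 B, TCP 20 B), the header length computed above is 42 bytes for UDP and 54 bytes for TCP when no VLAN tags are used. A quick arithmetic check:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed standard header sizes; the ODPH_*HDR_LEN macros should match */
	const uint32_t eth = 14, vlan = 4, ipv4 = 20, udp = 8, tcp = 20;
	const uint32_t num_vlan = 0;

	printf("UDP hdr_len %" PRIu32 "\n", eth + num_vlan * vlan + ipv4 + udp);	/* 42 */
	printf("TCP hdr_len %" PRIu32 "\n", eth + num_vlan * vlan + ipv4 + tcp);	/* 54 */
	return 0;
}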
@@ -784,8 +810,6 @@ static int open_pktios(test_global_t *global)
uint32_t num_pkt = test_options->num_pkt;
uint32_t pkt_len = test_options->use_rand_pkt_len ?
test_options->rand_pkt_len_max : test_options->pkt_len;
- odp_pktout_queue_t pktout[num_tx];
- odp_pktin_queue_t pktin[num_rx];
printf("\nODP packet generator\n");
printf(" quit test after %" PRIu64 " rounds\n",
@@ -822,10 +846,12 @@ static int open_pktios(test_global_t *global)
}
printf(" IPv4 source %s\n", test_options->ipv4_src_s);
printf(" IPv4 destination %s\n", test_options->ipv4_dst_s);
- printf(" UDP source %u\n", test_options->udp_src);
- printf(" UDP destination %u\n", test_options->udp_dst);
- printf(" UDP src count %u\n", test_options->c_mode.udp_src);
- printf(" UDP dst count %u\n", test_options->c_mode.udp_dst);
+ printf(" L4 protocol: %s\n",
+ test_options->l4_proto == L4_PROTO_UDP ? "UDP" : "TCP");
+ printf(" source port %u\n", test_options->src_port);
+ printf(" destination port %u\n", test_options->dst_port);
+ printf(" src port count %u\n", test_options->c_mode.src_port);
+ printf(" dst port count %u\n", test_options->c_mode.dst_port);
printf(" num pktio %u\n", num_pktio);
printf(" interfaces names: ");
@@ -891,12 +917,11 @@ static int open_pktios(test_global_t *global)
odp_pktio_param_init(&pktio_param);
- if (test_options->direct_rx)
- pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT;
- else
- pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
+ pktio_param.in_mode = num_rx ? (test_options->direct_rx ?
+ ODP_PKTIN_MODE_DIRECT : ODP_PKTIN_MODE_SCHED) :
+ ODP_PKTIN_MODE_DISABLED;
- pktio_param.out_mode = ODP_PKTOUT_MODE_DIRECT;
+ pktio_param.out_mode = num_tx ? ODP_PKTOUT_MODE_DIRECT : ODP_PKTOUT_MODE_DISABLED;
for (i = 0; i < num_pktio; i++)
global->pktio[i].pktio = ODP_PKTIO_INVALID;
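Note: the nested conditional selecting in_mode above is equivalent to the spelled-out selection below (a sketch only; select_in_mode() is not part of the patch):

#include <odp_api.h>

/* Pktin mode: disabled when there are no rx threads, otherwise direct or scheduled */
static odp_pktin_mode_t select_in_mode(uint32_t num_rx, int direct_rx)
{
	if (num_rx == 0)
		return ODP_PKTIN_MODE_DISABLED;

	return direct_rx ? ODP_PKTIN_MODE_DIRECT : ODP_PKTIN_MODE_SCHED;
}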
@@ -1034,15 +1059,21 @@ static int open_pktios(test_global_t *global)
return -1;
}
- if (odp_pktout_queue(pktio, pktout, num_tx) != num_tx) {
- ODPH_ERR("Error (%s): Pktout queue request failed.\n", name);
- return -1;
+ if (num_tx > 0) {
+ odp_pktout_queue_t pktout[MAX_THREADS];
+
+ if (odp_pktout_queue(pktio, pktout, num_tx) != num_tx) {
+ ODPH_ERR("Error (%s): Pktout queue request failed.\n", name);
+ return -1;
+ }
+
+ for (j = 0; j < num_tx; j++)
+ global->pktio[i].pktout[j] = pktout[j];
}
- for (j = 0; j < num_tx; j++)
- global->pktio[i].pktout[j] = pktout[j];
+ if (num_rx > 0 && test_options->direct_rx) {
+ odp_pktin_queue_t pktin[MAX_THREADS];
- if (test_options->direct_rx) {
if (odp_pktin_queue(pktio, pktin, num_rx) != num_rx) {
ODPH_ERR("Error (%s): Pktin queue request failed.\n", name);
return -1;
@@ -1082,6 +1113,7 @@ static int print_link_info(odp_pktio_t pktio)
return 0;
}
+
static int start_pktios(test_global_t *global)
{
uint32_t i;
@@ -1386,15 +1418,16 @@ static int init_packets(test_global_t *global, int pktio,
uint8_t *u8;
odph_ethhdr_t *eth;
odph_ipv4hdr_t *ip;
- odph_udphdr_t *udp;
uint16_t tpid;
test_options_t *test_options = &global->test_options;
+ const odp_bool_t use_tcp = test_options->l4_proto == L4_PROTO_TCP;
uint32_t num_vlan = test_options->num_vlan;
uint32_t hdr_len = test_options->hdr_len;
- uint16_t udp_src = test_options->udp_src;
- uint16_t udp_dst = test_options->udp_dst;
- uint32_t udp_src_cnt = 0;
- uint32_t udp_dst_cnt = 0;
+ uint16_t src_port = test_options->src_port;
+ uint16_t dst_port = test_options->dst_port;
+ uint32_t src_cnt = 0;
+ uint32_t dst_cnt = 0;
+ uint32_t tcp_seqnum = 0x1234;
odph_vlanhdr_t *vlan = NULL; /* Fixes bogus compiler warning */
if (num_vlan > MAX_VLANS)
@@ -1446,56 +1479,75 @@ static int init_packets(test_global_t *global, int pktio,
ip->tot_len = odp_cpu_to_be_16(pkt_len - l2_len);
ip->id = odp_cpu_to_be_16(seq + i);
ip->ttl = 64;
- ip->proto = ODPH_IPPROTO_UDP;
+ ip->proto = use_tcp ? ODPH_IPPROTO_TCP : ODPH_IPPROTO_UDP;
ip->src_addr = odp_cpu_to_be_32(test_options->ipv4_src);
ip->dst_addr = odp_cpu_to_be_32(test_options->ipv4_dst);
ip->chksum = ~odp_chksum_ones_comp16(ip, ODPH_IPV4HDR_LEN);
- /* UDP */
- udp = (odph_udphdr_t *)((uint8_t *)data + l2_len +
- ODPH_IPV4HDR_LEN);
- memset(udp, 0, ODPH_UDPHDR_LEN);
- udp->src_port = odp_cpu_to_be_16(udp_src);
- udp->dst_port = odp_cpu_to_be_16(udp_dst);
- udp->length = odp_cpu_to_be_16(payload_len + ODPH_UDPHDR_LEN);
- udp->chksum = 0;
+ u8 = ((uint8_t *)data + l2_len + ODPH_IPV4HDR_LEN);
+
+ if (use_tcp) {
+ odph_tcphdr_t *tcp = (odph_tcphdr_t *)u8;
+
+ memset(tcp, 0, ODPH_TCPHDR_LEN);
+ tcp->src_port = odp_cpu_to_be_16(src_port);
+ tcp->dst_port = odp_cpu_to_be_16(dst_port);
+ tcp->seq_no = odp_cpu_to_be_32(tcp_seqnum);
+ tcp->ack_no = odp_cpu_to_be_32(0x12345678);
+ tcp->window = odp_cpu_to_be_16(0x4000);
+ tcp->hl = 5;
+ tcp->ack = 1;
+ tcp_seqnum += payload_len;
+ } else {
+ odph_udphdr_t *udp = (odph_udphdr_t *)u8;
+
+ memset(udp, 0, ODPH_UDPHDR_LEN);
+ udp->src_port = odp_cpu_to_be_16(src_port);
+ udp->dst_port = odp_cpu_to_be_16(dst_port);
+ udp->length = odp_cpu_to_be_16(payload_len + ODPH_UDPHDR_LEN);
+ udp->chksum = 0;
+ }
u8 = data;
u8 += hdr_len;
if (test_options->fill_pl) {
- /* Init UDP payload until the end of the first segment */
+ /* Init payload until the end of the first segment */
for (j = 0; j < seg_len - hdr_len; j++)
u8[j] = j;
}
- /* Insert UDP checksum */
+ /* Insert checksum */
odp_packet_l3_offset_set(pkt, l2_len);
odp_packet_l4_offset_set(pkt, l2_len + ODPH_IPV4HDR_LEN);
odp_packet_has_eth_set(pkt, 1);
odp_packet_has_ipv4_set(pkt, 1);
- odp_packet_has_udp_set(pkt, 1);
-
- udp->chksum = !test_options->calc_latency && test_options->calc_cs ?
- odph_ipv4_udp_chksum(pkt) : 0;
+ if (use_tcp) {
+ odp_packet_has_tcp_set(pkt, 1);
+ /* TCP checksum is always updated before TX */
+ } else {
+ odp_packet_has_udp_set(pkt, 1);
+ if (!test_options->calc_latency && test_options->calc_cs)
+ odph_udp_chksum_set(pkt);
+ }
/* Increment port numbers */
- if (test_options->c_mode.udp_src) {
- udp_src_cnt++;
- if (udp_src_cnt < test_options->c_mode.udp_src) {
- udp_src++;
+ if (test_options->c_mode.src_port) {
+ src_cnt++;
+ if (src_cnt < test_options->c_mode.src_port) {
+ src_port++;
} else {
- udp_src = test_options->udp_src;
- udp_src_cnt = 0;
+ src_port = test_options->src_port;
+ src_cnt = 0;
}
}
- if (test_options->c_mode.udp_dst) {
- udp_dst_cnt++;
- if (udp_dst_cnt < test_options->c_mode.udp_dst) {
- udp_dst++;
+ if (test_options->c_mode.dst_port) {
+ dst_cnt++;
+ if (dst_cnt < test_options->c_mode.dst_port) {
+ dst_port++;
} else {
- udp_dst = test_options->udp_dst;
- udp_dst_cnt = 0;
+ dst_port = test_options->dst_port;
+ dst_cnt = 0;
}
}
}
@@ -1503,6 +1555,20 @@ static int init_packets(test_global_t *global, int pktio,
return 0;
}
+static inline void update_tcp_hdr(odp_packet_t pkt, odp_packet_t base_pkt, uint32_t hdr_len)
+{
+ odph_tcphdr_t *tcp = odp_packet_l4_ptr(pkt, NULL);
+ odph_tcphdr_t *tcp_base = odp_packet_l4_ptr(base_pkt, NULL);
+ uint32_t prev_seqnum = odp_be_to_cpu_32(tcp_base->seq_no);
+
+ tcp->seq_no = odp_cpu_to_be_32(prev_seqnum + (odp_packet_len(pkt) - hdr_len));
+
+ /* Last used sequence number is stored in the base packet */
+ tcp_base->seq_no = tcp->seq_no;
+
+ odph_tcp_chksum_set(pkt);
+}
+
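Note: update_tcp_hdr() above keeps the TCP sequence number continuous across transmitted copies by storing the last value in the base packet; each copy advances the sequence by its payload length. A small sketch of the progression (the 1000-byte payload is hypothetical):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t seq = 0x1234;		/* initial value from init_packets() */
	const uint32_t payload = 1000;	/* hypothetical payload length */

	for (int i = 0; i < 3; i++) {
		seq += payload;
		printf("copy %d seq 0x%" PRIx32 "\n", i, seq);	/* 0x161c, 0x1a04, 0x1dec */
	}
	return 0;
}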
static inline int update_rand_data(uint8_t *data, uint32_t data_len)
{
uint32_t generated = 0;
@@ -1528,13 +1594,11 @@ static inline int update_rand_data(uint8_t *data, uint32_t data_len)
return 0;
}
-static inline void set_timestamp(odp_packet_t pkt, uint32_t ts_off, odp_bool_t calc_cs)
+static inline void set_timestamp(odp_packet_t pkt, uint32_t ts_off)
{
const ts_data_t ts_data = { .magic = TS_MAGIC, .tx_ts = odp_time_global_ns() };
- odph_udphdr_t *udp = odp_packet_l4_ptr(pkt, NULL);
(void)odp_packet_copy_from_mem(pkt, ts_off, sizeof(ts_data), &ts_data);
- udp->chksum = calc_cs ? odph_ipv4_udp_chksum(pkt) : 0;
}
static int alloc_packets(odp_pool_t pool, odp_packet_t *pkt_tbl, uint32_t num,
@@ -1570,8 +1634,8 @@ static int alloc_packets(odp_pool_t pool, odp_packet_t *pkt_tbl, uint32_t num,
static inline uint32_t form_burst(odp_packet_t out_pkt[], uint32_t burst_size, uint32_t num_bins,
uint32_t burst, odp_packet_t *pkt_tbl, odp_pool_t pool,
- int tx_mode, uint32_t ts_off, odp_bool_t calc_cs,
- uint64_t *total_bytes)
+ int tx_mode, odp_bool_t calc_latency, uint32_t hdr_len,
+ odp_bool_t calc_udp_cs, uint64_t *total_bytes, uint8_t l4_proto)
{
uint32_t i, idx;
odp_packet_t pkt;
@@ -1615,8 +1679,13 @@ static inline uint32_t form_burst(odp_packet_t out_pkt[], uint32_t burst_size, u
if (odp_unlikely(out_pkt[i] == ODP_PACKET_INVALID))
break;
- if (ts_off)
- set_timestamp(out_pkt[i], ts_off, calc_cs);
+ if (calc_latency)
+ set_timestamp(out_pkt[i], hdr_len);
+
+ if (l4_proto == L4_PROTO_TCP)
+ update_tcp_hdr(out_pkt[i], pkt, hdr_len);
+ else if (calc_latency && calc_udp_cs)
+ odph_udp_chksum_set(out_pkt[i]);
}
bytes += odp_packet_len(out_pkt[i]);
@@ -1675,16 +1744,19 @@ static int tx_thread(void *arg)
uint64_t tx_packets = 0;
uint64_t tx_drops = 0;
int ret = 0;
+ const uint32_t hdr_len = test_options->hdr_len;
const uint32_t burst_size = test_options->burst_size;
const uint32_t bursts = test_options->bursts;
const uint32_t num_tx = test_options->num_tx;
+ const uint8_t l4_proto = test_options->l4_proto;
const int tx_mode = test_options->tx_mode;
- odp_bool_t calc_cs = test_options->calc_cs;
+ const odp_bool_t calc_cs = test_options->calc_cs;
+ const odp_bool_t calc_latency = test_options->calc_latency;
int num_pktio = test_options->num_pktio;
odp_pktout_queue_t pktout[num_pktio];
- uint32_t ts_off = test_options->calc_latency ? test_options->hdr_len : 0;
uint32_t tot_packets = 0;
uint32_t num_bins = global->num_bins;
+
thr = odp_thread_id();
tx_thr = thread_arg->tx_thr;
global->stat[thr].thread_type = TX_THREAD;
@@ -1754,7 +1826,8 @@ static int tx_thread(void *arg)
for (j = 0; j < bursts; j++) {
num = form_burst(pkt, burst_size, num_bins, j, pkt_tbl, pool,
- tx_mode, ts_off, calc_cs, &total_bytes);
+ tx_mode, calc_latency, hdr_len, calc_cs,
+ &total_bytes, l4_proto);
if (odp_unlikely(num == 0)) {
ret = -1;
@@ -1777,7 +1850,6 @@ static int tx_thread(void *arg)
if (odp_unlikely(periodic_stat))
global->stat[thr].pktio[i].tx_packets += sent;
-
}
}
}
@@ -1884,16 +1956,19 @@ static void print_periodic_stat(test_global_t *global, uint64_t nsec)
num_tx[i] += global->stat[j].pktio[i].tx_packets;
}
}
+ if (global->test_options.num_tx) {
+ printf(" TX: %12.6fs", sec);
+ for (i = 0; i < num_pktio; i++)
+ printf(" %10" PRIu64 "", num_tx[i]);
+ printf("\n");
+ }
- printf(" TX: %12.6fs", sec);
- for (i = 0; i < num_pktio; i++)
- printf(" %10" PRIu64 "", num_tx[i]);
-
- printf("\n RX: %12.6fs", sec);
- for (i = 0; i < num_pktio; i++)
- printf(" %10" PRIu64 "", num_rx[i]);
-
- printf("\n");
+ if (global->test_options.num_rx) {
+ printf(" RX: %12.6fs", sec);
+ for (i = 0; i < num_pktio; i++)
+ printf(" %10" PRIu64 "", num_rx[i]);
+ printf("\n");
+ }
}
static void periodic_print_loop(test_global_t *global)
@@ -1948,7 +2023,7 @@ static void print_humanised_latency(double lat_nsec, double lat_min_nsec, double
static int print_final_stat(test_global_t *global)
{
int i, num_thr;
- double rx_pkt_ave, rx_mbit_per_sec, tx_mbit_per_sec;
+ double rx_mbit_per_sec, tx_mbit_per_sec;
test_options_t *test_options = &global->test_options;
int num_rx = test_options->num_rx;
int num_tx = test_options->num_tx;
@@ -1965,6 +2040,7 @@ static int print_final_stat(test_global_t *global)
uint64_t tx_byte_sum = 0;
uint64_t tx_drop_sum = 0;
uint64_t tx_tmo_sum = 0;
+ double rx_pkt_ave = 0.0;
double rx_pkt_per_sec = 0.0;
double rx_byte_per_sec = 0.0;
double rx_pkt_len = 0.0;
@@ -2036,7 +2112,8 @@ static int print_final_stat(test_global_t *global)
}
}
- rx_pkt_ave = (double)rx_pkt_sum / num_rx;
+ if (num_rx)
+ rx_pkt_ave = (double)rx_pkt_sum / num_rx;
rx_sec = rx_nsec_sum / 1000000000.0;
tx_sec = tx_nsec_sum / 1000000000.0;
diff --git a/test/performance/odp_packet_gen_run.sh b/test/performance/odp_packet_gen_run.sh
index af272f619..437513d47 100755
--- a/test/performance/odp_packet_gen_run.sh
+++ b/test/performance/odp_packet_gen_run.sh
@@ -1,9 +1,7 @@
#!/bin/sh
#
-# Copyright (c) 2020, Nokia
-# All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2020 Nokia
#
# directory where test binaries have been built
diff --git a/test/performance/odp_pktio_ordered.c b/test/performance/odp_pktio_ordered.c
index 6177a8160..18845a5df 100644
--- a/test/performance/odp_pktio_ordered.c
+++ b/test/performance/odp_pktio_ordered.c
@@ -1,7 +1,5 @@
-/* Copyright (c) 2016-2018, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016-2018 Linaro Limited
*/
/**
@@ -190,13 +188,13 @@ typedef union ODP_ALIGNED_CACHE {
* IPv4 5-tuple
*/
typedef struct {
- int32_t src_ip;
- int32_t dst_ip;
- int16_t src_port;
- int16_t dst_port;
- int8_t proto;
- int8_t pad0;
- int16_t pad1;
+ uint32_t src_ip;
+ uint32_t dst_ip;
+ uint16_t src_port;
+ uint16_t dst_port;
+ uint8_t proto;
+ uint8_t pad0;
+ uint16_t pad1;
} ipv4_tuple5_t;
/**
@@ -335,7 +333,7 @@ static inline uint64_t calc_ipv4_5tuple_hash(ipv4_tuple5_t *tuple)
mix(a, b, c);
- a += (tuple->src_port << 16) + tuple->dst_port + JHASH_GOLDEN_RATIO;
+ a += ((uint32_t)tuple->src_port << 16) + tuple->dst_port + JHASH_GOLDEN_RATIO;
final(a, b, c);
return c;
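Note: the cast added above matters because a 16-bit port is promoted to (signed) int before the shift, and shifting a value with bit 15 set into the sign bit is undefined behaviour. The unsigned cast keeps the arithmetic well defined, as this small check illustrates:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t src_port = 0x8000, dst_port = 1;

	/* Without the cast the operand is promoted to int and the shift
	 * overflows; with it the arithmetic stays unsigned. */
	uint32_t a = ((uint32_t)src_port << 16) + dst_port;

	printf("0x%" PRIx32 "\n", a);	/* 0x80000001 */
	return 0;
}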
diff --git a/test/performance/odp_pktio_ordered_run.sh b/test/performance/odp_pktio_ordered_run.sh
index b4584753f..4c573731b 100755
--- a/test/performance/odp_pktio_ordered_run.sh
+++ b/test/performance/odp_pktio_ordered_run.sh
@@ -1,10 +1,9 @@
#!/bin/bash
#
-# Copyright (c) 2016-2018, Linaro Limited
-# All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2016-2018 Linaro Limited
#
+
TEST_SRC_DIR=$(dirname $0)
TEST_DIR="${TEST_DIR:-$(dirname $0)}"
@@ -20,19 +19,11 @@ if [ ! -f ${PCAP_IN} ]; then
exit 1
fi
-# This just turns off output buffering so that you still get periodic
-# output while piping to tee, as long as stdbuf is available.
-if [ "$(which stdbuf)" != "" ]; then
- STDBUF="stdbuf -o 0"
-else
- STDBUF=
-fi
-
export ODP_PLATFORM_PARAMS="--no-pci \
--vdev net_pcap0,rx_pcap=${PCAP_IN},tx_pcap=${PCAP_OUT} \
--vdev net_pcap1,rx_pcap=${PCAP_IN},tx_pcap=${PCAP_OUT}"
-$STDBUF ${TEST_DIR}/odp_pktio_ordered${EXEEXT} \
+${TEST_DIR}/odp_pktio_ordered${EXEEXT} \
-i 0,1 \
-t $DURATION | tee $LOG
ret=${PIPESTATUS[0]}
diff --git a/test/performance/odp_pktio_perf.c b/test/performance/odp_pktio_perf.c
index 4cfeb50cf..8ca9d076e 100644
--- a/test/performance/odp_pktio_perf.c
+++ b/test/performance/odp_pktio_perf.c
@@ -1,7 +1,5 @@
-/* Copyright (c) 2015-2018, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Linaro Limited
*/
/**
diff --git a/test/performance/odp_pool_latency.c b/test/performance/odp_pool_latency.c
index 6b964e773..0afe2f317 100644
--- a/test/performance/odp_pool_latency.c
+++ b/test/performance/odp_pool_latency.c
@@ -16,6 +16,7 @@
#endif
#include <inttypes.h>
+#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
@@ -107,6 +108,7 @@ typedef struct {
uint64_t reallocs;
uint64_t alloc_errs;
uint64_t pattern_errs;
+ uint64_t act_num_rounds;
uint8_t max_alloc_pt;
uint8_t min_alloc_pt;
uint8_t max_uarea_pt;
@@ -150,13 +152,14 @@ typedef struct prog_config_s {
alloc_fn_t alloc_fn;
free_fn_t free_fn;
int64_t cache_size;
+ uint64_t num_rounds;
+ uint64_t num_ignore;
+ odp_atomic_u32_t is_running;
uint32_t num_data_elems;
uint32_t seg_len;
uint32_t handle_size;
uint32_t num_evs;
uint32_t data_size;
- uint32_t num_rounds;
- uint32_t num_ignore;
uint32_t num_workers;
uint32_t uarea_size;
uint8_t num_elems;
@@ -166,6 +169,11 @@ typedef struct prog_config_s {
static prog_config_t *prog_conf;
+static void terminate(int signal ODP_UNUSED)
+{
+ odp_atomic_store_u32(&prog_conf->is_running, 0U);
+}
+
static void init_config(prog_config_t *config)
{
alloc_elem_t *alloc_elem;
@@ -298,7 +306,8 @@ static void print_usage(const dynamic_defs_t *dyn_defs)
" Policies:\n"
" 0: One pool shared by workers\n"
" 1: One pool per worker\n"
- " -r, --round_count Number of rounds to run. %u by default.\n"
+ " -r, --round_count Number of rounds to run. Use 0 to run indefinitely. %u by\n"
+ " default.\n"
" -i, --ignore_rounds Ignore an amount of initial rounds. %u by default.\n"
" -c, --worker_count Number of workers. %u by default.\n"
" -C, --cache_size Maximum cache size for pools. Defaults:\n"
@@ -547,14 +556,9 @@ static parse_result_t check_options(prog_config_t *config)
return PRS_NOK;
}
- if (config->num_rounds == 0U) {
- ODPH_ERR("Invalid round count: %u (min: 1)\n", config->num_rounds);
- return PRS_NOK;
- }
-
- if (config->num_ignore >= config->num_rounds) {
- ODPH_ERR("Invalid round ignorance count: %u (max: %u)\n", config->num_ignore,
- config->num_rounds - 1U);
+ if (config->num_rounds > 0U && config->num_ignore >= config->num_rounds) {
+ ODPH_ERR("Invalid round ignore count: %" PRIu64 " (max: %" PRIu64 ")\n",
+ config->num_ignore, config->num_rounds - 1U);
return PRS_NOK;
}
@@ -607,10 +611,10 @@ static parse_result_t parse_options(int argc, char **argv, prog_config_t *config
config->policy = atoi(optarg);
break;
case 'r':
- config->num_rounds = atoi(optarg);
+ config->num_rounds = atoll(optarg);
break;
case 'i':
- config->num_ignore = atoi(optarg);
+ config->num_ignore = atoll(optarg);
break;
case 'c':
config->num_workers = atoi(optarg);
@@ -634,6 +638,21 @@ static parse_result_t parse_options(int argc, char **argv, prog_config_t *config
return check_options(config);
}
+static parse_result_t setup_program(int argc, char **argv, prog_config_t *config)
+{
+ struct sigaction action = { .sa_handler = terminate };
+
+ if (sigemptyset(&action.sa_mask) == -1 || sigaddset(&action.sa_mask, SIGINT) == -1 ||
+ sigaddset(&action.sa_mask, SIGTERM) == -1 ||
+ sigaddset(&action.sa_mask, SIGHUP) == -1 || sigaction(SIGINT, &action, NULL) == -1 ||
+ sigaction(SIGTERM, &action, NULL) == -1 || sigaction(SIGHUP, &action, NULL) == -1) {
+ ODPH_ERR("Error installing signal handler\n");
+ return PRS_NOK;
+ }
+
+ return parse_options(argc, argv, config);
+}
+
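Note: setup_program() above only installs handlers that store to the atomic is_running flag; the test loop polls the flag every round. A minimal standalone sketch of the same pattern in plain C (without the ODP atomics used in the patch):

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t is_running = 1;

static void terminate(int sig)
{
	(void)sig;
	is_running = 0;	/* only an async-signal-safe store in the handler */
}

int main(void)
{
	struct sigaction action = { .sa_handler = terminate };

	sigemptyset(&action.sa_mask);
	sigaction(SIGINT, &action, NULL);

	while (is_running)	/* the worker loop polls the flag */
		pause();

	printf("terminated\n");
	return 0;
}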
static inline void save_alloc_stats(odp_time_t t1, odp_time_t t2, uint32_t num_alloc,
uint64_t round, uint8_t pattern, stats_t *stats)
{
@@ -1040,8 +1059,10 @@ static int run_test(void *args)
{
worker_config_t *config = args;
odp_time_t t1, t2;
- uint32_t head_idx, cur_idx, num_ignore = config->prog_config->num_ignore, val, num_alloc,
- idx;
+ uint64_t i, num_ignore = config->prog_config->num_ignore;
+ const uint64_t num_rnds = config->prog_config->num_rounds;
+ odp_atomic_u32_t *is_running = &config->prog_config->is_running;
+ uint32_t head_idx, cur_idx, val, num_alloc, idx;
odp_bool_t is_saved;
const uint8_t num_elems = config->prog_config->num_elems;
const alloc_elem_t *elems = config->prog_config->alloc_elems, *elem;
@@ -1054,7 +1075,7 @@ static int run_test(void *args)
odp_barrier_wait(&config->prog_config->init_barrier);
t1 = odp_time_local_strict();
- for (uint32_t i = 0U; i < config->prog_config->num_rounds; ++i) {
+ for (i = 0U; (i < num_rnds || num_rnds == 0U) && odp_atomic_load_u32(is_running); ++i) {
head_idx = 0U;
cur_idx = head_idx;
is_saved = (num_ignore > 0U ? num_ignore-- : num_ignore) == 0U;
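Note: with the loop condition above a round count of zero means "run until a signal clears is_running", and the saved act_num_rounds reports how many rounds actually completed. A small sketch of that condition (the hard-coded stop at round 5 stands in for a signal):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t num_rnds = 0;	/* 0: run indefinitely */
	uint32_t is_running = 1;
	uint64_t i;

	for (i = 0; (i < num_rnds || num_rnds == 0) && is_running; i++) {
		if (i == 5)
			is_running = 0;	/* stand-in for SIGINT/SIGTERM */
	}

	printf("completed %" PRIu64 " rounds\n", i);	/* 6 */
	return 0;
}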
@@ -1093,6 +1114,7 @@ static int run_test(void *args)
t2 = odp_time_local_strict();
stats->tot_tm = odp_time_diff_ns(t2, t1);
+ stats->act_num_rounds = i;
odp_barrier_wait(&config->prog_config->term_barrier);
return 0;
@@ -1151,20 +1173,21 @@ static void print_stats(const prog_config_t *config)
printf("\n==================\n\n"
"Pool latency test done\n\n"
- " type: %s\n"
- " event count: %u\n", config->type == BUFFER ? "buffer" :
+ " type: %s\n"
+ " event count: %u\n", config->type == BUFFER ? "buffer" :
config->type == PACKET ? "packet" : config->type == TMO ? "timeout" : "vector",
config->num_evs);
if (config->type != TMO)
- printf(" %s %u\n", config->type != VECTOR ? "data size: " : "vector size:",
+ printf(" %s %u\n",
+ config->type != VECTOR ? "data size: " : "vector size: ",
config->data_size);
- printf(" pool policy: %s\n"
- " round count: %u\n"
- " ignore count: %u\n"
- " cache size: %" PRIi64 "\n"
- " user area: %u (B)\n"
+ printf(" pool policy: %s\n"
+ " target round count: %" PRIu64 "\n"
+ " ignore count: %" PRIu64 "\n"
+ " cache size: %" PRIi64 "\n"
+ " user area: %u (B)\n"
" burst pattern:\n", config->policy == SINGLE ? "shared" : "per-worker",
config->num_rounds, config->num_ignore, config->cache_size, config->uarea_size);
@@ -1194,6 +1217,7 @@ static void print_stats(const prog_config_t *config)
ave_free_tm = stats->alloc_cnt > 0U ? stats->free_tm / stats->alloc_cnt : 0U;
printf(" worker %d:\n"
+ " actual round count: %" PRIu64 "\n"
" significant events allocated/freed: %" PRIu64 "\n"
" allocation retries: %" PRIu64 "\n"
" allocation errors: %" PRIu64 "\n"
@@ -1208,9 +1232,9 @@ static void print_stats(const prog_config_t *config)
" per free burst: %" PRIu64 " (min: %" PRIu64 " (round: %"
PRIu64 ", pattern: %u), max: %" PRIu64 " (round: %" PRIu64 ", pattern: %u))"
"\n"
- " per free: %" PRIu64 "\n", i, stats->alloc_cnt,
- stats->reallocs, stats->alloc_errs, stats->pattern_errs, stats->tot_tm,
- ev_rate, ave_b_alloc_tm, b_alloc_min, stats->min_alloc_rnd,
+ " per free: %" PRIu64 "\n", i, stats->act_num_rounds,
+ stats->alloc_cnt, stats->reallocs, stats->alloc_errs, stats->pattern_errs,
+ stats->tot_tm, ev_rate, ave_b_alloc_tm, b_alloc_min, stats->min_alloc_rnd,
stats->min_alloc_pt, b_alloc_max, stats->max_alloc_rnd, stats->max_alloc_pt,
ave_alloc_tm, ave_b_free_tm, b_free_min, stats->min_free_rnd,
stats->min_free_pt, b_free_max, stats->max_free_rnd, stats->max_free_pt,
@@ -1339,7 +1363,7 @@ int main(int argc, char **argv)
goto out;
}
- parse_res = parse_options(argc, argv, prog_conf);
+ parse_res = setup_program(argc, argv, prog_conf);
if (parse_res == PRS_NOK) {
ret = EXIT_FAILURE;
@@ -1352,6 +1376,7 @@ int main(int argc, char **argv)
}
prog_conf->odp_instance = odp_instance;
+ odp_atomic_init_u32(&prog_conf->is_running, 1U);
if (!setup_test(prog_conf)) {
ret = EXIT_FAILURE;
diff --git a/test/performance/odp_pool_perf.c b/test/performance/odp_pool_perf.c
index 43a39a21e..c79465e53 100644
--- a/test/performance/odp_pool_perf.c
+++ b/test/performance/odp_pool_perf.c
@@ -1,9 +1,6 @@
-/* Copyright (c) 2018, Linaro Limited
- * Copyright (c) 2019-2022, Nokia
- *
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018 Linaro Limited
+ * Copyright (c) 2019-2022 Nokia
*/
/**
diff --git a/test/performance/odp_queue_perf.c b/test/performance/odp_queue_perf.c
index 7d4612cb8..153f87d10 100644
--- a/test/performance/odp_queue_perf.c
+++ b/test/performance/odp_queue_perf.c
@@ -1,8 +1,6 @@
-/* Copyright (c) 2018, Linaro Limited
- * Copyright (c) 2021-2023, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018 Linaro Limited
+ * Copyright (c) 2021-2023 Nokia
*/
/**
diff --git a/test/performance/odp_random.c b/test/performance/odp_random.c
index 99714d7b3..4a689e440 100644
--- a/test/performance/odp_random.c
+++ b/test/performance/odp_random.c
@@ -1,8 +1,5 @@
-/* Copyright (c) 2021-2022, Nokia
- *
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2021-2024 Nokia
*/
/**
@@ -379,11 +376,20 @@ static void test_type(odp_instance_t instance, test_global_t *global, odp_random
exit(EXIT_FAILURE);
}
- if (odph_thread_join(thr_worker, num_threads) != num_threads) {
+ odph_thread_join_result_t res[num_threads];
+
+ if (odph_thread_join_result(thr_worker, res, num_threads) != num_threads) {
ODPH_ERR("Failed to join worker threads.\n");
exit(EXIT_FAILURE);
}
+ for (i = 0; i < num_threads; i++) {
+ if (res[i].ret != 0) {
+ ODPH_ERR("Worker thread failure: %d.\n", res[i].ret);
+ exit(EXIT_FAILURE);
+ }
+ }
+
double mb, seconds, nsec = 0;
for (i = 0; i < num_threads; i++)
diff --git a/test/performance/odp_sched_latency.c b/test/performance/odp_sched_latency.c
index 0fec49fb9..f3230cc17 100644
--- a/test/performance/odp_sched_latency.c
+++ b/test/performance/odp_sched_latency.c
@@ -1,8 +1,6 @@
-/* Copyright (c) 2016-2018, Linaro Limited
- * Copyright (c) 2020-2022, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016-2018 Linaro Limited
+ * Copyright (c) 2020-2022 Nokia
*/
/**
diff --git a/test/performance/odp_sched_latency_run.sh b/test/performance/odp_sched_latency_run.sh
index b051c1a4e..8cd6dd480 100755
--- a/test/performance/odp_sched_latency_run.sh
+++ b/test/performance/odp_sched_latency_run.sh
@@ -1,9 +1,7 @@
#!/bin/sh
#
-# Copyright (c) 2016-2018, Linaro Limited
-# All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2016-2018 Linaro Limited
#
# Script that passes command line arguments to odp_sched_latency test when
# launched by 'make check'
diff --git a/test/performance/odp_sched_perf.c b/test/performance/odp_sched_perf.c
index 47f703338..85a158c9e 100644
--- a/test/performance/odp_sched_perf.c
+++ b/test/performance/odp_sched_perf.c
@@ -1,8 +1,6 @@
-/* Copyright (c) 2018, Linaro Limited
- * Copyright (c) 2020-2024, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018 Linaro Limited
+ * Copyright (c) 2020-2024 Nokia
*/
/**
@@ -31,6 +29,9 @@
#define MAX_QUEUES (256 * 1024)
#define MAX_GROUPS 256
+/* Limit data values to 16 bits. Large data values are costly in square root calculations. */
+#define DATA_MASK 0xffff
+
/* Max time to wait for new events in nanoseconds */
#define MAX_SCHED_WAIT_NS (10 * ODP_TIME_SEC_IN_NS)
@@ -60,6 +61,7 @@ typedef struct test_options_t {
uint32_t tot_queue;
uint32_t tot_event;
int touch_data;
+ uint32_t stress;
uint32_t rd_words;
uint32_t rw_words;
uint32_t ctx_size;
@@ -156,8 +158,15 @@ static void print_usage(void)
" -b, --burst Maximum number of events per operation. Default: 100.\n"
" -t, --type Queue type. 0: parallel, 1: atomic, 2: ordered. Default: 0.\n"
" -f, --forward 0: Keep event in the original queue, 1: Forward event to the next queue. Default: 0.\n"
- " -a, --fairness 0: Don't count events per queue, 1: Count and report events relative to average. Default: 0.\n"
+ " -F, --fairness 0: Don't count events per queue, 1: Count and report events relative to average. Default: 0.\n"
" -w, --wait_ns Number of nsec to wait before enqueueing events. Default: 0.\n"
+ " -S, --stress CPU stress function(s) to be called for each event data word (requires -n or -m).\n"
+ " Data is processed as uint32_t words. Multiple flags may be selected.\n"
+ " 0: No extra data processing (default)\n"
+ " 0x1: Calculate square of each uint32_t\n"
+ " 0x2: Calculate log2 of each uint32_t\n"
+ " 0x4: Calculate square root of each uint32_t\n"
+ " 0x8: Calculate square root of each uint32_t in floating point\n"
" -k, --ctx_rd_words Number of queue context words (uint64_t) to read on every event. Default: 0.\n"
" -l, --ctx_rw_words Number of queue context words (uint64_t) to modify on every event. Default: 0.\n"
" -n, --rd_words Number of event data words (uint64_t) to read before enqueueing it. Default: 0.\n"
@@ -190,8 +199,9 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
{"burst", required_argument, NULL, 'b'},
{"type", required_argument, NULL, 't'},
{"forward", required_argument, NULL, 'f'},
- {"fairness", required_argument, NULL, 'a'},
+ {"fairness", required_argument, NULL, 'F'},
{"wait_ns", required_argument, NULL, 'w'},
+ {"stress", required_argument, NULL, 'S'},
{"ctx_rd_words", required_argument, NULL, 'k'},
{"ctx_rw_words", required_argument, NULL, 'l'},
{"rd_words", required_argument, NULL, 'n'},
@@ -204,7 +214,7 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
{NULL, 0, NULL, 0}
};
- static const char *shortopts = "+c:q:L:H:d:e:s:g:j:b:t:f:a:w:k:l:n:m:p:u:U:vh";
+ static const char *shortopts = "+c:q:L:H:d:e:s:g:j:b:t:f:F:w:S:k:l:n:m:p:u:U:vh";
test_options->num_cpu = 1;
test_options->num_queue = 1;
@@ -219,6 +229,7 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
test_options->queue_type = 0;
test_options->forward = 0;
test_options->fairness = 0;
+ test_options->stress = 0;
test_options->ctx_rd_words = 0;
test_options->ctx_rw_words = 0;
test_options->rd_words = 0;
@@ -271,9 +282,12 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
case 'f':
test_options->forward = atoi(optarg);
break;
- case 'a':
+ case 'F':
test_options->fairness = atoi(optarg);
break;
+ case 'S':
+ test_options->stress = strtoul(optarg, NULL, 0);
+ break;
case 'k':
test_options->ctx_rd_words = atoi(optarg);
break;
@@ -321,6 +335,11 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
test_options->touch_data = test_options->rd_words ||
test_options->rw_words;
+ if (test_options->stress && test_options->touch_data == 0) {
+		ODPH_ERR("Use -n and/or -m to select event data size with a stress function\n");
+ ret = -1;
+ }
+
if ((test_options->num_queue + test_options->num_dummy) > MAX_QUEUES) {
ODPH_ERR("Too many queues. Max supported %i.\n", MAX_QUEUES);
ret = -1;
@@ -420,6 +439,19 @@ static int set_num_cpu(test_global_t *global)
return 0;
}
+static uint64_t init_data(uint64_t init, uint64_t *data, uint32_t words)
+{
+ uint32_t i;
+ uint64_t val = init;
+
+ for (i = 0; i < words; i++) {
+ data[i] = val;
+ val = (val + 1) & DATA_MASK;
+ }
+
+ return val;
+}
+
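Note: init_data() above fills event data with an increasing pattern and returns the running value so the pattern continues across events, while DATA_MASK keeps the values within 16 bits. A quick check of the wrap-around (the starting value is chosen only to show the wrap):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define DATA_MASK 0xffff

int main(void)
{
	uint64_t data[4];
	uint64_t val = 0xfffe;

	for (int i = 0; i < 4; i++) {
		data[i] = val;
		val = (val + 1) & DATA_MASK;
	}

	printf("%" PRIx64 " %" PRIx64 " %" PRIx64 " %" PRIx64 ", next %" PRIx64 "\n",
	       data[0], data[1], data[2], data[3], val);	/* fffe ffff 0 1, next 2 */
	return 0;
}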
static int create_pool(test_global_t *global)
{
odp_pool_capability_t pool_capa;
@@ -474,6 +506,7 @@ static int create_pool(test_global_t *global)
printf(" queue size %u\n", queue_size);
printf(" max burst size %u\n", max_burst);
printf(" total events %u\n", tot_event);
+ printf(" stress 0x%x\n", test_options->stress);
printf(" event size %u bytes", event_size);
if (touch_data)
printf(" (rd: %u, rw: %u)", 8 * test_options->rd_words, 8 * test_options->rw_words);
@@ -612,6 +645,7 @@ static int create_queues(test_global_t *global)
odp_pool_t pool = global->pool;
uint8_t *ctx = NULL;
uint32_t ctx_size = test_options->ctx_size;
+ uint64_t init_val = 0;
if (type == 0) {
type_str = "parallel";
@@ -755,6 +789,8 @@ static int create_queues(test_global_t *global)
for (j = 0; j < num_event; j++) {
odp_event_t ev;
+ uint64_t *data;
+ uint32_t words;
if (test_options->pool_type == ODP_POOL_BUFFER) {
odp_buffer_t buf = odp_buffer_alloc(pool);
@@ -764,6 +800,9 @@ static int create_queues(test_global_t *global)
return -1;
}
ev = odp_buffer_to_event(buf);
+
+ data = odp_buffer_addr(buf);
+ words = odp_buffer_size(buf) / 8;
} else {
odp_packet_t pkt = odp_packet_alloc(pool, event_size);
@@ -772,7 +811,13 @@ static int create_queues(test_global_t *global)
return -1;
}
ev = odp_packet_to_event(pkt);
+
+ data = odp_packet_data(pkt);
+ words = odp_packet_seg_len(pkt) / 8;
}
+
+ init_val = init_data(init_val, data, words);
+
if (odp_queue_enq(queue, ev)) {
ODPH_ERR("Error: enqueue failed %u/%u\n", i, j);
return -1;
@@ -952,15 +997,14 @@ static inline uint64_t rw_ctx_data(void *ctx, uint32_t offset,
return sum;
}
-static uint64_t rw_data(odp_event_t ev[], int num,
- uint32_t rd_words, uint32_t rw_words, odp_pool_type_t pool_type)
+static uint64_t rw_data(odp_event_t ev[], int num, uint32_t rd_words, uint32_t rw_words,
+ odp_pool_type_t pool_type)
{
uint64_t *data;
- int i;
uint32_t j;
uint64_t sum = 0;
- for (i = 0; i < num; i++) {
+ for (int i = 0; i < num; i++) {
if (pool_type == ODP_POOL_BUFFER)
data = odp_buffer_addr(odp_buffer_from_event(ev[i]));
else
@@ -978,6 +1022,40 @@ static uint64_t rw_data(odp_event_t ev[], int num,
return sum;
}
+static uint64_t rw_data_stress(odp_event_t ev[], int num, uint32_t rd_words, uint32_t rw_words,
+ uint32_t stress, odp_pool_type_t pool_type)
+{
+ uint64_t *data;
+ uint64_t word;
+ uint32_t j;
+ uint64_t sum = 0;
+
+ for (int i = 0; i < num; i++) {
+ if (pool_type == ODP_POOL_BUFFER)
+ data = odp_buffer_addr(odp_buffer_from_event(ev[i]));
+ else
+ data = odp_packet_data(odp_packet_from_event(ev[i]));
+
+ for (j = 0; j < rd_words + rw_words; j++) {
+ word = data[j];
+
+ if (stress & 0x1)
+ sum += odph_stress_pow2_u32(word);
+ if (stress & 0x2)
+ sum += odph_stress_log2_u32(word);
+ if (stress & 0x4)
+ sum += odph_stress_sqrt_u32(word);
+ if (stress & 0x8)
+ sum += odph_stress_sqrt_f32(word);
+
+ if (j >= rd_words)
+ data[j] = (word + 1) & DATA_MASK;
+ }
+ }
+
+ return sum;
+}
+
static int test_sched(void *arg)
{
int num, num_enq, ret, thr;
@@ -994,16 +1072,17 @@ static int test_sched(void *arg)
int num_group = test_options->num_group;
int forward = test_options->forward;
int fairness = test_options->fairness;
- int touch_data = test_options->touch_data;
- uint32_t rd_words = test_options->rd_words;
- uint32_t rw_words = test_options->rw_words;
+ const int touch_data = test_options->touch_data;
+ const uint32_t stress = test_options->stress;
+ const uint32_t rd_words = test_options->rd_words;
+ const uint32_t rw_words = test_options->rw_words;
uint32_t ctx_size = test_options->ctx_size;
uint32_t ctx_rd_words = test_options->ctx_rd_words;
uint32_t ctx_rw_words = test_options->ctx_rw_words;
const uint32_t uarea_size = test_options->uarea_size;
const uint32_t uarea_rd = test_options->uarea_rd;
const uint32_t uarea_rw = test_options->uarea_rw;
- odp_pool_type_t pool_type = test_options->pool_type;
+ const odp_pool_type_t pool_type = test_options->pool_type;
int touch_ctx = ctx_rd_words || ctx_rw_words;
odp_atomic_u32_t *exit_threads = &global->exit_threads;
uint32_t ctx_offset = 0;
@@ -1095,9 +1174,14 @@ static int test_sched(void *arg)
ctx_rw_words);
}
- if (odp_unlikely(touch_data))
- data_sum += rw_data(ev, num, rd_words,
- rw_words, pool_type);
+ if (odp_unlikely(touch_data)) {
+ if (stress) {
+ data_sum += rw_data_stress(ev, num, rd_words, rw_words,
+ stress, pool_type);
+ } else {
+ data_sum += rw_data(ev, num, rd_words, rw_words, pool_type);
+ }
+ }
if (odp_unlikely(wait_ns)) {
waits++;
diff --git a/test/performance/odp_sched_perf_run.sh b/test/performance/odp_sched_perf_run.sh
index 8e7911290..d4c8ebf6e 100755
--- a/test/performance/odp_sched_perf_run.sh
+++ b/test/performance/odp_sched_perf_run.sh
@@ -1,33 +1,45 @@
#!/bin/sh
#
-# Copyright (c) 2021, Nokia
-# All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2021-2024 Nokia
#
TEST_DIR="${TEST_DIR:-$(dirname $0)}"
-echo odp_sched_perf: buffer pool
-echo ===============================================
-
-$TEST_DIR/odp_sched_perf${EXEEXT} -p 0
-
-RET_VAL=$?
-if [ $RET_VAL -ne 0 ]; then
- echo odp_sched_perf -p 0: FAILED
- exit $RET_VAL
-fi
-
-echo odp_sched_perf: packet pool
-echo ===============================================
-
-$TEST_DIR/odp_sched_perf${EXEEXT} -p 1
-
-RET_VAL=$?
-if [ $RET_VAL -ne 0 ]; then
- echo odp_sched_perf -p 1: FAILED
- exit $RET_VAL
-fi
+run()
+{
+	# The maximum number of workers may be less than the number of available processors.
+	# At least one worker should always be available.
+ MAX_WORKERS=$(($(nproc) - 2))
+ if [ $MAX_WORKERS -lt 1 ]; then
+ MAX_WORKERS=1
+ fi
+
+ if [ $MAX_WORKERS -lt $1 ]; then
+ echo "Not enough CPU cores (requested $1, available $MAX_WORKERS). Skipping test."
+ else
+ echo odp_sched_perf -p 0 -c $1
+ echo ===============================================
+ $TEST_DIR/odp_sched_perf${EXEEXT} -p 0 -c $1
+ RET_VAL=$?
+ if [ $RET_VAL -ne 0 ]; then
+ echo odp_sched_perf FAILED
+ exit $RET_VAL
+ fi
+
+ echo odp_sched_perf -p 1 -c $1
+ echo ===============================================
+ $TEST_DIR/odp_sched_perf${EXEEXT} -p 1 -c $1
+ RET_VAL=$?
+ if [ $RET_VAL -ne 0 ]; then
+ echo odp_sched_perf FAILED
+ exit $RET_VAL
+ fi
+ fi
+}
+
+run 1
+run 2
+run 6
exit 0
diff --git a/test/performance/odp_sched_pktio.c b/test/performance/odp_sched_pktio.c
index d8ab1b279..eb79b6b69 100644
--- a/test/performance/odp_sched_pktio.c
+++ b/test/performance/odp_sched_pktio.c
@@ -1,7 +1,5 @@
-/* Copyright (c) 2018, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018 Linaro Limited
*/
/**
diff --git a/test/performance/odp_sched_pktio_run.sh b/test/performance/odp_sched_pktio_run.sh
index dd332c191..828a83029 100755
--- a/test/performance/odp_sched_pktio_run.sh
+++ b/test/performance/odp_sched_pktio_run.sh
@@ -1,9 +1,7 @@
#!/bin/sh
#
-# Copyright (c) 2018, Linaro Limited
-# All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2018 Linaro Limited
#
# directory where test binaries have been built
@@ -11,7 +9,7 @@ TEST_DIR="${TEST_DIR:-$PWD}"
# directory where test sources are, including scripts
TEST_SRC_DIR=$(dirname $0)
-PATH=$TEST_DIR:$TEST_DIR/../../example/generator:$PATH
+PATH=$TEST_DIR:$PATH
# exit codes expected by automake for skipped tests
TEST_SKIPPED=77
@@ -19,8 +17,6 @@ TEST_SKIPPED=77
VALIDATION_TESTDIR=platform/$ODP_PLATFORM/test/validation
PLATFORM_VALIDATION=${TEST_SRC_DIR}/../../$VALIDATION_TESTDIR
-FLOOD_MODE=0
-
# Use installed pktio env or for make check take it from platform directory
if [ -f "./pktio_env" ]; then
. ./pktio_env
@@ -47,9 +43,9 @@ run_sched_pktio()
exit $TEST_SKIPPED
fi
- type odp_generator > /dev/null
+ type odp_packet_gen > /dev/null
if [ $? -ne 0 ]; then
- echo "odp_generator not installed. Aborting."
+ echo "odp_packet_gen not installed. Aborting."
cleanup_pktio_env
exit 1
fi
@@ -65,14 +61,14 @@ run_sched_pktio()
sleep 1
- # Run generator with one worker
- export ODP_PLATFORM_PARAMS="-m 256 --file-prefix="gen" \
+ # Run odp_packet_gen with one tx thread
+ export ODP_PLATFORM_PARAMS="-m 512 --file-prefix="gen" \
--proc-type auto --no-pci \
--vdev net_pcap0,iface=$IF0"
- (odp_generator${EXEEXT} --interval $FLOOD_MODE -I 0 \
- --srcip 192.168.0.1 --dstip 192.168.0.2 \
- -m u -w 1 2>&1 > /dev/null) \
+ (odp_packet_gen${EXEEXT} --gap 0 -i 0 \
+ --ipv4_src 192.168.0.1 --ipv4_dst 192.168.0.2 \
+ -r 0 -t 1 2>&1 > /dev/null) \
2>&1 > /dev/null &
GEN_PID=$!
diff --git a/test/performance/odp_scheduling.c b/test/performance/odp_scheduling.c
deleted file mode 100644
index c9f3eb89f..000000000
--- a/test/performance/odp_scheduling.c
+++ /dev/null
@@ -1,1042 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (c) 2013-2018 Linaro Limited
- * Copyright (c) 2019-2023 Nokia
- */
-
-/**
- * @example odp_scheduling.c
- *
- * Performance test application for miscellaneous scheduling operations
- *
- * @cond _ODP_HIDE_FROM_DOXYGEN_
- */
-
-#include <string.h>
-#include <stdlib.h>
-#include <inttypes.h>
-
-/* ODP main header */
-#include <odp_api.h>
-
-/* ODP helper for Linux apps */
-#include <odp/helper/odph_api.h>
-
-/* Needs librt*/
-#include <time.h>
-
-/* GNU lib C */
-#include <getopt.h>
-
-#define MAX_BUF (512 * 1024) /**< Maximum pool size */
-#define MAX_ALLOCS 32 /**< Alloc burst size */
-#define QUEUES_PER_PRIO 64 /**< Queue per priority */
-#define NUM_PRIOS 2 /**< Number of tested priorities */
-#define QUEUE_ROUNDS (512 * 1024) /**< Queue test rounds */
-#define ALLOC_ROUNDS (1024 * 1024) /**< Alloc test rounds */
-#define MULTI_BUFS_MAX 4 /**< Buffer burst size */
-#define TEST_SEC 2 /**< Time test duration in sec */
-#define STATS_PER_LINE 8 /**< Stats per printed line */
-
-/** Dummy message */
-typedef struct {
- int msg_id; /**< Message ID */
- int seq; /**< Sequence number */
-} test_message_t;
-
-#define MSG_HELLO 1 /**< Hello */
-#define MSG_ACK 2 /**< Ack */
-
-/** Test arguments */
-typedef struct {
- double test_sec; /**< CPU frequency test duration in seconds */
- unsigned int cpu_count; /**< CPU count */
- int fairness; /**< Check fairness */
-} test_args_t;
-
-typedef struct ODP_ALIGNED_CACHE {
- uint64_t num_ev;
-} queue_context_t;
-
-/** Test global variables */
-typedef struct {
- odp_barrier_t barrier;
- odp_spinlock_t lock;
- odp_pool_t pool;
- int first_thr;
- int queues_per_prio;
- test_args_t args;
- odp_queue_t queue[NUM_PRIOS][QUEUES_PER_PRIO];
- queue_context_t queue_ctx[NUM_PRIOS][QUEUES_PER_PRIO];
-} test_globals_t;
-
-/* Prints and initializes queue statistics */
-static void print_stats(int prio, test_globals_t *globals)
-{
- int i, j, k;
-
- if (prio == odp_schedule_max_prio())
- i = 0;
- else
- i = 1;
-
- printf("\nQueue fairness\n-----+--------\n");
-
- for (j = 0; j < globals->queues_per_prio;) {
- printf(" %2i | ", j);
-
- for (k = 0; k < STATS_PER_LINE - 1; k++) {
- printf(" %8" PRIu64,
- globals->queue_ctx[i][j].num_ev);
- globals->queue_ctx[i][j++].num_ev = 0;
- }
-
- printf(" %8" PRIu64 "\n", globals->queue_ctx[i][j].num_ev);
- globals->queue_ctx[i][j++].num_ev = 0;
- }
-
- printf("\n");
-}
-
-/**
- * @internal Clear all scheduled queues. Retry to be sure that all
- * buffers have been scheduled.
- */
-static void clear_sched_queues(void)
-{
- odp_event_t ev;
-
- while (1) {
- ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
-
- if (ev == ODP_EVENT_INVALID)
- break;
-
- odp_event_free(ev);
- }
-}
-
-/**
- * @internal Enqueue events into queues
- *
- * @param thr Thread
- * @param prio Queue priority
- * @param num_queues Number of queues
- * @param num_events Number of events
- * @param globals Test shared data
- *
- * @return 0 if successful
- */
-static int enqueue_events(int thr, int prio, int num_queues, int num_events,
- test_globals_t *globals)
-{
- odp_buffer_t buf[num_events];
- odp_event_t ev[num_events];
- odp_queue_t queue;
- int i, j, k, ret;
-
- if (prio == odp_schedule_max_prio())
- i = 0;
- else
- i = 1;
-
- /* Alloc and enqueue a buffer per queue */
- for (j = 0; j < num_queues; j++) {
- queue = globals->queue[i][j];
-
- ret = odp_buffer_alloc_multi(globals->pool, buf, num_events);
- if (ret != num_events) {
- ODPH_ERR(" [%i] buffer alloc failed\n", thr);
- ret = ret < 0 ? 0 : ret;
- ret = ret > num_events ? num_events : ret; /* GCC-9 -O3 workaround */
- odp_buffer_free_multi(buf, ret);
- return -1;
- }
- for (k = 0; k < num_events; k++) {
- if (!odp_buffer_is_valid(buf[k])) {
- ODPH_ERR(" [%i] buffer alloc failed\n", thr);
- odp_buffer_free_multi(buf, num_events);
- return -1;
- }
- ev[k] = odp_buffer_to_event(buf[k]);
- }
-
- ret = odp_queue_enq_multi(queue, ev, num_events);
- if (ret != num_events) {
- ODPH_ERR(" [%i] Queue enqueue failed.\n", thr);
- ret = ret < 0 ? 0 : ret;
- odp_buffer_free_multi(&buf[ret], num_events - ret);
- return -1;
- }
- }
-
- return 0;
-}
-
-/**
- * @internal Test single buffer alloc and free
- *
- * @param thr Thread
- * @param globals Test shared data
- *
- * @return 0 if successful
- */
-static int test_alloc_single(int thr, test_globals_t *globals)
-{
- int i;
- odp_buffer_t temp_buf;
- uint64_t c1, c2, cycles;
-
- c1 = odp_cpu_cycles();
-
- for (i = 0; i < ALLOC_ROUNDS; i++) {
- temp_buf = odp_buffer_alloc(globals->pool);
-
- if (!odp_buffer_is_valid(temp_buf)) {
- ODPH_ERR(" [%i] alloc_single failed\n", thr);
- return -1;
- }
-
- odp_buffer_free(temp_buf);
- }
-
- c2 = odp_cpu_cycles();
- cycles = odp_cpu_cycles_diff(c2, c1);
- cycles = cycles / ALLOC_ROUNDS;
-
- printf(" [%i] alloc_sng alloc+free %6" PRIu64 " CPU cycles\n",
- thr, cycles);
-
- return 0;
-}
-
-/**
- * @internal Test multiple buffers alloc and free
- *
- * @param thr Thread
- * @param globals Test shared data
- *
- * @return 0 if successful
- */
-static int test_alloc_multi(int thr, test_globals_t *globals)
-{
- int i, j, ret;
- const int num_alloc = MAX_ALLOCS;
- odp_buffer_t temp_buf[num_alloc];
- uint64_t c1, c2, cycles;
-
- c1 = odp_cpu_cycles();
-
- for (i = 0; i < ALLOC_ROUNDS; i++) {
- ret = odp_buffer_alloc_multi(globals->pool, temp_buf, num_alloc);
- if (ret != num_alloc) {
- ODPH_ERR(" [%i] buffer alloc failed\n", thr);
- ret = ret < 0 ? 0 : ret;
- odp_buffer_free_multi(temp_buf, ret);
- return -1;
- }
-
- for (j = 0; j < num_alloc; j++) {
- if (!odp_buffer_is_valid(temp_buf[j])) {
- ODPH_ERR(" [%i] alloc_multi failed\n", thr);
- odp_buffer_free_multi(temp_buf, num_alloc);
- return -1;
- }
- }
- odp_buffer_free_multi(temp_buf, num_alloc);
- }
-
- c2 = odp_cpu_cycles();
- cycles = odp_cpu_cycles_diff(c2, c1);
- cycles = cycles / (ALLOC_ROUNDS * num_alloc);
-
- printf(" [%i] alloc_multi alloc+free %6" PRIu64 " CPU cycles\n",
- thr, cycles);
-
- return 0;
-}
-
-/**
- * @internal Test plain queues
- *
- * Enqueue to and dequeue to/from a single shared queue.
- *
- * @param thr Thread
- * @param globals Test shared data
- *
- * @return 0 if successful
- */
-static int test_plain_queue(int thr, test_globals_t *globals)
-{
- odp_event_t ev;
- odp_buffer_t buf;
- test_message_t *t_msg;
- odp_queue_t queue;
- uint64_t c1, c2, cycles;
- int i, j;
-
- /* Alloc test message */
- buf = odp_buffer_alloc(globals->pool);
-
- if (!odp_buffer_is_valid(buf)) {
- ODPH_ERR(" [%i] buffer alloc failed\n", thr);
- return -1;
- }
-
- /* odp_buffer_print(buf); */
-
- t_msg = odp_buffer_addr(buf);
- t_msg->msg_id = MSG_HELLO;
- t_msg->seq = 0;
-
- queue = odp_queue_lookup("plain_queue");
-
- if (queue == ODP_QUEUE_INVALID) {
- printf(" [%i] Queue lookup failed.\n", thr);
- return -1;
- }
-
- c1 = odp_cpu_cycles();
-
- for (i = 0; i < QUEUE_ROUNDS; i++) {
- ev = odp_buffer_to_event(buf);
-
- if (odp_queue_enq(queue, ev)) {
- ODPH_ERR(" [%i] Queue enqueue failed.\n", thr);
- odp_buffer_free(buf);
- return -1;
- }
-
- /* When enqueue and dequeue are decoupled (e.g. not using a
- * common lock), an enqueued event may not be immediately
- * visible to dequeue. So we just try again for a while. */
- for (j = 0; j < 100; j++) {
- ev = odp_queue_deq(queue);
- if (ev != ODP_EVENT_INVALID)
- break;
- odp_cpu_pause();
- }
-
- buf = odp_buffer_from_event(ev);
-
- if (!odp_buffer_is_valid(buf)) {
- ODPH_ERR(" [%i] Queue empty.\n", thr);
- return -1;
- }
- }
-
- c2 = odp_cpu_cycles();
- cycles = odp_cpu_cycles_diff(c2, c1);
- cycles = cycles / QUEUE_ROUNDS;
-
- printf(" [%i] plain_queue enq+deq %6" PRIu64 " CPU cycles\n",
- thr, cycles);
-
- odp_buffer_free(buf);
- return 0;
-}
-
-/**
- * @internal Test scheduling of a single queue - with odp_schedule()
- *
- * Enqueue a buffer to the shared queue. Schedule and enqueue the received
- * buffer back into the queue.
- *
- * @param str Test case name string
- * @param thr Thread
- * @param prio Priority
- * @param globals Test shared data
- *
- * @return 0 if successful
- */
-static int test_schedule_single(const char *str, int thr,
- int prio, test_globals_t *globals)
-{
- odp_event_t ev;
- odp_queue_t queue;
- uint64_t c1, c2, cycles;
- uint32_t i;
- uint32_t tot;
-
- if (enqueue_events(thr, prio, 1, 1, globals))
- return -1;
-
- c1 = odp_cpu_cycles();
-
- for (i = 0; i < QUEUE_ROUNDS; i++) {
- ev = odp_schedule(&queue, ODP_SCHED_WAIT);
-
- if (odp_queue_enq(queue, ev)) {
- ODPH_ERR(" [%i] Queue enqueue failed.\n", thr);
- odp_event_free(ev);
- return -1;
- }
- }
-
- /* Clear possible locally stored buffers */
- odp_schedule_pause();
-
- tot = i;
-
- while (1) {
- ev = odp_schedule(&queue, ODP_SCHED_NO_WAIT);
-
- if (ev == ODP_EVENT_INVALID)
- break;
-
- tot++;
-
- if (odp_queue_enq(queue, ev)) {
- ODPH_ERR(" [%i] Queue enqueue failed.\n", thr);
- odp_event_free(ev);
- return -1;
- }
- }
-
- c2 = odp_cpu_cycles();
- cycles = odp_cpu_cycles_diff(c2, c1);
-
- odp_barrier_wait(&globals->barrier);
-
- odp_schedule_resume();
-
- clear_sched_queues();
-
- cycles = cycles / tot;
-
- printf(" [%i] %s enq+deq %6" PRIu64 " CPU cycles\n", thr, str, cycles);
-
- return 0;
-}
-
-/**
- * @internal Test scheduling of multiple queues - with odp_schedule()
- *
- * Enqueue a buffer to each queue. Schedule and enqueue the received
- * buffer back into the queue it came from.
- *
- * @param str Test case name string
- * @param thr Thread
- * @param prio Priority
- * @param globals Test shared data
- *
- * @return 0 if successful
- */
-static int test_schedule_many(const char *str, int thr,
- int prio, test_globals_t *globals)
-{
- odp_event_t ev;
- odp_queue_t queue;
- uint64_t c1, c2, cycles;
- uint32_t i;
- uint32_t tot;
-
- if (enqueue_events(thr, prio, globals->queues_per_prio, 1, globals))
- return -1;
-
- /* Start sched-enq loop */
- c1 = odp_cpu_cycles();
-
- for (i = 0; i < QUEUE_ROUNDS; i++) {
- ev = odp_schedule(&queue, ODP_SCHED_WAIT);
-
- if (odp_queue_enq(queue, ev)) {
- ODPH_ERR(" [%i] Queue enqueue failed.\n", thr);
- odp_event_free(ev);
- return -1;
- }
- }
-
- /* Clear possible locally stored buffers */
- odp_schedule_pause();
-
- tot = i;
-
- while (1) {
- ev = odp_schedule(&queue, ODP_SCHED_NO_WAIT);
-
- if (ev == ODP_EVENT_INVALID)
- break;
-
- tot++;
-
- if (odp_queue_enq(queue, ev)) {
- ODPH_ERR(" [%i] Queue enqueue failed.\n", thr);
- odp_event_free(ev);
- return -1;
- }
- }
-
- c2 = odp_cpu_cycles();
- cycles = odp_cpu_cycles_diff(c2, c1);
-
- odp_barrier_wait(&globals->barrier);
-
- odp_schedule_resume();
-
- clear_sched_queues();
-
- cycles = cycles / tot;
-
- printf(" [%i] %s enq+deq %6" PRIu64 " CPU cycles\n", thr, str, cycles);
-
- return 0;
-}
-
-/**
- * @internal Test scheduling of multiple queues with multi_sched and multi_enq
- *
- * @param str Test case name string
- * @param thr Thread
- * @param prio Priority
- * @param globals Test shared data
- *
- * @return 0 if successful
- */
-static int test_schedule_multi(const char *str, int thr,
- int prio, test_globals_t *globals)
-{
- odp_event_t ev[MULTI_BUFS_MAX];
- odp_queue_t queue;
- uint64_t c1, c2, cycles;
- int i;
- int num;
- uint32_t tot = 0;
-
- if (enqueue_events(thr, prio, globals->queues_per_prio, MULTI_BUFS_MAX,
- globals))
- return -1;
-
- /* Start sched-enq loop */
- c1 = odp_cpu_cycles();
-
- for (i = 0; i < QUEUE_ROUNDS; i++) {
- num = odp_schedule_multi(&queue, ODP_SCHED_WAIT, ev,
- MULTI_BUFS_MAX);
-
- tot += num;
-
- if (globals->args.fairness) {
- queue_context_t *queue_ctx;
-
- queue_ctx = odp_queue_context(queue);
- queue_ctx->num_ev += num;
- }
-
- /* Assume we can enqueue all events */
- if (odp_queue_enq_multi(queue, ev, num) != num) {
- ODPH_ERR(" [%i] Queue enqueue failed.\n", thr);
- return -1;
- }
- }
-
- /* Clear possible locally stored events */
- odp_schedule_pause();
-
- while (1) {
- num = odp_schedule_multi(&queue, ODP_SCHED_NO_WAIT, ev,
- MULTI_BUFS_MAX);
-
- if (num == 0)
- break;
-
- tot += num;
-
- if (globals->args.fairness) {
- queue_context_t *queue_ctx;
-
- queue_ctx = odp_queue_context(queue);
- queue_ctx->num_ev += num;
- }
-
- /* Assume we can enqueue all events */
- if (odp_queue_enq_multi(queue, ev, num) != num) {
- ODPH_ERR(" [%i] Queue enqueue failed.\n", thr);
- return -1;
- }
- }
-
- c2 = odp_cpu_cycles();
- cycles = odp_cpu_cycles_diff(c2, c1);
-
- odp_barrier_wait(&globals->barrier);
-
- odp_schedule_resume();
-
- clear_sched_queues();
-
- if (tot)
- cycles = cycles / tot;
- else
- cycles = 0;
-
- printf(" [%i] %s enq+deq %6" PRIu64 " CPU cycles\n", thr, str, cycles);
-
- odp_barrier_wait(&globals->barrier);
-
- if (globals->args.fairness && globals->first_thr == thr)
- print_stats(prio, globals);
-
- return 0;
-}
-
-/**
- * @internal Worker thread
- *
- * @param arg Arguments
- *
- * @return non zero on failure
- */
-static int run_thread(void *arg ODP_UNUSED)
-{
- int thr;
- odp_shm_t shm;
- test_globals_t *globals;
- odp_barrier_t *barrier;
-
- thr = odp_thread_id();
-
- printf("Thread %i starts on CPU %i\n", thr, odp_cpu_id());
-
- shm = odp_shm_lookup("test_globals");
- globals = odp_shm_addr(shm);
-
- if (globals == NULL) {
- ODPH_ERR("Shared mem lookup failed\n");
- return -1;
- }
-
- barrier = &globals->barrier;
-
- /*
- * Test barriers back-to-back
- */
- odp_barrier_wait(barrier);
- odp_barrier_wait(barrier);
- odp_barrier_wait(barrier);
- odp_barrier_wait(barrier);
- odp_barrier_wait(barrier);
-
- /* Select which thread is the first_thr */
- while (globals->first_thr < 0) {
- if (odp_spinlock_trylock(&globals->lock)) {
- globals->first_thr = thr;
- odp_spinlock_unlock(&globals->lock);
- }
- }
-
- odp_barrier_wait(barrier);
-
- if (test_alloc_single(thr, globals))
- return -1;
-
- odp_barrier_wait(barrier);
-
- if (test_alloc_multi(thr, globals))
- return -1;
-
- odp_barrier_wait(barrier);
-
- if (test_plain_queue(thr, globals))
- return -1;
-
- /* Low prio */
-
- odp_barrier_wait(barrier);
-
- if (test_schedule_single("sched_____s_lo", thr,
- odp_schedule_min_prio(), globals))
- return -1;
-
- odp_barrier_wait(barrier);
-
- if (test_schedule_many("sched_____m_lo", thr,
- odp_schedule_min_prio(), globals))
- return -1;
-
- odp_barrier_wait(barrier);
-
- if (test_schedule_multi("sched_multi_lo", thr,
- odp_schedule_min_prio(), globals))
- return -1;
-
- /* High prio */
-
- odp_barrier_wait(barrier);
-
- if (test_schedule_single("sched_____s_hi", thr,
- odp_schedule_max_prio(), globals))
- return -1;
-
- odp_barrier_wait(barrier);
-
- if (test_schedule_many("sched_____m_hi", thr,
- odp_schedule_max_prio(), globals))
- return -1;
-
- odp_barrier_wait(barrier);
-
- if (test_schedule_multi("sched_multi_hi", thr,
- odp_schedule_max_prio(), globals))
- return -1;
-
- printf("Thread %i exits\n", thr);
- fflush(NULL);
- return 0;
-}
-
-/**
- * @internal Test cycle counter frequency
- */
-static void test_cpu_freq(double test_sec)
-{
- odp_time_t cur_time, test_time, start_time, end_time;
- uint64_t c1, c2, cycles;
- uint64_t nsec;
- double diff_max_hz, max_cycles;
-
- printf("\nCPU cycle count frequency test (runs about %f sec)\n",
- test_sec);
-
- test_time = odp_time_local_from_ns(test_sec * ODP_TIME_SEC_IN_NS);
- start_time = odp_time_local();
- end_time = odp_time_sum(start_time, test_time);
-
- /* Start the measurement */
- c1 = odp_cpu_cycles();
-
- do {
- cur_time = odp_time_local();
- } while (odp_time_cmp(end_time, cur_time) > 0);
-
- c2 = odp_cpu_cycles();
-
- test_time = odp_time_diff(cur_time, start_time);
- nsec = odp_time_to_ns(test_time);
-
- cycles = odp_cpu_cycles_diff(c2, c1);
- max_cycles = (nsec * odp_cpu_hz_max()) / 1000000000.0;
-
- /* Compare measured CPU cycles to maximum theoretical CPU cycle count */
- diff_max_hz = ((double)(cycles) - max_cycles) / max_cycles;
-
- printf("odp_time %" PRIu64 " ns\n", nsec);
- printf("odp_cpu_cycles %" PRIu64 " CPU cycles\n", cycles);
- printf("odp_sys_cpu_hz %" PRIu64 " hz\n", odp_cpu_hz_max());
- printf("Diff from max CPU freq %f%%\n", diff_max_hz * 100.0);
-
- printf("\n");
-}
-
-/**
- * @internal Print help
- */
-static void print_usage(void)
-{
- printf("\n\nUsage: ./odp_example [options]\n");
- printf("Options:\n");
- printf(" -t, --time <number> test duration, default=%.1f\n", (double)TEST_SEC);
- printf(" -c, --count <number> CPU count, 0=all available, default=1\n");
- printf(" -h, --help this help\n");
- printf(" -f, --fair collect fairness statistics\n");
- printf("\n\n");
-}
-
-/**
- * @internal Parse arguments
- *
- * @param argc Argument count
- * @param argv Argument vector
- * @param args Test arguments
- */
-static void parse_args(int argc, char *argv[], test_args_t *args)
-{
- int opt;
- int long_index;
-
- static const struct option longopts[] = {
- {"time", required_argument, NULL, 't'},
- {"count", required_argument, NULL, 'c'},
- {"fair", no_argument, NULL, 'f'},
- {"help", no_argument, NULL, 'h'},
- {NULL, 0, NULL, 0}
- };
-
- static const char *shortopts = "+t:c:fh";
-
- args->cpu_count = 1; /* use one worker by default */
- args->test_sec = TEST_SEC;
-
- while (1) {
- opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
-
- if (opt == -1)
- break; /* No more options */
-
- switch (opt) {
- case 'f':
- args->fairness = 1;
- break;
-
- case 't':
- args->test_sec = atof(optarg);
- break;
-
- case 'c':
- args->cpu_count = atoi(optarg);
- break;
-
- case 'h':
- print_usage();
- exit(EXIT_SUCCESS);
- break;
-
- default:
- break;
- }
- }
-}
-
-/**
- * Test main function
- */
-int main(int argc, char *argv[])
-{
- odph_helper_options_t helper_options;
- odph_thread_t *thread_tbl;
- test_args_t args;
- int num_workers;
- odp_cpumask_t cpumask;
- odp_pool_t pool;
- odp_queue_t plain_queue;
- int i, j;
- odp_shm_t shm;
- test_globals_t *globals;
- char cpumaskstr[ODP_CPUMASK_STR_SIZE];
- odp_pool_param_t params;
- int ret = 0;
- odp_instance_t instance;
- odp_init_t init_param;
- odph_thread_common_param_t thr_common;
- odph_thread_param_t thr_param;
- odp_queue_capability_t capa;
- odp_pool_capability_t pool_capa;
- odp_schedule_config_t schedule_config;
- uint32_t num_queues, num_buf;
-
- printf("\nODP example starts\n\n");
-
- /* Let helper collect its own arguments (e.g. --odph_proc) */
- argc = odph_parse_options(argc, argv);
- if (odph_options(&helper_options)) {
- ODPH_ERR("Error: reading ODP helper options failed.\n");
- exit(EXIT_FAILURE);
- }
-
- odp_init_param_init(&init_param);
- init_param.mem_model = helper_options.mem_model;
-
- memset(&args, 0, sizeof(args));
- parse_args(argc, argv, &args);
-
- /* ODP global init */
- if (odp_init_global(&instance, &init_param, NULL)) {
- ODPH_ERR("ODP global init failed.\n");
- return -1;
- }
-
- /*
- * Init this thread. It makes also ODP calls when
- * setting up resources for worker threads.
- */
- if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
- ODPH_ERR("ODP global init failed.\n");
- return -1;
- }
-
- printf("\n");
- odp_sys_info_print();
-
- /* Get default worker cpumask */
- num_workers = odp_cpumask_default_worker(&cpumask, args.cpu_count);
- (void)odp_cpumask_to_str(&cpumask, cpumaskstr, sizeof(cpumaskstr));
-
- printf("num worker threads: %i\n", num_workers);
- printf("first CPU: %i\n", odp_cpumask_first(&cpumask));
- printf("cpu mask: %s\n", cpumaskstr);
-
- thread_tbl = calloc(num_workers, sizeof(odph_thread_t));
- if (!thread_tbl) {
- ODPH_ERR("no memory for thread_tbl\n");
- return -1;
- }
-
- /* Test cycle count frequency */
- test_cpu_freq(args.test_sec);
-
- shm = odp_shm_reserve("test_globals",
- sizeof(test_globals_t), ODP_CACHE_LINE_SIZE, 0);
- if (shm == ODP_SHM_INVALID) {
- ODPH_ERR("Shared memory reserve failed.\n");
- return -1;
- }
-
- globals = odp_shm_addr(shm);
- memset(globals, 0, sizeof(test_globals_t));
- memcpy(&globals->args, &args, sizeof(test_args_t));
-
- /*
- * Create message pool
- */
- if (odp_pool_capability(&pool_capa)) {
- ODPH_ERR("Pool capabilities failed.\n");
- return -1;
- }
-
- num_buf = MAX_BUF;
- if (pool_capa.buf.max_num && pool_capa.buf.max_num < MAX_BUF)
- num_buf = pool_capa.buf.max_num;
-
- odp_pool_param_init(&params);
- params.buf.size = sizeof(test_message_t);
- params.buf.align = 0;
- params.buf.num = num_buf;
- params.type = ODP_POOL_BUFFER;
-
- pool = odp_pool_create("msg_pool", &params);
-
- if (pool == ODP_POOL_INVALID) {
- ODPH_ERR("Pool create failed.\n");
- return -1;
- }
-
- globals->pool = pool;
-
- if (odp_queue_capability(&capa)) {
- ODPH_ERR("Fetching queue capabilities failed.\n");
- return -1;
- }
-
- odp_schedule_config_init(&schedule_config);
- odp_schedule_config(&schedule_config);
-
- globals->queues_per_prio = QUEUES_PER_PRIO;
- num_queues = globals->queues_per_prio * NUM_PRIOS;
- if (schedule_config.num_queues &&
- num_queues > schedule_config.num_queues)
- globals->queues_per_prio = schedule_config.num_queues /
- NUM_PRIOS;
-
- /* One plain queue is also used */
- num_queues = (globals->queues_per_prio * NUM_PRIOS) + 1;
- if (num_queues > capa.max_queues)
- globals->queues_per_prio--;
-
- if (globals->queues_per_prio <= 0) {
- ODPH_ERR("Not enough queues. At least 1 plain and %d scheduled "
- "queues required.\n", NUM_PRIOS);
- return -1;
- }
-
- /*
- * Create a queue for plain queue test
- */
- plain_queue = odp_queue_create("plain_queue", NULL);
-
- if (plain_queue == ODP_QUEUE_INVALID) {
- ODPH_ERR("Plain queue create failed.\n");
- return -1;
- }
-
- /*
- * Create queues for schedule test.
- */
- for (i = 0; i < NUM_PRIOS; i++) {
- char name[] = "sched_XX_YY";
- odp_queue_t queue;
- odp_queue_param_t param;
- int prio;
-
- if (i == 0)
- prio = odp_schedule_max_prio();
- else
- prio = odp_schedule_min_prio();
-
- name[6] = '0' + (prio / 10);
- name[7] = '0' + prio - (10 * (prio / 10));
-
- odp_queue_param_init(&param);
- param.type = ODP_QUEUE_TYPE_SCHED;
- param.sched.prio = prio;
- param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
- param.sched.group = ODP_SCHED_GROUP_ALL;
-
- for (j = 0; j < globals->queues_per_prio; j++) {
- name[9] = '0' + j / 10;
- name[10] = '0' + j - 10 * (j / 10);
-
- queue = odp_queue_create(name, &param);
-
- if (queue == ODP_QUEUE_INVALID) {
- ODPH_ERR("Schedule queue create failed.\n");
- return -1;
- }
-
- globals->queue[i][j] = queue;
-
- if (odp_queue_context_set(queue,
- &globals->queue_ctx[i][j],
- sizeof(queue_context_t))
- < 0) {
- ODPH_ERR("Queue context set failed.\n");
- return -1;
- }
- }
- }
-
- odp_shm_print_all();
-
- odp_pool_print(pool);
-
- /* Barrier to sync test case execution */
- odp_barrier_init(&globals->barrier, num_workers);
-
- odp_spinlock_init(&globals->lock);
- globals->first_thr = -1;
-
- /* Create and launch worker threads */
-
- odph_thread_common_param_init(&thr_common);
- thr_common.instance = instance;
- thr_common.cpumask = &cpumask;
- thr_common.share_param = 1;
-
- odph_thread_param_init(&thr_param);
- thr_param.thr_type = ODP_THREAD_WORKER;
- thr_param.start = run_thread;
- thr_param.arg = NULL;
-
- odph_thread_create(thread_tbl, &thr_common, &thr_param, num_workers);
-
- /* Wait for worker threads to terminate */
- odph_thread_join(thread_tbl, num_workers);
- free(thread_tbl);
-
- printf("ODP example complete\n\n");
-
- for (i = 0; i < NUM_PRIOS; i++) {
- odp_queue_t queue;
-
- for (j = 0; j < globals->queues_per_prio; j++) {
- queue = globals->queue[i][j];
- ret += odp_queue_destroy(queue);
- }
- }
-
- ret += odp_shm_free(shm);
- ret += odp_queue_destroy(plain_queue);
- ret += odp_pool_destroy(pool);
- ret += odp_term_local();
- ret += odp_term_global(instance);
-
- return ret;
-}
diff --git a/test/performance/odp_scheduling_run.sh b/test/performance/odp_scheduling_run.sh
deleted file mode 100755
index 4e004264e..000000000
--- a/test/performance/odp_scheduling_run.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2015-2018, Linaro Limited
-# All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
-#
-# Script that passes command line arguments to odp_scheduling test when
-# launched by 'make check'
-
-TEST_DIR="${TEST_DIR:-$(dirname $0)}"
-ALL=0
-
-run()
-{
- echo odp_scheduling_run starts requesting $1 worker threads
- echo ======================================================
-
- if [ $(nproc) -lt $1 ]; then
- echo "Not enough CPU cores. Skipping test."
- else
- $TEST_DIR/odp_scheduling${EXEEXT} -c $1 -t 0.1
- RET_VAL=$?
- if [ $RET_VAL -ne 0 ]; then
- echo odp_scheduling FAILED
- exit $RET_VAL
- fi
- fi
-}
-
-run 1
-run 5
-run 8
-run 11
-run $ALL
-
-exit 0
diff --git a/test/performance/odp_stress.c b/test/performance/odp_stress.c
index 3ec01df33..1f768b353 100644
--- a/test/performance/odp_stress.c
+++ b/test/performance/odp_stress.c
@@ -1,7 +1,5 @@
-/* Copyright (c) 2022, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022-2024 Nokia
*/
/**
@@ -23,6 +21,11 @@
#include <odp_api.h>
#include <odp/helper/odph_api.h>
+#define MODE_MEMCPY 0x1
+#define MODE_COPY_U32 0x2
+#define MODE_SQRT_U32 0x4
+#define MODE_SQRT_F32 0x8
+
typedef struct test_options_t {
uint32_t num_cpu;
uint64_t period_ns;
@@ -37,6 +40,7 @@ typedef struct test_stat_t {
uint64_t rounds;
uint64_t tot_nsec;
uint64_t work_nsec;
+ uint64_t dummy_sum;
} test_stat_t;
@@ -61,7 +65,7 @@ typedef struct test_global_t {
odp_timer_pool_t timer_pool;
odp_pool_t tmo_pool;
uint64_t period_ticks;
- uint8_t *worker_mem;
+ void *worker_mem;
odp_timer_t timer[ODP_THREAD_COUNT_MAX];
odp_queue_t tmo_queue[ODP_THREAD_COUNT_MAX];
odp_schedule_group_t group[ODP_THREAD_COUNT_MAX];
@@ -75,6 +79,35 @@ typedef struct test_global_t {
test_global_t *test_global;
+/* 250 random numbers: values between 100 and 20000 */
+static const uint32_t pseudo_rand[] = {
+ 14917, 9914, 5313, 4092, 16041, 7757, 17247, 14804, 3255, 7675,
+ 13149, 7288, 5665, 7095, 9594, 1296, 2058, 6013, 17779, 11788,
+ 14855, 760, 16891, 2483, 10937, 16385, 13593, 10674, 4080, 2392,
+ 12218, 11475, 6009, 5798, 7582, 8358, 4520, 14655, 10555, 6598,
+ 10598, 16097, 16634, 17102, 16296, 17142, 5748, 11079, 14569, 10961,
+ 16693, 17775, 19155, 14102, 16132, 19561, 8746, 4521, 8280, 355,
+ 10655, 14539, 5641, 2343, 19213, 9187, 570, 15096, 780, 1711,
+ 8007, 8128, 17416, 14123, 4713, 13774, 11450, 9031, 1194, 16531,
+ 9349, 3496, 19130, 19458, 12412, 9168, 9508, 10607, 5952, 19375,
+ 14934, 18276, 12116, 510, 14272, 10362, 4095, 6789, 1600, 18509,
+ 9274, 2815, 3175, 1122, 6495, 7991, 18831, 17550, 7056, 16185,
+ 18594, 19178, 10028, 1182, 13410, 16173, 3548, 8013, 6099, 2619,
+ 7359, 6889, 15227, 4910, 12341, 18904, 671, 5851, 9836, 18105,
+ 13624, 8138, 5751, 15590, 17415, 15330, 697, 11439, 7008, 10676,
+ 9863, 17163, 10885, 5581, 8078, 4689, 9870, 18370, 19323, 8831,
+ 11444, 3602, 10125, 6244, 13171, 19335, 15635, 19684, 17581, 9513,
+ 8444, 13724, 5243, 9987, 19886, 5087, 17292, 16294, 19627, 14985,
+ 1999, 9889, 1311, 5589, 10084, 911, 301, 2260, 15305, 8265,
+ 409, 1732, 1463, 17680, 15038, 2440, 4239, 9554, 14045, 924,
+ 13997, 3472, 18304, 4848, 10601, 18604, 6459, 19394, 2962, 11218,
+ 5405, 9869, 133, 2512, 13440, 4350, 625, 6580, 5082, 12908,
+ 11517, 8919, 354, 14216, 3190, 15515, 1277, 1028, 507, 9525,
+ 10115, 811, 1268, 17587, 5192, 7240, 17371, 4902, 19908, 1027,
+ 3475, 8658, 11782, 13701, 13034, 154, 4940, 12679, 14067, 2707,
+ 10180, 4669, 17756, 6602, 6727, 818, 8644, 580, 16988, 19127
+};
+
static void print_usage(void)
{
printf("\n"
@@ -83,9 +116,12 @@ static void print_usage(void)
" -c, --num_cpu Number of CPUs (worker threads). 0: all available CPUs. Default: 1\n"
" -p, --period_ns Timeout period in nsec. Default: 100 ms\n"
" -r, --rounds Number of timeout rounds. Default: 2\n"
- " -m, --mode Select test mode. Default: 1\n"
- " 0: No stress, just wait for timeouts\n"
- " 1: Memcpy\n"
+ " -m, --mode Test mode flags, multiple may be selected. Default: 0x1\n"
+ " 0: No stress, just wait for timeouts\n"
+ " 0x1: memcpy()\n"
+ " 0x2: Memory copy loop\n"
+ " 0x4: Integer square root\n"
+ " 0x8: Floating point square root\n"
" -s, --mem_size Memory size per worker in bytes. Default: 2048\n"
" -g, --group_mode Select schedule group mode: Default: 1\n"
" 0: Use GROUP_ALL group. Scheduler load balances timeout events.\n"
@@ -116,7 +152,7 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
test_options->num_cpu = 1;
test_options->period_ns = 100 * ODP_TIME_MSEC_IN_NS;
test_options->rounds = 2;
- test_options->mode = 1;
+ test_options->mode = MODE_MEMCPY;
test_options->mem_size = 2048;
test_options->group_mode = 1;
@@ -137,7 +173,7 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
test_options->rounds = atoll(optarg);
break;
case 'm':
- test_options->mode = atoi(optarg);
+ test_options->mode = strtoul(optarg, NULL, 0);
break;
case 's':
test_options->mem_size = atoll(optarg);
@@ -155,8 +191,9 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
}
if (test_options->mode) {
- if (test_options->mem_size < 2) {
- ODPH_ERR("Too small memory size\n");
+ if (test_options->mem_size < sizeof(uint32_t)) {
+ ODPH_ERR("Too small memory size. Minimum is %zu bytes.\n",
+ sizeof(uint32_t));
return -1;
}
}
@@ -218,20 +255,25 @@ static int worker_thread(void *arg)
odp_event_t ev;
odp_timeout_t tmo;
odp_timer_t timer;
- uint64_t tot_nsec, work_sum, max_nsec;
+ uint64_t tot_nsec, work_sum, max_nsec, i;
odp_timer_start_t start_param;
odp_time_t t1, t2, max_time;
odp_time_t work_t1, work_t2;
uint8_t *src = NULL, *dst = NULL;
+ uint32_t *src_u32 = NULL, *dst_u32 = NULL;
thread_arg_t *thread_arg = arg;
int worker_idx = thread_arg->worker_idx;
test_global_t *global = thread_arg->global;
test_options_t *test_options = &global->test_options;
- int mode = test_options->mode;
- int group_mode = test_options->group_mode;
- uint64_t mem_size = test_options->mem_size;
- uint64_t copy_size = mem_size / 2;
+ const int group_mode = test_options->group_mode;
+ const int mode = test_options->mode;
+ const int data_mode = mode & (MODE_SQRT_U32 | MODE_SQRT_F32);
+ const uint64_t mem_size = test_options->mem_size;
+ const uint64_t copy_size = mem_size / 2;
+ const uint64_t num_words = mem_size / sizeof(uint32_t);
+ const uint64_t copy_words = num_words / 2;
uint64_t rounds = 0;
+ uint64_t dummy_sum = 0;
int ret = 0;
uint32_t done = 0;
uint64_t wait = ODP_SCHED_WAIT;
@@ -255,8 +297,10 @@ static int worker_thread(void *arg)
}
if (mode) {
- src = global->worker_mem + worker_idx * mem_size;
+ src = (uint8_t *)global->worker_mem + worker_idx * mem_size;
dst = src + copy_size;
+ src_u32 = (uint32_t *)(uintptr_t)src;
+ dst_u32 = (uint32_t *)(uintptr_t)dst;
}
start_param.tick_type = ODP_TIMER_TICK_REL;
@@ -316,7 +360,22 @@ static int worker_thread(void *arg)
if (mode) {
work_t1 = odp_time_local();
- memcpy(dst, src, copy_size);
+ if (mode & MODE_MEMCPY)
+ memcpy(dst, src, copy_size);
+
+ if (mode & MODE_COPY_U32)
+ for (i = 0; i < copy_words; i++)
+ dst_u32[i] = src_u32[i];
+
+ if (data_mode) {
+ for (i = 0; i < num_words; i++) {
+ if (mode & MODE_SQRT_U32)
+ dummy_sum += odph_stress_sqrt_u32(src_u32[i]);
+
+ if (mode & MODE_SQRT_F32)
+ dummy_sum += odph_stress_sqrt_f32(src_u32[i]);
+ }
+ }
work_t2 = odp_time_local();
work_sum += odp_time_diff_ns(work_t2, work_t1);
@@ -336,6 +395,7 @@ static int worker_thread(void *arg)
global->stat[thr].rounds = rounds;
global->stat[thr].tot_nsec = tot_nsec;
global->stat[thr].work_nsec = work_sum;
+ global->stat[thr].dummy_sum = dummy_sum;
return ret;
}
@@ -656,8 +716,8 @@ static void print_stat(test_global_t *global)
test_stat_sum_t *sum = &global->stat_sum;
double sec_ave, work_ave, perc;
double round_ave = 0.0;
- double copy_ave = 0.0;
- double copy_tot = 0.0;
+ double rate_ave = 0.0;
+ double rate_tot = 0.0;
double cpu_load = 0.0;
const double mega = 1000000.0;
const double giga = 1000000000.0;
@@ -692,10 +752,16 @@ static void print_stat(test_global_t *global)
cpu_load = 100.0 * (work_ave / sec_ave);
if (mode) {
- uint64_t copy_bytes = sum->rounds * test_options->mem_size / 2;
+ uint64_t data_bytes;
+
+ if (mode == MODE_MEMCPY || mode == MODE_COPY_U32 ||
+ mode == (MODE_COPY_U32 | MODE_MEMCPY))
+ data_bytes = sum->rounds * test_options->mem_size / 2;
+ else
+ data_bytes = sum->rounds * test_options->mem_size;
- copy_ave = copy_bytes / (sum->work_nsec / giga);
- copy_tot = copy_ave * num_cpu;
+ rate_ave = data_bytes / (sum->work_nsec / giga);
+ rate_tot = rate_ave * num_cpu;
}
}
@@ -705,8 +771,8 @@ static void print_stat(test_global_t *global)
printf(" ave work: %.2f sec\n", work_ave);
printf(" ave CPU load: %.2f\n", cpu_load);
printf(" ave rounds per sec: %.2f\n", round_ave / sec_ave);
- printf(" ave copy speed: %.2f MB/sec\n", copy_ave / mega);
- printf(" total copy speed: %.2f MB/sec\n", copy_tot / mega);
+ printf(" ave data rate: %.2f MB/sec\n", rate_ave / mega);
+ printf(" total data rate: %.2f MB/sec\n", rate_tot / mega);
printf("\n");
}
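Editor's note: a worked example of the rate calculation above, with hypothetical numbers (not part of the patch). The memcpy and copy-loop modes move mem_size / 2 bytes per round, and the rate is data bytes divided by the work time in seconds:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const double giga = 1000000000.0, mega = 1000000.0;
	uint64_t rounds = 1000000, mem_size = 2048, work_nsec = 500000000;
	uint64_t data_bytes = rounds * mem_size / 2;	/* 1 024 000 000 bytes copied */
	double rate = data_bytes / (work_nsec / giga);	/* bytes per second */

	printf("ave data rate: %.2f MB/sec\n", rate / mega);	/* 2048.00 MB/sec */
	return 0;
}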
@@ -798,6 +864,10 @@ int main(int argc, char **argv)
/* Memory for workers */
if (mode) {
+ uint64_t num_words;
+ uint32_t *word;
+ uint32_t num_rand = ODPH_ARRAY_SIZE(pseudo_rand);
+
mem_size = test_options->mem_size * num_cpu;
shm = odp_shm_reserve("Test memory", mem_size, ODP_CACHE_LINE_SIZE, 0);
@@ -813,13 +883,18 @@ int main(int argc, char **argv)
exit(EXIT_FAILURE);
}
- memset(global->worker_mem, 0, mem_size);
+ num_words = mem_size / sizeof(uint32_t);
+ word = (uint32_t *)global->worker_mem;
+
+ for (uint64_t j = 0; j < num_words; j++)
+ word[j] = pseudo_rand[j % num_rand];
+
}
printf("\n");
printf("Test parameters\n");
printf(" num workers %u\n", num_cpu);
- printf(" mode %i\n", mode);
+ printf(" mode 0x%x\n", mode);
printf(" group mode %i\n", test_options->group_mode);
printf(" mem size per worker %" PRIu64 " bytes\n", test_options->mem_size);
diff --git a/test/performance/odp_timer_accuracy.c b/test/performance/odp_timer_accuracy.c
new file mode 100644
index 000000000..a663c894a
--- /dev/null
+++ b/test/performance/odp_timer_accuracy.c
@@ -0,0 +1,1438 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018 Linaro Limited
+ * Copyright (c) 2019-2023 Nokia
+ */
+
+/**
+ * @example odp_timer_accuracy.c
+ *
+ * ODP timer accuracy test application
+ *
+ * @cond _ODP_HIDE_FROM_DOXYGEN_
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+#include <stdlib.h>
+
+#include <unistd.h>
+#include <getopt.h>
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+#define MAX_WORKERS (ODP_THREAD_COUNT_MAX - 1)
+#define MAX_QUEUES 1024
+#define MAX_FILENAME 128
+
+enum mode_e {
+ MODE_ONESHOT = 0,
+ MODE_RESTART_ABS,
+ MODE_RESTART_REL,
+ MODE_PERIODIC,
+};
+
+typedef struct test_opt_t {
+ int cpu_count;
+ unsigned long long period_ns;
+ long long res_ns;
+ unsigned long long res_hz;
+ unsigned long long offset_ns;
+ unsigned long long max_tmo_ns;
+ unsigned long long num;
+ unsigned long long num_warmup;
+ unsigned long long burst;
+ unsigned long long burst_gap;
+ odp_fract_u64_t freq;
+ unsigned long long max_multiplier;
+ unsigned long long multiplier;
+ enum mode_e mode;
+ int clk_src;
+ odp_queue_type_t queue_type;
+ int num_queue;
+ int groups;
+ int init;
+ int output;
+ int early_retry;
+ uint64_t warmup_timers;
+ uint64_t tot_timers;
+ uint64_t alloc_timers;
+ char filename[MAX_FILENAME];
+} test_opt_t;
+
+typedef struct timer_ctx_t {
+ odp_timer_t timer;
+ odp_event_t event;
+ uint64_t nsec;
+ uint64_t count;
+ uint64_t first_period;
+ int tmo_tick;
+ int64_t first_tmo_diff;
+ int64_t nsec_final;
+
+} timer_ctx_t;
+
+typedef struct {
+ uint64_t nsec_before_sum;
+ uint64_t nsec_before_min;
+ uint64_t nsec_before_min_idx;
+ uint64_t nsec_before_max;
+ uint64_t nsec_before_max_idx;
+
+ uint64_t nsec_after_sum;
+ uint64_t nsec_after_min;
+ uint64_t nsec_after_min_idx;
+ uint64_t nsec_after_max;
+ uint64_t nsec_after_max_idx;
+
+ uint64_t num_before;
+ uint64_t num_exact;
+ uint64_t num_after;
+
+ uint64_t num_too_near;
+
+} test_stat_t;
+
+typedef struct test_log_t {
+ uint64_t tmo_ns;
+ int64_t diff_ns;
+ int tid;
+
+} test_log_t;
+
+typedef struct test_global_t {
+ test_opt_t opt;
+
+ test_stat_t stat[MAX_WORKERS];
+
+ odp_queue_t queue[MAX_QUEUES];
+ odp_schedule_group_t group[MAX_WORKERS];
+ odp_timer_pool_t timer_pool;
+ odp_pool_t timeout_pool;
+ timer_ctx_t *timer_ctx;
+ double res_ns;
+ uint64_t start_tick;
+ uint64_t start_ns;
+ uint64_t period_tick;
+ double period_dbl;
+ odp_fract_u64_t base_freq;
+ test_log_t *log;
+ FILE *file;
+ odp_barrier_t barrier;
+ odp_atomic_u64_t events;
+ odp_atomic_u64_t last_events;
+
+} test_global_t;
+
+static void print_usage(void)
+{
+ printf("\n"
+ "Timer accuracy test application.\n"
+ "\n"
+ "OPTIONS:\n"
+ " -c, --count <num> CPU count, 0=all available, default=1\n"
+ " -p, --period <nsec> Timeout period in nsec. Not used in periodic mode. Default: 200 msec\n"
+ " -r, --res_ns <nsec> Timeout resolution in nsec. Default value is 0. Special values:\n"
+ " 0: Use period / 10 as the resolution\n"
+ " -1: In periodic mode, use resolution from capabilities\n"
+ " -R, --res_hz <hertz> Timeout resolution in hertz. Set resolution either with -r (nsec) or -R (hertz),\n"
+	       "                           and leave the other to 0. Default: 0 (not used)\n"
+ " -f, --first <nsec> First timer offset in nsec. Default: 0 for periodic mode, otherwise 300 msec\n"
+ " -x, --max_tmo <nsec> Maximum timeout in nsec. Not used in periodic mode.\n"
+ " When 0, max tmo is calculated from other options. Default: 0\n"
+ " -n, --num <number> Number of timeout periods. Default: 50\n"
+ " -w, --warmup <number> Number of warmup periods. Default: 0\n"
+	       "  -b, --burst <number>     Number of timers per timeout period. Default: 1\n"
+ " -g, --burst_gap <nsec> Gap (in nsec) between timers within a burst. Default: 0\n"
+ " In periodic mode, first + burst * burst_gap must be less than period length.\n"
+ " -m, --mode <number> Test mode select (default: 0):\n"
+ " 0: One-shot. Start all timers at init phase.\n"
+ " 1: One-shot. Each period, restart timers with absolute time.\n"
+ " 2: One-shot. Each period, restart timers with relative time.\n"
+ " 3: Periodic.\n"
+ " -P, --periodic <freq_integer:freq_numer:freq_denom:max_multiplier>\n"
+ " Periodic timer pool parameters. Default: 5:0:0:1 (5 Hz)\n"
+ " -M, --multiplier Periodic timer multiplier. Default: 1\n"
+ " -o, --output <file> Output file for measurement logs\n"
+ " -e, --early_retry <num> When timer restart fails due to ODP_TIMER_TOO_NEAR, retry this many times\n"
+ " with expiration time incremented by the period. Default: 0\n"
+ " -s, --clk_src Clock source select (default 0):\n"
+ " 0: ODP_CLOCK_DEFAULT\n"
+ " 1: ODP_CLOCK_SRC_1, ...\n"
+ " -t, --queue_type Queue sync type. Default is 0 (PARALLEL).\n"
+ " 0: PARALLEL\n"
+ " 1: ATOMIC\n"
+ " 2: ORDERED\n"
+ " -q, --num_queue Number of queues. Default is 1.\n"
+ " -G, --sched_groups Use dedicated schedule group for each worker.\n"
+ " -i, --init Set global init parameters. Default: init params not set.\n"
+ " -h, --help Display help and exit.\n\n");
+}
+
+static int parse_options(int argc, char *argv[], test_opt_t *test_opt)
+{
+ int opt, long_index;
+ const struct option longopts[] = {
+ {"count", required_argument, NULL, 'c'},
+ {"period", required_argument, NULL, 'p'},
+ {"res_ns", required_argument, NULL, 'r'},
+ {"res_hz", required_argument, NULL, 'R'},
+ {"first", required_argument, NULL, 'f'},
+ {"max_tmo", required_argument, NULL, 'x'},
+ {"num", required_argument, NULL, 'n'},
+ {"warmup", required_argument, NULL, 'w'},
+ {"burst", required_argument, NULL, 'b'},
+ {"burst_gap", required_argument, NULL, 'g'},
+ {"mode", required_argument, NULL, 'm'},
+ {"periodic", required_argument, NULL, 'P'},
+ {"multiplier", required_argument, NULL, 'M'},
+ {"output", required_argument, NULL, 'o'},
+ {"early_retry", required_argument, NULL, 'e'},
+ {"clk_src", required_argument, NULL, 's'},
+ {"queue_type", required_argument, NULL, 't'},
+ {"num_queue", required_argument, NULL, 'q'},
+ {"sched_groups", no_argument, NULL, 'G'},
+ {"init", no_argument, NULL, 'i'},
+ {"help", no_argument, NULL, 'h'},
+ {NULL, 0, NULL, 0}
+ };
+ const char *shortopts = "+c:p:r:R:f:x:n:w:b:g:m:P:M:o:e:s:t:q:Gih";
+ int ret = 0;
+
+ memset(test_opt, 0, sizeof(*test_opt));
+
+ test_opt->cpu_count = 1;
+ test_opt->period_ns = 200 * ODP_TIME_MSEC_IN_NS;
+ test_opt->res_ns = 0;
+ test_opt->res_hz = 0;
+ test_opt->offset_ns = UINT64_MAX;
+ test_opt->max_tmo_ns = 0;
+ test_opt->num = 50;
+ test_opt->num_warmup = 0;
+ test_opt->burst = 1;
+ test_opt->burst_gap = 0;
+ test_opt->mode = MODE_ONESHOT;
+ test_opt->freq.integer = ODP_TIME_SEC_IN_NS / test_opt->period_ns;
+ test_opt->freq.numer = 0;
+ test_opt->freq.denom = 0;
+ test_opt->max_multiplier = 1;
+ test_opt->multiplier = 1;
+ test_opt->clk_src = ODP_CLOCK_DEFAULT;
+ test_opt->queue_type = ODP_SCHED_SYNC_PARALLEL;
+ test_opt->groups = 0;
+ test_opt->num_queue = 1;
+ test_opt->init = 0;
+ test_opt->output = 0;
+ test_opt->early_retry = 0;
+
+ while (1) {
+ opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
+
+ if (opt == -1)
+ break; /* No more options */
+
+ switch (opt) {
+ case 'c':
+ test_opt->cpu_count = atoi(optarg);
+ break;
+ case 'p':
+ test_opt->period_ns = strtoull(optarg, NULL, 0);
+ break;
+ case 'r':
+ test_opt->res_ns = strtoll(optarg, NULL, 0);
+ break;
+ case 'R':
+ test_opt->res_hz = strtoull(optarg, NULL, 0);
+ break;
+ case 'f':
+ test_opt->offset_ns = strtoull(optarg, NULL, 0);
+ break;
+ case 'x':
+ test_opt->max_tmo_ns = strtoull(optarg, NULL, 0);
+ break;
+ case 'n':
+ test_opt->num = strtoull(optarg, NULL, 0);
+ break;
+ case 'w':
+ test_opt->num_warmup = strtoull(optarg, NULL, 0);
+ break;
+ case 'b':
+ test_opt->burst = strtoull(optarg, NULL, 0);
+ break;
+ case 'g':
+ test_opt->burst_gap = strtoull(optarg, NULL, 0);
+ break;
+ case 'm':
+ test_opt->mode = atoi(optarg);
+ break;
+ case 'P':
+ sscanf(optarg, "%" SCNu64 ":%" SCNu64 ":%" SCNu64 ":%llu",
+ &test_opt->freq.integer, &test_opt->freq.numer,
+ &test_opt->freq.denom, &test_opt->max_multiplier);
+ break;
+ case 'M':
+ test_opt->multiplier = strtoull(optarg, NULL, 0);
+ break;
+ case 'o':
+ test_opt->output = 1;
+ if (strlen(optarg) >= MAX_FILENAME) {
+ printf("Filename too long\n");
+ return -1;
+ }
+ odph_strcpy(test_opt->filename, optarg, MAX_FILENAME);
+ break;
+ case 'e':
+ test_opt->early_retry = atoi(optarg);
+ break;
+ case 's':
+ test_opt->clk_src = atoi(optarg);
+ break;
+ case 't':
+ switch (atoi(optarg)) {
+ case 1:
+ test_opt->queue_type = ODP_SCHED_SYNC_ATOMIC;
+ break;
+ case 2:
+ test_opt->queue_type = ODP_SCHED_SYNC_ORDERED;
+ break;
+ default:
+ test_opt->queue_type = ODP_SCHED_SYNC_PARALLEL;
+ break;
+ }
+ break;
+ case 'q':
+ test_opt->num_queue = atoi(optarg);
+ break;
+ case 'G':
+ test_opt->groups = 1;
+ break;
+ case 'i':
+ test_opt->init = 1;
+ break;
+ case 'h':
+ print_usage();
+ ret = -1;
+ break;
+ default:
+ print_usage();
+ ret = -1;
+ break;
+ }
+ }
+
+ if (test_opt->mode == MODE_PERIODIC) {
+ if ((test_opt->freq.integer == 0 && test_opt->freq.numer == 0) ||
+ (test_opt->freq.numer != 0 && test_opt->freq.denom == 0)) {
+ printf("Bad frequency\n");
+ return -1;
+ }
+
+ test_opt->period_ns =
+ ODP_TIME_SEC_IN_NS / odp_fract_u64_to_dbl(&test_opt->freq);
+
+ if (test_opt->offset_ns == UINT64_MAX)
+ test_opt->offset_ns = 0;
+ } else {
+ if (test_opt->res_ns < 0) {
+ printf("Resolution (res_ns) must be >= 0 with single shot timer\n");
+ return -1;
+ }
+
+ if (test_opt->offset_ns == UINT64_MAX)
+ test_opt->offset_ns = 300 * ODP_TIME_MSEC_IN_NS;
+ }
+
+ test_opt->warmup_timers = test_opt->num_warmup * test_opt->burst;
+ test_opt->tot_timers =
+ test_opt->warmup_timers + test_opt->num * test_opt->burst;
+
+ if (test_opt->mode == MODE_ONESHOT)
+ test_opt->alloc_timers = test_opt->tot_timers;
+ else
+ test_opt->alloc_timers = test_opt->burst;
+
+ return ret;
+}
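Editor's note: a small sketch (not part of the patch) of the timer-count bookkeeping done at the end of parse_options(), using the hypothetical options -n 50 -w 2 -b 10:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t num = 50, num_warmup = 2, burst = 10;
	uint64_t warmup_timers = num_warmup * burst;		/* 20 */
	uint64_t tot_timers = warmup_timers + num * burst;	/* 520 */
	uint64_t alloc_oneshot = tot_timers;			/* mode 0: every timer allocated up front */
	uint64_t alloc_restart = burst;				/* other modes: one burst of timers, restarted */

	printf("warmup %" PRIu64 ", total %" PRIu64 ", alloc %" PRIu64 " / %" PRIu64 "\n",
	       warmup_timers, tot_timers, alloc_oneshot, alloc_restart);
	return 0;
}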
+
+static int single_shot_params(test_global_t *test_global, odp_timer_pool_param_t *timer_param,
+ odp_timer_capability_t *timer_capa)
+{
+ uint64_t res_ns, res_hz;
+ uint64_t max_res_ns, max_res_hz;
+ uint64_t period_ns = test_global->opt.period_ns;
+ uint64_t num_tmo = test_global->opt.num + test_global->opt.num_warmup;
+ uint64_t offset_ns = test_global->opt.offset_ns;
+ enum mode_e mode = test_global->opt.mode;
+
+ max_res_ns = timer_capa->max_res.res_ns;
+ max_res_hz = timer_capa->max_res.res_hz;
+
+ /* Default resolution */
+ if (test_global->opt.res_ns == 0 && test_global->opt.res_hz == 0) {
+ res_ns = test_global->opt.period_ns / 10;
+ res_hz = 0;
+ } else if (test_global->opt.res_ns) {
+ res_ns = test_global->opt.res_ns;
+ res_hz = 0;
+ } else {
+ res_ns = 0;
+ res_hz = test_global->opt.res_hz;
+ }
+
+ if (res_ns && res_ns < max_res_ns) {
+ printf("Resolution %" PRIu64 " nsec too high. Highest resolution %" PRIu64 " nsec. "
+ "Default resolution is period / 10.\n\n",
+ res_ns, max_res_ns);
+ return -1;
+ }
+
+ if (res_hz && res_hz > max_res_hz) {
+ printf("Resolution %" PRIu64 " hz too high. Highest resolution %" PRIu64 " hz. "
+ "Default resolution is period / 10.\n\n",
+ res_hz, max_res_hz);
+ return -1;
+ }
+
+ if (res_ns)
+ timer_param->res_ns = res_ns;
+ else
+ timer_param->res_hz = res_hz;
+
+ if (mode == MODE_ONESHOT) {
+ timer_param->min_tmo = offset_ns / 2;
+ timer_param->max_tmo = offset_ns + ((num_tmo + 1) * period_ns);
+ } else {
+ timer_param->min_tmo = period_ns / 10;
+ timer_param->max_tmo = offset_ns + (2 * period_ns);
+ }
+
+ if (test_global->opt.max_tmo_ns) {
+ if (test_global->opt.max_tmo_ns < timer_param->max_tmo) {
+ printf("Max tmo is too small. Must be at least %" PRIu64 " nsec.\n",
+ timer_param->max_tmo);
+ return -1;
+ }
+
+ timer_param->max_tmo = test_global->opt.max_tmo_ns;
+ }
+
+ printf(" period: %" PRIu64 " nsec\n", period_ns);
+ printf(" max res nsec: %" PRIu64 "\n", max_res_ns);
+ printf(" max res hertz: %" PRIu64 "\n", max_res_hz);
+
+ test_global->period_dbl = period_ns;
+
+ return 0;
+}
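Editor's note: a worked example (not part of the patch) of the one-shot min/max timeout limits computed above, assuming the default options: 200 msec period, 300 msec first offset, 50 periods and no warmup:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t period_ns = 200000000, offset_ns = 300000000, num_tmo = 50;
	uint64_t min_tmo = offset_ns / 2;				/* 150 msec */
	uint64_t max_tmo = offset_ns + ((num_tmo + 1) * period_ns);	/* 10.5 sec */

	printf("min_tmo %" PRIu64 " nsec, max_tmo %" PRIu64 " nsec\n", min_tmo, max_tmo);
	return 0;
}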
+
+static int periodic_params(test_global_t *test_global, odp_timer_pool_param_t *timer_param,
+ odp_timer_capability_t *timer_capa)
+{
+ int ret;
+ uint64_t res_ns;
+ odp_timer_periodic_capability_t capa;
+ double freq_dbl, min_freq, max_freq;
+ double opt_freq = odp_fract_u64_to_dbl(&test_global->opt.freq);
+ odp_fract_u64_t freq = test_global->opt.freq;
+ uint64_t res_hz = test_global->opt.res_hz;
+ uint64_t max_multiplier = test_global->opt.max_multiplier;
+ uint64_t multiplier = test_global->opt.multiplier;
+
+ if (res_hz) {
+ res_ns = ODP_TIME_SEC_IN_NS / res_hz;
+ } else {
+ res_ns = test_global->opt.res_ns;
+
+ /* Default resolution */
+ if (res_ns == 0)
+ res_ns = ODP_TIME_SEC_IN_NS / (10 * multiplier * opt_freq);
+ }
+
+ if (res_ns == 0) {
+ printf("Too high resolution\n");
+ return -1;
+ }
+
+ /* Resolution from capa */
+ if (test_global->opt.res_ns < 0)
+ res_ns = 0;
+
+ min_freq = odp_fract_u64_to_dbl(&timer_capa->periodic.min_base_freq_hz);
+ max_freq = odp_fract_u64_to_dbl(&timer_capa->periodic.max_base_freq_hz);
+
+ capa.base_freq_hz = freq;
+ capa.max_multiplier = max_multiplier;
+ capa.res_ns = res_ns;
+
+ ret = odp_timer_periodic_capability(test_global->opt.clk_src, &capa);
+
+ if (ret < 0) {
+ printf("Requested periodic timer capabilities are not supported.\n"
+ "Capabilities: min base freq %g Hz, max base freq %g Hz, "
+ "max res %" PRIu64 " Hz\n", min_freq, max_freq, timer_capa->max_res.res_hz);
+ return -1;
+ }
+
+ if (ret == 0) {
+ printf("Requested base frequency is not met. Using %.2f Hz instead of %.2f Hz.\n",
+ odp_fract_u64_to_dbl(&capa.base_freq_hz), opt_freq);
+
+ freq = capa.base_freq_hz;
+ }
+
+ if (res_ns == 0)
+ res_ns = capa.res_ns;
+
+ freq_dbl = odp_fract_u64_to_dbl(&freq);
+ test_global->base_freq = freq;
+ test_global->period_dbl = ODP_TIME_SEC_IN_NS / (multiplier * freq_dbl);
+
+ /* Min/max tmo are ignored, leave those to default values */
+ timer_param->timer_type = ODP_TIMER_TYPE_PERIODIC;
+ timer_param->periodic.base_freq_hz = freq;
+ timer_param->periodic.max_multiplier = max_multiplier;
+
+ if (res_hz)
+ timer_param->res_hz = res_hz;
+ else
+ timer_param->res_ns = res_ns;
+
+ printf(" min freq capa: %.2f hz\n", min_freq);
+ printf(" max freq capa: %.2f hz\n", max_freq);
+ printf(" freq option: %.2f hz\n", opt_freq);
+ printf(" freq: %.2f hz\n", freq_dbl);
+ printf(" freq integer: %" PRIu64 "\n", freq.integer);
+ printf(" freq numer: %" PRIu64 "\n", freq.numer);
+ printf(" freq denom: %" PRIu64 "\n", freq.denom);
+ printf(" max_multiplier: %" PRIu64 "\n", max_multiplier);
+ printf(" multiplier: %" PRIu64 "\n", multiplier);
+ printf(" timer freq: %.2f hz\n", multiplier * freq_dbl);
+ printf(" timer period: %.2f nsec\n", test_global->period_dbl);
+ printf(" resolution capa: %" PRIu64 " nsec\n", capa.res_ns);
+
+ return 0;
+}
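Editor's note: a worked example (not part of the patch) of the periodic-mode derivations above, assuming the default 5 Hz base frequency and multiplier 1. The timer period is 1e9 / (multiplier * freq) nsec, and the default resolution is a tenth of that:

#include <stdio.h>

int main(void)
{
	double freq_hz = 5.0, multiplier = 1.0;
	double period_ns = 1000000000.0 / (multiplier * freq_hz);		/* 200 000 000 nsec */
	double def_res_ns = 1000000000.0 / (10.0 * multiplier * freq_hz);	/* 20 000 000 nsec */

	printf("timer period %.0f nsec, default resolution %.0f nsec\n", period_ns, def_res_ns);
	return 0;
}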
+
+static int create_timers(test_global_t *test_global)
+{
+ odp_pool_t pool;
+ odp_pool_param_t pool_param;
+ odp_timer_pool_t timer_pool;
+ odp_timer_pool_param_t timer_param;
+ odp_timer_capability_t timer_capa;
+ odp_timer_t timer;
+ odp_queue_t *queue;
+ odp_schedule_group_t *group;
+ odp_queue_param_t queue_param;
+ uint64_t offset_ns;
+ uint32_t max_timers;
+ odp_event_t event;
+ odp_timeout_t timeout;
+ uint64_t i, num_tmo, num_warmup, burst, burst_gap;
+ uint64_t tot_timers, alloc_timers;
+ enum mode_e mode;
+ odp_timer_clk_src_t clk_src;
+ int ret;
+
+ mode = test_global->opt.mode;
+ alloc_timers = test_global->opt.alloc_timers;
+ tot_timers = test_global->opt.tot_timers;
+ num_warmup = test_global->opt.num_warmup;
+ num_tmo = num_warmup + test_global->opt.num;
+ burst = test_global->opt.burst;
+ burst_gap = test_global->opt.burst_gap;
+ offset_ns = test_global->opt.offset_ns;
+ queue = test_global->queue;
+ group = test_global->group;
+
+ /* Always init globals for destroy calls */
+ test_global->timer_pool = ODP_TIMER_POOL_INVALID;
+ test_global->timeout_pool = ODP_POOL_INVALID;
+
+ for (i = 0; i < alloc_timers; i++) {
+ test_global->timer_ctx[i].timer = ODP_TIMER_INVALID;
+ test_global->timer_ctx[i].event = ODP_EVENT_INVALID;
+ }
+
+ if (test_global->opt.groups) {
+ /* Create groups */
+
+ odp_thrmask_t zero;
+
+ odp_thrmask_zero(&zero);
+
+ for (i = 0; i < (uint64_t)test_global->opt.cpu_count; i++) {
+ group[i] = odp_schedule_group_create(NULL, &zero);
+
+ if (group[i] == ODP_SCHED_GROUP_INVALID) {
+ printf("Group create failed.\n");
+ return -1;
+ }
+ }
+ }
+
+ odp_queue_param_init(&queue_param);
+ queue_param.type = ODP_QUEUE_TYPE_SCHED;
+ queue_param.sched.prio = odp_schedule_default_prio();
+ queue_param.sched.sync = test_global->opt.queue_type;
+ queue_param.sched.group = ODP_SCHED_GROUP_ALL;
+
+ for (i = 0; i < (uint64_t)test_global->opt.num_queue; i++) {
+ if (test_global->opt.groups)
+ queue_param.sched.group = group[i % test_global->opt.cpu_count];
+
+ queue[i] = odp_queue_create(NULL, &queue_param);
+ if (queue[i] == ODP_QUEUE_INVALID) {
+ printf("Queue create failed.\n");
+ return -1;
+ }
+ }
+
+ odp_pool_param_init(&pool_param);
+ pool_param.type = ODP_POOL_TIMEOUT;
+ pool_param.tmo.num = alloc_timers;
+
+ pool = odp_pool_create("timeout pool", &pool_param);
+
+ if (pool == ODP_POOL_INVALID) {
+ printf("Timeout pool create failed.\n");
+ return -1;
+ }
+
+ test_global->timeout_pool = pool;
+ clk_src = test_global->opt.clk_src;
+
+ if (odp_timer_capability(clk_src, &timer_capa)) {
+ printf("Timer capa failed\n");
+ return -1;
+ }
+
+ max_timers = timer_capa.max_timers;
+
+ if (mode == MODE_PERIODIC) {
+ if (timer_capa.periodic.max_pools < 1) {
+ printf("Error: Periodic timers not supported.\n");
+ return -1;
+ }
+ max_timers = timer_capa.periodic.max_timers;
+ }
+
+ if (max_timers && test_global->opt.alloc_timers > max_timers) {
+ printf("Error: Too many timers: %" PRIu64 ".\n"
+ " Max timers: %u\n",
+ test_global->opt.alloc_timers, max_timers);
+ return -1;
+ }
+
+ printf("\nTest parameters:\n");
+ printf(" clock source: %i\n", clk_src);
+ printf(" max timers capa: %" PRIu32 "\n", max_timers);
+ printf(" mode: %i\n", mode);
+ printf(" queue type: %i\n", test_global->opt.queue_type);
+ printf(" num queue: %i\n", test_global->opt.num_queue);
+ printf(" sched groups: %s\n", test_global->opt.groups ? "yes" : "no");
+
+ odp_timer_pool_param_init(&timer_param);
+
+ if (mode == MODE_PERIODIC)
+ ret = periodic_params(test_global, &timer_param, &timer_capa);
+ else
+ ret = single_shot_params(test_global, &timer_param, &timer_capa);
+
+ if (ret)
+ return ret;
+
+ if (timer_param.res_hz) {
+ test_global->res_ns = 1000000000.0 / timer_param.res_hz;
+ printf(" resolution: %" PRIu64 " Hz\n", timer_param.res_hz);
+ } else {
+ test_global->res_ns = timer_param.res_ns;
+ printf(" resolution: %" PRIu64 " nsec\n", timer_param.res_ns);
+ }
+
+ timer_param.num_timers = alloc_timers;
+ timer_param.clk_src = clk_src;
+
+ printf(" restart retries: %i\n", test_global->opt.early_retry);
+ if (test_global->opt.output)
+ printf(" log file: %s\n", test_global->opt.filename);
+ printf(" start offset: %" PRIu64 " nsec\n", offset_ns);
+ printf(" min timeout: %" PRIu64 " nsec\n", timer_param.min_tmo);
+ printf(" max timeout: %" PRIu64 " nsec\n", timer_param.max_tmo);
+ printf(" num timeout: %" PRIu64 "\n", num_tmo);
+ printf(" num warmup: %" PRIu64 "\n", num_warmup);
+ printf(" burst size: %" PRIu64 "\n", burst);
+ printf(" burst gap: %" PRIu64 "\n", burst_gap);
+ printf(" total timers: %" PRIu64 "\n", tot_timers);
+ printf(" warmup timers: %" PRIu64 "\n", test_global->opt.warmup_timers);
+ printf(" alloc timers: %" PRIu64 "\n", alloc_timers);
+ printf(" warmup time: %.2f sec\n",
+ (offset_ns + (num_warmup * test_global->period_dbl)) / 1000000000.0);
+ printf(" test run time: %.2f sec\n\n",
+ (offset_ns + (num_tmo * test_global->period_dbl)) / 1000000000.0);
+
+ timer_pool = odp_timer_pool_create("timer_accuracy", &timer_param);
+
+ if (timer_pool == ODP_TIMER_POOL_INVALID) {
+ printf("Timer pool create failed\n");
+ return -1;
+ }
+
+ if (odp_timer_pool_start_multi(&timer_pool, 1) != 1) {
+ ODPH_ERR("Timer pool start failed\n");
+ return -1;
+ }
+
+ odp_timer_pool_print(timer_pool);
+
+	/* Spend some time so that the current tick is not zero */
+ odp_time_wait_ns(100 * ODP_TIME_MSEC_IN_NS);
+
+ test_global->timer_pool = timer_pool;
+
+ for (i = 0; i < alloc_timers; i++) {
+ timer_ctx_t *ctx = &test_global->timer_ctx[i];
+
+ timer = odp_timer_alloc(timer_pool, queue[i % test_global->opt.num_queue], ctx);
+
+ if (timer == ODP_TIMER_INVALID) {
+ printf("Timer alloc failed.\n");
+ return -1;
+ }
+
+ ctx->timer = timer;
+
+ timeout = odp_timeout_alloc(pool);
+ if (timeout == ODP_TIMEOUT_INVALID) {
+ printf("Timeout alloc failed\n");
+ return -1;
+ }
+
+ ctx->event = odp_timeout_to_event(timeout);
+ }
+
+	/* Run the scheduler a few times to ensure that the (software) timer is active */
+ for (i = 0; i < 1000; i++) {
+ event = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+
+ if (event != ODP_EVENT_INVALID) {
+ printf("Spurious event received\n");
+ odp_event_free(event);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int start_timers(test_global_t *test_global)
+{
+ odp_timer_pool_t timer_pool;
+ uint64_t start_tick;
+ uint64_t period_ns, start_ns, nsec, offset_ns;
+ odp_time_t time;
+ uint64_t i, j, idx, num_tmo, num_warmup, burst, burst_gap;
+ enum mode_e mode;
+
+ mode = test_global->opt.mode;
+ num_warmup = test_global->opt.num_warmup;
+ num_tmo = num_warmup + test_global->opt.num;
+ burst = test_global->opt.burst;
+ burst_gap = test_global->opt.burst_gap;
+ period_ns = test_global->opt.period_ns;
+ offset_ns = test_global->opt.offset_ns;
+ timer_pool = test_global->timer_pool;
+ idx = 0;
+
+	/* Record test start time and tick. Memory barriers prevent the compiler and an
+	 * out-of-order CPU from moving the samples apart. */
+ odp_mb_full();
+ start_tick = odp_timer_current_tick(timer_pool);
+ time = odp_time_global();
+ odp_mb_full();
+
+ start_ns = odp_time_to_ns(time);
+ test_global->start_tick = start_tick;
+ test_global->start_ns = start_ns;
+ test_global->period_tick = odp_timer_ns_to_tick(timer_pool, period_ns);
+
+ /* When mode is not one-shot, set only one burst of timers initially */
+ if (mode != MODE_ONESHOT)
+ num_tmo = 1;
+
+ for (i = 0; i < num_tmo; i++) {
+ odp_timer_retval_t retval;
+
+ for (j = 0; j < burst; j++) {
+ timer_ctx_t *ctx = &test_global->timer_ctx[idx];
+ odp_timer_start_t start_param;
+
+ if (mode == MODE_PERIODIC) {
+ odp_timer_periodic_start_t periodic_start;
+
+ nsec = offset_ns + (j * burst_gap);
+
+				/* By default, the timer starts one period after the current time. Round
+				 * the floating point value to the closest integer. */
+ ctx->nsec = start_ns + test_global->period_dbl + 0.5;
+ if (nsec)
+ ctx->nsec = start_ns + nsec;
+
+ ctx->count = 0;
+ ctx->first_period = start_tick +
+ odp_timer_ns_to_tick(timer_pool,
+ test_global->period_dbl + 0.5);
+ periodic_start.freq_multiplier = test_global->opt.multiplier;
+ periodic_start.first_tick = 0;
+ if (nsec)
+ periodic_start.first_tick =
+ start_tick + odp_timer_ns_to_tick(timer_pool, nsec);
+ periodic_start.tmo_ev = ctx->event;
+ retval = odp_timer_periodic_start(ctx->timer, &periodic_start);
+ } else {
+ nsec = offset_ns + (i * period_ns) + (j * burst_gap);
+ ctx->nsec = start_ns + nsec;
+ start_param.tick_type = ODP_TIMER_TICK_ABS;
+ start_param.tick =
+ start_tick + odp_timer_ns_to_tick(timer_pool, nsec);
+ start_param.tmo_ev = ctx->event;
+ retval = odp_timer_start(ctx->timer, &start_param);
+ }
+
+ if (retval != ODP_TIMER_SUCCESS) {
+ printf("Timer[%" PRIu64 "] set failed: %i\n", idx, retval);
+ return -1;
+ }
+
+ idx++;
+ }
+ }
+
+ printf("\nStarting timers took %" PRIu64 " nsec\n", odp_time_global_ns() - start_ns);
+
+ return 0;
+}
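Editor's note: a sketch (not part of the patch) of the one-shot expiration offsets computed in the loop above: timer j of period i expires at start + first offset + i * period + j * burst gap. Hypothetical options: -f 300 msec, -p 200 msec, -b 2, -g 1 msec:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t offset_ns = 300000000, period_ns = 200000000, burst_gap = 1000000;

	for (uint64_t i = 0; i < 2; i++)
		for (uint64_t j = 0; j < 2; j++)
			printf("period %" PRIu64 ", timer %" PRIu64 ": start + %" PRIu64 " nsec\n",
			       i, j, offset_ns + (i * period_ns) + (j * burst_gap));
	return 0;
}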
+
+static int destroy_timers(test_global_t *test_global)
+{
+ uint64_t i, alloc_timers;
+ odp_timer_t timer;
+ int ret = 0;
+
+ alloc_timers = test_global->opt.alloc_timers;
+
+ for (i = 0; i < alloc_timers; i++) {
+ timer = test_global->timer_ctx[i].timer;
+
+ if (timer == ODP_TIMER_INVALID)
+ break;
+
+ if (odp_timer_free(timer)) {
+ printf("Timer free failed: %" PRIu64 "\n", i);
+ ret = -1;
+ }
+ }
+
+ if (test_global->timer_pool != ODP_TIMER_POOL_INVALID)
+ odp_timer_pool_destroy(test_global->timer_pool);
+
+ if (test_global->timeout_pool != ODP_POOL_INVALID) {
+ if (odp_pool_destroy(test_global->timeout_pool)) {
+ printf("Pool destroy failed.\n");
+ ret = -1;
+ }
+ }
+
+ for (i = 0; i < (uint64_t)test_global->opt.num_queue; i++) {
+ if (odp_queue_destroy(test_global->queue[i])) {
+ printf("Queue destroy failed.\n");
+ ret = -1;
+ }
+ }
+
+ if (test_global->opt.groups) {
+ for (i = 0; i < (uint64_t)test_global->opt.cpu_count; i++) {
+ if (odp_schedule_group_destroy(test_global->group[i])) {
+ printf("Group destroy failed.\n");
+ ret = -1;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static void print_nsec_error(const char *str, int64_t nsec, double res_ns,
+ int tid, int idx)
+{
+ printf(" %s: %12" PRIi64 " / %.3fx resolution",
+ str, nsec, (double)nsec / res_ns);
+ if (tid >= 0)
+ printf(", thread %d", tid);
+ if (idx >= 0)
+ printf(", event %d", idx);
+ printf("\n");
+}
+
+static void print_stat(test_global_t *test_global)
+{
+ test_stat_t test_stat;
+ test_stat_t *stat = &test_stat;
+ uint64_t tot_timers;
+ test_stat_t *s = test_global->stat;
+ test_log_t *log = test_global->log;
+ double res_ns = test_global->res_ns;
+ uint64_t ave_after = 0;
+ uint64_t ave_before = 0;
+ uint64_t nsec_before_min_tid = 0;
+ uint64_t nsec_before_max_tid = 0;
+ uint64_t nsec_after_min_tid = 0;
+ uint64_t nsec_after_max_tid = 0;
+
+ memset(stat, 0, sizeof(*stat));
+ stat->nsec_before_min = UINT64_MAX;
+ stat->nsec_after_min = UINT64_MAX;
+
+ for (int i = 1; i < test_global->opt.cpu_count + 1; i++) {
+ stat->nsec_before_sum += s[i].nsec_before_sum;
+ stat->nsec_after_sum += s[i].nsec_after_sum;
+ stat->num_before += s[i].num_before;
+ stat->num_exact += s[i].num_exact;
+ stat->num_after += s[i].num_after;
+ stat->num_too_near += s[i].num_too_near;
+
+ if (s[i].nsec_before_min < stat->nsec_before_min) {
+ stat->nsec_before_min = s[i].nsec_before_min;
+ stat->nsec_before_min_idx = s[i].nsec_before_min_idx;
+ nsec_before_min_tid = i;
+ }
+
+ if (s[i].nsec_after_min < stat->nsec_after_min) {
+ stat->nsec_after_min = s[i].nsec_after_min;
+ stat->nsec_after_min_idx = s[i].nsec_after_min_idx;
+ nsec_after_min_tid = i;
+ }
+
+ if (s[i].nsec_before_max > stat->nsec_before_max) {
+ stat->nsec_before_max = s[i].nsec_before_max;
+ stat->nsec_before_max_idx = s[i].nsec_before_max_idx;
+ nsec_before_max_tid = i;
+ }
+
+ if (s[i].nsec_after_max > stat->nsec_after_max) {
+ stat->nsec_after_max = s[i].nsec_after_max;
+ stat->nsec_after_max_idx = s[i].nsec_after_max_idx;
+ nsec_after_max_tid = i;
+ }
+ }
+
+ if (stat->num_after)
+ ave_after = stat->nsec_after_sum / stat->num_after;
+ else
+ stat->nsec_after_min = 0;
+
+ if (stat->num_before)
+ ave_before = stat->nsec_before_sum / stat->num_before;
+ else
+ stat->nsec_before_min = 0;
+
+ tot_timers = stat->num_before + stat->num_after + stat->num_exact;
+
+ if (log) {
+ FILE *file = test_global->file;
+
+ fprintf(file, " Timer thread tmo(ns) diff(ns)\n");
+
+ for (uint64_t i = 0; i < tot_timers; i++) {
+ fprintf(file, "%8" PRIu64 " %7u %12" PRIu64 " %10"
+ PRIi64 "\n", i, log[i].tid, log[i].tmo_ns, log[i].diff_ns);
+ }
+
+ fprintf(file, "\n");
+ }
+
+ printf("\nTest results:\n");
+ printf(" num after: %12" PRIu64 " / %.2f%%\n",
+ stat->num_after, 100.0 * stat->num_after / tot_timers);
+ printf(" num before: %12" PRIu64 " / %.2f%%\n",
+ stat->num_before, 100.0 * stat->num_before / tot_timers);
+ printf(" num exact: %12" PRIu64 " / %.2f%%\n",
+ stat->num_exact, 100.0 * stat->num_exact / tot_timers);
+ printf(" num retry: %12" PRIu64 " / %.2f%%\n",
+ stat->num_too_near, 100.0 * stat->num_too_near / tot_timers);
+ printf(" error after (nsec):\n");
+ print_nsec_error("min", stat->nsec_after_min, res_ns, nsec_after_min_tid,
+ stat->nsec_after_min_idx);
+ print_nsec_error("max", stat->nsec_after_max, res_ns, nsec_after_max_tid,
+ stat->nsec_after_max_idx);
+ print_nsec_error("ave", ave_after, res_ns, -1, -1);
+ printf(" error before (nsec):\n");
+ print_nsec_error("min", stat->nsec_before_min, res_ns, nsec_before_min_tid,
+ stat->nsec_before_min_idx);
+ print_nsec_error("max", stat->nsec_before_max, res_ns, nsec_before_max_tid,
+ stat->nsec_before_max_idx);
+ print_nsec_error("ave", ave_before, res_ns, -1, -1);
+
+ if (test_global->opt.mode == MODE_PERIODIC && !test_global->opt.offset_ns) {
+ int idx = 0;
+ int64_t max = 0;
+
+ for (int i = 0; i < (int)test_global->opt.alloc_timers; i++) {
+ timer_ctx_t *t = &test_global->timer_ctx[i];
+ int64_t v = t->first_tmo_diff;
+
+ if (ODPH_ABS(v) > ODPH_ABS(max)) {
+ max = v;
+ idx = i;
+ }
+ }
+
+ printf(" first timeout difference to one period, based on %s (nsec):\n",
+ test_global->timer_ctx[idx].tmo_tick ? "timeout tick" : "time");
+ print_nsec_error("max", max, res_ns, -1, -1);
+ }
+
+ int64_t max = 0;
+
+ for (int i = 0; i < (int)test_global->opt.alloc_timers; i++) {
+ timer_ctx_t *t = &test_global->timer_ctx[i];
+ int64_t v = t->nsec_final;
+
+ if (ODPH_ABS(v) > ODPH_ABS(max))
+ max = v;
+ }
+
+ printf(" final timeout error (nsec):\n");
+ print_nsec_error("max", max, res_ns, -1, -1);
+
+ printf("\n");
+}
+
+static void cancel_periodic_timers(test_global_t *test_global)
+{
+ uint64_t i, alloc_timers;
+ odp_timer_t timer;
+
+ alloc_timers = test_global->opt.alloc_timers;
+
+ for (i = 0; i < alloc_timers; i++) {
+ timer = test_global->timer_ctx[i].timer;
+
+ if (timer == ODP_TIMER_INVALID)
+ break;
+
+ if (odp_timer_periodic_cancel(timer))
+ printf("Failed to cancel periodic timer.\n");
+ }
+}
+
+static int run_test(void *arg)
+{
+ test_global_t *test_global = (test_global_t *)arg;
+ odp_event_t ev;
+ odp_time_t time;
+ uint64_t time_ns, diff_ns;
+ odp_timeout_t tmo;
+ uint64_t tmo_ns;
+ timer_ctx_t *ctx;
+ odp_thrmask_t mask;
+ uint64_t wait = odp_schedule_wait_time(10 * ODP_TIME_MSEC_IN_NS);
+ odp_schedule_group_t group = ODP_SCHED_GROUP_INVALID;
+ test_log_t *log = test_global->log;
+ enum mode_e mode = test_global->opt.mode;
+ uint64_t tot_timers = test_global->opt.tot_timers;
+ double period_dbl = test_global->period_dbl;
+ odp_timer_pool_t tp = test_global->timer_pool;
+ int tid = odp_thread_id();
+
+ if (tid > test_global->opt.cpu_count) {
+ printf("Error: tid %d is larger than cpu_count %d.\n", tid,
+ test_global->opt.cpu_count);
+ return 0;
+ }
+
+ test_stat_t *stat = &test_global->stat[tid];
+
+ memset(stat, 0, sizeof(*stat));
+ stat->nsec_before_min = UINT64_MAX;
+ stat->nsec_after_min = UINT64_MAX;
+
+ if (test_global->opt.groups) {
+ odp_thrmask_zero(&mask);
+ odp_thrmask_set(&mask, tid);
+ group = test_global->group[tid - 1];
+
+ if (odp_schedule_group_join(group, &mask)) {
+ printf("odp_schedule_group_join() failed\n");
+ return 0;
+ }
+ }
+
+ odp_barrier_wait(&test_global->barrier);
+
+ while (1) {
+ ev = odp_schedule(NULL, wait);
+ time = odp_time_global_strict();
+
+ if (ev == ODP_EVENT_INVALID) {
+ if (mode == MODE_PERIODIC) {
+ if (odp_atomic_load_u64(&test_global->last_events) >=
+ test_global->opt.alloc_timers)
+ break;
+
+ } else if (odp_atomic_load_u64(&test_global->events) >= tot_timers) {
+ break;
+ }
+
+ continue;
+ }
+
+ time_ns = odp_time_to_ns(time);
+ tmo = odp_timeout_from_event(ev);
+ ctx = odp_timeout_user_ptr(tmo);
+ tmo_ns = ctx->nsec;
+
+ if (mode == MODE_PERIODIC) {
+ if (!ctx->count && !test_global->opt.offset_ns) {
+ /*
+ * If first_tick is zero, the API allows the implementation to
+ * place the timer where it can, so we have to adjust our
+ * expectation of the timeout time.
+ */
+
+ uint64_t tmo_tick = odp_timeout_tick(tmo);
+
+ if (tmo_tick) {
+ /*
+ * Adjust by the difference between one period after start
+ * time and the timeout tick.
+ */
+ ctx->tmo_tick = 1;
+ ctx->first_tmo_diff =
+ (int64_t)odp_timer_tick_to_ns(tp, tmo_tick) -
+ (int64_t)odp_timer_tick_to_ns(tp, ctx->first_period);
+ tmo_ns += ctx->first_tmo_diff;
+ } else {
+ /*
+ * Timeout tick is not provided, so the best we can do is
+ * to just take the current time as a baseline.
+ */
+ ctx->first_tmo_diff = (int64_t)time_ns - (int64_t)tmo_ns;
+ tmo_ns = ctx->nsec = time_ns;
+ }
+
+ ctx->nsec = tmo_ns;
+ }
+
+ /* round to closest integer number */
+ tmo_ns += ctx->count * period_dbl + 0.5;
+ ctx->count++;
+ }
+
+ uint64_t events = odp_atomic_fetch_inc_u64(&test_global->events);
+
+ if (events >= test_global->opt.warmup_timers && events < tot_timers) {
+ uint64_t i = events - test_global->opt.warmup_timers;
+
+ ctx->nsec_final = (int64_t)time_ns - (int64_t)tmo_ns;
+
+ if (log) {
+ log[i].tmo_ns = tmo_ns;
+ log[i].tid = tid;
+ }
+
+ if (time_ns > tmo_ns) {
+ diff_ns = time_ns - tmo_ns;
+ stat->num_after++;
+ stat->nsec_after_sum += diff_ns;
+ if (diff_ns < stat->nsec_after_min) {
+ stat->nsec_after_min = diff_ns;
+ stat->nsec_after_min_idx = i;
+ }
+ if (diff_ns > stat->nsec_after_max) {
+ stat->nsec_after_max = diff_ns;
+ stat->nsec_after_max_idx = i;
+ }
+ if (log)
+ log[i].diff_ns = diff_ns;
+
+ } else if (time_ns < tmo_ns) {
+ diff_ns = tmo_ns - time_ns;
+ stat->num_before++;
+ stat->nsec_before_sum += diff_ns;
+ if (diff_ns < stat->nsec_before_min) {
+ stat->nsec_before_min = diff_ns;
+ stat->nsec_before_min_idx = i;
+ }
+ if (diff_ns > stat->nsec_before_max) {
+ stat->nsec_before_max = diff_ns;
+ stat->nsec_before_max_idx = i;
+ }
+ if (log)
+ log[i].diff_ns = -diff_ns;
+ } else {
+ stat->num_exact++;
+ }
+ }
+
+ if ((mode == MODE_RESTART_ABS || mode == MODE_RESTART_REL) &&
+ events < tot_timers - 1) {
+ /* Reset timer for next period */
+ odp_timer_t tim;
+ uint64_t nsec, tick;
+ odp_timer_retval_t ret;
+ unsigned int j;
+ unsigned int retries = test_global->opt.early_retry;
+ uint64_t start_ns = test_global->start_ns;
+ uint64_t period_ns = test_global->opt.period_ns;
+ odp_timer_start_t start_param;
+
+ tim = ctx->timer;
+
+ /* Depending on the option, retry when expiration
+ * time is too early */
+ for (j = 0; j < retries + 1; j++) {
+ if (mode == MODE_RESTART_ABS) {
+ /* Absolute time */
+ ctx->nsec += period_ns;
+ nsec = ctx->nsec - start_ns;
+ tick = test_global->start_tick +
+ odp_timer_ns_to_tick(tp, nsec);
+ start_param.tick_type = ODP_TIMER_TICK_ABS;
+ } else {
+ /* Relative time */
+ tick = test_global->period_tick;
+ time = odp_time_local();
+ time_ns = odp_time_to_ns(time);
+ ctx->nsec = time_ns + period_ns;
+ start_param.tick_type = ODP_TIMER_TICK_REL;
+ }
+
+ start_param.tmo_ev = ev;
+ start_param.tick = tick;
+
+ ret = odp_timer_start(tim, &start_param);
+ if (ret == ODP_TIMER_TOO_NEAR) {
+ if (events >= test_global->opt.warmup_timers)
+ stat->num_too_near++;
+ } else {
+ break;
+ }
+ }
+
+ if (ret != ODP_TIMER_SUCCESS) {
+ printf("Timer set failed: %i. Timeout nsec "
+ "%" PRIu64 "\n", ret, ctx->nsec);
+ return 0;
+ }
+ } else if (mode == MODE_PERIODIC) {
+ int ret = odp_timer_periodic_ack(ctx->timer, ev);
+
+ if (ret < 0)
+ printf("Failed to ack a periodic timer.\n");
+
+ if (ret == 2)
+ odp_atomic_inc_u64(&test_global->last_events);
+
+ if (ret == 2 || ret < 0)
+ odp_event_free(ev);
+ } else {
+ odp_event_free(ev);
+ }
+ }
+
+ if (test_global->opt.groups) {
+ if (odp_schedule_group_leave(group, &mask))
+ printf("odp_schedule_group_leave() failed\n");
+ }
+
+ return 0;
+}
+
+int main(int argc, char *argv[])
+{
+ odp_instance_t instance;
+ odp_init_t init;
+ test_opt_t test_opt;
+ test_global_t *test_global;
+ odph_helper_options_t helper_options;
+ odp_init_t *init_ptr = NULL;
+ int ret = 0;
+
+ /* Let helper collect its own arguments (e.g. --odph_proc) */
+ argc = odph_parse_options(argc, argv);
+ if (odph_options(&helper_options)) {
+ ODPH_ERR("Reading ODP helper options failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (parse_options(argc, argv, &test_opt))
+ return -1;
+
+ /* List features not to be used (may optimize performance) */
+ odp_init_param_init(&init);
+ init.not_used.feat.cls = 1;
+ init.not_used.feat.compress = 1;
+ init.not_used.feat.crypto = 1;
+ init.not_used.feat.ipsec = 1;
+ init.not_used.feat.tm = 1;
+
+ init.mem_model = helper_options.mem_model;
+
+ if (test_opt.init)
+ init_ptr = &init;
+
+ /* Init ODP before calling anything else */
+ if (odp_init_global(&instance, init_ptr, NULL)) {
+ printf("Global init failed.\n");
+ return -1;
+ }
+
+ /* Init this thread */
+ if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
+ printf("Local init failed.\n");
+ return -1;
+ }
+
+ odp_sys_info_print();
+
+ /* Configure scheduler */
+ odp_schedule_config(NULL);
+
+ odp_shm_t shm = ODP_SHM_INVALID, shm_ctx = ODP_SHM_INVALID, shm_log = ODP_SHM_INVALID;
+ uint64_t size = sizeof(test_global_t);
+
+ shm = odp_shm_reserve("timer_accuracy", size,
+ ODP_CACHE_LINE_SIZE, ODP_SHM_SINGLE_VA);
+
+ if (shm == ODP_SHM_INVALID) {
+ printf("Shm alloc failed.\n");
+ return -1;
+ }
+
+ test_global = odp_shm_addr(shm);
+ memset(test_global, 0, size);
+ memcpy(&test_global->opt, &test_opt, sizeof(test_opt_t));
+
+ size = test_global->opt.alloc_timers * sizeof(timer_ctx_t);
+ shm_ctx = odp_shm_reserve("timer_accuracy_ctx", size,
+ ODP_CACHE_LINE_SIZE, ODP_SHM_SINGLE_VA);
+
+ if (shm_ctx == ODP_SHM_INVALID) {
+ printf("Timer context alloc failed.\n");
+ ret = -1;
+ goto quit;
+ }
+
+ test_global->timer_ctx = odp_shm_addr(shm_ctx);
+ memset(test_global->timer_ctx, 0, size);
+
+ if (test_global->opt.output) {
+ test_global->file = fopen(test_global->opt.filename, "w");
+ if (test_global->file == NULL) {
+ printf("Failed to open output file %s: %s\n",
+ test_global->opt.filename, strerror(errno));
+ ret = -1;
+ goto quit;
+ }
+
+ size = (test_global->opt.tot_timers - test_global->opt.warmup_timers) *
+ sizeof(test_log_t);
+ shm_log = odp_shm_reserve("timer_accuracy_log", size, sizeof(test_log_t),
+ ODP_SHM_SINGLE_VA);
+
+ if (shm_log == ODP_SHM_INVALID) {
+ printf("Test log alloc failed.\n");
+ ret = -1;
+ goto quit;
+ }
+
+ test_global->log = odp_shm_addr(shm_log);
+ memset(test_global->log, 0, size);
+ }
+
+ odph_thread_t thread_tbl[MAX_WORKERS];
+ int num_workers;
+ odp_cpumask_t cpumask;
+ char cpumaskstr[ODP_CPUMASK_STR_SIZE];
+ odph_thread_common_param_t thr_common;
+ odph_thread_param_t thr_param;
+
+ memset(thread_tbl, 0, sizeof(thread_tbl));
+
+ num_workers = MAX_WORKERS;
+ if (test_global->opt.cpu_count && test_global->opt.cpu_count < MAX_WORKERS)
+ num_workers = test_global->opt.cpu_count;
+ num_workers = odp_cpumask_default_worker(&cpumask, num_workers);
+ test_global->opt.cpu_count = num_workers;
+ odp_cpumask_to_str(&cpumask, cpumaskstr, sizeof(cpumaskstr));
+
+ printf("num worker threads: %i\n", num_workers);
+ printf("first CPU: %i\n", odp_cpumask_first(&cpumask));
+ printf("cpu mask: %s\n", cpumaskstr);
+
+ ret = create_timers(test_global);
+ if (ret)
+ goto quit;
+
+ odp_barrier_init(&test_global->barrier, num_workers + 1);
+ odp_atomic_init_u64(&test_global->events, 0);
+ odp_atomic_init_u64(&test_global->last_events, 0);
+
+ odph_thread_param_init(&thr_param);
+ thr_param.start = run_test;
+ thr_param.arg = (void *)test_global;
+ thr_param.thr_type = ODP_THREAD_WORKER;
+
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = instance;
+ thr_common.cpumask = &cpumask;
+ thr_common.share_param = 1;
+
+ odph_thread_create(thread_tbl, &thr_common, &thr_param, num_workers);
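+ /* Wait until all workers are ready before starting the timers */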
+ odp_barrier_wait(&test_global->barrier);
+
+ ret = start_timers(test_global);
+ if (ret)
+ goto quit;
+
+ if (test_global->opt.mode == MODE_PERIODIC) {
+ while (odp_atomic_load_u64(&test_global->events) < test_global->opt.tot_timers)
+ odp_time_wait_ns(10 * ODP_TIME_MSEC_IN_NS);
+
+ cancel_periodic_timers(test_global);
+ }
+
+ odph_thread_join(thread_tbl, num_workers);
+ print_stat(test_global);
+
+quit:
+ if (test_global->file)
+ fclose(test_global->file);
+
+ if (destroy_timers(test_global))
+ ret = -1;
+
+ if (shm_log != ODP_SHM_INVALID && odp_shm_free(shm_log))
+ ret = -1;
+
+ if (shm_ctx != ODP_SHM_INVALID && odp_shm_free(shm_ctx))
+ ret = -1;
+
+ if (odp_shm_free(shm))
+ ret = -1;
+
+ if (odp_term_local()) {
+ printf("Term local failed.\n");
+ ret = -1;
+ }
+
+ if (odp_term_global(instance)) {
+ printf("Term global failed.\n");
+ ret = -1;
+ }
+
+ return ret;
+}
diff --git a/test/performance/odp_timer_accuracy_run.sh b/test/performance/odp_timer_accuracy_run.sh
new file mode 100755
index 000000000..84ad2a573
--- /dev/null
+++ b/test/performance/odp_timer_accuracy_run.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2022-2024 Nokia
+#
+
+TEST_DIR="${TEST_DIR:-$(dirname $0)}"
+
+$TEST_DIR/odp_timer_accuracy${EXEEXT} -p 100000000 -n 10
+
+RET_VAL=$?
+if [ $RET_VAL -ne 0 ] ; then
+ echo odp_timer_accuracy FAILED
+ exit $RET_VAL
+fi
+
+exit 0
diff --git a/test/performance/odp_timer_perf.c b/test/performance/odp_timer_perf.c
index 918267a1b..6da5f2296 100644
--- a/test/performance/odp_timer_perf.c
+++ b/test/performance/odp_timer_perf.c
@@ -1,7 +1,5 @@
-/* Copyright (c) 2019-2023, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2019-2023 Nokia
*/
/**
diff --git a/test/performance/odp_timer_perf_run.sh b/test/performance/odp_timer_perf_run.sh
index 7738ca91b..aa8890e8e 100755
--- a/test/performance/odp_timer_perf_run.sh
+++ b/test/performance/odp_timer_perf_run.sh
@@ -1,9 +1,7 @@
#!/bin/sh
#
-# Copyright (c) 2020, Nokia
-# All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2020 Nokia
#
TEST_DIR="${TEST_DIR:-$(dirname $0)}"
diff --git a/test/validation/api/Makefile.am b/test/validation/api/Makefile.am
index 5a3c0216b..5a846b0c6 100644
--- a/test/validation/api/Makefile.am
+++ b/test/validation/api/Makefile.am
@@ -6,6 +6,7 @@ ODP_MODULES = align \
chksum \
classification \
comp \
+ cpu \
cpumask \
crypto \
dma \
@@ -46,6 +47,7 @@ TESTS = \
chksum/chksum_main$(EXEEXT) \
classification/classification_main$(EXEEXT) \
comp/comp_main$(EXEEXT) \
+ cpu/cpu_main$(EXEEXT) \
cpumask/cpumask_main$(EXEEXT) \
crypto/crypto_main$(EXEEXT) \
dma/dma_main$(EXEEXT) \
diff --git a/test/validation/api/README b/test/validation/api/README
index 7ee903478..665bb7896 100644
--- a/test/validation/api/README
+++ b/test/validation/api/README
@@ -1,8 +1,5 @@
-Copyright (c) 2015-2018, Linaro Limited
-All rights reserved.
-
-SPDX-License-Identifier: BSD-3-Clause
-
+SPDX-License-Identifier: BSD-3-Clause
+Copyright (c) 2015-2018 Linaro Limited
To add tests in here, please observe the rules listed below. This list
is a brief overview, for a more detailed explanation of the test
diff --git a/test/validation/api/atomic/atomic.c b/test/validation/api/atomic/atomic.c
index fab982462..8ae541fe4 100644
--- a/test/validation/api/atomic/atomic.c
+++ b/test/validation/api/atomic/atomic.c
@@ -1,8 +1,6 @@
-/* Copyright (c) 2014-2018, Linaro Limited
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2014-2018 Linaro Limited
* Copyright (c) 2021-2022 Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
*/
#include <malloc.h>
@@ -55,7 +53,7 @@ static void thread_init(void)
global_shm = odp_shm_lookup(GLOBAL_SHM_NAME);
global_mem = odp_shm_addr(global_shm);
- CU_ASSERT_PTR_NOT_NULL(global_mem);
+ CU_ASSERT(global_mem != NULL);
}
static void test_atomic_inc_32(void)
diff --git a/test/validation/api/barrier/barrier.c b/test/validation/api/barrier/barrier.c
index 7dc9a44c6..aaf646e8a 100644
--- a/test/validation/api/barrier/barrier.c
+++ b/test/validation/api/barrier/barrier.c
@@ -1,8 +1,6 @@
-/* Copyright (c) 2014-2018, Linaro Limited
- * Copyright (c) 2022, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2014-2018 Linaro Limited
+ * Copyright (c) 2022 Nokia
*/
#include <malloc.h>
@@ -106,7 +104,7 @@ static per_thread_mem_t *thread_init(void)
global_shm = odp_shm_lookup(GLOBAL_SHM_NAME);
global_mem = odp_shm_addr(global_shm);
- CU_ASSERT_PTR_NOT_NULL(global_mem);
+ CU_ASSERT(global_mem != NULL);
per_thread_mem->global_mem = global_mem;
diff --git a/test/validation/api/buffer/buffer.c b/test/validation/api/buffer/buffer.c
index 2a79ed27e..89f16d283 100644
--- a/test/validation/api/buffer/buffer.c
+++ b/test/validation/api/buffer/buffer.c
@@ -1,16 +1,15 @@
-/* Copyright (c) 2014-2018, Linaro Limited
- * Copyright (c) 2019-2022, Nokia
- * Copyright (c) 2022, Marvell
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2014-2018 Linaro Limited
+ * Copyright (c) 2019-2024 Nokia
+ * Copyright (c) 2022 Marvell
*/
#include <odp_api.h>
-#include <odp/helper/odph_debug.h>
+#include <odp/helper/odph_api.h>
#include "odp_cunit_common.h"
#define BUF_ALIGN ODP_CACHE_LINE_SIZE
+#define BUF_MAX_SIZE 65536
#define BUF_SIZE 1500
#define BUF_NUM 100
#define BURST 8
@@ -61,6 +60,7 @@ static void test_pool_alloc_free(const odp_pool_param_t *param)
uint32_t num_buf = 0;
void *addr;
odp_event_subtype_t subtype;
+ const uint32_t max_size = pool_capa.buf.max_size;
uint32_t num = param->buf.num;
uint32_t size = param->buf.size;
uint32_t align = param->buf.align;
@@ -104,7 +104,8 @@ static void test_pool_alloc_free(const odp_pool_param_t *param)
wrong_type = true;
if (subtype != ODP_EVENT_NO_SUBTYPE)
wrong_subtype = true;
- if (odp_buffer_size(buffer[i]) < size)
+ if (odp_buffer_size(buffer[i]) < size ||
+ (max_size && odp_buffer_size(buffer[i]) > max_size))
wrong_size = true;
addr = odp_buffer_addr(buffer[i]);
@@ -142,6 +143,7 @@ static void test_pool_alloc_free_multi(const odp_pool_param_t *param)
odp_event_t ev;
void *addr;
odp_event_subtype_t subtype;
+ const uint32_t max_size = pool_capa.buf.max_size;
uint32_t num = param->buf.num;
uint32_t size = param->buf.size;
uint32_t align = param->buf.align;
@@ -193,7 +195,8 @@ static void test_pool_alloc_free_multi(const odp_pool_param_t *param)
wrong_type = true;
if (subtype != ODP_EVENT_NO_SUBTYPE)
wrong_subtype = true;
- if (odp_buffer_size(buffer[i]) < size)
+ if (odp_buffer_size(buffer[i]) < size ||
+ (max_size && odp_buffer_size(buffer[i]) > max_size))
wrong_size = true;
addr = odp_buffer_addr(buffer[i]);
@@ -389,6 +392,16 @@ static void buffer_test_pool_alloc_free(void)
test_pool_alloc_free(&default_param);
}
+static void buffer_test_pool_alloc_free_max_size(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
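+ /* A max_size capability of zero means no limit; fall back to BUF_MAX_SIZE */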
+ param.buf.size = pool_capa.buf.max_size ? pool_capa.buf.max_size : BUF_MAX_SIZE;
+
+ test_pool_alloc_free(&param);
+}
+
static void buffer_test_pool_alloc_free_min_cache(void)
{
odp_pool_param_t param;
@@ -412,6 +425,16 @@ static void buffer_test_pool_alloc_free_multi(void)
test_pool_alloc_free_multi(&default_param);
}
+static void buffer_test_pool_alloc_free_multi_max_size(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ param.buf.size = pool_capa.buf.max_size ? pool_capa.buf.max_size : BUF_MAX_SIZE;
+
+ test_pool_alloc_free_multi(&param);
+}
+
static void buffer_test_pool_alloc_free_multi_min_cache(void)
{
odp_pool_param_t param;
@@ -552,6 +575,7 @@ static void buffer_test_user_area(void)
CU_ASSERT(prev != addr);
ev = odp_buffer_to_event(buffer[i]);
+ odp_event_user_flag_set(ev, 1);
CU_ASSERT(odp_event_user_area(ev) == addr);
CU_ASSERT(odp_event_user_area_and_flag(ev, &flag) == addr);
CU_ASSERT(flag < 0);
@@ -570,9 +594,11 @@ static void buffer_test_user_area(void)
odp_testinfo_t buffer_suite[] = {
ODP_TEST_INFO(buffer_test_pool_alloc_free),
+ ODP_TEST_INFO(buffer_test_pool_alloc_free_max_size),
ODP_TEST_INFO(buffer_test_pool_alloc_free_min_cache),
ODP_TEST_INFO(buffer_test_pool_alloc_free_max_cache),
ODP_TEST_INFO(buffer_test_pool_alloc_free_multi),
+ ODP_TEST_INFO(buffer_test_pool_alloc_free_multi_max_size),
ODP_TEST_INFO(buffer_test_pool_alloc_free_multi_min_cache),
ODP_TEST_INFO(buffer_test_pool_alloc_free_multi_max_cache),
ODP_TEST_INFO(buffer_test_pool_single_pool),
diff --git a/test/validation/api/chksum/chksum.c b/test/validation/api/chksum/chksum.c
index 0be418f3a..17f8fed12 100644
--- a/test/validation/api/chksum/chksum.c
+++ b/test/validation/api/chksum/chksum.c
@@ -1,7 +1,5 @@
-/* Copyright (c) 2017-2018, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2017-2018 Linaro Limited
*/
#include <odp_api.h>
diff --git a/test/validation/api/classification/classification.c b/test/validation/api/classification/classification.c
index ef9a647cb..ef975c237 100644
--- a/test/validation/api/classification/classification.c
+++ b/test/validation/api/classification/classification.c
@@ -1,7 +1,5 @@
-/* Copyright (c) 2015-2018, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Linaro Limited
*/
#include <odp_api.h>
diff --git a/test/validation/api/classification/classification.h b/test/validation/api/classification/classification.h
index 70dcc6230..1f66b832a 100644
--- a/test/validation/api/classification/classification.h
+++ b/test/validation/api/classification/classification.h
@@ -1,7 +1,5 @@
-/* Copyright (c) 2015-2018, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Linaro Limited
*/
#ifndef _ODP_TEST_CLASSIFICATION_H_
diff --git a/test/validation/api/classification/odp_classification_basic.c b/test/validation/api/classification/odp_classification_basic.c
index ca0b58ad5..b5ccdcfea 100644
--- a/test/validation/api/classification/odp_classification_basic.c
+++ b/test/validation/api/classification/odp_classification_basic.c
@@ -1,8 +1,6 @@
-/* Copyright (c) 2015-2018, Linaro Limited
- * Copyright (c) 2021-2023, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Linaro Limited
+ * Copyright (c) 2021-2023 Nokia
*/
#include <odp_cunit_common.h>
@@ -22,14 +20,14 @@ static void test_defaults(uint8_t fill)
CU_ASSERT(cos_param.action == ODP_COS_ACTION_ENQUEUE);
CU_ASSERT(cos_param.num_queue == 1);
- CU_ASSERT_EQUAL(cos_param.stats_enable, false);
- CU_ASSERT_EQUAL(cos_param.red.enable, false);
- CU_ASSERT_EQUAL(cos_param.bp.enable, false);
- CU_ASSERT_EQUAL(cos_param.vector.enable, false);
+ CU_ASSERT(cos_param.stats_enable == false);
+ CU_ASSERT(cos_param.red.enable == false);
+ CU_ASSERT(cos_param.bp.enable == false);
+ CU_ASSERT(cos_param.vector.enable == false);
memset(&pmr_param, fill, sizeof(pmr_param));
odp_cls_pmr_param_init(&pmr_param);
- CU_ASSERT_EQUAL(pmr_param.range_term, false);
+ CU_ASSERT(pmr_param.range_term == false);
}
static void cls_default_values(void)
diff --git a/test/validation/api/classification/odp_classification_common.c b/test/validation/api/classification/odp_classification_common.c
index 1fb4c51b5..b767a7582 100644
--- a/test/validation/api/classification/odp_classification_common.c
+++ b/test/validation/api/classification/odp_classification_common.c
@@ -1,8 +1,6 @@
-/* Copyright (c) 2015-2018, Linaro Limited
- * Copyright (c) 2020, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Linaro Limited
+ * Copyright (c) 2020 Nokia
*/
#include "odp_classification_testsuites.h"
diff --git a/test/validation/api/classification/odp_classification_test_pmr.c b/test/validation/api/classification/odp_classification_test_pmr.c
index 7db0e1b5e..04cf098e3 100644
--- a/test/validation/api/classification/odp_classification_test_pmr.c
+++ b/test/validation/api/classification/odp_classification_test_pmr.c
@@ -1,8 +1,6 @@
-/* Copyright (c) 2015-2018, Linaro Limited
- * Copyright (c) 2019-2023, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Linaro Limited
+ * Copyright (c) 2019-2023 Nokia
*/
#include "odp_classification_testsuites.h"
diff --git a/test/validation/api/classification/odp_classification_tests.c b/test/validation/api/classification/odp_classification_tests.c
index d81884006..086b712ad 100644
--- a/test/validation/api/classification/odp_classification_tests.c
+++ b/test/validation/api/classification/odp_classification_tests.c
@@ -1,8 +1,6 @@
-/* Copyright (c) 2015-2018, Linaro Limited
- * Copyright (c) 2020-2023, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Linaro Limited
+ * Copyright (c) 2020-2023 Nokia
*/
#include "odp_classification_testsuites.h"
@@ -226,7 +224,7 @@ void configure_cls_pmr_chain(odp_bool_t enable_pktv)
uint16_t val;
uint16_t maskport;
- char cosname[ODP_QUEUE_NAME_LEN];
+ char cosname[ODP_COS_NAME_LEN];
odp_queue_param_t qparam;
odp_cls_cos_param_t cls_param;
char queuename[ODP_QUEUE_NAME_LEN];
diff --git a/test/validation/api/classification/odp_classification_testsuites.h b/test/validation/api/classification/odp_classification_testsuites.h
index 888613b1f..34f93ee8d 100644
--- a/test/validation/api/classification/odp_classification_testsuites.h
+++ b/test/validation/api/classification/odp_classification_testsuites.h
@@ -1,7 +1,5 @@
-/* Copyright (c) 2015-2018, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Linaro Limited
*/
#ifndef ODP_CLASSIFICATION_TESTSUITES_H_
diff --git a/test/validation/api/comp/comp.c b/test/validation/api/comp/comp.c
index b7dfcd359..7078453df 100644
--- a/test/validation/api/comp/comp.c
+++ b/test/validation/api/comp/comp.c
@@ -1,7 +1,5 @@
-/* Copyright (c) 2018, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018 Linaro Limited
*/
#include <odp_api.h>
diff --git a/test/validation/api/comp/test_vectors.h b/test/validation/api/comp/test_vectors.h
index 36d98b30d..c99041c9a 100644
--- a/test/validation/api/comp/test_vectors.h
+++ b/test/validation/api/comp/test_vectors.h
@@ -1,7 +1,5 @@
-/* Copyright (c) 2018, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018 Linaro Limited
*/
#ifndef _ODP_TEST_COMP_VECTORS_H_
diff --git a/test/validation/api/cpu/.gitignore b/test/validation/api/cpu/.gitignore
new file mode 100644
index 000000000..1b07639e6
--- /dev/null
+++ b/test/validation/api/cpu/.gitignore
@@ -0,0 +1 @@
+cpu_main
diff --git a/test/validation/api/cpu/Makefile.am b/test/validation/api/cpu/Makefile.am
new file mode 100644
index 000000000..c53fbc850
--- /dev/null
+++ b/test/validation/api/cpu/Makefile.am
@@ -0,0 +1,4 @@
+include ../Makefile.inc
+
+test_PROGRAMS = cpu_main
+cpu_main_SOURCES = cpu.c
diff --git a/test/validation/api/cpu/cpu.c b/test/validation/api/cpu/cpu.c
new file mode 100644
index 000000000..5b05a0f83
--- /dev/null
+++ b/test/validation/api/cpu/cpu.c
@@ -0,0 +1,461 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2024 Nokia
+ * Copyright (c) 2015-2018 Linaro Limited
+ */
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+#include "odp_cunit_common.h"
+#include "test_common_macros.h"
+
+#define PERIODS_100_MSEC 160
+#define RES_TRY_NUM 10
+#define GIGA_HZ 1000000000ULL
+#define KILO_HZ 1000ULL
+
+/* 10 usec wait time assumes >100kHz resolution on CPU cycles counter */
+#define WAIT_TIME (10 * ODP_TIME_USEC_IN_NS)
+
+/* Data for cache prefetch test cases */
+static uint8_t global_data[8 * ODP_CACHE_LINE_SIZE] ODP_ALIGNED_CACHE;
+
+static int check_cycle_counter(void)
+{
+ if (odp_cpu_cycles_max() == 0) {
+ printf("Cycle counter is not supported, skipping test\n");
+ return ODP_TEST_INACTIVE;
+ }
+
+ return ODP_TEST_ACTIVE;
+}
+
+static int check_cpu_hz(void)
+{
+ if (odp_cpu_hz() == 0) {
+ printf("odp_cpu_hz() is not supported, skipping test\n");
+ return ODP_TEST_INACTIVE;
+ }
+
+ return ODP_TEST_ACTIVE;
+}
+
+static int check_cpu_hz_max(void)
+{
+ if (odp_cpu_hz_max() == 0) {
+ printf("odp_cpu_hz_max() is not supported, skipping test\n");
+ return ODP_TEST_INACTIVE;
+ }
+ return ODP_TEST_ACTIVE;
+}
+
+static int check_cpu_hz_id(void)
+{
+ uint64_t hz;
+ odp_cpumask_t mask;
+ int i, num, cpu;
+
+ num = odp_cpumask_all_available(&mask);
+ cpu = odp_cpumask_first(&mask);
+
+ for (i = 0; i < num; i++) {
+ hz = odp_cpu_hz_id(cpu);
+ if (hz == 0) {
+ printf("odp_cpu_hz_id() is not supported by CPU %d, skipping test\n", cpu);
+ return ODP_TEST_INACTIVE;
+ }
+ cpu = odp_cpumask_next(&mask, cpu);
+ }
+
+ return ODP_TEST_ACTIVE;
+}
+
+static int check_cpu_hz_max_id(void)
+{
+ uint64_t hz;
+ odp_cpumask_t mask;
+ int i, num, cpu;
+
+ num = odp_cpumask_all_available(&mask);
+ cpu = odp_cpumask_first(&mask);
+
+ for (i = 0; i < num; i++) {
+ hz = odp_cpu_hz_max_id(cpu);
+ if (hz == 0) {
+ printf("odp_cpu_hz_max_id() is not supported by CPU %d, skipping test\n",
+ cpu);
+ return ODP_TEST_INACTIVE;
+ }
+ cpu = odp_cpumask_next(&mask, cpu);
+ }
+
+ return ODP_TEST_ACTIVE;
+}
+
+static void cpu_id(void)
+{
+ CU_ASSERT(odp_cpu_id() >= 0);
+}
+
+static void cpu_count(void)
+{
+ int cpus;
+
+ cpus = odp_cpu_count();
+ CU_ASSERT(0 < cpus);
+}
+
+static void cpu_model_str(void)
+{
+ char model[128];
+
+ snprintf(model, 128, "%s", odp_cpu_model_str());
+ CU_ASSERT(strlen(model) > 0);
+ CU_ASSERT(strlen(model) < 127);
+}
+
+static void cpu_model_str_id(void)
+{
+ char model[128];
+ odp_cpumask_t mask;
+ int i, num, cpu;
+
+ num = odp_cpumask_all_available(&mask);
+ cpu = odp_cpumask_first(&mask);
+
+ for (i = 0; i < num; i++) {
+ snprintf(model, 128, "%s", odp_cpu_model_str_id(cpu));
+ CU_ASSERT(strlen(model) > 0);
+ CU_ASSERT(strlen(model) < 127);
+ cpu = odp_cpumask_next(&mask, cpu);
+ }
+}
+
+static void cpu_hz(void)
+{
+ uint64_t hz = odp_cpu_hz();
+
+ /* Test value sanity: less than 10GHz */
+ CU_ASSERT(hz < 10 * GIGA_HZ);
+
+ /* larger than 1kHz */
+ CU_ASSERT(hz > 1 * KILO_HZ);
+}
+
+static void cpu_hz_id(void)
+{
+ uint64_t hz;
+ odp_cpumask_t mask;
+ int i, num, cpu;
+
+ num = odp_cpumask_all_available(&mask);
+ cpu = odp_cpumask_first(&mask);
+
+ for (i = 0; i < num; i++) {
+ hz = odp_cpu_hz_id(cpu);
+ /* Test value sanity: less than 10GHz */
+ CU_ASSERT(hz < 10 * GIGA_HZ);
+ /* larger than 1kHz */
+ CU_ASSERT(hz > 1 * KILO_HZ);
+ cpu = odp_cpumask_next(&mask, cpu);
+ }
+}
+
+static void cpu_hz_max(void)
+{
+ uint64_t hz = odp_cpu_hz_max();
+
+ /* Sanity check value */
+ CU_ASSERT(hz > 1 * KILO_HZ);
+ CU_ASSERT(hz < 20 * GIGA_HZ);
+}
+
+static void cpu_hz_max_id(void)
+{
+ uint64_t hz;
+ odp_cpumask_t mask;
+ int i, num, cpu;
+
+ num = odp_cpumask_all_available(&mask);
+ cpu = odp_cpumask_first(&mask);
+
+ for (i = 0; i < num; i++) {
+ hz = odp_cpu_hz_max_id(cpu);
+ /* Sanity check value */
+ CU_ASSERT(hz > 1 * KILO_HZ);
+ CU_ASSERT(hz < 20 * GIGA_HZ);
+ cpu = odp_cpumask_next(&mask, cpu);
+ }
+}
+
+static void cpu_cycles(void)
+{
+ uint64_t c2, c1, diff, max;
+
+ c1 = odp_cpu_cycles();
+ odp_time_wait_ns(WAIT_TIME);
+ c2 = odp_cpu_cycles();
+
+ CU_ASSERT(c2 != c1);
+
+ max = odp_cpu_cycles_max();
+
+ /* With a 10 usec delay, the diff should be small compared to the maximum.
+ * Otherwise, the counter is going backwards. */
+ if (c2 > c1) {
+ diff = c2 - c1;
+ CU_ASSERT(diff < (max - diff));
+ }
+
+ /* Same applies also when there was a wrap. */
+ if (c2 < c1) {
+ diff = max - c1 + c2;
+ CU_ASSERT(diff < (max - diff));
+ }
+}
+
+static void cpu_cycles_diff(void)
+{
+ uint64_t c2, c1, max;
+ uint64_t tmp, diff, res;
+
+ res = odp_cpu_cycles_resolution();
+ max = odp_cpu_cycles_max();
+
+ c1 = res;
+ c2 = 2 * res;
+ diff = odp_cpu_cycles_diff(c2, c1);
+ CU_ASSERT(diff == res);
+
+ c1 = odp_cpu_cycles();
+ odp_time_wait_ns(WAIT_TIME);
+ c2 = odp_cpu_cycles();
+ diff = odp_cpu_cycles_diff(c2, c1);
+ CU_ASSERT(diff > 0);
+ CU_ASSERT(diff < (max - diff));
+
+ /* check resolution for wrap */
+ c1 = max - 2 * res;
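+ /* Sample until the counter value is at or below c1, i.e. as if it had wrapped */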
+ do
+ c2 = odp_cpu_cycles();
+ while (c1 < c2);
+
+ diff = odp_cpu_cycles_diff(c1, c1);
+ CU_ASSERT(diff == 0);
+
+ /* wrap */
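+ /* expected diff over the wrap: cycles from c1 up to max, one wrap step (res), plus c2 */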
+ tmp = c2 + (max - c1) + res;
+ diff = odp_cpu_cycles_diff(c2, c1);
+ CU_ASSERT(diff == tmp);
+
+ /* no wrap, revert args */
+ tmp = c1 - c2;
+ diff = odp_cpu_cycles_diff(c1, c2);
+ CU_ASSERT(diff == tmp);
+}
+
+static void cpu_cycles_max(void)
+{
+ uint64_t c2, c1;
+ uint64_t max1, max2;
+
+ max1 = odp_cpu_cycles_max();
+ odp_time_wait_ns(WAIT_TIME);
+ max2 = odp_cpu_cycles_max();
+
+ CU_ASSERT(max1 >= UINT32_MAX / 2);
+ CU_ASSERT(max1 == max2);
+
+ c1 = odp_cpu_cycles();
+ odp_time_wait_ns(WAIT_TIME);
+ c2 = odp_cpu_cycles();
+
+ CU_ASSERT(c1 <= max1 && c2 <= max1);
+}
+
+static void cpu_cycles_resolution(void)
+{
+ int i;
+ uint64_t res;
+ uint64_t c2, c1, max;
+ uint64_t test_cycles = odp_cpu_hz() / 100; /* CPU cycles in 10 msec */
+
+ max = odp_cpu_cycles_max();
+
+ res = odp_cpu_cycles_resolution();
+ CU_ASSERT(res != 0);
+ CU_ASSERT(res < max / 1024);
+
+ for (i = 0; i < RES_TRY_NUM; i++) {
+ c1 = odp_cpu_cycles();
+ odp_time_wait_ns(10 * ODP_TIME_MSEC_IN_NS + i);
+ c2 = odp_cpu_cycles();
+
+ /* Diff may be zero with low resolution */
+ if (test_cycles && test_cycles > res) {
+ uint64_t diff = odp_cpu_cycles_diff(c2, c1);
+
+ CU_ASSERT(diff >= res);
+ }
+ }
+}
+
+static void cpu_cycles_long_period(void)
+{
+ int i;
+ int periods = PERIODS_100_MSEC;
+ uint64_t max_period_duration = 100 * ODP_TIME_MSEC_IN_NS + periods - 1;
+ uint64_t c2, c1, c3, max;
+ uint64_t tmp, diff, res;
+
+ res = odp_cpu_cycles_resolution();
+ max = odp_cpu_cycles_max();
+
+ c3 = odp_cpu_cycles();
+
+ CU_ASSERT(c3 <= max);
+ /*
+ * If the cycle counter is not close to wrapping around during
+ * the test, speed up the test by not waiting for the wrap to
+ * happen. Assume a cycle counter frequency of less than 10 GHz.
+ */
+ CU_ASSERT(odp_cpu_hz_max() < 10ULL * ODP_TIME_SEC_IN_NS);
+ if (max - c3 > 10 * periods * max_period_duration)
+ periods = 10;
+
+ printf("\n Testing CPU cycles for %i seconds... ", periods / 10);
+
+ for (i = 0; i < periods; i++) {
+ c1 = odp_cpu_cycles();
+ odp_time_wait_ns(100 * ODP_TIME_MSEC_IN_NS + i);
+ c2 = odp_cpu_cycles();
+
+ CU_ASSERT(c2 != c1);
+ CU_ASSERT(c1 <= max && c2 <= max);
+
+ if (c2 > c1)
+ tmp = c2 - c1;
+ else
+ tmp = c2 + (max - c1) + res;
+
+ diff = odp_cpu_cycles_diff(c2, c1);
+ CU_ASSERT(diff == tmp);
+
+ /* wrap is detected and verified */
+ if (c2 < c1)
+ break;
+ }
+
+ /* wrap was detected, no need to continue */
+ if (i < periods) {
+ printf("wrap was detected.\n");
+ return;
+ }
+
+ /* wrap has to be detected if possible */
+ CU_ASSERT(max > UINT32_MAX);
+ CU_ASSERT((max - c3) > UINT32_MAX);
+
+ printf("wrap was not detected.\n");
+}
+
+static void cpu_pause(void)
+{
+ odp_cpu_pause();
+}
+
+static void cpu_prefetch(void)
+{
+ /* Cacheline aligned address */
+ odp_prefetch(&global_data[0]);
+
+ /* Not cacheline aligned address */
+ odp_prefetch(&global_data[ODP_CACHE_LINE_SIZE + 11]);
+
+ /* An invalid address */
+ odp_prefetch(NULL);
+
+ odp_prefetch_l1(&global_data[0]);
+ odp_prefetch_l1(&global_data[ODP_CACHE_LINE_SIZE + 11]);
+ odp_prefetch_l1(NULL);
+
+ odp_prefetch_l2(&global_data[0]);
+ odp_prefetch_l2(&global_data[ODP_CACHE_LINE_SIZE + 11]);
+ odp_prefetch_l2(NULL);
+
+ odp_prefetch_l3(&global_data[0]);
+ odp_prefetch_l3(&global_data[ODP_CACHE_LINE_SIZE + 11]);
+ odp_prefetch_l3(NULL);
+}
+
+static void cpu_prefetch_store(void)
+{
+ odp_prefetch_store(&global_data[0]);
+ odp_prefetch_store(&global_data[ODP_CACHE_LINE_SIZE + 11]);
+ odp_prefetch_store(NULL);
+
+ odp_prefetch_store_l1(&global_data[0]);
+ odp_prefetch_store_l1(&global_data[ODP_CACHE_LINE_SIZE + 11]);
+ odp_prefetch_store_l1(NULL);
+
+ odp_prefetch_store_l2(&global_data[0]);
+ odp_prefetch_store_l2(&global_data[ODP_CACHE_LINE_SIZE + 11]);
+ odp_prefetch_store_l2(NULL);
+
+ odp_prefetch_store_l3(&global_data[0]);
+ odp_prefetch_store_l3(&global_data[ODP_CACHE_LINE_SIZE + 11]);
+ odp_prefetch_store_l3(NULL);
+}
+
+static void cpu_prefetch_strm(void)
+{
+ odp_prefetch_strm_l1(&global_data[0]);
+ odp_prefetch_strm_l1(&global_data[ODP_CACHE_LINE_SIZE + 11]);
+ odp_prefetch_strm_l1(NULL);
+
+ odp_prefetch_store_strm_l1(&global_data[0]);
+ odp_prefetch_store_strm_l1(&global_data[ODP_CACHE_LINE_SIZE + 11]);
+ odp_prefetch_store_strm_l1(NULL);
+}
+
+odp_testinfo_t cpu_suite[] = {
+ ODP_TEST_INFO(cpu_id),
+ ODP_TEST_INFO(cpu_count),
+ ODP_TEST_INFO(cpu_model_str),
+ ODP_TEST_INFO(cpu_model_str_id),
+ ODP_TEST_INFO_CONDITIONAL(cpu_hz, check_cpu_hz),
+ ODP_TEST_INFO_CONDITIONAL(cpu_hz_id, check_cpu_hz_id),
+ ODP_TEST_INFO_CONDITIONAL(cpu_hz_max, check_cpu_hz_max),
+ ODP_TEST_INFO_CONDITIONAL(cpu_hz_max_id, check_cpu_hz_max_id),
+ ODP_TEST_INFO_CONDITIONAL(cpu_cycles, check_cycle_counter),
+ ODP_TEST_INFO_CONDITIONAL(cpu_cycles_diff, check_cycle_counter),
+ ODP_TEST_INFO_CONDITIONAL(cpu_cycles_max, check_cycle_counter),
+ ODP_TEST_INFO_CONDITIONAL(cpu_cycles_resolution, check_cycle_counter),
+ ODP_TEST_INFO_CONDITIONAL(cpu_cycles_long_period, check_cycle_counter),
+ ODP_TEST_INFO(cpu_pause),
+ ODP_TEST_INFO(cpu_prefetch),
+ ODP_TEST_INFO(cpu_prefetch_store),
+ ODP_TEST_INFO(cpu_prefetch_strm),
+ ODP_TEST_INFO_NULL,
+};
+
+odp_suiteinfo_t cpu_suites[] = {
+ {"CPU", NULL, NULL, cpu_suite},
+ ODP_SUITE_INFO_NULL,
+};
+
+int main(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(&argc, argv))
+ return -1;
+
+ ret = odp_cunit_register(cpu_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/validation/api/cpumask/cpumask.c b/test/validation/api/cpumask/cpumask.c
index db500df3a..9ca182fc9 100644
--- a/test/validation/api/cpumask/cpumask.c
+++ b/test/validation/api/cpumask/cpumask.c
@@ -1,8 +1,6 @@
-/* Copyright (c) 2015-2018, Linaro Limited
- * Copyright (c) 2021-2022, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Linaro Limited
+ * Copyright (c) 2021-2022 Nokia
*/
#include <odp_api.h>
diff --git a/test/validation/api/crypto/crypto_op_test.c b/test/validation/api/crypto/crypto_op_test.c
index ae1465581..f2703c5cc 100644
--- a/test/validation/api/crypto/crypto_op_test.c
+++ b/test/validation/api/crypto/crypto_op_test.c
@@ -1,8 +1,6 @@
-/* Copyright (c) 2014-2018, Linaro Limited
- * Copyright (c) 2021-2024, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2014-2018 Linaro Limited
+ * Copyright (c) 2021-2024 Nokia
*/
#include <string.h>
@@ -164,6 +162,10 @@ static void write_header_and_trailer(odp_packet_t pkt,
{
uint32_t trailer_offset = odp_packet_len(pkt) - trailer_len;
uint32_t max_len = header_len > trailer_len ? header_len : trailer_len;
+
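+ /* Nothing to write; also avoids a zero-length VLA (buffer[max_len]) below */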
+ if (!max_len)
+ return;
+
uint8_t buffer[max_len];
int rc;
diff --git a/test/validation/api/crypto/crypto_op_test.h b/test/validation/api/crypto/crypto_op_test.h
index 9805457ad..966e0a643 100644
--- a/test/validation/api/crypto/crypto_op_test.h
+++ b/test/validation/api/crypto/crypto_op_test.h
@@ -1,8 +1,5 @@
-/*
- * Copyright (c) 2021-2023, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2021-2023 Nokia
*/
#ifndef CRYPTO_OP_TEST_H
diff --git a/test/validation/api/crypto/odp_crypto_test_inp.c b/test/validation/api/crypto/odp_crypto_test_inp.c
index 532aaf525..7ce37a3cd 100644
--- a/test/validation/api/crypto/odp_crypto_test_inp.c
+++ b/test/validation/api/crypto/odp_crypto_test_inp.c
@@ -1,8 +1,6 @@
-/* Copyright (c) 2014-2018, Linaro Limited
- * Copyright (c) 2021-2023, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2014-2018 Linaro Limited
+ * Copyright (c) 2021-2024 Nokia
*/
#include <string.h>
@@ -34,18 +32,18 @@ static void test_defaults(uint8_t fill)
memset(&param, fill, sizeof(param));
odp_crypto_session_param_init(&param);
- CU_ASSERT_EQUAL(param.op, ODP_CRYPTO_OP_ENCODE);
- CU_ASSERT_EQUAL(param.op_type, ODP_CRYPTO_OP_TYPE_LEGACY);
- CU_ASSERT_EQUAL(param.cipher_range_in_bits, false);
- CU_ASSERT_EQUAL(param.auth_range_in_bits, false);
- CU_ASSERT_EQUAL(param.auth_cipher_text, false);
- CU_ASSERT_EQUAL(param.null_crypto_enable, false);
- CU_ASSERT_EQUAL(param.op_mode, ODP_CRYPTO_SYNC);
- CU_ASSERT_EQUAL(param.cipher_alg, ODP_CIPHER_ALG_NULL);
- CU_ASSERT_EQUAL(param.cipher_iv_len, 0);
- CU_ASSERT_EQUAL(param.auth_alg, ODP_AUTH_ALG_NULL);
- CU_ASSERT_EQUAL(param.auth_iv_len, 0);
- CU_ASSERT_EQUAL(param.auth_aad_len, 0);
+ CU_ASSERT(param.op == ODP_CRYPTO_OP_ENCODE);
+ CU_ASSERT(param.op_type == ODP_CRYPTO_OP_TYPE_LEGACY);
+ CU_ASSERT(param.cipher_range_in_bits == false);
+ CU_ASSERT(param.auth_range_in_bits == false);
+ CU_ASSERT(param.auth_cipher_text == false);
+ CU_ASSERT(param.null_crypto_enable == false);
+ CU_ASSERT(param.op_mode == ODP_CRYPTO_SYNC);
+ CU_ASSERT(param.cipher_alg == ODP_CIPHER_ALG_NULL);
+ CU_ASSERT(param.cipher_iv_len == 0);
+ CU_ASSERT(param.auth_alg == ODP_AUTH_ALG_NULL);
+ CU_ASSERT(param.auth_iv_len == 0);
+ CU_ASSERT(param.auth_aad_len == 0);
}
static void test_default_values(void)
@@ -190,8 +188,8 @@ static int session_create(crypto_session_t *session,
int rc;
odp_crypto_ses_create_err_t status;
odp_crypto_session_param_t ses_params;
- uint8_t cipher_key_data[ref->cipher_key_length];
- uint8_t auth_key_data[ref->auth_key_length];
+ uint8_t cipher_key_data[MAX_KEY_LEN];
+ uint8_t auth_key_data[MAX_KEY_LEN];
odp_crypto_key_t cipher_key = {
.data = cipher_key_data,
.length = ref->cipher_key_length
@@ -776,6 +774,10 @@ static odp_cipher_alg_t cipher_algs[] = {
ODP_CIPHER_ALG_SNOW3G_UEA2,
ODP_CIPHER_ALG_AES_EEA2,
ODP_CIPHER_ALG_ZUC_EEA3,
+ ODP_CIPHER_ALG_SNOW_V,
+ ODP_CIPHER_ALG_SM4_ECB,
+ ODP_CIPHER_ALG_SM4_CBC,
+ ODP_CIPHER_ALG_SM4_CTR,
};
/*
@@ -790,6 +792,10 @@ static odp_auth_alg_t auth_algs[] = {
ODP_AUTH_ALG_SHA256_HMAC,
ODP_AUTH_ALG_SHA384_HMAC,
ODP_AUTH_ALG_SHA512_HMAC,
+ ODP_AUTH_ALG_SHA3_224_HMAC,
+ ODP_AUTH_ALG_SHA3_256_HMAC,
+ ODP_AUTH_ALG_SHA3_384_HMAC,
+ ODP_AUTH_ALG_SHA3_512_HMAC,
ODP_AUTH_ALG_AES_GMAC,
ODP_AUTH_ALG_AES_CMAC,
ODP_AUTH_ALG_AES_XCBC_MAC,
@@ -797,12 +803,20 @@ static odp_auth_alg_t auth_algs[] = {
ODP_AUTH_ALG_SNOW3G_UIA2,
ODP_AUTH_ALG_AES_EIA2,
ODP_AUTH_ALG_ZUC_EIA3,
+ ODP_AUTH_ALG_SNOW_V_GMAC,
+ ODP_AUTH_ALG_SM3_HMAC,
+ ODP_AUTH_ALG_SM4_GMAC,
ODP_AUTH_ALG_MD5,
ODP_AUTH_ALG_SHA1,
ODP_AUTH_ALG_SHA224,
ODP_AUTH_ALG_SHA256,
ODP_AUTH_ALG_SHA384,
ODP_AUTH_ALG_SHA512,
+ ODP_AUTH_ALG_SHA3_224,
+ ODP_AUTH_ALG_SHA3_256,
+ ODP_AUTH_ALG_SHA3_384,
+ ODP_AUTH_ALG_SHA3_512,
+ ODP_AUTH_ALG_SM3,
};
static void test_auth_hashes_in_auth_range(void)
diff --git a/test/validation/api/crypto/test_vector_defs.h b/test/validation/api/crypto/test_vector_defs.h
index 46ae4e4e1..6c2eb2085 100644
--- a/test/validation/api/crypto/test_vector_defs.h
+++ b/test/validation/api/crypto/test_vector_defs.h
@@ -1,8 +1,6 @@
-/* Copyright (c) 2014-2018, Linaro Limited
- * Copyright (c) 2021-2023, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2014-2018 Linaro Limited
+ * Copyright (c) 2021-2023 Nokia
*/
#ifndef TEST_VECTOR_DEFS_H
diff --git a/test/validation/api/crypto/test_vectors.h b/test/validation/api/crypto/test_vectors.h
index a38644246..33ba52d34 100644
--- a/test/validation/api/crypto/test_vectors.h
+++ b/test/validation/api/crypto/test_vectors.h
@@ -1,8 +1,6 @@
-/* Copyright (c) 2014-2018, Linaro Limited
- * Copyright (c) 2021-2023, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2014-2018 Linaro Limited
+ * Copyright (c) 2021-2023 Nokia
*/
#ifndef TEST_VECTORS_H
diff --git a/test/validation/api/crypto/test_vectors_len.h b/test/validation/api/crypto/test_vectors_len.h
index 3818b57a0..92b5c8453 100644
--- a/test/validation/api/crypto/test_vectors_len.h
+++ b/test/validation/api/crypto/test_vectors_len.h
@@ -1,8 +1,7 @@
-/* Copyright (c) 2014-2018, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2014-2018 Linaro Limited
*/
+
#ifndef TEST_VECTORS_LEN_
#define TEST_VECTORS_LEN_
diff --git a/test/validation/api/crypto/util.c b/test/validation/api/crypto/util.c
index 557e5e951..c40d61313 100644
--- a/test/validation/api/crypto/util.c
+++ b/test/validation/api/crypto/util.c
@@ -1,8 +1,6 @@
-/* Copyright (c) 2014-2018, Linaro Limited
- * Copyright (c) 2021-2023, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2014-2018 Linaro Limited
+ * Copyright (c) 2021-2024 Nokia
*/
#include <string.h>
@@ -32,8 +30,14 @@ const char *auth_alg_name(odp_auth_alg_t auth)
return "ODP_AUTH_ALG_SHA384_HMAC";
case ODP_AUTH_ALG_SHA512_HMAC:
return "ODP_AUTH_ALG_SHA512_HMAC";
- case ODP_AUTH_ALG_AES_XCBC_MAC:
- return "ODP_AUTH_ALG_AES_XCBC_MAC";
+ case ODP_AUTH_ALG_SHA3_224_HMAC:
+ return "ODP_AUTH_ALG_SHA3_224_HMAC";
+ case ODP_AUTH_ALG_SHA3_256_HMAC:
+ return "ODP_AUTH_ALG_SHA3_256_HMAC";
+ case ODP_AUTH_ALG_SHA3_384_HMAC:
+ return "ODP_AUTH_ALG_SHA3_384_HMAC";
+ case ODP_AUTH_ALG_SHA3_512_HMAC:
+ return "ODP_AUTH_ALG_SHA3_512_HMAC";
case ODP_AUTH_ALG_AES_GCM:
return "ODP_AUTH_ALG_AES_GCM";
case ODP_AUTH_ALG_AES_GMAC:
@@ -42,6 +46,8 @@ const char *auth_alg_name(odp_auth_alg_t auth)
return "ODP_AUTH_ALG_AES_CCM";
case ODP_AUTH_ALG_AES_CMAC:
return "ODP_AUTH_ALG_AES_CMAC";
+ case ODP_AUTH_ALG_AES_XCBC_MAC:
+ return "ODP_AUTH_ALG_AES_XCBC_MAC";
case ODP_AUTH_ALG_CHACHA20_POLY1305:
return "ODP_AUTH_ALG_CHACHA20_POLY1305";
case ODP_AUTH_ALG_KASUMI_F9:
@@ -52,6 +58,18 @@ const char *auth_alg_name(odp_auth_alg_t auth)
return "ODP_AUTH_ALG_AES_EIA2";
case ODP_AUTH_ALG_ZUC_EIA3:
return "ODP_AUTH_ALG_ZUC_EIA3";
+ case ODP_AUTH_ALG_SNOW_V_GCM:
+ return "ODP_AUTH_ALG_SNOW_V_GCM";
+ case ODP_AUTH_ALG_SNOW_V_GMAC:
+ return "ODP_AUTH_ALG_SNOW_V_GMAC";
+ case ODP_AUTH_ALG_SM3_HMAC:
+ return "ODP_AUTH_ALG_SM3_HMAC";
+ case ODP_AUTH_ALG_SM4_GCM:
+ return "ODP_AUTH_ALG_SM4_GCM";
+ case ODP_AUTH_ALG_SM4_GMAC:
+ return "ODP_AUTH_ALG_SM4_GMAC";
+ case ODP_AUTH_ALG_SM4_CCM:
+ return "ODP_AUTH_ALG_SM4_CCM";
case ODP_AUTH_ALG_MD5:
return "ODP_AUTH_ALG_MD5";
case ODP_AUTH_ALG_SHA1:
@@ -64,6 +82,8 @@ const char *auth_alg_name(odp_auth_alg_t auth)
return "ODP_AUTH_ALG_SHA384";
case ODP_AUTH_ALG_SHA512:
return "ODP_AUTH_ALG_SHA512";
+ case ODP_AUTH_ALG_SM3:
+ return "ODP_AUTH_ALG_SM3";
default:
return "Unknown";
}
@@ -104,6 +124,20 @@ const char *cipher_alg_name(odp_cipher_alg_t cipher)
return "ODP_CIPHER_ALG_AES_EEA2";
case ODP_CIPHER_ALG_ZUC_EEA3:
return "ODP_CIPHER_ALG_ZUC_EEA3";
+ case ODP_CIPHER_ALG_SNOW_V:
+ return "ODP_CIPHER_ALG_SNOW_V";
+ case ODP_CIPHER_ALG_SNOW_V_GCM:
+ return "ODP_CIPHER_ALG_SNOW_V_GCM";
+ case ODP_CIPHER_ALG_SM4_ECB:
+ return "ODP_CIPHER_ALG_SM4_ECB";
+ case ODP_CIPHER_ALG_SM4_CBC:
+ return "ODP_CIPHER_ALG_SM4_CBC";
+ case ODP_CIPHER_ALG_SM4_CTR:
+ return "ODP_CIPHER_ALG_SM4_CTR";
+ case ODP_CIPHER_ALG_SM4_GCM:
+ return "ODP_CIPHER_ALG_SM4_GCM";
+ case ODP_CIPHER_ALG_SM4_CCM:
+ return "ODP_CIPHER_ALG_SM4_CCM";
default:
return "Unknown";
}
@@ -201,6 +235,34 @@ int check_alg_support(odp_cipher_alg_t cipher, odp_auth_alg_t auth)
if (!capability.ciphers.bit.zuc_eea3)
return ODP_TEST_INACTIVE;
break;
+ case ODP_CIPHER_ALG_SNOW_V:
+ if (!capability.ciphers.bit.snow_v)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_CIPHER_ALG_SNOW_V_GCM:
+ if (!capability.ciphers.bit.snow_v_gcm)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_CIPHER_ALG_SM4_ECB:
+ if (!capability.ciphers.bit.sm4_ecb)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_CIPHER_ALG_SM4_CBC:
+ if (!capability.ciphers.bit.sm4_cbc)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_CIPHER_ALG_SM4_CTR:
+ if (!capability.ciphers.bit.sm4_ctr)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_CIPHER_ALG_SM4_GCM:
+ if (!capability.ciphers.bit.sm4_gcm)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_CIPHER_ALG_SM4_CCM:
+ if (!capability.ciphers.bit.sm4_ccm)
+ return ODP_TEST_INACTIVE;
+ break;
default:
ODPH_ERR("Unsupported cipher algorithm\n");
return ODP_TEST_INACTIVE;
@@ -236,8 +298,20 @@ int check_alg_support(odp_cipher_alg_t cipher, odp_auth_alg_t auth)
if (!capability.auths.bit.sha512_hmac)
return ODP_TEST_INACTIVE;
break;
- case ODP_AUTH_ALG_AES_XCBC_MAC:
- if (!capability.auths.bit.aes_xcbc_mac)
+ case ODP_AUTH_ALG_SHA3_224_HMAC:
+ if (!capability.auths.bit.sha3_224_hmac)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_AUTH_ALG_SHA3_256_HMAC:
+ if (!capability.auths.bit.sha3_256_hmac)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_AUTH_ALG_SHA3_384_HMAC:
+ if (!capability.auths.bit.sha3_384_hmac)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_AUTH_ALG_SHA3_512_HMAC:
+ if (!capability.auths.bit.sha3_512_hmac)
return ODP_TEST_INACTIVE;
break;
case ODP_AUTH_ALG_AES_GCM:
@@ -256,6 +330,10 @@ int check_alg_support(odp_cipher_alg_t cipher, odp_auth_alg_t auth)
if (!capability.auths.bit.aes_cmac)
return ODP_TEST_INACTIVE;
break;
+ case ODP_AUTH_ALG_AES_XCBC_MAC:
+ if (!capability.auths.bit.aes_xcbc_mac)
+ return ODP_TEST_INACTIVE;
+ break;
case ODP_AUTH_ALG_CHACHA20_POLY1305:
if (!capability.auths.bit.chacha20_poly1305)
return ODP_TEST_INACTIVE;
@@ -276,6 +354,30 @@ int check_alg_support(odp_cipher_alg_t cipher, odp_auth_alg_t auth)
if (!capability.auths.bit.zuc_eia3)
return ODP_TEST_INACTIVE;
break;
+ case ODP_AUTH_ALG_SNOW_V_GCM:
+ if (!capability.auths.bit.snow_v_gcm)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_AUTH_ALG_SNOW_V_GMAC:
+ if (!capability.auths.bit.snow_v_gmac)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_AUTH_ALG_SM3_HMAC:
+ if (!capability.auths.bit.sm3_hmac)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_AUTH_ALG_SM4_GCM:
+ if (!capability.auths.bit.sm4_gcm)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_AUTH_ALG_SM4_GMAC:
+ if (!capability.auths.bit.sm4_gmac)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_AUTH_ALG_SM4_CCM:
+ if (!capability.auths.bit.sm4_ccm)
+ return ODP_TEST_INACTIVE;
+ break;
case ODP_AUTH_ALG_MD5:
if (!capability.auths.bit.md5)
return ODP_TEST_INACTIVE;
@@ -300,6 +402,26 @@ int check_alg_support(odp_cipher_alg_t cipher, odp_auth_alg_t auth)
if (!capability.auths.bit.sha512)
return ODP_TEST_INACTIVE;
break;
+ case ODP_AUTH_ALG_SHA3_224:
+ if (!capability.auths.bit.sha3_224)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_AUTH_ALG_SHA3_256:
+ if (!capability.auths.bit.sha3_256)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_AUTH_ALG_SHA3_384:
+ if (!capability.auths.bit.sha3_384)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_AUTH_ALG_SHA3_512:
+ if (!capability.auths.bit.sha3_512)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_AUTH_ALG_SM3:
+ if (!capability.auths.bit.sm3)
+ return ODP_TEST_INACTIVE;
+ break;
default:
ODPH_ERR("Unsupported authentication algorithm\n");
return ODP_TEST_INACTIVE;
diff --git a/test/validation/api/crypto/util.h b/test/validation/api/crypto/util.h
index 5cba21890..b6a013255 100644
--- a/test/validation/api/crypto/util.h
+++ b/test/validation/api/crypto/util.h
@@ -1,8 +1,6 @@
-/* Copyright (c) 2014-2018, Linaro Limited
- * Copyright (c) 2021-2023, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2014-2018 Linaro Limited
+ * Copyright (c) 2021-2023 Nokia
*/
#ifndef UTIL_H
diff --git a/test/validation/api/dma/dma.c b/test/validation/api/dma/dma.c
index efc7fa039..739a6c5c6 100644
--- a/test/validation/api/dma/dma.c
+++ b/test/validation/api/dma/dma.c
@@ -1,7 +1,5 @@
-/* Copyright (c) 2021-2023, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2021-2023 Nokia
*/
#include <odp_api.h>
@@ -325,6 +323,24 @@ static void test_dma_same_name_named(void)
CU_ASSERT(odp_dma_destroy(dma_b) == 0);
}
+static void test_dma_long_name(void)
+{
+ odp_dma_param_t dma_param;
+ odp_dma_t dma;
+ char name[ODP_DMA_NAME_LEN];
+
+ memset(name, 'a', sizeof(name));
+ name[sizeof(name) - 1] = 0;
+
+ odp_dma_param_init(&dma_param);
+ dma_param.compl_mode_mask = ODP_DMA_COMPL_SYNC;
+ dma = odp_dma_create(name, &dma_param);
+
+ CU_ASSERT_FATAL(dma != ODP_DMA_INVALID);
+ CU_ASSERT(odp_dma_to_u64(dma) == odp_dma_to_u64(odp_dma_lookup(name)));
+ CU_ASSERT(odp_dma_destroy(dma) == 0);
+}
+
static void test_dma_compl_pool(void)
{
odp_pool_t pool;
@@ -397,6 +413,24 @@ static void test_dma_compl_pool_same_name(void)
CU_ASSERT_FATAL(odp_pool_destroy(pool_b) == 0);
}
+static void test_dma_compl_pool_long_name(void)
+{
+ odp_dma_pool_param_t dma_pool_param;
+ odp_pool_t pool;
+ char name[ODP_POOL_NAME_LEN];
+
+ memset(name, 'a', sizeof(name));
+ name[sizeof(name) - 1] = 0;
+
+ odp_dma_pool_param_init(&dma_pool_param);
+ dma_pool_param.num = 1;
+ pool = odp_dma_pool_create(name, &dma_pool_param);
+
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+ CU_ASSERT(pool == odp_pool_lookup(name));
+ CU_ASSERT_FATAL(odp_pool_destroy(pool) == 0);
+}
+
static void test_dma_compl_pool_max_pools(void)
{
odp_dma_pool_param_t dma_pool_param;
@@ -458,6 +492,7 @@ static void test_dma_compl_user_area(void)
CU_ASSERT(prev != addr);
ev = odp_dma_compl_to_event(compl_evs[i]);
+ odp_event_user_flag_set(ev, 1);
CU_ASSERT(odp_event_user_area(ev) == addr);
CU_ASSERT(odp_event_user_area_and_flag(ev, &flag) == addr);
CU_ASSERT(flag < 0);
@@ -1636,8 +1671,10 @@ odp_testinfo_t dma_suite[] = {
ODP_TEST_INFO_CONDITIONAL(test_dma_debug, check_sync),
ODP_TEST_INFO_CONDITIONAL(test_dma_same_name_null, check_session_count),
ODP_TEST_INFO_CONDITIONAL(test_dma_same_name_named, check_session_count),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_long_name, check_session_count),
ODP_TEST_INFO_CONDITIONAL(test_dma_compl_pool, check_event),
ODP_TEST_INFO_CONDITIONAL(test_dma_compl_pool_same_name, check_event),
+ ODP_TEST_INFO_CONDITIONAL(test_dma_compl_pool_long_name, check_event),
ODP_TEST_INFO_CONDITIONAL(test_dma_compl_pool_max_pools, check_event),
ODP_TEST_INFO_CONDITIONAL(test_dma_compl_user_area, check_event_user_area),
ODP_TEST_INFO_CONDITIONAL(test_dma_compl_user_area_init, check_event_user_area_init),
diff --git a/test/validation/api/errno/errno.c b/test/validation/api/errno/errno.c
index 70708ce01..1cbd27b1b 100644
--- a/test/validation/api/errno/errno.c
+++ b/test/validation/api/errno/errno.c
@@ -1,7 +1,5 @@
-/* Copyright (c) 2015-2018, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Linaro Limited
*/
#include <odp_api.h>
@@ -13,9 +11,9 @@ static void errno_test_odp_errno_sunny_day(void)
odp_errno_zero();
my_errno = odp_errno();
- CU_ASSERT_TRUE(my_errno == 0);
+ CU_ASSERT(my_errno == 0);
odp_errno_print("odp_errno");
- CU_ASSERT_PTR_NOT_NULL(odp_errno_str(my_errno));
+ CU_ASSERT(odp_errno_str(my_errno) != NULL);
}
odp_testinfo_t errno_suite[] = {
diff --git a/test/validation/api/event/event.c b/test/validation/api/event/event.c
index fbcc08d6f..a4f967791 100644
--- a/test/validation/api/event/event.c
+++ b/test/validation/api/event/event.c
@@ -1,8 +1,6 @@
-/* Copyright (c) 2017-2018, Linaro Limited
- * Copyright (c) 2023, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2017-2018 Linaro Limited
+ * Copyright (c) 2023 Nokia
*/
#include <odp_api.h>
diff --git a/test/validation/api/hash/hash.c b/test/validation/api/hash/hash.c
index a935ef7ac..60c6755b2 100644
--- a/test/validation/api/hash/hash.c
+++ b/test/validation/api/hash/hash.c
@@ -1,8 +1,6 @@
-/* Copyright (c) 2015-2018, Linaro Limited
- * Copyright (c) 2021, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Linaro Limited
+ * Copyright (c) 2021 Nokia
*/
#include <odp_api.h>
diff --git a/test/validation/api/ipsec/ipsec.c b/test/validation/api/ipsec/ipsec.c
index 5ad7bd48d..0389175f0 100644
--- a/test/validation/api/ipsec/ipsec.c
+++ b/test/validation/api/ipsec/ipsec.c
@@ -1,9 +1,7 @@
-/* Copyright (c) 2017-2018, Linaro Limited
- * Copyright (c) 2018-2022, Nokia
- * Copyright (c) 2020-2021, Marvell
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2017-2018 Linaro Limited
+ * Copyright (c) 2018-2022 Nokia
+ * Copyright (c) 2020-2021 Marvell
*/
#include <odp_api.h>
@@ -457,27 +455,28 @@ static void ipsec_status_event_handle(odp_event_t ev_status,
};
CU_ASSERT_FATAL(ODP_EVENT_INVALID != ev_status);
- CU_ASSERT_EQUAL(1, odp_event_is_valid(ev_status));
- CU_ASSERT_EQUAL_FATAL(ODP_EVENT_IPSEC_STATUS, odp_event_type(ev_status));
+ CU_ASSERT(1 == odp_event_is_valid(ev_status));
+ CU_ASSERT_FATAL(ODP_EVENT_IPSEC_STATUS == odp_event_type(ev_status));
- /* No user area or source pool for IPsec status events */
+ /* No user area/flag or source pool for IPsec status events */
+ odp_event_user_flag_set(ev_status, 1);
CU_ASSERT(odp_event_user_area(ev_status) == NULL);
CU_ASSERT(odp_event_user_area_and_flag(ev_status, &flag) == NULL);
CU_ASSERT(flag < 0);
CU_ASSERT(odp_event_pool(ev_status) == ODP_POOL_INVALID);
- CU_ASSERT_EQUAL(0, odp_ipsec_status(&status, ev_status));
- CU_ASSERT_EQUAL(ODP_IPSEC_STATUS_WARN, status.id);
- CU_ASSERT_EQUAL(sa, status.sa);
- CU_ASSERT_EQUAL(0, status.result);
+ CU_ASSERT(0 == odp_ipsec_status(&status, ev_status));
+ CU_ASSERT(ODP_IPSEC_STATUS_WARN == status.id);
+ CU_ASSERT(sa == status.sa);
+ CU_ASSERT(0 == status.result);
if (IPSEC_TEST_EXPIRY_IGNORED != sa_expiry) {
if (IPSEC_TEST_EXPIRY_SOFT_PKT == sa_expiry) {
- CU_ASSERT_EQUAL(1, status.warn.soft_exp_packets);
+ CU_ASSERT(1 == status.warn.soft_exp_packets);
sa_expiry_notified = true;
} else if (IPSEC_TEST_EXPIRY_SOFT_BYTE == sa_expiry) {
- CU_ASSERT_EQUAL(1, status.warn.soft_exp_bytes);
+ CU_ASSERT(1 == status.warn.soft_exp_bytes);
sa_expiry_notified = true;
}
}
@@ -502,51 +501,47 @@ void ipsec_sa_destroy(odp_ipsec_sa_t sa)
odp_ipsec_status_t status;
int ret;
- CU_ASSERT_EQUAL(IPSEC_SA_CTX, odp_ipsec_sa_context(sa));
+ CU_ASSERT(IPSEC_SA_CTX == odp_ipsec_sa_context(sa));
- CU_ASSERT_EQUAL(ODP_IPSEC_OK, odp_ipsec_sa_disable(sa));
+ CU_ASSERT(ODP_IPSEC_OK == odp_ipsec_sa_disable(sa));
if (ODP_QUEUE_INVALID != suite_context.queue) {
event = recv_event(suite_context.queue, EVENT_WAIT_TIME);
CU_ASSERT(odp_event_is_valid(event) == 1);
- CU_ASSERT_EQUAL(ODP_EVENT_IPSEC_STATUS, odp_event_type(event));
+ CU_ASSERT(ODP_EVENT_IPSEC_STATUS == odp_event_type(event));
ret = odp_ipsec_status(&status, event);
CU_ASSERT(ret == 0);
if (ret == 0) {
- CU_ASSERT_EQUAL(ODP_IPSEC_STATUS_SA_DISABLE, status.id);
- CU_ASSERT_EQUAL(sa, status.sa);
- CU_ASSERT_EQUAL(0, status.result);
- CU_ASSERT_EQUAL(0, status.warn.all);
+ CU_ASSERT(ODP_IPSEC_STATUS_SA_DISABLE == status.id);
+ CU_ASSERT(sa == status.sa);
+ CU_ASSERT(0 == status.result);
+ CU_ASSERT(0 == status.warn.all);
}
odp_event_free(event);
}
- CU_ASSERT_EQUAL(ODP_IPSEC_OK, odp_ipsec_sa_destroy(sa));
+ CU_ASSERT(ODP_IPSEC_OK == odp_ipsec_sa_destroy(sa));
}
odp_packet_t ipsec_packet(const ipsec_test_packet *itp)
{
odp_packet_t pkt = odp_packet_alloc(suite_context.pool, itp->len);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_PACKET_INVALID, pkt);
+ CU_ASSERT_FATAL(ODP_PACKET_INVALID != pkt);
if (ODP_PACKET_INVALID == pkt)
return pkt;
- CU_ASSERT_EQUAL(0, odp_packet_copy_from_mem(pkt, 0, itp->len,
- itp->data));
+ CU_ASSERT(0 == odp_packet_copy_from_mem(pkt, 0, itp->len, itp->data));
if (itp->l2_offset != ODP_PACKET_OFFSET_INVALID)
- CU_ASSERT_EQUAL(0, odp_packet_l2_offset_set(pkt,
- itp->l2_offset));
+ CU_ASSERT(0 == odp_packet_l2_offset_set(pkt, itp->l2_offset));
if (itp->l3_offset != ODP_PACKET_OFFSET_INVALID)
- CU_ASSERT_EQUAL(0, odp_packet_l3_offset_set(pkt,
- itp->l3_offset));
+ CU_ASSERT(0 == odp_packet_l3_offset_set(pkt, itp->l3_offset));
if (itp->l4_offset != ODP_PACKET_OFFSET_INVALID)
- CU_ASSERT_EQUAL(0, odp_packet_l4_offset_set(pkt,
- itp->l4_offset));
+ CU_ASSERT(0 == odp_packet_l4_offset_set(pkt, itp->l4_offset));
odp_packet_user_ptr_set(pkt, PACKET_USER_PTR);
@@ -568,11 +563,9 @@ static void check_l2_header(const ipsec_test_packet *itp, odp_packet_t pkt)
CU_ASSERT_FATAL(l2 != ODP_PACKET_OFFSET_INVALID);
CU_ASSERT_FATAL(l3 != ODP_PACKET_OFFSET_INVALID);
- CU_ASSERT_EQUAL(l3 - l2, hdr_len);
+ CU_ASSERT(l3 - l2 == hdr_len);
odp_packet_copy_to_mem(pkt, 0, len, data);
- CU_ASSERT_EQUAL(0, memcmp(data + l2,
- itp->data + itp->l2_offset,
- hdr_len));
+ CU_ASSERT(0 == memcmp(data + l2, itp->data + itp->l2_offset, hdr_len));
}
/*
@@ -595,17 +588,17 @@ static void ipsec_check_packet(const ipsec_test_packet *itp, odp_packet_t pkt,
odp_packet_copy_to_mem(pkt, 0, len, data);
if (l3 == ODP_PACKET_OFFSET_INVALID) {
- CU_ASSERT_EQUAL(itp->l3_offset, ODP_PACKET_OFFSET_INVALID);
- CU_ASSERT_EQUAL(l4, ODP_PACKET_OFFSET_INVALID);
+ CU_ASSERT(itp->l3_offset == ODP_PACKET_OFFSET_INVALID);
+ CU_ASSERT(l4 == ODP_PACKET_OFFSET_INVALID);
return;
}
- CU_ASSERT_EQUAL(len - l3, itp->len - itp->l3_offset);
+ CU_ASSERT(len - l3 == itp->len - itp->l3_offset);
if (len - l3 != itp->len - itp->l3_offset)
return;
- CU_ASSERT_EQUAL(l4 - l3, itp->l4_offset - itp->l3_offset);
+ CU_ASSERT(l4 - l3 == itp->l4_offset - itp->l3_offset);
if (l4 - l3 != itp->l4_offset - itp->l3_offset)
return;
@@ -634,16 +627,13 @@ static void ipsec_check_packet(const ipsec_test_packet *itp, odp_packet_t pkt,
* Check packet data before the first possible
* location of the AH ICV field.
*/
- CU_ASSERT_EQUAL(0, memcmp(data + l3,
- itp->data + itp->l3_offset,
- ODPH_IPV4HDR_LEN + 12));
+ CU_ASSERT(0 == memcmp(data + l3, itp->data + itp->l3_offset,
+ ODPH_IPV4HDR_LEN + 12));
return;
}
}
- CU_ASSERT_EQUAL(0, memcmp(data + l3,
- itp->data + itp->l3_offset,
- len - l3));
+ CU_ASSERT(0 == memcmp(data + l3, itp->data + itp->l3_offset, len - l3));
}
static int send_pkts(const ipsec_test_part part[], int num_part)
@@ -660,7 +650,7 @@ static int send_pkts(const ipsec_test_part part[], int num_part)
for (i = 0; i < num_part; i++)
pkt[i] = ipsec_packet(part[i].pkt_in);
- CU_ASSERT_EQUAL(num_part, odp_pktout_send(pktout, pkt, num_part));
+ CU_ASSERT(num_part == odp_pktout_send(pktout, pkt, num_part));
return num_part;
}
@@ -689,8 +679,7 @@ static int recv_pkts_inline(const ipsec_test_part *part,
odp_queue_t queue = ODP_QUEUE_INVALID;
int i;
- CU_ASSERT_EQUAL_FATAL(1, odp_pktin_event_queue(suite_context.pktio,
- &queue, 1));
+ CU_ASSERT_FATAL(1 == odp_pktin_event_queue(suite_context.pktio, &queue, 1));
for (i = 0; i < part->num_pkt;) {
odp_event_t ev;
@@ -699,10 +688,8 @@ static int recv_pkts_inline(const ipsec_test_part *part,
ev = recv_event(queue, 0);
if (ODP_EVENT_INVALID != ev) {
CU_ASSERT(odp_event_is_valid(ev) == 1);
- CU_ASSERT_EQUAL(ODP_EVENT_PACKET,
- odp_event_types(ev, &subtype));
- CU_ASSERT_EQUAL(ODP_EVENT_PACKET_BASIC,
- subtype);
+ CU_ASSERT(ODP_EVENT_PACKET == odp_event_types(ev, &subtype));
+ CU_ASSERT(ODP_EVENT_PACKET_BASIC == subtype);
CU_ASSERT(part->out[i].status.error.sa_lookup);
pkto[i] = odp_packet_from_event(ev);
@@ -722,7 +709,7 @@ static int recv_pkts_inline(const ipsec_test_part *part,
int j;
CU_ASSERT(odp_event_is_valid(ev) == 1);
- CU_ASSERT_EQUAL(ODP_EVENT_PACKET, odp_event_type(ev));
+ CU_ASSERT(ODP_EVENT_PACKET == odp_event_type(ev));
pkt = odp_packet_from_event(ev);
CU_ASSERT(!part->out[i].status.error.sa_lookup);
@@ -785,10 +772,8 @@ static int ipsec_process_in(const ipsec_test_part *part,
if (ODP_IPSEC_OP_MODE_SYNC == suite_context.inbound_op_mode) {
pkt = ipsec_packet(part->pkt_in);
- CU_ASSERT_EQUAL(part->num_pkt, odp_ipsec_in(&pkt, 1,
- pkto, &num_out,
- &param));
- CU_ASSERT_EQUAL(num_out, part->num_pkt);
+ CU_ASSERT(part->num_pkt == odp_ipsec_in(&pkt, 1, pkto, &num_out, &param));
+ CU_ASSERT(num_out == part->num_pkt);
CU_ASSERT_FATAL(*pkto != ODP_PACKET_INVALID);
CU_ASSERT(odp_packet_subtype(*pkto) == ODP_EVENT_PACKET_IPSEC);
} else if (ODP_IPSEC_OP_MODE_ASYNC == suite_context.inbound_op_mode) {
@@ -796,7 +781,7 @@ static int ipsec_process_in(const ipsec_test_part *part,
pkt = ipsec_packet(part->pkt_in);
consumed = odp_ipsec_in_enq(&pkt, 1, &param);
- CU_ASSERT_EQUAL(1, consumed);
+ CU_ASSERT(1 == consumed);
if (consumed <= 0)
num_out = 0;
@@ -807,18 +792,17 @@ static int ipsec_process_in(const ipsec_test_part *part,
event = recv_pkt_async_inbound(part->out[i].status);
CU_ASSERT(odp_event_is_valid(event) == 1);
- CU_ASSERT_EQUAL(ODP_EVENT_PACKET,
- odp_event_types(event, &subtype));
- CU_ASSERT_EQUAL(ODP_EVENT_PACKET_IPSEC, subtype);
+ CU_ASSERT(ODP_EVENT_PACKET == odp_event_types(event, &subtype));
+ CU_ASSERT(ODP_EVENT_PACKET_IPSEC == subtype);
pkto[i] = odp_ipsec_packet_from_event(event);
CU_ASSERT_FATAL(pkto[i] != ODP_PACKET_INVALID);
CU_ASSERT(odp_packet_subtype(pkto[i]) ==
ODP_EVENT_PACKET_IPSEC);
}
} else {
- CU_ASSERT_EQUAL(1, send_pkts(part, 1));
+ CU_ASSERT(1 == send_pkts(part, 1));
if (part->num_pkt)
- CU_ASSERT_EQUAL(part->num_pkt, recv_pkts_inline(part, pkto));
+ CU_ASSERT(part->num_pkt == recv_pkts_inline(part, pkto));
}
return num_out;
@@ -881,14 +865,13 @@ static int ipsec_send_out_one(const ipsec_test_part *part,
param.opt = &part->opt;
if (ODP_IPSEC_OP_MODE_SYNC == suite_context.outbound_op_mode) {
- CU_ASSERT_EQUAL(1, odp_ipsec_out(&pkt, 1, pkto, &num_out,
- &param));
+ CU_ASSERT(1 == odp_ipsec_out(&pkt, 1, pkto, &num_out, &param));
CU_ASSERT_FATAL(num_out == 1);
CU_ASSERT_FATAL(*pkto != ODP_PACKET_INVALID);
CU_ASSERT(odp_packet_subtype(*pkto) == ODP_EVENT_PACKET_IPSEC);
} else if (ODP_IPSEC_OP_MODE_ASYNC == suite_context.outbound_op_mode) {
num_out = odp_ipsec_out_enq(&pkt, 1, &param);
- CU_ASSERT_EQUAL(1, num_out);
+ CU_ASSERT(1 == num_out);
num_out = (num_out == 1) ? 1 : 0;
@@ -899,9 +882,8 @@ static int ipsec_send_out_one(const ipsec_test_part *part,
event = recv_event(suite_context.queue, EVENT_WAIT_TIME);
CU_ASSERT(odp_event_is_valid(event) == 1);
- CU_ASSERT_EQUAL(ODP_EVENT_PACKET,
- odp_event_types(event, &subtype));
- CU_ASSERT_EQUAL(ODP_EVENT_PACKET_IPSEC, subtype);
+ CU_ASSERT(ODP_EVENT_PACKET == odp_event_types(event, &subtype));
+ CU_ASSERT(ODP_EVENT_PACKET_IPSEC == subtype);
pkto[i] = odp_ipsec_packet_from_event(event);
CU_ASSERT_FATAL(pkto[i] != ODP_PACKET_INVALID);
CU_ASSERT(odp_packet_subtype(pkto[i]) ==
@@ -967,12 +949,8 @@ static int ipsec_send_out_one(const ipsec_test_part *part,
inline_param.tm_queue = ODP_TM_INVALID;
inline_param.outer_hdr.len = hdr_len;
- CU_ASSERT_EQUAL(1, odp_ipsec_out_inline(&pkt, 1, &param,
- &inline_param));
- CU_ASSERT_EQUAL_FATAL(1,
- odp_pktin_event_queue(suite_context.
- pktio,
- &queue, 1));
+ CU_ASSERT(1 == odp_ipsec_out_inline(&pkt, 1, &param, &inline_param));
+ CU_ASSERT_FATAL(1 == odp_pktin_event_queue(suite_context.pktio, &queue, 1));
for (i = 0; i < num_out;) {
odp_event_t ev;
@@ -981,10 +959,8 @@ static int ipsec_send_out_one(const ipsec_test_part *part,
ev = recv_event(queue, 0);
if (ODP_EVENT_INVALID != ev) {
CU_ASSERT(odp_event_is_valid(ev) == 1);
- CU_ASSERT_EQUAL(ODP_EVENT_PACKET,
- odp_event_types(ev, &subtype));
- CU_ASSERT_EQUAL(ODP_EVENT_PACKET_BASIC,
- subtype);
+ CU_ASSERT(ODP_EVENT_PACKET == odp_event_types(ev, &subtype));
+ CU_ASSERT(ODP_EVENT_PACKET_BASIC == subtype);
CU_ASSERT(!part->out[i].status.error.all);
pkto[i] = odp_packet_from_event(ev);
@@ -1010,10 +986,8 @@ static int ipsec_send_out_one(const ipsec_test_part *part,
continue;
}
- CU_ASSERT_EQUAL(ODP_EVENT_PACKET,
- ev_type);
- CU_ASSERT_EQUAL(ODP_EVENT_PACKET_IPSEC,
- subtype);
+ CU_ASSERT(ODP_EVENT_PACKET == ev_type);
+ CU_ASSERT(ODP_EVENT_PACKET_IPSEC == subtype);
/* In the case of SA hard expiry tests, hard expiry error bits are
* expected to be set. The exact error bits expected to be set based
@@ -1053,9 +1027,8 @@ static void ipsec_pkt_seq_num_check(odp_packet_t pkt, uint32_t seq_num)
uint32_t l4_off;
odph_ipv4hdr_t ip;
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_PACKET_OFFSET_INVALID, l3_off);
- CU_ASSERT_EQUAL_FATAL(0, odp_packet_copy_to_mem(pkt, l3_off, sizeof(ip),
- &ip));
+ CU_ASSERT_FATAL(ODP_PACKET_OFFSET_INVALID != l3_off);
+ CU_ASSERT_FATAL(0 == odp_packet_copy_to_mem(pkt, l3_off, sizeof(ip), &ip));
if (ODPH_IPV4HDR_VER(ip.ver_ihl) == ODPH_IPV4) {
l4_off = l3_off + (ODPH_IPV4HDR_IHL(ip.ver_ihl) * 4);
@@ -1064,12 +1037,12 @@ static void ipsec_pkt_seq_num_check(odp_packet_t pkt, uint32_t seq_num)
odph_esphdr_t esp;
odp_packet_copy_to_mem(pkt, l4_off, sizeof(esp), &esp);
- CU_ASSERT_EQUAL(odp_be_to_cpu_32(esp.seq_no), seq_num);
+ CU_ASSERT(odp_be_to_cpu_32(esp.seq_no) == seq_num);
} else if (ip.proto == ODPH_IPPROTO_AH) {
odph_ahhdr_t ah;
odp_packet_copy_to_mem(pkt, l4_off, sizeof(ah), &ah);
- CU_ASSERT_EQUAL(odp_be_to_cpu_32(ah.seq_no), seq_num);
+ CU_ASSERT(odp_be_to_cpu_32(ah.seq_no) == seq_num);
} else {
CU_FAIL("Unexpected IP Proto");
}
@@ -1092,11 +1065,10 @@ static void verify_in(const ipsec_test_part *part,
if (ODP_EVENT_PACKET_IPSEC !=
odp_event_subtype(odp_packet_to_event(pkto[i]))) {
/* Inline packet failed SA lookup */
- CU_ASSERT_EQUAL(1, part->out[i].status.error.sa_lookup);
+ CU_ASSERT(1 == part->out[i].status.error.sa_lookup);
} else {
- CU_ASSERT_EQUAL(0, odp_ipsec_result(&result, pkto[i]));
- CU_ASSERT_EQUAL(part->out[i].status.error.all,
- result.status.error.all);
+ CU_ASSERT(0 == odp_ipsec_result(&result, pkto[i]));
+ CU_ASSERT(part->out[i].status.error.all == result.status.error.all);
if (part->out[i].status.error.all != 0) {
odp_packet_free(pkto[i]);
@@ -1104,17 +1076,13 @@ static void verify_in(const ipsec_test_part *part,
}
if (0 == result.status.error.all)
- CU_ASSERT_EQUAL(0,
- odp_packet_has_error(pkto[i]));
- CU_ASSERT_EQUAL(suite_context.inbound_op_mode ==
- ODP_IPSEC_OP_MODE_INLINE,
- result.flag.inline_mode);
- CU_ASSERT_EQUAL(sa, result.sa);
- CU_ASSERT_EQUAL(part->out[i].status.warn.all,
- result.status.warn.all);
+ CU_ASSERT(0 == odp_packet_has_error(pkto[i]));
+ CU_ASSERT((suite_context.inbound_op_mode == ODP_IPSEC_OP_MODE_INLINE) ==
+ result.flag.inline_mode);
+ CU_ASSERT(sa == result.sa);
+ CU_ASSERT(part->out[i].status.warn.all == result.status.warn.all);
if (ODP_IPSEC_SA_INVALID != sa)
- CU_ASSERT_EQUAL(IPSEC_SA_CTX,
- odp_ipsec_sa_context(sa));
+ CU_ASSERT(IPSEC_SA_CTX == odp_ipsec_sa_context(sa));
if (suite_context.inbound_op_mode != ODP_IPSEC_OP_MODE_SYNC) {
uint32_t len;
@@ -1135,12 +1103,10 @@ static void verify_in(const ipsec_test_part *part,
if (part->out[i].pkt_res != NULL &&
part->out[i].l3_type != _ODP_PROTO_L3_TYPE_UNDEF)
- CU_ASSERT_EQUAL(part->out[i].l3_type,
- odp_packet_l3_type(pkto[i]));
+ CU_ASSERT(part->out[i].l3_type == odp_packet_l3_type(pkto[i]));
if (part->out[i].pkt_res != NULL &&
part->out[i].l4_type != _ODP_PROTO_L4_TYPE_UNDEF)
- CU_ASSERT_EQUAL(part->out[i].l4_type,
- odp_packet_l4_type(pkto[i]));
+ CU_ASSERT(part->out[i].l4_type == odp_packet_l4_type(pkto[i]));
odp_packet_free(pkto[i]);
}
}
@@ -1180,26 +1146,23 @@ int ipsec_check_out(const ipsec_test_part *part, odp_ipsec_sa_t sa,
if (ODP_EVENT_PACKET_IPSEC !=
odp_event_subtype(odp_packet_to_event(pkto[i]))) {
/* Inline packet went through loop */
- CU_ASSERT_EQUAL(0, part->out[i].status.error.all);
+ CU_ASSERT(0 == part->out[i].status.error.all);
CU_ASSERT(odp_packet_user_ptr(pkto[i]) == NULL);
/* L2 header must match the requested one */
check_l2_header(part->out[i].pkt_res, pkto[i]);
} else {
/* IPsec packet */
- CU_ASSERT_EQUAL(0, odp_ipsec_result(&result, pkto[i]));
+ CU_ASSERT(0 == odp_ipsec_result(&result, pkto[i]));
if (part->out[i].sa_expiry != IPSEC_TEST_EXPIRY_NONE)
if (ipsec_check_sa_expiry(part->out[i].sa_expiry, &result) != 0)
return num_out;
- CU_ASSERT_EQUAL(part->out[i].status.error.all,
- result.status.error.all);
+ CU_ASSERT(part->out[i].status.error.all == result.status.error.all);
if (0 == result.status.error.all)
- CU_ASSERT_EQUAL(0,
- odp_packet_has_error(pkto[i]));
- CU_ASSERT_EQUAL(sa, result.sa);
- CU_ASSERT_EQUAL(IPSEC_SA_CTX,
- odp_ipsec_sa_context(sa));
+ CU_ASSERT(0 == odp_packet_has_error(pkto[i]));
+ CU_ASSERT(sa == result.sa);
+ CU_ASSERT(IPSEC_SA_CTX == odp_ipsec_sa_context(sa));
CU_ASSERT(odp_packet_user_ptr(pkto[i]) == PACKET_USER_PTR);
/* Parse the packet to set L4 offset and type */
@@ -1236,7 +1199,7 @@ void ipsec_check_in_one(const ipsec_test_part *part, odp_ipsec_sa_t sa)
int num_out;
num_out = ipsec_process_in(part, sa, pkto);
- CU_ASSERT_EQUAL(num_out, part->num_pkt);
+ CU_ASSERT(num_out == part->num_pkt);
verify_in(part, sa, pkto);
}
diff --git a/test/validation/api/ipsec/ipsec.h b/test/validation/api/ipsec/ipsec.h
index 47612e3b3..3daa364cc 100644
--- a/test/validation/api/ipsec/ipsec.h
+++ b/test/validation/api/ipsec/ipsec.h
@@ -1,9 +1,7 @@
-/* Copyright (c) 2017-2018, Linaro Limited
- * Copyright (c) 2020, Marvell
- * Copyright (c) 2020, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2017-2018 Linaro Limited
+ * Copyright (c) 2020 Marvell
+ * Copyright (c) 2020 Nokia
*/
#ifndef _ODP_TEST_IPSEC_H_
diff --git a/test/validation/api/ipsec/ipsec_test_in.c b/test/validation/api/ipsec/ipsec_test_in.c
index b5251544e..a93bf9f2a 100644
--- a/test/validation/api/ipsec/ipsec_test_in.c
+++ b/test/validation/api/ipsec/ipsec_test_in.c
@@ -1,9 +1,7 @@
-/* Copyright (c) 2017-2018, Linaro Limited
- * Copyright (c) 2020-2021, Marvell
- * Copyright (c) 2021, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2017-2018 Linaro Limited
+ * Copyright (c) 2020-2021 Marvell
+ * Copyright (c) 2021 Nokia
*/
#include <odp/helper/odph_api.h>
@@ -62,7 +60,7 @@ static void test_in_ipv4_ah_sha256(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_ah_sha256_1,
@@ -97,7 +95,7 @@ static void test_in_ipv4_ah_sha256_tun_ipv4(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_ah_tun_ipv4_sha256_1,
@@ -132,7 +130,7 @@ static void test_in_ipv4_ah_sha256_tun_ipv6(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_ah_tun_ipv6_sha256_1,
@@ -164,7 +162,7 @@ static void test_in_ipv4_ah_sha256_tun_ipv4_notun(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_ah_tun_ipv4_sha256_1,
@@ -197,7 +195,7 @@ static void test_in_ipv4_esp_null_sha256(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_esp_null_sha256_1,
@@ -229,7 +227,7 @@ static void test_in_ipv4_esp_aes_cbc_null(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_esp_aes_cbc_null_1,
@@ -261,7 +259,7 @@ static void test_in_ipv4_esp_aes_cbc_sha1(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_esp_aes_cbc_sha1_1,
@@ -293,7 +291,7 @@ static void test_in_ipv4_esp_aes_cbc_sha256(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_esp_aes_cbc_sha256_1,
@@ -325,7 +323,7 @@ static void test_in_ipv4_esp_aes_cbc_sha384(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_esp_aes_cbc_sha384_1,
@@ -357,7 +355,7 @@ static void test_in_ipv4_esp_aes_cbc_sha512(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_esp_aes_cbc_sha512_1,
@@ -389,7 +387,7 @@ static void test_in_ipv4_esp_aes_ctr_null(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_esp_aes_ctr_null_1,
@@ -421,7 +419,7 @@ static void test_in_ipv4_ah_sha256_lookup(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_ah_sha256_1,
@@ -456,7 +454,7 @@ static void test_in_ipv4_esp_null_sha256_lookup(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_esp_null_sha256_1,
@@ -494,7 +492,7 @@ static void test_in_ipv4_esp_null_sha256_tun_ipv4(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_esp_tun_ipv4_null_sha256_1,
@@ -529,7 +527,7 @@ static void test_in_ipv4_esp_null_sha256_tun_ipv6(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_esp_tun_ipv6_null_sha256_1,
@@ -562,7 +560,7 @@ static void test_in_ipv4_esp_udp_null_sha256(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_esp_udp_null_sha256_1,
@@ -595,7 +593,7 @@ static void test_in_ipv4_esp_udp_null_sha256_lookup(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_esp_udp_null_sha256_1,
@@ -631,7 +629,7 @@ static void test_in_ipv4_ah_sha256_noreplay(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_ah_sha256_1,
@@ -682,7 +680,7 @@ static void test_in_ipv4_ah_sha256_replay(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_ah_sha256_1,
@@ -734,7 +732,7 @@ static void test_in_ipv4_esp_null_sha256_noreplay(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_esp_null_sha256_1,
@@ -785,7 +783,7 @@ static void test_in_ipv4_esp_null_sha256_replay(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_esp_null_sha256_1,
@@ -844,7 +842,7 @@ static void test_in_ipv4_ah_esp_pkt(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
test.pkt_in = &pkt_ipv4_icmp_0_esp_null_sha256_1;
test.num_pkt = 1;
@@ -876,7 +874,7 @@ static void test_in_ipv4_esp_ah_pkt(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
test.pkt_in = &pkt_ipv4_icmp_0_ah_sha256_1;
test.num_pkt = 1;
@@ -903,7 +901,7 @@ static void test_in_ipv4_ah_esp_pkt_lookup(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
test.pkt_in = &pkt_ipv4_icmp_0_esp_null_sha256_1;
test.flags.lookup = 1;
@@ -931,7 +929,7 @@ static void test_in_ipv4_esp_ah_pkt_lookup(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
test.pkt_in = &pkt_ipv4_icmp_0_ah_sha256_1;
test.flags.lookup = 1;
@@ -959,7 +957,7 @@ static void test_in_ipv4_ah_sha256_bad1(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
test.pkt_in = &pkt_ipv4_icmp_0_ah_sha256_1_bad1;
test.num_pkt = 1;
@@ -986,7 +984,7 @@ static void test_in_ipv4_ah_sha256_bad2(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
test.pkt_in = &pkt_ipv4_icmp_0_ah_sha256_1_bad2;
test.num_pkt = 1;
@@ -1013,7 +1011,7 @@ static void test_in_ipv4_esp_null_sha256_bad1(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
test.pkt_in = &pkt_ipv4_icmp_0_esp_null_sha256_1_bad1;
test.num_pkt = 1;
@@ -1037,7 +1035,7 @@ static void test_in_ipv4_rfc3602_5_esp(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_rfc3602_5_esp,
@@ -1069,7 +1067,7 @@ static void test_in_ipv4_rfc3602_6_esp(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_rfc3602_6_esp,
@@ -1105,7 +1103,7 @@ static void test_in_ipv4_rfc3602_7_esp(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_rfc3602_7_esp,
@@ -1141,7 +1139,7 @@ static void test_in_ipv4_rfc3602_8_esp(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_rfc3602_8_esp,
@@ -1177,7 +1175,7 @@ static void test_in_ipv4_mcgrew_gcm_2_esp(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_mcgrew_gcm_test_2_esp,
@@ -1213,7 +1211,7 @@ static void test_in_ipv4_mcgrew_gcm_3_esp(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_mcgrew_gcm_test_3_esp,
@@ -1249,7 +1247,7 @@ static void test_in_ipv4_mcgrew_gcm_4_esp(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_mcgrew_gcm_test_4_esp,
@@ -1290,7 +1288,7 @@ static void test_in_ipv4_mcgrew_gcm_12_esp(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_mcgrew_gcm_test_12_esp,
@@ -1323,7 +1321,7 @@ static void test_in_ipv4_mcgrew_gcm_12_esp_notun(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_mcgrew_gcm_test_12_esp,
@@ -1359,7 +1357,7 @@ static void test_in_ipv4_mcgrew_gcm_15_esp(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_mcgrew_gcm_test_15_esp,
@@ -1395,7 +1393,7 @@ static void test_in_ipv4_rfc7634_chacha(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_rfc7634_esp,
@@ -1427,7 +1425,7 @@ static void test_in_ipv4_ah_aes_gmac_128(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_ah_aes_gmac_128_1,
@@ -1459,7 +1457,7 @@ static void test_in_ipv4_esp_null_aes_gmac_128(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0_esp_null_aes_gmac_128_1,
@@ -1491,7 +1489,7 @@ static void test_in_ipv6_ah_sha256(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv6_icmp_0_ah_sha256_1,
@@ -1526,7 +1524,7 @@ static void test_in_ipv6_ah_sha256_tun_ipv4(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv6_icmp_0_ah_tun_ipv4_sha256_1,
@@ -1561,7 +1559,7 @@ static void test_in_ipv6_ah_sha256_tun_ipv6(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv6_icmp_0_ah_tun_ipv6_sha256_1,
@@ -1593,7 +1591,7 @@ static void test_in_ipv6_esp_null_sha256(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv6_icmp_0_esp_null_sha256_1,
@@ -1628,7 +1626,7 @@ static void test_in_ipv6_esp_null_sha256_tun_ipv4(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv6_icmp_0_esp_tun_ipv4_null_sha256_1,
@@ -1663,7 +1661,7 @@ static void test_in_ipv6_esp_null_sha256_tun_ipv6(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv6_icmp_0_esp_tun_ipv6_null_sha256_1,
@@ -1696,7 +1694,7 @@ static void test_in_ipv6_esp_udp_null_sha256(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv6_icmp_0_esp_udp_null_sha256_1,
@@ -1729,7 +1727,7 @@ static void test_in_ipv6_esp_udp_null_sha256_lookup(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv6_icmp_0_esp_udp_null_sha256_1,
@@ -1769,7 +1767,7 @@ static void test_ipsec_sa_print(void)
in_sa = odp_ipsec_sa_create(&param_in);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, in_sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != in_sa);
odp_ipsec_sa_print(in_sa);
@@ -1802,7 +1800,7 @@ static void test_multi_out_in(odp_ipsec_sa_t out_sa,
*/
part_prep_esp(&test_out, 1, tunnel_ip_ver == ODPH_IPV6);
test_out.pkt_in = input_packets[i];
- CU_ASSERT_EQUAL(ipsec_check_out(&test_out, out_sa, &pkt), 1);
+ CU_ASSERT(ipsec_check_out(&test_out, out_sa, &pkt) == 1);
/*
* Perform inbound IPsec processing for the IPsec packet.
@@ -1952,10 +1950,10 @@ static void test_in_ipv4_esp_reass_success(void)
param_in.inbound.reassembly_en = 1;
out_sa = odp_ipsec_sa_create(&param_out);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, out_sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != out_sa);
in_sa = odp_ipsec_sa_create(&param_in);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, in_sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != in_sa);
printf("\n IPv4 two frags");
test_in_ipv4_esp_reass_success_two_frags(out_sa, in_sa);
@@ -2007,10 +2005,10 @@ static void test_in_ipv4_esp_reass_incomp(void)
param_in.inbound.reassembly_en = 1;
out_sa = odp_ipsec_sa_create(&param_out);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, out_sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != out_sa);
in_sa = odp_ipsec_sa_create(&param_in);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, in_sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != in_sa);
printf("\n IPv4 missing frag");
test_in_ipv4_esp_reass_incomp_missing(out_sa, in_sa);
@@ -2141,10 +2139,10 @@ static void test_in_ipv6_esp_reass_success(void)
param_in.inbound.reassembly_en = 1;
out_sa = odp_ipsec_sa_create(&param_out);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, out_sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != out_sa);
in_sa = odp_ipsec_sa_create(&param_in);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, in_sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != in_sa);
printf("\n IPv6 two frags");
test_in_ipv6_esp_reass_success_two_frags(out_sa, in_sa);
@@ -2200,10 +2198,10 @@ static void test_in_ipv6_esp_reass_incomp(void)
param_in.inbound.reassembly_en = 1;
out_sa = odp_ipsec_sa_create(&param_out);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, out_sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != out_sa);
in_sa = odp_ipsec_sa_create(&param_in);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, in_sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != in_sa);
printf("\n IPv6 missing frag");
test_in_ipv6_esp_reass_incomp_missing(out_sa, in_sa);
@@ -2231,7 +2229,7 @@ static void test_in_ipv4_null_aes_xcbc_esp(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_null_aes_xcbc_esp,
diff --git a/test/validation/api/ipsec/ipsec_test_out.c b/test/validation/api/ipsec/ipsec_test_out.c
index ca8bf97a5..fea66b630 100644
--- a/test/validation/api/ipsec/ipsec_test_out.c
+++ b/test/validation/api/ipsec/ipsec_test_out.c
@@ -1,9 +1,7 @@
-/* Copyright (c) 2017-2018, Linaro Limited
- * Copyright (c) 2020, Marvell
- * Copyright (c) 2020-2022, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2017-2018 Linaro Limited
+ * Copyright (c) 2020 Marvell
+ * Copyright (c) 2020-2022 Nokia
*/
#include <odp/helper/odph_api.h>
@@ -145,7 +143,7 @@ static void test_out_ipv4_ah_sha256(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0,
@@ -184,7 +182,7 @@ static void test_out_ipv4_ah_sha256_tun_ipv4(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0,
@@ -229,7 +227,7 @@ static void test_out_ipv4_ah_sha256_tun_ipv6(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0,
@@ -259,7 +257,7 @@ static void test_out_ipv4_esp_null_sha256(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0,
@@ -298,7 +296,7 @@ static void test_out_ipv4_esp_null_sha256_tun_ipv4(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0,
@@ -344,7 +342,7 @@ static void test_out_ipv4_esp_null_sha256_tun_ipv6(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0,
@@ -364,15 +362,15 @@ static void test_out_ipv4_esp_null_sha256_tun_ipv6(void)
static void test_ipsec_stats_zero_assert(odp_ipsec_stats_t *stats)
{
- CU_ASSERT_EQUAL(stats->success, 0);
- CU_ASSERT_EQUAL(stats->proto_err, 0);
- CU_ASSERT_EQUAL(stats->auth_err, 0);
- CU_ASSERT_EQUAL(stats->antireplay_err, 0);
- CU_ASSERT_EQUAL(stats->alg_err, 0);
- CU_ASSERT_EQUAL(stats->mtu_err, 0);
- CU_ASSERT_EQUAL(stats->hard_exp_bytes_err, 0);
- CU_ASSERT_EQUAL(stats->hard_exp_pkts_err, 0);
- CU_ASSERT_EQUAL(stats->success_bytes, 0);
+ CU_ASSERT(stats->success == 0);
+ CU_ASSERT(stats->proto_err == 0);
+ CU_ASSERT(stats->auth_err == 0);
+ CU_ASSERT(stats->antireplay_err == 0);
+ CU_ASSERT(stats->alg_err == 0);
+ CU_ASSERT(stats->mtu_err == 0);
+ CU_ASSERT(stats->hard_exp_bytes_err == 0);
+ CU_ASSERT(stats->hard_exp_pkts_err == 0);
+ CU_ASSERT(stats->success_bytes == 0);
}
static void test_ipsec_stats_test_assert(odp_ipsec_stats_t *stats,
@@ -380,34 +378,34 @@ static void test_ipsec_stats_test_assert(odp_ipsec_stats_t *stats,
uint64_t succ_bytes)
{
if (test == IPSEC_TEST_STATS_SUCCESS) {
- CU_ASSERT_EQUAL(stats->success, 1);
+ CU_ASSERT(stats->success == 1);
CU_ASSERT(stats->success_bytes >= succ_bytes);
} else {
- CU_ASSERT_EQUAL(stats->success, 0);
- CU_ASSERT_EQUAL(stats->success_bytes, 0);
+ CU_ASSERT(stats->success == 0);
+ CU_ASSERT(stats->success_bytes == 0);
}
if (test == IPSEC_TEST_STATS_PROTO_ERR) {
/* Braces needed by CU macro */
- CU_ASSERT_EQUAL(stats->proto_err, 1);
+ CU_ASSERT(stats->proto_err == 1);
} else {
/* Braces needed by CU macro */
- CU_ASSERT_EQUAL(stats->proto_err, 0);
+ CU_ASSERT(stats->proto_err == 0);
}
if (test == IPSEC_TEST_STATS_AUTH_ERR) {
/* Braces needed by CU macro */
- CU_ASSERT_EQUAL(stats->auth_err, 1);
+ CU_ASSERT(stats->auth_err == 1);
} else {
/* Braces needed by CU macro */
- CU_ASSERT_EQUAL(stats->auth_err, 0);
+ CU_ASSERT(stats->auth_err == 0);
}
- CU_ASSERT_EQUAL(stats->antireplay_err, 0);
- CU_ASSERT_EQUAL(stats->alg_err, 0);
- CU_ASSERT_EQUAL(stats->mtu_err, 0);
- CU_ASSERT_EQUAL(stats->hard_exp_bytes_err, 0);
- CU_ASSERT_EQUAL(stats->hard_exp_pkts_err, 0);
+ CU_ASSERT(stats->antireplay_err == 0);
+ CU_ASSERT(stats->alg_err == 0);
+ CU_ASSERT(stats->mtu_err == 0);
+ CU_ASSERT(stats->hard_exp_bytes_err == 0);
+ CU_ASSERT(stats->hard_exp_pkts_err == 0);
}
static void ipsec_pkt_proto_err_set(odp_packet_t pkt)
@@ -527,7 +525,7 @@ static void test_out_in_common(const ipsec_test_flags *flags,
odp_proto_l3_type_t out_l3_type = ODP_PROTO_L3_TYPE_IPV4;
odp_proto_l4_type_t out_l4_type = ODP_PROTO_L4_TYPE_ESP;
- CU_ASSERT_NOT_EQUAL_FATAL(flags, NULL);
+ CU_ASSERT_FATAL(flags != NULL);
/* ICV won't be generated for NULL AUTH */
if ((flags->stats == IPSEC_TEST_STATS_AUTH_ERR) &&
@@ -566,7 +564,7 @@ static void test_out_in_common(const ipsec_test_flags *flags,
if (sa_out == ODP_IPSEC_SA_INVALID && sa_creation_failure_ok(&param))
return;
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa_out);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa_out);
ipsec_sa_param_fill(&param,
ODP_IPSEC_DIR_INBOUND, proto, 123, tun_ptr,
@@ -579,7 +577,7 @@ static void test_out_in_common(const ipsec_test_flags *flags,
sa_in = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa_in);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa_in);
if ((flags->tunnel && flags->tunnel_is_v6) ||
(!flags->tunnel && flags->v6))
@@ -627,9 +625,9 @@ static void test_out_in_common(const ipsec_test_flags *flags,
test_in.out[0].status.error.auth = 1;
if (flags->stats != IPSEC_TEST_STATS_NONE) {
- CU_ASSERT_EQUAL(odp_ipsec_stats(sa_out, &stats), 0);
+ CU_ASSERT(odp_ipsec_stats(sa_out, &stats) == 0);
test_ipsec_stats_zero_assert(&stats);
- CU_ASSERT_EQUAL(odp_ipsec_stats(sa_in, &stats), 0);
+ CU_ASSERT(odp_ipsec_stats(sa_in, &stats) == 0);
test_ipsec_stats_zero_assert(&stats);
}
@@ -675,11 +673,11 @@ static void test_out_in_common(const ipsec_test_flags *flags,
/* All stats tests have outbound operation success and inbound
* varying.
*/
- CU_ASSERT_EQUAL(odp_ipsec_stats(sa_out, &stats), 0);
+ CU_ASSERT(odp_ipsec_stats(sa_out, &stats) == 0);
test_ipsec_stats_test_assert(&stats, IPSEC_TEST_STATS_SUCCESS,
succ_bytes);
- CU_ASSERT_EQUAL(odp_ipsec_stats(sa_in, &stats), 0);
+ CU_ASSERT(odp_ipsec_stats(sa_in, &stats) == 0);
test_ipsec_stats_test_assert(&stats, flags->stats, succ_bytes);
}
@@ -815,7 +813,7 @@ static void test_out_ipv4_esp_udp_null_sha256(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_icmp_0,
@@ -852,7 +850,7 @@ static void test_out_ipv4_ah_sha256_frag_check(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
test.pkt_in = &pkt_ipv4_icmp_0;
test.num_pkt = 1;
@@ -892,7 +890,7 @@ static void test_out_ipv4_ah_sha256_frag_check_2(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
test.pkt_in = &pkt_ipv4_icmp_0;
test.num_pkt = 1;
@@ -940,7 +938,7 @@ static void test_out_ipv4_esp_null_sha256_frag_check(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
test.pkt_in = &pkt_ipv4_icmp_0;
test.num_pkt = 1;
@@ -981,7 +979,7 @@ static void test_out_ipv4_esp_null_sha256_frag_check_2(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
test.pkt_in = &pkt_ipv4_icmp_0;
test.num_pkt = 1;
@@ -1021,7 +1019,7 @@ static void test_out_ipv6_ah_sha256(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv6_icmp_0,
@@ -1060,7 +1058,7 @@ static void test_out_ipv6_ah_sha256_tun_ipv4(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv6_icmp_0,
@@ -1105,7 +1103,7 @@ static void test_out_ipv6_ah_sha256_tun_ipv6(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv6_icmp_0,
@@ -1135,7 +1133,7 @@ static void test_out_ipv6_esp_null_sha256(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv6_icmp_0,
@@ -1174,7 +1172,7 @@ static void test_out_ipv6_esp_null_sha256_tun_ipv4(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv6_icmp_0,
@@ -1220,7 +1218,7 @@ static void test_out_ipv6_esp_null_sha256_tun_ipv6(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv6_icmp_0,
@@ -1252,7 +1250,7 @@ static void test_out_ipv6_esp_udp_null_sha256(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv6_icmp_0,
@@ -1299,7 +1297,7 @@ static void test_out_dummy_esp_null_sha256_tun(odp_ipsec_tunnel_param_t tunnel)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_sa_param_fill(&param,
ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP, 123, &tunnel,
@@ -1309,7 +1307,7 @@ static void test_out_dummy_esp_null_sha256_tun(odp_ipsec_tunnel_param_t tunnel)
sa2 = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa2);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa2);
test.pkt_in = &pkt_test_nodata;
test.num_opt = 1;
@@ -1387,7 +1385,7 @@ static void test_out_ipv4_udp_esp_null_sha256(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_udp,
@@ -1429,7 +1427,7 @@ static void test_out_ipv4_null_aes_xcbc(void)
sa = odp_ipsec_sa_create(&param);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa);
ipsec_test_part test = {
.pkt_in = &pkt_ipv4_null_aes_xcbc_plain,
@@ -1478,7 +1476,7 @@ static void test_sa_info(void)
sa_out = odp_ipsec_sa_create(&param_out);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, sa_out);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != sa_out);
ipsec_sa_param_fill(&param_in,
ODP_IPSEC_DIR_INBOUND, ODP_IPSEC_ESP,
@@ -1492,57 +1490,48 @@ static void test_sa_info(void)
CU_ASSERT_FATAL(sa_in != ODP_IPSEC_SA_INVALID);
memset(&info_out, 0, sizeof(info_out));
- CU_ASSERT_EQUAL_FATAL(0, odp_ipsec_sa_info(sa_out, &info_out));
-
- CU_ASSERT_EQUAL(info_out.param.dir, param_out.dir);
- CU_ASSERT_EQUAL(info_out.param.proto, param_out.proto);
- CU_ASSERT_EQUAL(info_out.param.mode, param_out.mode);
-
- CU_ASSERT_EQUAL(info_out.param.crypto.cipher_alg,
- param_out.crypto.cipher_alg);
- CU_ASSERT_EQUAL(info_out.param.crypto.auth_alg,
- param_out.crypto.auth_alg);
- CU_ASSERT_EQUAL(info_out.param.opt.udp_encap, param_out.opt.udp_encap);
- CU_ASSERT_EQUAL(info_out.param.spi, param_out.spi);
- CU_ASSERT_EQUAL(info_out.param.opt.esn, param_out.opt.esn);
- CU_ASSERT_EQUAL(info_out.param.opt.udp_encap, param_out.opt.udp_encap);
- CU_ASSERT_EQUAL(info_out.param.opt.copy_dscp, param_out.opt.copy_dscp);
- CU_ASSERT_EQUAL(info_out.param.opt.copy_flabel, param_out.opt.copy_flabel);
- CU_ASSERT_EQUAL(info_out.param.opt.copy_df, param_out.opt.copy_df);
-
- CU_ASSERT_EQUAL(ODP_IPSEC_MODE_TUNNEL, info_out.param.mode);
-
- CU_ASSERT_EQUAL(info_out.param.outbound.tunnel.type,
- param_out.outbound.tunnel.type);
- CU_ASSERT_EQUAL(info_out.param.outbound.tunnel.ipv4.dscp,
- param_out.outbound.tunnel.ipv4.dscp);
- CU_ASSERT_EQUAL(info_out.param.outbound.tunnel.ipv4.df,
- param_out.outbound.tunnel.ipv4.df);
- CU_ASSERT_NOT_EQUAL_FATAL(NULL,
- info_out.param.outbound.tunnel.ipv4.src_addr);
- CU_ASSERT_EQUAL(0, memcmp(info_out.param.outbound.tunnel.ipv4.src_addr,
- param_out.outbound.tunnel.ipv4.src_addr,
- ODP_IPV4_ADDR_SIZE));
- CU_ASSERT_NOT_EQUAL_FATAL(NULL,
- info_out.param.outbound.tunnel.ipv4.dst_addr);
- CU_ASSERT_EQUAL(0, memcmp(info_out.param.outbound.tunnel.ipv4.dst_addr,
- param_out.outbound.tunnel.ipv4.dst_addr,
- ODP_IPV4_ADDR_SIZE));
-
- CU_ASSERT_EQUAL(info_out.param.lifetime.soft_limit.bytes,
- param_out.lifetime.soft_limit.bytes);
- CU_ASSERT_EQUAL(info_out.param.lifetime.hard_limit.bytes,
- param_out.lifetime.hard_limit.bytes);
- CU_ASSERT_EQUAL(info_out.param.lifetime.soft_limit.packets,
- param_out.lifetime.soft_limit.packets);
- CU_ASSERT_EQUAL(info_out.param.lifetime.hard_limit.packets,
- param_out.lifetime.hard_limit.packets);
-
- CU_ASSERT_EQUAL(0, info_out.outbound.seq_num);
+ CU_ASSERT_FATAL(0 == odp_ipsec_sa_info(sa_out, &info_out));
+
+ CU_ASSERT(info_out.param.dir == param_out.dir);
+ CU_ASSERT(info_out.param.proto == param_out.proto);
+ CU_ASSERT(info_out.param.mode == param_out.mode);
+
+ CU_ASSERT(info_out.param.crypto.cipher_alg == param_out.crypto.cipher_alg);
+ CU_ASSERT(info_out.param.crypto.auth_alg == param_out.crypto.auth_alg);
+ CU_ASSERT(info_out.param.opt.udp_encap == param_out.opt.udp_encap);
+ CU_ASSERT(info_out.param.spi == param_out.spi);
+ CU_ASSERT(info_out.param.opt.esn == param_out.opt.esn);
+ CU_ASSERT(info_out.param.opt.udp_encap == param_out.opt.udp_encap);
+ CU_ASSERT(info_out.param.opt.copy_dscp == param_out.opt.copy_dscp);
+ CU_ASSERT(info_out.param.opt.copy_flabel == param_out.opt.copy_flabel);
+ CU_ASSERT(info_out.param.opt.copy_df == param_out.opt.copy_df);
+
+ CU_ASSERT(ODP_IPSEC_MODE_TUNNEL == info_out.param.mode);
+
+ CU_ASSERT(info_out.param.outbound.tunnel.type == param_out.outbound.tunnel.type);
+ CU_ASSERT(info_out.param.outbound.tunnel.ipv4.dscp == param_out.outbound.tunnel.ipv4.dscp);
+ CU_ASSERT(info_out.param.outbound.tunnel.ipv4.df == param_out.outbound.tunnel.ipv4.df);
+ CU_ASSERT_FATAL(NULL != info_out.param.outbound.tunnel.ipv4.src_addr);
+ CU_ASSERT(0 == memcmp(info_out.param.outbound.tunnel.ipv4.src_addr,
+ param_out.outbound.tunnel.ipv4.src_addr,
+ ODP_IPV4_ADDR_SIZE));
+ CU_ASSERT_FATAL(NULL != info_out.param.outbound.tunnel.ipv4.dst_addr);
+ CU_ASSERT(0 == memcmp(info_out.param.outbound.tunnel.ipv4.dst_addr,
+ param_out.outbound.tunnel.ipv4.dst_addr,
+ ODP_IPV4_ADDR_SIZE));
+
+ CU_ASSERT(info_out.param.lifetime.soft_limit.bytes == param_out.lifetime.soft_limit.bytes);
+ CU_ASSERT(info_out.param.lifetime.hard_limit.bytes == param_out.lifetime.hard_limit.bytes);
+ CU_ASSERT(info_out.param.lifetime.soft_limit.packets ==
+ param_out.lifetime.soft_limit.packets);
+ CU_ASSERT(info_out.param.lifetime.hard_limit.packets ==
+ param_out.lifetime.hard_limit.packets);
+
+ CU_ASSERT(0 == info_out.outbound.seq_num);
memset(&info_in, 0, sizeof(info_in));
- CU_ASSERT_EQUAL_FATAL(0, odp_ipsec_sa_info(sa_in, &info_in));
- CU_ASSERT_EQUAL(0, info_in.inbound.antireplay_window_top);
+ CU_ASSERT_FATAL(0 == odp_ipsec_sa_info(sa_in, &info_in));
+ CU_ASSERT(0 == info_in.inbound.antireplay_window_top);
ipsec_test_part test_out = {
.pkt_in = &pkt_ipv4_icmp_0,
@@ -1569,12 +1558,12 @@ static void test_sa_info(void)
ipsec_check_out_in_one(&test_out, &test_in, sa_out, sa_in, NULL);
memset(&info_out, 0, sizeof(info_out));
- CU_ASSERT_EQUAL_FATAL(0, odp_ipsec_sa_info(sa_out, &info_out));
- CU_ASSERT_EQUAL(1, info_out.outbound.seq_num);
+ CU_ASSERT_FATAL(0 == odp_ipsec_sa_info(sa_out, &info_out));
+ CU_ASSERT(1 == info_out.outbound.seq_num);
memset(&info_in, 0, sizeof(info_in));
- CU_ASSERT_EQUAL_FATAL(0, odp_ipsec_sa_info(sa_in, &info_in));
- CU_ASSERT_EQUAL(1, info_in.inbound.antireplay_window_top);
+ CU_ASSERT_FATAL(0 == odp_ipsec_sa_info(sa_in, &info_in));
+ CU_ASSERT(1 == info_in.inbound.antireplay_window_top);
ipsec_sa_destroy(sa_out);
ipsec_sa_destroy(sa_in);
@@ -1697,7 +1686,7 @@ static void test_out_ipv4_esp_sa_expiry(enum ipsec_test_sa_expiry expiry)
param_out.lifetime.hard_limit.packets = hard_limit_pkt;
out_sa = odp_ipsec_sa_create(&param_out);
- CU_ASSERT_NOT_EQUAL_FATAL(ODP_IPSEC_SA_INVALID, out_sa);
+ CU_ASSERT_FATAL(ODP_IPSEC_SA_INVALID != out_sa);
ipsec_test_part test_out = {
.pkt_in = &pkt_ipv4_icmp_0,
diff --git a/test/validation/api/ipsec/reass_test_vectors.c b/test/validation/api/ipsec/reass_test_vectors.c
index c3bb2bfd4..45dd7af1b 100644
--- a/test/validation/api/ipsec/reass_test_vectors.c
+++ b/test/validation/api/ipsec/reass_test_vectors.c
@@ -1,7 +1,5 @@
-/* Copyright (c) 2021, Marvell
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2021 Marvell
*/
#include "ipsec.h"
diff --git a/test/validation/api/ipsec/reass_test_vectors.h b/test/validation/api/ipsec/reass_test_vectors.h
index 02b41c573..4c8d8e1f4 100644
--- a/test/validation/api/ipsec/reass_test_vectors.h
+++ b/test/validation/api/ipsec/reass_test_vectors.h
@@ -1,7 +1,5 @@
- /* Copyright (c) 2021, Marvell
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2021 Marvell
*/
#ifndef _ODP_REASS_TEST_VECTORS_H_
diff --git a/test/validation/api/ipsec/test_vectors.h b/test/validation/api/ipsec/test_vectors.h
index b032ef973..b02912f68 100644
--- a/test/validation/api/ipsec/test_vectors.h
+++ b/test/validation/api/ipsec/test_vectors.h
@@ -1,8 +1,6 @@
-/* Copyright (c) 2017-2018, Linaro Limited
- * Copyright (c) 2020, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2017-2018 Linaro Limited
+ * Copyright (c) 2020 Nokia
*/
#ifndef _ODP_TEST_IPSEC_VECTORS_H_
diff --git a/test/validation/api/lock/lock.c b/test/validation/api/lock/lock.c
index a4e6932c4..e8eb4360b 100644
--- a/test/validation/api/lock/lock.c
+++ b/test/validation/api/lock/lock.c
@@ -1,7 +1,5 @@
-/* Copyright (c) 2014-2018, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2014-2018 Linaro Limited
*/
#include <malloc.h>
@@ -117,7 +115,7 @@ static per_thread_mem_t *thread_init(void)
global_shm = odp_shm_lookup(GLOBAL_SHM_NAME);
global_mem = odp_shm_addr(global_shm);
- CU_ASSERT_PTR_NOT_NULL(global_mem);
+ CU_ASSERT(global_mem != NULL);
per_thread_mem->global_mem = global_mem;
diff --git a/test/validation/api/ml/ml.c b/test/validation/api/ml/ml.c
index 5f8be1b64..1f3383428 100644
--- a/test/validation/api/ml/ml.c
+++ b/test/validation/api/ml/ml.c
@@ -317,6 +317,7 @@ static void test_ml_compl_user_area(void)
memset(addr, 0, size);
ev = odp_ml_compl_to_event(compl_evs[i]);
+ odp_event_user_flag_set(ev, 1);
CU_ASSERT(odp_event_user_area(ev) == addr);
CU_ASSERT(odp_event_user_area_and_flag(ev, &flag) == addr);
CU_ASSERT(flag < 0);
diff --git a/test/validation/api/packet/packet.c b/test/validation/api/packet/packet.c
index ca9c73f17..7e7208755 100644
--- a/test/validation/api/packet/packet.c
+++ b/test/validation/api/packet/packet.c
@@ -1,13 +1,9 @@
-/* Copyright (c) 2014-2018, Linaro Limited
- * Copyright (c) 2019-2023, Nokia
- * Copyright (c) 2020, Marvell
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2014-2018 Linaro Limited
+ * Copyright (c) 2019-2024 Nokia
+ * Copyright (c) 2020 Marvell
*/
-#include <stdlib.h>
-
#include <odp_api.h>
#include <odp_cunit_common.h>
#include <test_packet_ipv4.h>
@@ -16,6 +12,9 @@
#include <odp/helper/odph_api.h>
+#include <stdint.h>
+#include <stdlib.h>
+
/* Reserve some tailroom for tests */
#define TAILROOM_RESERVE 4
/* Number of packets in the test packet pool */
@@ -118,8 +117,8 @@ static void _packet_compare_data(odp_packet_t pkt1, odp_packet_t pkt2,
pkt1map = odp_packet_offset(pkt1, offset, &seglen1, NULL);
pkt2map = odp_packet_offset(pkt2, offset, &seglen2, NULL);
- CU_ASSERT_PTR_NOT_NULL_FATAL(pkt1map);
- CU_ASSERT_PTR_NOT_NULL_FATAL(pkt2map);
+ CU_ASSERT_FATAL(pkt1map != NULL);
+ CU_ASSERT_FATAL(pkt2map != NULL);
cmplen = seglen1 < seglen2 ? seglen1 : seglen2;
ret = memcmp(pkt1map, pkt2map, cmplen);
@@ -849,8 +848,8 @@ static void packet_test_basic_metadata(void)
odp_time_t ts;
odp_packet_data_range_t range;
- CU_ASSERT_PTR_NOT_NULL(odp_packet_head(pkt));
- CU_ASSERT_PTR_NOT_NULL(odp_packet_data(pkt));
+ CU_ASSERT(odp_packet_head(pkt) != NULL);
+ CU_ASSERT(odp_packet_data(pkt) != NULL);
CU_ASSERT(odp_packet_pool(pkt) != ODP_POOL_INVALID);
/* Packet was allocated by application so shouldn't have valid pktio. */
@@ -908,14 +907,24 @@ static void packet_test_length(void)
static void packet_test_reset(void)
{
- uint32_t len, headroom;
+ uint32_t len, max_len, headroom;
+ uint32_t uarea_size = default_param.pkt.uarea_size;
uintptr_t ptr_len;
void *data, *new_data, *tail, *new_tail;
+ struct udata_struct *udat;
odp_packet_t pkt;
pkt = odp_packet_alloc(default_pool, packet_len);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ if (uarea_size) {
+ udat = odp_packet_user_area(pkt);
+
+ CU_ASSERT_FATAL(udat != NULL);
+ CU_ASSERT_FATAL(odp_packet_user_area_size(pkt) >= uarea_size);
+ memcpy(udat, &test_packet_udata, uarea_size);
+ }
+
len = odp_packet_len(pkt);
CU_ASSERT(len == packet_len);
@@ -946,8 +955,10 @@ static void packet_test_reset(void)
ptr_len = (uintptr_t)odp_packet_data(pkt) -
(uintptr_t)odp_packet_head(pkt);
CU_ASSERT(ptr_len == (headroom + 1));
- CU_ASSERT(odp_packet_reset(pkt, len) == 0);
- CU_ASSERT(odp_packet_len(pkt) == len);
+ max_len = odp_packet_reset_max_len(pkt);
+ CU_ASSERT(max_len >= len);
+ CU_ASSERT(odp_packet_reset(pkt, max_len) == 0);
+ CU_ASSERT(odp_packet_len(pkt) == max_len);
CU_ASSERT(odp_packet_headroom(pkt) == headroom);
ptr_len = (uintptr_t)odp_packet_data(pkt) -
(uintptr_t)odp_packet_head(pkt);
@@ -956,7 +967,7 @@ static void packet_test_reset(void)
tail = odp_packet_tail(pkt);
new_tail = odp_packet_pull_tail(pkt, 1);
- CU_ASSERT(odp_packet_len(pkt) == len - 1);
+ CU_ASSERT(odp_packet_len(pkt) == max_len - 1);
CU_ASSERT((uintptr_t)new_tail == ((uintptr_t)tail - 1));
CU_ASSERT(odp_packet_reset(pkt, len) == 0);
CU_ASSERT(odp_packet_len(pkt) == len);
@@ -973,6 +984,86 @@ static void packet_test_reset(void)
CU_ASSERT(odp_packet_reset(pkt, len) == 0);
CU_ASSERT(odp_packet_len(pkt) == len);
+ if (odp_packet_reset_max_len(pkt) < UINT32_MAX) {
+ CU_ASSERT(odp_packet_reset(pkt, odp_packet_reset_max_len(pkt) + 1) < 0);
+ CU_ASSERT(odp_packet_len(pkt) == len);
+ }
+
+ if (uarea_size) {
+ udat = odp_packet_user_area(pkt);
+
+ CU_ASSERT_FATAL(udat != NULL);
+ CU_ASSERT_FATAL(odp_packet_user_area_size(pkt) >= uarea_size);
+ CU_ASSERT(memcmp(udat, &test_packet_udata, uarea_size) == 0);
+ }
+
+ odp_packet_free(pkt);
+}
+
+static void packet_test_reset_meta(void)
+{
+ uint32_t data_len, seg_len, headroom, tailroom;
+ uint32_t uarea_size = default_param.pkt.uarea_size;
+ void *data, *head, *tail;
+ struct udata_struct *udat;
+ odp_packet_t pkt;
+ int num_segs;
+
+ pkt = odp_packet_alloc(default_pool, segmented_packet_len);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ if (uarea_size) {
+ udat = odp_packet_user_area(pkt);
+
+ CU_ASSERT_FATAL(udat != NULL);
+ CU_ASSERT_FATAL(odp_packet_user_area_size(pkt) >= uarea_size);
+ memcpy(udat, &test_packet_udata, uarea_size);
+ }
+
+ data_len = odp_packet_len(pkt);
+ CU_ASSERT(data_len == segmented_packet_len);
+
+ odp_packet_pull_head(pkt, 1);
+ head = odp_packet_head(pkt);
+ CU_ASSERT(head != NULL);
+
+ odp_packet_pull_tail(pkt, 1);
+ tail = odp_packet_tail(pkt);
+ CU_ASSERT(tail != NULL);
+
+ headroom = odp_packet_headroom(pkt);
+ tailroom = odp_packet_tailroom(pkt);
+
+ data = odp_packet_data(pkt);
+ data_len = odp_packet_len(pkt);
+
+ seg_len = odp_packet_seg_len(pkt);
+ num_segs = odp_packet_num_segs(pkt);
+
+ odp_packet_reset_meta(pkt);
+
+ CU_ASSERT(odp_packet_data(pkt) == data);
+ CU_ASSERT(odp_packet_len(pkt) == data_len);
+ CU_ASSERT(odp_packet_seg_len(pkt) == seg_len);
+ CU_ASSERT(odp_packet_num_segs(pkt) == num_segs);
+ CU_ASSERT(odp_packet_headroom(pkt) == headroom);
+ CU_ASSERT(odp_packet_tailroom(pkt) == tailroom);
+ CU_ASSERT(odp_packet_head(pkt) == head);
+ CU_ASSERT(odp_packet_tail(pkt) == tail);
+
+ packet_set_inflags_common(pkt, 1);
+ packet_check_inflags_common(pkt, 1);
+ odp_packet_reset_meta(pkt);
+ packet_check_inflags_all(pkt, 0);
+
+ if (uarea_size) {
+ udat = odp_packet_user_area(pkt);
+
+ CU_ASSERT_FATAL(udat != NULL);
+ CU_ASSERT_FATAL(odp_packet_user_area_size(pkt) >= uarea_size);
+ CU_ASSERT(memcmp(udat, &test_packet_udata, uarea_size) == 0);
+ }
+
odp_packet_free(pkt);
}
@@ -1066,7 +1157,7 @@ static void packet_test_layer_offsets(void)
CU_ASSERT(seg_len != 0);
l4_addr = odp_packet_l4_ptr(pkt, &seg_len);
CU_ASSERT(seg_len != 0);
- CU_ASSERT_PTR_NOT_NULL(l2_addr);
+ CU_ASSERT(l2_addr != NULL);
CU_ASSERT(l2_addr == l3_addr);
CU_ASSERT(l2_addr == l4_addr);
@@ -1080,11 +1171,11 @@ static void packet_test_layer_offsets(void)
/* Addresses should not be the same */
l2_addr = odp_packet_l2_ptr(pkt, NULL);
- CU_ASSERT_PTR_NOT_NULL(l2_addr);
+ CU_ASSERT(l2_addr != NULL);
l3_addr = odp_packet_l3_ptr(pkt, NULL);
- CU_ASSERT_PTR_NOT_NULL(l3_addr);
+ CU_ASSERT(l3_addr != NULL);
l4_addr = odp_packet_l4_ptr(pkt, NULL);
- CU_ASSERT_PTR_NOT_NULL(l4_addr);
+ CU_ASSERT(l4_addr != NULL);
CU_ASSERT(l2_addr != l3_addr);
CU_ASSERT(l2_addr != l4_addr);
@@ -1124,7 +1215,7 @@ static void _verify_headroom_shift(odp_packet_t *pkt,
}
packet_sanity_check(*pkt);
- CU_ASSERT_PTR_NOT_NULL(data);
+ CU_ASSERT(data != NULL);
if (extended) {
CU_ASSERT(rc >= 0);
CU_ASSERT(odp_packet_seg_len(*pkt) == seg_len);
@@ -1222,7 +1313,7 @@ static void _verify_tailroom_shift(odp_packet_t *pkt,
}
packet_sanity_check(*pkt);
- CU_ASSERT_PTR_NOT_NULL(tail);
+ CU_ASSERT(tail != NULL);
if (extended) {
CU_ASSERT(rc >= 0);
@@ -1323,7 +1414,7 @@ static void packet_test_segments(void)
seg_data = odp_packet_seg_data(pkt, seg);
CU_ASSERT(seg_data_len > 0);
- CU_ASSERT_PTR_NOT_NULL(seg_data);
+ CU_ASSERT(seg_data != NULL);
CU_ASSERT(odp_packet_seg_to_u64(seg) !=
odp_packet_seg_to_u64(ODP_PACKET_SEG_INVALID));
CU_ASSERT(odp_memcmp(seg_data, seg_data, seg_data_len) == 0);
@@ -1635,8 +1726,8 @@ static void _packet_compare_offset(odp_packet_t pkt1, uint32_t off1,
pkt1map = odp_packet_offset(pkt1, off1, &seglen1, NULL);
pkt2map = odp_packet_offset(pkt2, off2, &seglen2, NULL);
- CU_ASSERT_PTR_NOT_NULL_FATAL(pkt1map);
- CU_ASSERT_PTR_NOT_NULL_FATAL(pkt2map);
+ CU_ASSERT_FATAL(pkt1map != NULL);
+ CU_ASSERT_FATAL(pkt2map != NULL);
cmplen = seglen1 < seglen2 ? seglen1 : seglen2;
if (len < cmplen)
cmplen = len;
@@ -1855,7 +1946,7 @@ static void packet_test_copydata(void)
CU_ASSERT_FATAL(pkt_len > 0);
data_buf = malloc(pkt_len);
- CU_ASSERT_PTR_NOT_NULL_FATAL(data_buf);
+ CU_ASSERT_FATAL(data_buf != NULL);
for (i = 0; i < pkt_len; i++)
data_buf[i] = (uint8_t)i;
@@ -2614,7 +2705,7 @@ static void packet_test_offset(void)
CU_ASSERT(seg_len > 1);
CU_ASSERT(seg_len == odp_packet_seg_len(pkt));
CU_ASSERT(seg_len == odp_packet_seg_data_len(pkt, seg));
- CU_ASSERT_PTR_NOT_NULL(ptr);
+ CU_ASSERT(ptr != NULL);
CU_ASSERT(ptr == odp_packet_data(pkt));
CU_ASSERT(ptr == odp_packet_seg_data(pkt, seg));
@@ -2624,7 +2715,7 @@ static void packet_test_offset(void)
offset = 1;
ptr = odp_packet_offset(pkt, offset, &seg_len, NULL);
- CU_ASSERT_PTR_NOT_NULL(ptr);
+ CU_ASSERT(ptr != NULL);
CU_ASSERT(ptr == start_ptr + offset);
CU_ASSERT(seg_len == full_seg_len - offset);
@@ -2632,19 +2723,19 @@ static void packet_test_offset(void)
offset = full_seg_len - 1;
ptr = odp_packet_offset(pkt, offset, &seg_len, NULL);
- CU_ASSERT_PTR_NOT_NULL(ptr);
+ CU_ASSERT(ptr != NULL);
CU_ASSERT(ptr == start_ptr + offset);
CU_ASSERT(seg_len == full_seg_len - offset);
/* Query the last byte in a packet */
offset = odp_packet_len(pkt) - 1;
ptr = odp_packet_offset(pkt, offset, &seg_len, NULL);
- CU_ASSERT_PTR_NOT_NULL(ptr);
+ CU_ASSERT(ptr != NULL);
CU_ASSERT(seg_len == 1);
/* Pass NULL to [out] arguments */
ptr = odp_packet_offset(pkt, 0, NULL, NULL);
- CU_ASSERT_PTR_NOT_NULL(ptr);
+ CU_ASSERT(ptr != NULL);
}
static void packet_test_ref(void)
@@ -3285,6 +3376,10 @@ static void packet_vector_test_user_area(void)
CU_ASSERT(odp_event_user_area(ev) == addr);
CU_ASSERT(odp_event_user_area_and_flag(ev, &flag) == addr);
CU_ASSERT(flag == 0);
+ odp_event_user_flag_set(ev, 1);
+ CU_ASSERT(odp_event_user_area_and_flag(ev, &flag) == addr);
+ CU_ASSERT(flag > 0);
+ CU_ASSERT(odp_packet_vector_user_flag(pktv[i]) > 0);
prev = addr;
memset(addr, 0, size);
@@ -3478,6 +3573,10 @@ static void packet_test_user_area(void)
CU_ASSERT(odp_event_user_area(ev) == odp_packet_user_area(pkt));
CU_ASSERT(odp_event_user_area_and_flag(ev, &flag) == odp_packet_user_area(pkt));
CU_ASSERT(flag == 0);
+ odp_event_user_flag_set(ev, 1);
+ CU_ASSERT(odp_event_user_area_and_flag(ev, &flag) == odp_packet_user_area(pkt));
+ CU_ASSERT(flag > 0);
+ CU_ASSERT(odp_packet_user_flag(pkt) > 0);
odp_packet_free(pkt);
CU_ASSERT(odp_pool_destroy(pool) == 0);
@@ -3629,12 +3728,9 @@ static void parse_eth_ipv4_udp(void)
CU_ASSERT(odp_packet_has_udp(pkt[i]));
CU_ASSERT(!odp_packet_has_ipv6(pkt[i]));
CU_ASSERT(!odp_packet_has_tcp(pkt[i]));
- CU_ASSERT_EQUAL(odp_packet_l2_type(pkt[i]),
- ODP_PROTO_L2_TYPE_ETH);
- CU_ASSERT_EQUAL(odp_packet_l3_type(pkt[i]),
- ODP_PROTO_L3_TYPE_IPV4);
- CU_ASSERT_EQUAL(odp_packet_l4_type(pkt[i]),
- ODP_PROTO_L4_TYPE_UDP);
+ CU_ASSERT(odp_packet_l2_type(pkt[i]) == ODP_PROTO_L2_TYPE_ETH);
+ CU_ASSERT(odp_packet_l3_type(pkt[i]) == ODP_PROTO_L3_TYPE_IPV4);
+ CU_ASSERT(odp_packet_l4_type(pkt[i]) == ODP_PROTO_L4_TYPE_UDP);
}
odp_packet_free_multi(pkt, num_pkt);
@@ -3673,12 +3769,9 @@ static void parse_eth_snap_ipv4_udp(void)
CU_ASSERT(odp_packet_has_udp(pkt[i]));
CU_ASSERT(!odp_packet_has_ipv6(pkt[i]));
CU_ASSERT(!odp_packet_has_tcp(pkt[i]));
- CU_ASSERT_EQUAL(odp_packet_l2_type(pkt[i]),
- ODP_PROTO_L2_TYPE_ETH);
- CU_ASSERT_EQUAL(odp_packet_l3_type(pkt[i]),
- ODP_PROTO_L3_TYPE_IPV4);
- CU_ASSERT_EQUAL(odp_packet_l4_type(pkt[i]),
- ODP_PROTO_L4_TYPE_UDP);
+ CU_ASSERT(odp_packet_l2_type(pkt[i]) == ODP_PROTO_L2_TYPE_ETH);
+ CU_ASSERT(odp_packet_l3_type(pkt[i]) == ODP_PROTO_L3_TYPE_IPV4);
+ CU_ASSERT(odp_packet_l4_type(pkt[i]) == ODP_PROTO_L4_TYPE_UDP);
}
odp_packet_free_multi(pkt, num_pkt);
@@ -3712,10 +3805,8 @@ static void parse_ipv4_udp(void)
CU_ASSERT(odp_packet_has_udp(pkt[i]));
CU_ASSERT(!odp_packet_has_ipv6(pkt[i]));
CU_ASSERT(!odp_packet_has_tcp(pkt[i]));
- CU_ASSERT_EQUAL(odp_packet_l3_type(pkt[i]),
- ODP_PROTO_L3_TYPE_IPV4);
- CU_ASSERT_EQUAL(odp_packet_l4_type(pkt[i]),
- ODP_PROTO_L4_TYPE_UDP);
+ CU_ASSERT(odp_packet_l3_type(pkt[i]) == ODP_PROTO_L3_TYPE_IPV4);
+ CU_ASSERT(odp_packet_l4_type(pkt[i]) == ODP_PROTO_L4_TYPE_UDP);
}
odp_packet_free_multi(pkt, num_pkt);
@@ -3746,12 +3837,9 @@ static void parse_eth_ipv4_tcp(void)
CU_ASSERT(odp_packet_has_tcp(pkt[i]));
CU_ASSERT(!odp_packet_has_ipv6(pkt[i]));
CU_ASSERT(!odp_packet_has_udp(pkt[i]));
- CU_ASSERT_EQUAL(odp_packet_l2_type(pkt[i]),
- ODP_PROTO_L2_TYPE_ETH);
- CU_ASSERT_EQUAL(odp_packet_l3_type(pkt[i]),
- ODP_PROTO_L3_TYPE_IPV4);
- CU_ASSERT_EQUAL(odp_packet_l4_type(pkt[i]),
- ODP_PROTO_L4_TYPE_TCP);
+ CU_ASSERT(odp_packet_l2_type(pkt[i]) == ODP_PROTO_L2_TYPE_ETH);
+ CU_ASSERT(odp_packet_l3_type(pkt[i]) == ODP_PROTO_L3_TYPE_IPV4);
+ CU_ASSERT(odp_packet_l4_type(pkt[i]) == ODP_PROTO_L4_TYPE_TCP);
}
odp_packet_free_multi(pkt, num_pkt);
@@ -3874,12 +3962,9 @@ static void parse_eth_vlan_ipv6_udp(void)
CU_ASSERT(odp_packet_has_udp(pkt[i]));
CU_ASSERT(!odp_packet_has_ipv4(pkt[i]));
CU_ASSERT(!odp_packet_has_tcp(pkt[i]));
- CU_ASSERT_EQUAL(odp_packet_l2_type(pkt[i]),
- ODP_PROTO_L2_TYPE_ETH);
- CU_ASSERT_EQUAL(odp_packet_l3_type(pkt[i]),
- ODP_PROTO_L3_TYPE_IPV6);
- CU_ASSERT_EQUAL(odp_packet_l4_type(pkt[i]),
- ODP_PROTO_L4_TYPE_UDP);
+ CU_ASSERT(odp_packet_l2_type(pkt[i]) == ODP_PROTO_L2_TYPE_ETH);
+ CU_ASSERT(odp_packet_l3_type(pkt[i]) == ODP_PROTO_L3_TYPE_IPV6);
+ CU_ASSERT(odp_packet_l4_type(pkt[i]) == ODP_PROTO_L4_TYPE_UDP);
}
odp_packet_free_multi(pkt, num_pkt);
@@ -4130,12 +4215,9 @@ static void parse_eth_ipv6_ipsec_ah(void)
CU_ASSERT(!odp_packet_has_ipv4(pkt[i]));
CU_ASSERT(!odp_packet_has_tcp(pkt[i]));
CU_ASSERT(!odp_packet_has_udp(pkt[i]));
- CU_ASSERT_EQUAL(odp_packet_l2_type(pkt[i]),
- ODP_PROTO_L2_TYPE_ETH);
- CU_ASSERT_EQUAL(odp_packet_l3_type(pkt[i]),
- ODP_PROTO_L3_TYPE_IPV6);
- CU_ASSERT_EQUAL(odp_packet_l4_type(pkt[i]),
- ODP_PROTO_L4_TYPE_AH);
+ CU_ASSERT(odp_packet_l2_type(pkt[i]) == ODP_PROTO_L2_TYPE_ETH);
+ CU_ASSERT(odp_packet_l3_type(pkt[i]) == ODP_PROTO_L3_TYPE_IPV6);
+ CU_ASSERT(odp_packet_l4_type(pkt[i]) == ODP_PROTO_L4_TYPE_AH);
}
odp_packet_free_multi(pkt, num_pkt);
@@ -4493,6 +4575,7 @@ odp_testinfo_t packet_suite[] = {
ODP_TEST_INFO(packet_test_segments),
ODP_TEST_INFO(packet_test_length),
ODP_TEST_INFO(packet_test_reset),
+ ODP_TEST_INFO(packet_test_reset_meta),
ODP_TEST_INFO(packet_test_prefetch),
ODP_TEST_INFO(packet_test_headroom),
ODP_TEST_INFO(packet_test_tailroom),
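The new asserts above exercise the event user flag together with the event user area. Roughly, the round trip they rely on looks like the following sketch (function and pool names are illustrative; assumes <odp_api.h>, a successful ODP init, and that allocation succeeds, with error handling omitted):

static void user_flag_round_trip(void)
{
	odp_pool_param_t param;
	odp_pool_t pool;
	odp_packet_t pkt;
	odp_event_t ev;
	int flag = -1;

	odp_pool_param_init(&param);
	param.type = ODP_POOL_PACKET;
	param.pkt.num = 1;
	param.pkt.len = 64;
	param.pkt.uarea_size = 16;

	pool = odp_pool_create("uarea_flag_demo", &param);
	pkt = odp_packet_alloc(pool, 64);
	ev = odp_packet_to_event(pkt);

	/* A freshly allocated packet has its user flag cleared, and the event
	 * view returns the same user area pointer as the packet view */
	if (odp_event_user_area_and_flag(ev, &flag) == odp_packet_user_area(pkt) && flag == 0) {
		/* Setting the flag through the event is visible through the
		 * packet as well: odp_packet_user_flag(pkt) now returns > 0 */
		odp_event_user_flag_set(ev, 1);
	}

	odp_packet_free(pkt);
	odp_pool_destroy(pool);
}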
diff --git a/test/validation/api/pktio/lso.c b/test/validation/api/pktio/lso.c
index 832c08859..40d0917b4 100644
--- a/test/validation/api/pktio/lso.c
+++ b/test/validation/api/pktio/lso.c
@@ -1,7 +1,5 @@
-/* Copyright (c) 2020-2022, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2020-2022 Nokia
*/
#include <odp_api.h>
@@ -657,7 +655,7 @@ static void test_lso_request_clear(odp_lso_profile_t lso_profile, const uint8_t
static void lso_send_custom_eth(const uint8_t *test_packet, uint32_t pkt_len, uint32_t max_payload,
int use_opt)
{
- int i, ret, num;
+ int i, ret, num, num_rcv;
odp_lso_profile_param_t param;
odp_lso_profile_t profile;
uint32_t offset, len, payload_len, payload_sum;
@@ -698,6 +696,7 @@ static void lso_send_custom_eth(const uint8_t *test_packet, uint32_t pkt_len, ui
offset = hdr_len;
payload_sum = 0;
segnum = 0xffff;
+ num_rcv = 0;
for (i = 0; i < num; i++) {
odph_ethhdr_t *eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt_out[i], NULL);
@@ -712,7 +711,7 @@ static void lso_send_custom_eth(const uint8_t *test_packet, uint32_t pkt_len, ui
if (ret == 0) {
segnum = odp_be_to_cpu_16(segnum);
- CU_ASSERT(segnum == i);
+ CU_ASSERT(segnum == num_rcv);
} else {
CU_FAIL("Seg num field read failed\n");
}
@@ -729,6 +728,7 @@ static void lso_send_custom_eth(const uint8_t *test_packet, uint32_t pkt_len, ui
offset += payload_len;
payload_sum += payload_len;
+ num_rcv++;
}
ODPH_DBG(" Received payload length: %u bytes\n", payload_sum);
diff --git a/test/validation/api/pktio/lso.h b/test/validation/api/pktio/lso.h
index ce3dc7b64..d5688369f 100644
--- a/test/validation/api/pktio/lso.h
+++ b/test/validation/api/pktio/lso.h
@@ -1,7 +1,5 @@
-/* Copyright (c) 2020, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2020 Nokia
*/
#ifndef _ODP_TEST_PKTIO_LSO_H_
diff --git a/test/validation/api/pktio/parser.c b/test/validation/api/pktio/parser.c
index 7d243877c..a21a9c0f8 100644
--- a/test/validation/api/pktio/parser.c
+++ b/test/validation/api/pktio/parser.c
@@ -1,7 +1,5 @@
-/* Copyright (c) 2017-2018, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2017-2018 Linaro Limited
*/
#include <odp_api.h>
diff --git a/test/validation/api/pktio/parser.h b/test/validation/api/pktio/parser.h
index 4424737fd..8341d9a2c 100644
--- a/test/validation/api/pktio/parser.h
+++ b/test/validation/api/pktio/parser.h
@@ -1,7 +1,5 @@
-/* Copyright (c) 2017-2018, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2017-2018 Linaro Limited
*/
#ifndef _ODP_TEST_PARSER_H_
diff --git a/test/validation/api/pktio/pktio.c b/test/validation/api/pktio/pktio.c
index deef4895a..12e0a2cd9 100644
--- a/test/validation/api/pktio/pktio.c
+++ b/test/validation/api/pktio/pktio.c
@@ -1,9 +1,7 @@
-/* Copyright (c) 2014-2018, Linaro Limited
- * Copyright (c) 2020-2024, Nokia
- * Copyright (c) 2020, Marvell
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2014-2018 Linaro Limited
+ * Copyright (c) 2020-2024 Nokia
+ * Copyright (c) 2020 Marvell
*/
#include <odp_api.h>
@@ -1598,48 +1596,48 @@ static void test_defaults(uint8_t fill)
memset(&pktio_p, fill, sizeof(pktio_p));
odp_pktio_param_init(&pktio_p);
- CU_ASSERT_EQUAL(pktio_p.in_mode, ODP_PKTIN_MODE_DIRECT);
- CU_ASSERT_EQUAL(pktio_p.out_mode, ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT(pktio_p.in_mode == ODP_PKTIN_MODE_DIRECT);
+ CU_ASSERT(pktio_p.out_mode == ODP_PKTOUT_MODE_DIRECT);
memset(&qp_in, fill, sizeof(qp_in));
odp_pktin_queue_param_init(&qp_in);
- CU_ASSERT_EQUAL(qp_in.op_mode, ODP_PKTIO_OP_MT);
- CU_ASSERT_EQUAL(qp_in.classifier_enable, 0);
- CU_ASSERT_EQUAL(qp_in.hash_enable, 0);
- CU_ASSERT_EQUAL(qp_in.hash_proto.all_bits, 0);
- CU_ASSERT_EQUAL(qp_in.num_queues, 1);
- CU_ASSERT_EQUAL(qp_in.queue_size[0], 0);
- CU_ASSERT_EQUAL(qp_in.queue_param.enq_mode, ODP_QUEUE_OP_MT);
- CU_ASSERT_EQUAL(qp_in.queue_param.sched.prio, odp_schedule_default_prio());
- CU_ASSERT_EQUAL(qp_in.queue_param.sched.sync, ODP_SCHED_SYNC_PARALLEL);
- CU_ASSERT_EQUAL(qp_in.queue_param.sched.group, ODP_SCHED_GROUP_ALL);
- CU_ASSERT_EQUAL(qp_in.queue_param.sched.lock_count, 0);
- CU_ASSERT_EQUAL(qp_in.queue_param.order, ODP_QUEUE_ORDER_KEEP);
- CU_ASSERT_EQUAL(qp_in.queue_param.nonblocking, ODP_BLOCKING);
- CU_ASSERT_EQUAL(qp_in.queue_param.context, NULL);
- CU_ASSERT_EQUAL(qp_in.queue_param.context_len, 0);
- CU_ASSERT_EQUAL(qp_in.queue_param_ovr, NULL);
- CU_ASSERT_EQUAL(qp_in.vector.enable, false);
+ CU_ASSERT(qp_in.op_mode == ODP_PKTIO_OP_MT);
+ CU_ASSERT(qp_in.classifier_enable == 0);
+ CU_ASSERT(qp_in.hash_enable == 0);
+ CU_ASSERT(qp_in.hash_proto.all_bits == 0);
+ CU_ASSERT(qp_in.num_queues == 1);
+ CU_ASSERT(qp_in.queue_size[0] == 0);
+ CU_ASSERT(qp_in.queue_param.enq_mode == ODP_QUEUE_OP_MT);
+ CU_ASSERT(qp_in.queue_param.sched.prio == odp_schedule_default_prio());
+ CU_ASSERT(qp_in.queue_param.sched.sync == ODP_SCHED_SYNC_PARALLEL);
+ CU_ASSERT(qp_in.queue_param.sched.group == ODP_SCHED_GROUP_ALL);
+ CU_ASSERT(qp_in.queue_param.sched.lock_count == 0);
+ CU_ASSERT(qp_in.queue_param.order == ODP_QUEUE_ORDER_KEEP);
+ CU_ASSERT(qp_in.queue_param.nonblocking == ODP_BLOCKING);
+ CU_ASSERT(qp_in.queue_param.context == NULL);
+ CU_ASSERT(qp_in.queue_param.context_len == 0);
+ CU_ASSERT(qp_in.queue_param_ovr == NULL);
+ CU_ASSERT(qp_in.vector.enable == false);
memset(&qp_out, fill, sizeof(qp_out));
odp_pktout_queue_param_init(&qp_out);
- CU_ASSERT_EQUAL(qp_out.op_mode, ODP_PKTIO_OP_MT);
- CU_ASSERT_EQUAL(qp_out.num_queues, 1);
- CU_ASSERT_EQUAL(qp_out.queue_size[0], 0);
+ CU_ASSERT(qp_out.op_mode == ODP_PKTIO_OP_MT);
+ CU_ASSERT(qp_out.num_queues == 1);
+ CU_ASSERT(qp_out.queue_size[0] == 0);
memset(&pktio_conf, fill, sizeof(pktio_conf));
odp_pktio_config_init(&pktio_conf);
- CU_ASSERT_EQUAL(pktio_conf.pktin.all_bits, 0);
- CU_ASSERT_EQUAL(pktio_conf.pktout.all_bits, 0);
- CU_ASSERT_EQUAL(pktio_conf.parser.layer, ODP_PROTO_LAYER_ALL);
- CU_ASSERT_EQUAL(pktio_conf.enable_loop, false);
- CU_ASSERT_EQUAL(pktio_conf.inbound_ipsec, false);
- CU_ASSERT_EQUAL(pktio_conf.outbound_ipsec, false);
- CU_ASSERT_EQUAL(pktio_conf.enable_lso, false);
- CU_ASSERT_EQUAL(pktio_conf.reassembly.en_ipv4, false);
- CU_ASSERT_EQUAL(pktio_conf.reassembly.en_ipv6, false);
- CU_ASSERT_EQUAL(pktio_conf.reassembly.max_wait_time, 0);
- CU_ASSERT_EQUAL(pktio_conf.reassembly.max_num_frags, 2);
+ CU_ASSERT(pktio_conf.pktin.all_bits == 0);
+ CU_ASSERT(pktio_conf.pktout.all_bits == 0);
+ CU_ASSERT(pktio_conf.parser.layer == ODP_PROTO_LAYER_ALL);
+ CU_ASSERT(pktio_conf.enable_loop == false);
+ CU_ASSERT(pktio_conf.inbound_ipsec == false);
+ CU_ASSERT(pktio_conf.outbound_ipsec == false);
+ CU_ASSERT(pktio_conf.enable_lso == false);
+ CU_ASSERT(pktio_conf.reassembly.en_ipv4 == false);
+ CU_ASSERT(pktio_conf.reassembly.en_ipv6 == false);
+ CU_ASSERT(pktio_conf.reassembly.max_wait_time == 0);
+ CU_ASSERT(pktio_conf.reassembly.max_num_frags == 2);
}
static void pktio_test_default_values(void)
@@ -3661,7 +3659,8 @@ static void pktio_test_pktout_compl_event(bool use_plain_queue)
CU_ASSERT(odp_packet_tx_compl_user_ptr(tx_compl) ==
(const void *)&pkt_seq[i]);
- /* No user area or source pool for TX completion events */
+ /* No user area/flag or source pool for TX completion events */
+ odp_event_user_flag_set(ev, 1);
CU_ASSERT(odp_event_user_area(ev) == NULL);
CU_ASSERT(odp_event_user_area_and_flag(ev, &flag) == NULL);
CU_ASSERT(flag < 0);
@@ -3703,7 +3702,8 @@ static void pktio_test_pktout_compl_event(bool use_plain_queue)
}
}
- /* No user area or source pool for TX completion events */
+ /* No user area/flag or source pool for TX completion events */
+ odp_event_user_flag_set(ev, 1);
CU_ASSERT(odp_event_user_area(ev) == NULL);
CU_ASSERT(odp_event_user_area_and_flag(ev, &flag) == NULL);
CU_ASSERT(flag < 0);
@@ -5095,7 +5095,7 @@ static void pktio_test_pktin_event_queue(odp_pktin_mode_t pktin_mode)
odp_pktin_queue_param_t in_queue_param;
odp_pktout_queue_param_t out_queue_param;
odp_pktout_queue_t pktout_queue;
- odp_queue_t queue, from;
+ odp_queue_t queue, from = ODP_QUEUE_INVALID;
odp_pool_t buf_pool;
odp_pool_param_t pool_param;
odp_packet_t pkt_tbl[TX_BATCH_LEN];
@@ -5189,8 +5189,6 @@ static void pktio_test_pktin_event_queue(odp_pktin_mode_t pktin_mode)
if (ev == ODP_EVENT_INVALID)
break;
-
- CU_ASSERT(from == queue);
} else {
ev = odp_queue_deq(queue);
@@ -5214,9 +5212,12 @@ static void pktio_test_pktin_event_queue(odp_pktin_mode_t pktin_mode)
if (odp_event_type(ev) == ODP_EVENT_PACKET) {
pkt = odp_packet_from_event(ev);
- if (pktio_pkt_seq(pkt) != TEST_SEQ_INVALID)
+ if (pktio_pkt_seq(pkt) != TEST_SEQ_INVALID) {
num_pkt++;
+ if (pktin_mode == ODP_PKTIN_MODE_SCHED)
+ CU_ASSERT(from == queue);
+ }
} else if (odp_event_type(ev) == ODP_EVENT_BUFFER) {
num_buf++;
} else {
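The pktin event queue fix above initializes `from` and compares it against the expected queue only when the event actually arrived through the scheduler; in plain queue mode nothing ever writes `from`. A condensed sketch of the pattern, with names matching the test but otherwise illustrative:

static void poll_once(odp_pktin_mode_t pktin_mode, odp_queue_t queue)
{
	odp_queue_t from = ODP_QUEUE_INVALID;
	odp_event_t ev;

	if (pktin_mode == ODP_PKTIN_MODE_SCHED)
		ev = odp_schedule(&from, ODP_SCHED_NO_WAIT);
	else
		ev = odp_queue_deq(queue);

	if (ev == ODP_EVENT_INVALID)
		return;

	/* 'from' is meaningful only when the scheduler delivered the event */
	if (pktin_mode == ODP_PKTIN_MODE_SCHED)
		CU_ASSERT(from == queue);

	odp_event_free(ev);
}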
diff --git a/test/validation/api/pool/pool.c b/test/validation/api/pool/pool.c
index 86a47230a..95d9ef14e 100644
--- a/test/validation/api/pool/pool.c
+++ b/test/validation/api/pool/pool.c
@@ -1,9 +1,7 @@
-/* Copyright (c) 2014-2018, Linaro Limited
- * Copyright (c) 2020, Marvell
- * Copyright (c) 2020-2023, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2014-2018 Linaro Limited
+ * Copyright (c) 2020 Marvell
+ * Copyright (c) 2020-2023 Nokia
*/
#include <odp_api.h>
@@ -402,6 +400,35 @@ static void pool_test_lookup_info_print(void)
CU_ASSERT(odp_pool_destroy(pool) == 0);
}
+static void pool_test_long_name(void)
+{
+ odp_pool_t pool;
+ odp_pool_info_t info;
+ odp_pool_param_t param;
+ char name[ODP_POOL_NAME_LEN];
+
+ memset(name, 'a', sizeof(name));
+ name[sizeof(name) - 1] = 0;
+
+ memset(&info, 0, sizeof(info));
+ odp_pool_param_init(&param);
+
+ param.type = ODP_POOL_BUFFER;
+ param.buf.size = BUF_SIZE;
+ param.buf.num = BUF_NUM;
+ param.buf.uarea_size = 64;
+
+ pool = odp_pool_create(name, &param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ pool = odp_pool_lookup(name);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ CU_ASSERT_FATAL(odp_pool_info(pool, &info) == 0);
+ CU_ASSERT(strncmp(name, info.name, sizeof(name)) == 0);
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
static void pool_test_same_name(const odp_pool_param_t *param)
{
odp_pool_t pool, pool_a, pool_b;
@@ -1136,7 +1163,7 @@ static void pool_test_create_after_fork(void)
shm = odp_shm_reserve(NULL, sizeof(global_shared_mem_t), 0, 0);
CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
global_mem = odp_shm_addr(shm);
- CU_ASSERT_PTR_NOT_NULL_FATAL(global_mem);
+ CU_ASSERT_FATAL(global_mem != NULL);
num = odp_cpumask_default_worker(NULL, 0);
if (num > MAX_WORKERS)
@@ -1832,6 +1859,31 @@ static void test_packet_pool_ext_info(void)
CU_ASSERT(odp_pool_destroy(pool) == 0);
}
+static void test_packet_pool_ext_long_name(void)
+{
+ odp_pool_t pool;
+ odp_pool_ext_param_t param;
+ odp_pool_info_t info;
+ char name[ODP_POOL_NAME_LEN];
+
+ memset(name, 'a', sizeof(name));
+ name[sizeof(name) - 1] = 0;
+
+ pool_ext_init_packet_pool_param(&param);
+ pool = odp_pool_ext_create(name, &param);
+
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+ CU_ASSERT_FATAL(pool == odp_pool_lookup(name));
+
+ memset(&info, 0, sizeof(odp_pool_info_t));
+ CU_ASSERT_FATAL(odp_pool_info(pool, &info) == 0);
+
+ CU_ASSERT(info.pool_ext);
+ CU_ASSERT(strncmp(name, info.name, strlen(name)) == 0);
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
static odp_shm_t populate_pool(odp_pool_t pool, odp_pool_ext_capability_t *capa,
void *buf[], uint32_t num, uint32_t buf_size)
{
@@ -2303,6 +2355,7 @@ odp_testinfo_t pool_suite[] = {
ODP_TEST_INFO_CONDITIONAL(pool_test_vector_uarea_init, pool_check_vector_uarea_init),
ODP_TEST_INFO_CONDITIONAL(pool_test_timeout_uarea_init, pool_check_timeout_uarea_init),
ODP_TEST_INFO(pool_test_lookup_info_print),
+ ODP_TEST_INFO(pool_test_long_name),
ODP_TEST_INFO(pool_test_same_name_buf),
ODP_TEST_INFO(pool_test_same_name_pkt),
ODP_TEST_INFO(pool_test_same_name_tmo),
@@ -2347,6 +2400,7 @@ odp_testinfo_t pool_ext_suite[] = {
ODP_TEST_INFO_CONDITIONAL(test_packet_pool_ext_create, check_pool_ext_support),
ODP_TEST_INFO_CONDITIONAL(test_packet_pool_ext_lookup, check_pool_ext_support),
ODP_TEST_INFO_CONDITIONAL(test_packet_pool_ext_info, check_pool_ext_support),
+ ODP_TEST_INFO_CONDITIONAL(test_packet_pool_ext_long_name, check_pool_ext_support),
ODP_TEST_INFO_CONDITIONAL(test_packet_pool_ext_populate, check_pool_ext_support),
ODP_TEST_INFO_CONDITIONAL(test_packet_pool_ext_alloc, check_pool_ext_support),
ODP_TEST_INFO_CONDITIONAL(test_packet_pool_ext_uarea_init,
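pool_test_long_name() and test_packet_pool_ext_long_name() introduce an idiom that repeats later in this patch for queues, shared memory, stashes, schedule groups, timer pools and TM objects: fill a name buffer of exactly the API's maximum length, NUL-terminate the last byte, then verify that create and lookup agree on the handle. The shared part boils down to this sketch (needs <string.h>):

	char name[ODP_POOL_NAME_LEN];   /* or ODP_QUEUE_NAME_LEN, ODP_SHM_NAME_LEN, ... */

	memset(name, 'a', sizeof(name));
	name[sizeof(name) - 1] = 0;     /* longest name the API allows */
	/* create the object with 'name', then check that lookup returns the same handle */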
diff --git a/test/validation/api/queue/queue.c b/test/validation/api/queue/queue.c
index 4b5ccde65..992f4e4d3 100644
--- a/test/validation/api/queue/queue.c
+++ b/test/validation/api/queue/queue.c
@@ -1,8 +1,6 @@
-/* Copyright (c) 2014-2018, Linaro Limited
- * Copyright (c) 2021-2023, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2014-2018 Linaro Limited
+ * Copyright (c) 2021-2023 Nokia
*/
#include <odp_api.h>
@@ -843,6 +841,37 @@ static void queue_test_same_name_sched(void)
queue_test_same_name(1);
}
+static void queue_test_long_name(int sched)
+{
+ odp_queue_t queue;
+ odp_queue_param_t param;
+ char name[ODP_QUEUE_NAME_LEN];
+
+ memset(name, 'a', sizeof(name));
+ name[sizeof(name) - 1] = 0;
+
+ odp_queue_param_init(&param);
+
+ if (sched)
+ param.type = ODP_QUEUE_TYPE_SCHED;
+
+ queue = odp_queue_create(name, &param);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+ CU_ASSERT(queue == odp_queue_lookup(name));
+
+ CU_ASSERT_FATAL(odp_queue_destroy(queue) == 0);
+}
+
+static void queue_test_long_name_plain(void)
+{
+ queue_test_long_name(0);
+}
+
+static void queue_test_long_name_sched(void)
+{
+ queue_test_long_name(1);
+}
+
static void queue_test_info(void)
{
odp_queue_t q_plain, q_order;
@@ -1148,6 +1177,8 @@ odp_testinfo_t queue_suite[] = {
ODP_TEST_INFO(queue_test_param),
ODP_TEST_INFO(queue_test_same_name_plain),
ODP_TEST_INFO(queue_test_same_name_sched),
+ ODP_TEST_INFO(queue_test_long_name_plain),
+ ODP_TEST_INFO(queue_test_long_name_sched),
ODP_TEST_INFO(queue_test_info),
ODP_TEST_INFO(queue_test_mt_plain_block),
ODP_TEST_INFO(queue_test_mt_plain_nonblock_lf),
diff --git a/test/validation/api/random/random.c b/test/validation/api/random/random.c
index 551fe775d..6c32cb0f7 100644
--- a/test/validation/api/random/random.c
+++ b/test/validation/api/random/random.c
@@ -1,8 +1,6 @@
-/* Copyright (c) 2015-2018, Linaro Limited
- * Copyright (c) 2021-2022, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Linaro Limited
+ * Copyright (c) 2021-2022 Nokia
*/
#include <odp_api.h>
diff --git a/test/validation/api/scheduler/scheduler.c b/test/validation/api/scheduler/scheduler.c
index 8dddd8d8f..9ecb88a5d 100644
--- a/test/validation/api/scheduler/scheduler.c
+++ b/test/validation/api/scheduler/scheduler.c
@@ -1,8 +1,6 @@
-/* Copyright (c) 2014-2018, Linaro Limited
- * Copyright (c) 2019-2024, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2014-2018 Linaro Limited
+ * Copyright (c) 2019-2024 Nokia
*/
#include <odp_api.h>
@@ -987,6 +985,26 @@ static void scheduler_test_create_group(void)
CU_ASSERT_FATAL(odp_schedule(NULL, wait_time) == ODP_EVENT_INVALID);
}
+static void scheduler_test_group_long_name(void)
+{
+ odp_thrmask_t mask;
+ odp_schedule_group_t group;
+ int thr_id;
+ char name[ODP_SCHED_GROUP_NAME_LEN];
+
+ memset(name, 'a', sizeof(name));
+ name[sizeof(name) - 1] = 0;
+
+ thr_id = odp_thread_id();
+ odp_thrmask_zero(&mask);
+ odp_thrmask_set(&mask, thr_id);
+
+ group = odp_schedule_group_create(name, &mask);
+ CU_ASSERT_FATAL(group != ODP_SCHED_GROUP_INVALID);
+ CU_ASSERT(group == odp_schedule_group_lookup(name));
+ CU_ASSERT_FATAL(odp_schedule_group_destroy(group) == 0);
+}
+
static void scheduler_test_create_max_groups(void)
{
odp_thrmask_t mask;
@@ -1367,12 +1385,12 @@ static void chaos_run(unsigned int qtype)
shm = odp_shm_lookup(GLOBALS_SHM_NAME);
CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
globals = odp_shm_addr(shm);
- CU_ASSERT_PTR_NOT_NULL_FATAL(globals);
+ CU_ASSERT_FATAL(globals != NULL);
shm = odp_shm_lookup(SHM_THR_ARGS_NAME);
CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
args = odp_shm_addr(shm);
- CU_ASSERT_PTR_NOT_NULL_FATAL(args);
+ CU_ASSERT_FATAL(args != NULL);
args->globals = globals;
@@ -1798,7 +1816,7 @@ static void schedule_common(odp_schedule_sync_t sync, int num_queues,
shm = odp_shm_lookup(GLOBALS_SHM_NAME);
CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
globals = odp_shm_addr(shm);
- CU_ASSERT_PTR_NOT_NULL_FATAL(globals);
+ CU_ASSERT_FATAL(globals != NULL);
memset(&args, 0, sizeof(thread_args_t));
args.globals = globals;
@@ -1829,12 +1847,12 @@ static void parallel_execute(odp_schedule_sync_t sync, int num_queues,
shm = odp_shm_lookup(GLOBALS_SHM_NAME);
CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
globals = odp_shm_addr(shm);
- CU_ASSERT_PTR_NOT_NULL_FATAL(globals);
+ CU_ASSERT_FATAL(globals != NULL);
shm = odp_shm_lookup(SHM_THR_ARGS_NAME);
CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
args = odp_shm_addr(shm);
- CU_ASSERT_PTR_NOT_NULL_FATAL(args);
+ CU_ASSERT_FATAL(args != NULL);
args->globals = globals;
args->sync = sync;
@@ -2583,12 +2601,12 @@ static void scheduler_test_sched_and_plain(odp_schedule_sync_t sync)
shm = odp_shm_lookup(GLOBALS_SHM_NAME);
CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
globals = odp_shm_addr(shm);
- CU_ASSERT_PTR_NOT_NULL_FATAL(globals);
+ CU_ASSERT_FATAL(globals != NULL);
shm = odp_shm_lookup(SHM_THR_ARGS_NAME);
CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
args = odp_shm_addr(shm);
- CU_ASSERT_PTR_NOT_NULL_FATAL(args);
+ CU_ASSERT_FATAL(args != NULL);
args->globals = globals;
/* Make sure all events fit to queues */
@@ -3183,7 +3201,7 @@ static int create_queues(test_globals_t *globals)
for (j = 0; j < queues_per_prio; j++) {
/* Per sched sync type */
- char name[32];
+ char name[ODP_QUEUE_NAME_LEN];
odp_queue_t q, pq;
snprintf(name, sizeof(name), "sched_%d_%d_n", i, j);
@@ -3674,6 +3692,7 @@ odp_testinfo_t scheduler_basic_suite[] = {
ODP_TEST_INFO(scheduler_test_order_ignore),
ODP_TEST_INFO(scheduler_test_group_info_predef),
ODP_TEST_INFO(scheduler_test_create_group),
+ ODP_TEST_INFO(scheduler_test_group_long_name),
ODP_TEST_INFO(scheduler_test_create_max_groups),
ODP_TEST_INFO(scheduler_test_groups),
ODP_TEST_INFO(scheduler_test_pause_resume),
diff --git a/test/validation/api/scheduler/scheduler_no_predef_groups.c b/test/validation/api/scheduler/scheduler_no_predef_groups.c
index ad6f6d3a2..d2ea48eb6 100644
--- a/test/validation/api/scheduler/scheduler_no_predef_groups.c
+++ b/test/validation/api/scheduler/scheduler_no_predef_groups.c
@@ -1,8 +1,6 @@
-/* Copyright (c) 2013-2018, Linaro Limited
- * Copyright (c) 2019-2021, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2013-2018 Linaro Limited
+ * Copyright (c) 2019-2021 Nokia
*/
#include <odp_api.h>
diff --git a/test/validation/api/shmem/shmem.c b/test/validation/api/shmem/shmem.c
index 9e91dab35..51a6b316e 100644
--- a/test/validation/api/shmem/shmem.c
+++ b/test/validation/api/shmem/shmem.c
@@ -1,8 +1,6 @@
-/* Copyright (c) 2014-2018, Linaro Limited
- * Copyright (c) 2019-2021, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2014-2018 Linaro Limited
+ * Copyright (c) 2019-2021 Nokia
*/
#include <odp_api.h>
@@ -261,6 +259,20 @@ static void shmem_test_reserve(void)
CU_ASSERT(odp_shm_free(shm) == 0);
}
+static void shmem_test_reserve_long_name(void)
+{
+ odp_shm_t shm;
+ char name[ODP_SHM_NAME_LEN];
+
+ memset(name, 'a', sizeof(name));
+ name[sizeof(name) - 1] = 0;
+
+ shm = odp_shm_reserve(name, MEDIUM_MEM, ALIGN_SIZE, 0);
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+ CU_ASSERT_FATAL(shm == odp_shm_lookup(name));
+ CU_ASSERT(odp_shm_free(shm) == 0);
+}
+
static void shmem_test_info(void)
{
odp_shm_t shm;
@@ -610,7 +622,7 @@ static int run_test_reserve_after_fork(void *arg ODP_UNUSED)
CU_ASSERT(ODP_SHM_INVALID != shm);
glob_data->shm[thr_index] = shm;
pattern_small = odp_shm_addr(shm);
- CU_ASSERT_PTR_NOT_NULL(pattern_small);
+ CU_ASSERT(pattern_small != NULL);
for (i = 0; i < SMALL_MEM; i++)
pattern_small->data[i] = i;
break;
@@ -620,7 +632,7 @@ static int run_test_reserve_after_fork(void *arg ODP_UNUSED)
CU_ASSERT(ODP_SHM_INVALID != shm);
glob_data->shm[thr_index] = shm;
pattern_medium = odp_shm_addr(shm);
- CU_ASSERT_PTR_NOT_NULL(pattern_medium);
+ CU_ASSERT(pattern_medium != NULL);
for (i = 0; i < MEDIUM_MEM; i++)
pattern_medium->data[i] = (i << 2);
break;
@@ -630,7 +642,7 @@ static int run_test_reserve_after_fork(void *arg ODP_UNUSED)
CU_ASSERT(ODP_SHM_INVALID != shm);
glob_data->shm[thr_index] = shm;
pattern_big = odp_shm_addr(shm);
- CU_ASSERT_PTR_NOT_NULL(pattern_big);
+ CU_ASSERT(pattern_big != NULL);
for (i = 0; i < BIG_MEM; i++)
pattern_big->data[i] = (i >> 2);
break;
@@ -663,7 +675,7 @@ static void shmem_test_reserve_after_fork(void)
shm = odp_shm_reserve(MEM_NAME, sizeof(shared_test_data_t), 0, 0);
CU_ASSERT(ODP_SHM_INVALID != shm);
glob_data = odp_shm_addr(shm);
- CU_ASSERT_PTR_NOT_NULL(glob_data);
+ CU_ASSERT(glob_data != NULL);
num = odp_cpumask_default_worker(NULL, 0);
if (num > MAX_WORKERS)
@@ -690,21 +702,21 @@ static void shmem_test_reserve_after_fork(void)
case 0:
pattern_small =
odp_shm_addr(glob_data->shm[thr_index]);
- CU_ASSERT_PTR_NOT_NULL(pattern_small);
+ CU_ASSERT(pattern_small != NULL);
for (i = 0; i < SMALL_MEM; i++)
CU_ASSERT(pattern_small->data[i] == i);
break;
case 1:
pattern_medium =
odp_shm_addr(glob_data->shm[thr_index]);
- CU_ASSERT_PTR_NOT_NULL(pattern_medium);
+ CU_ASSERT(pattern_medium != NULL);
for (i = 0; i < MEDIUM_MEM; i++)
CU_ASSERT(pattern_medium->data[i] == (i << 2));
break;
case 2:
pattern_big =
odp_shm_addr(glob_data->shm[thr_index]);
- CU_ASSERT_PTR_NOT_NULL(pattern_big);
+ CU_ASSERT(pattern_big != NULL);
for (i = 0; i < BIG_MEM; i++)
CU_ASSERT(pattern_big->data[i] == (i >> 2));
break;
@@ -774,7 +786,7 @@ static int run_test_singleva_after_fork(void *arg ODP_UNUSED)
CU_ASSERT_FATAL(ODP_SHM_INVALID != shm);
glob_data->shm[thr_index] = shm;
pattern_small = odp_shm_addr(shm);
- CU_ASSERT_PTR_NOT_NULL(pattern_small);
+ CU_ASSERT(pattern_small != NULL);
glob_data->address[thr_index] = (void *)pattern_small;
for (i = 0; i < SMALL_MEM; i++)
pattern_small->data[i] = i;
@@ -786,7 +798,7 @@ static int run_test_singleva_after_fork(void *arg ODP_UNUSED)
CU_ASSERT_FATAL(ODP_SHM_INVALID != shm);
glob_data->shm[thr_index] = shm;
pattern_medium = odp_shm_addr(shm);
- CU_ASSERT_PTR_NOT_NULL(pattern_medium);
+ CU_ASSERT(pattern_medium != NULL);
glob_data->address[thr_index] = (void *)pattern_medium;
for (i = 0; i < MEDIUM_MEM; i++)
pattern_medium->data[i] = (i << 2);
@@ -798,7 +810,7 @@ static int run_test_singleva_after_fork(void *arg ODP_UNUSED)
CU_ASSERT_FATAL(ODP_SHM_INVALID != shm);
glob_data->shm[thr_index] = shm;
pattern_big = odp_shm_addr(shm);
- CU_ASSERT_PTR_NOT_NULL(pattern_big);
+ CU_ASSERT(pattern_big != NULL);
glob_data->address[thr_index] = (void *)pattern_big;
for (i = 0; i < BIG_MEM; i++)
pattern_big->data[i] = (i >> 2);
@@ -855,7 +867,7 @@ static void shmem_test_singleva_after_fork(void)
0, 0);
CU_ASSERT(ODP_SHM_INVALID != shm);
glob_data = odp_shm_addr(shm);
- CU_ASSERT_PTR_NOT_NULL(glob_data);
+ CU_ASSERT(glob_data != NULL);
num = odp_cpumask_default_worker(NULL, 3);
if (num > MAX_WORKERS)
@@ -885,21 +897,21 @@ static void shmem_test_singleva_after_fork(void)
case 0:
pattern_small =
odp_shm_addr(glob_data->shm[thr_index]);
- CU_ASSERT_PTR_NOT_NULL_FATAL(pattern_small);
+ CU_ASSERT_FATAL(pattern_small != NULL);
for (i = 0; i < SMALL_MEM; i++)
CU_ASSERT(pattern_small->data[i] == i);
break;
case 1:
pattern_medium =
odp_shm_addr(glob_data->shm[thr_index]);
- CU_ASSERT_PTR_NOT_NULL_FATAL(pattern_medium);
+ CU_ASSERT_FATAL(pattern_medium != NULL);
for (i = 0; i < MEDIUM_MEM; i++)
CU_ASSERT(pattern_medium->data[i] == (i << 2));
break;
case 2:
pattern_big =
odp_shm_addr(glob_data->shm[thr_index]);
- CU_ASSERT_PTR_NOT_NULL_FATAL(pattern_big);
+ CU_ASSERT_FATAL(pattern_big != NULL);
for (i = 0; i < BIG_MEM; i++)
CU_ASSERT(pattern_big->data[i] == (i >> 2));
break;
@@ -950,7 +962,7 @@ static int run_test_stress(void *arg ODP_UNUSED)
shm = odp_shm_lookup(MEM_NAME);
glob_data = odp_shm_addr(shm);
- CU_ASSERT_PTR_NOT_NULL(glob_data);
+ CU_ASSERT(glob_data != NULL);
/* wait for general GO! */
odp_barrier_wait(&glob_data->test_barrier1);
@@ -1000,7 +1012,7 @@ static int run_test_stress(void *arg ODP_UNUSED)
}
address = odp_shm_addr(shm);
- CU_ASSERT_PTR_NOT_NULL(address);
+ CU_ASSERT(address != NULL);
glob_data->stress[index].address = address;
glob_data->stress[index].flags = flags;
glob_data->stress[index].size = size;
@@ -1035,7 +1047,7 @@ static int run_test_stress(void *arg ODP_UNUSED)
!= 0);
address = odp_shm_addr(shm);
- CU_ASSERT_PTR_NOT_NULL(address);
+ CU_ASSERT(address != NULL);
align = glob_data->stress[index].align;
if (align) {
@@ -1091,7 +1103,7 @@ static void shmem_test_stress(void)
0, 0);
CU_ASSERT(ODP_SHM_INVALID != globshm);
glob_data = odp_shm_addr(globshm);
- CU_ASSERT_PTR_NOT_NULL(glob_data);
+ CU_ASSERT(glob_data != NULL);
num = odp_cpumask_default_worker(NULL, 0);
if (num > MAX_WORKERS)
@@ -1139,6 +1151,7 @@ static int shm_suite_init(void)
odp_testinfo_t shmem_suite[] = {
ODP_TEST_INFO(shmem_test_capability),
ODP_TEST_INFO(shmem_test_reserve),
+ ODP_TEST_INFO(shmem_test_reserve_long_name),
ODP_TEST_INFO(shmem_test_info),
ODP_TEST_INFO_CONDITIONAL(shmem_test_flag_hp, shmem_check_flag_hp),
ODP_TEST_INFO_CONDITIONAL(shmem_test_flag_no_hp, shmem_check_flag_no_hp),
diff --git a/test/validation/api/stash/stash.c b/test/validation/api/stash/stash.c
index 162697ba9..80b09fd93 100644
--- a/test/validation/api/stash/stash.c
+++ b/test/validation/api/stash/stash.c
@@ -1,7 +1,5 @@
-/* Copyright (c) 2020-2023, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2020-2023 Nokia
*/
#include <odp_api.h>
@@ -237,6 +235,30 @@ static void stash_create_u32(void)
CU_ASSERT_FATAL(odp_stash_destroy(stash) == 0);
}
+static void stash_create_u32_long_name(void)
+{
+ odp_stash_t stash;
+ odp_stash_param_t param;
+ uint32_t num = global.num_default.u32;
+ char name[ODP_STASH_NAME_LEN];
+
+ memset(name, 'a', sizeof(name));
+ name[sizeof(name) - 1] = 0;
+
+ odp_stash_param_init(&param);
+ param.num_obj = num;
+ param.obj_size = sizeof(uint32_t);
+
+ stash = odp_stash_create(name, &param);
+
+ CU_ASSERT_FATAL(stash != ODP_STASH_INVALID);
+
+ printf("\n Stash handle: 0x%" PRIx64 "\n", odp_stash_to_u64(stash));
+
+ CU_ASSERT(stash == odp_stash_lookup(name));
+ CU_ASSERT_FATAL(odp_stash_destroy(stash) == 0);
+}
+
static void stash_create_u64_all(void)
{
odp_stash_param_t param;
@@ -1309,6 +1331,7 @@ odp_testinfo_t stash_suite[] = {
ODP_TEST_INFO(stash_param_defaults),
ODP_TEST_INFO_CONDITIONAL(stash_create_u64, check_support_64),
ODP_TEST_INFO(stash_create_u32),
+ ODP_TEST_INFO(stash_create_u32_long_name),
ODP_TEST_INFO_CONDITIONAL(stash_default_put_u64_1, check_support_64),
ODP_TEST_INFO_CONDITIONAL(stash_default_put_u64_n, check_support_64),
ODP_TEST_INFO_CONDITIONAL(stash_default_u64_put_u64_1, check_support_64),
diff --git a/test/validation/api/std/std.c b/test/validation/api/std/std.c
index 161ee87cf..85a8ec0a5 100644
--- a/test/validation/api/std/std.c
+++ b/test/validation/api/std/std.c
@@ -1,7 +1,5 @@
-/* Copyright (c) 2015-2018, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Linaro Limited
*/
#include <odp_api.h>
diff --git a/test/validation/api/system/system.c b/test/validation/api/system/system.c
index 3f7e0497d..63a0a7e2a 100644
--- a/test/validation/api/system/system.c
+++ b/test/validation/api/system/system.c
@@ -1,7 +1,5 @@
-/* Copyright (c) 2015-2018, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Linaro Limited
*/
#include <ctype.h>
@@ -27,8 +25,8 @@ static void test_version_api_str(void)
char version_string[128];
char *s = version_string;
- strncpy(version_string, odp_version_api_str(),
- sizeof(version_string) - 1);
+ odph_strcpy(version_string, odp_version_api_str(),
+ sizeof(version_string));
while (*s) {
if (isdigit((int)*s) || (strncmp(s, ".", 1) == 0)) {
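odph_strcpy() is used here because, unlike strncpy(), it guarantees a NUL-terminated result (it behaves essentially like BSD strlcpy(), copying at most size - 1 characters), so the character loop that follows cannot run off the end of version_string.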
@@ -77,186 +75,6 @@ static void test_version_macro(void)
ODP_VERSION_API);
}
-static void system_test_odp_cpu_count(void)
-{
- int cpus;
-
- cpus = odp_cpu_count();
- CU_ASSERT(0 < cpus);
-}
-
-static void system_test_cpu_cycles(void)
-{
- uint64_t c2, c1, diff, max;
-
- c1 = odp_cpu_cycles();
- odp_time_wait_ns(WAIT_TIME);
- c2 = odp_cpu_cycles();
-
- CU_ASSERT(c2 != c1);
-
- max = odp_cpu_cycles_max();
-
- /* With 10 usec delay, diff should be small compared to the maximum.
- * Otherwise, counter is going backwards. */
- if (c2 > c1) {
- diff = c2 - c1;
- CU_ASSERT(diff < (max - diff));
- }
-
- /* Same applies also when there was a wrap. */
- if (c2 < c1) {
- diff = max - c1 + c2;
- CU_ASSERT(diff < (max - diff));
- }
-}
-
-static void system_test_cpu_cycles_max(void)
-{
- uint64_t c2, c1;
- uint64_t max1, max2;
-
- max1 = odp_cpu_cycles_max();
- odp_time_wait_ns(WAIT_TIME);
- max2 = odp_cpu_cycles_max();
-
- CU_ASSERT(max1 >= UINT32_MAX / 2);
- CU_ASSERT(max1 == max2);
-
- c1 = odp_cpu_cycles();
- odp_time_wait_ns(WAIT_TIME);
- c2 = odp_cpu_cycles();
-
- CU_ASSERT(c1 <= max1 && c2 <= max1);
-}
-
-static void system_test_cpu_cycles_resolution(void)
-{
- int i;
- uint64_t res;
- uint64_t c2, c1, max;
- uint64_t test_cycles = odp_cpu_hz() / 100; /* CPU cycles in 10 msec */
-
- max = odp_cpu_cycles_max();
-
- res = odp_cpu_cycles_resolution();
- CU_ASSERT(res != 0);
- CU_ASSERT(res < max / 1024);
-
- for (i = 0; i < RES_TRY_NUM; i++) {
- c1 = odp_cpu_cycles();
- odp_time_wait_ns(10 * ODP_TIME_MSEC_IN_NS + i);
- c2 = odp_cpu_cycles();
-
- /* Diff may be zero with low resolution */
- if (test_cycles && test_cycles > res) {
- uint64_t diff = odp_cpu_cycles_diff(c2, c1);
-
- CU_ASSERT(diff >= res);
- }
-
- }
-}
-
-static void system_test_cpu_cycles_diff(void)
-{
- uint64_t c2, c1, max;
- uint64_t tmp, diff, res;
-
- res = odp_cpu_cycles_resolution();
- max = odp_cpu_cycles_max();
-
- c1 = res;
- c2 = 2 * res;
- diff = odp_cpu_cycles_diff(c2, c1);
- CU_ASSERT(diff == res);
-
- c1 = odp_cpu_cycles();
- odp_time_wait_ns(WAIT_TIME);
- c2 = odp_cpu_cycles();
- diff = odp_cpu_cycles_diff(c2, c1);
- CU_ASSERT(diff > 0);
- CU_ASSERT(diff < (max - diff));
-
- /* check resolution for wrap */
- c1 = max - 2 * res;
- do
- c2 = odp_cpu_cycles();
- while (c1 < c2);
-
- diff = odp_cpu_cycles_diff(c1, c1);
- CU_ASSERT(diff == 0);
-
- /* wrap */
- tmp = c2 + (max - c1) + res;
- diff = odp_cpu_cycles_diff(c2, c1);
- CU_ASSERT(diff == tmp);
-
- /* no wrap, revert args */
- tmp = c1 - c2;
- diff = odp_cpu_cycles_diff(c1, c2);
- CU_ASSERT(diff == tmp);
-}
-
-static void system_test_cpu_cycles_long_period(void)
-{
- int i;
- int periods = PERIODS_100_MSEC;
- uint64_t max_period_duration = 100 * ODP_TIME_MSEC_IN_NS + periods - 1;
- uint64_t c2, c1, c3, max;
- uint64_t tmp, diff, res;
-
- res = odp_cpu_cycles_resolution();
- max = odp_cpu_cycles_max();
-
- c3 = odp_cpu_cycles();
-
- CU_ASSERT(c3 <= max);
- /*
- * If the cycle counter is not close to wrapping around during
- * the test, then speed up the test by not trying to see the wrap
- * around too hard. Assume cycle counter frequency of less than 10 GHz.
- */
- CU_ASSERT(odp_cpu_hz_max() < 10ULL * ODP_TIME_SEC_IN_NS);
- if (max - c3 > 10 * periods * max_period_duration)
- periods = 10;
-
- printf("\n Testing CPU cycles for %i seconds... ", periods / 10);
-
- for (i = 0; i < periods; i++) {
- c1 = odp_cpu_cycles();
- odp_time_wait_ns(100 * ODP_TIME_MSEC_IN_NS + i);
- c2 = odp_cpu_cycles();
-
- CU_ASSERT(c2 != c1);
- CU_ASSERT(c1 <= max && c2 <= max);
-
- if (c2 > c1)
- tmp = c2 - c1;
- else
- tmp = c2 + (max - c1) + res;
-
- diff = odp_cpu_cycles_diff(c2, c1);
- CU_ASSERT(diff == tmp);
-
- /* wrap is detected and verified */
- if (c2 < c1)
- break;
- }
-
- /* wrap was detected, no need to continue */
- if (i < periods) {
- printf("wrap was detected.\n");
- return;
- }
-
- /* wrap has to be detected if possible */
- CU_ASSERT(max > UINT32_MAX);
- CU_ASSERT((max - c3) > UINT32_MAX);
-
- printf("wrap was not detected.\n");
-}
-
static void system_test_odp_sys_cache_line_size(void)
{
uint64_t cache_size;
@@ -277,32 +95,6 @@ static void system_test_odp_sys_cache_line_size(void)
2 * ODP_CACHE_LINE_SIZE);
}
-static void system_test_odp_cpu_model_str(void)
-{
- char model[128];
-
- snprintf(model, 128, "%s", odp_cpu_model_str());
- CU_ASSERT(strlen(model) > 0);
- CU_ASSERT(strlen(model) < 127);
-}
-
-static void system_test_odp_cpu_model_str_id(void)
-{
- char model[128];
- odp_cpumask_t mask;
- int i, num, cpu;
-
- num = odp_cpumask_all_available(&mask);
- cpu = odp_cpumask_first(&mask);
-
- for (i = 0; i < num; i++) {
- snprintf(model, 128, "%s", odp_cpu_model_str_id(cpu));
- CU_ASSERT(strlen(model) > 0);
- CU_ASSERT(strlen(model) < 127);
- cpu = odp_cpumask_next(&mask, cpu);
- }
-}
-
static void system_test_odp_sys_page_size(void)
{
uint64_t page;
@@ -343,135 +135,6 @@ static void system_test_odp_sys_huge_page_size_all(void)
}
}
-static int system_check_cycle_counter(void)
-{
- if (odp_cpu_cycles_max() == 0) {
- printf("Cycle counter is not supported, skipping test\n");
- return ODP_TEST_INACTIVE;
- }
-
- return ODP_TEST_ACTIVE;
-}
-
-static int system_check_odp_cpu_hz(void)
-{
- if (odp_cpu_hz() == 0) {
- printf("odp_cpu_hz() is not supported, skipping test\n");
- return ODP_TEST_INACTIVE;
- }
-
- return ODP_TEST_ACTIVE;
-}
-
-static void system_test_odp_cpu_hz(void)
-{
- uint64_t hz = odp_cpu_hz();
-
- /* Test value sanity: less than 10GHz */
- CU_ASSERT(hz < 10 * GIGA_HZ);
-
- /* larger than 1kHz */
- CU_ASSERT(hz > 1 * KILO_HZ);
-}
-
-static int system_check_odp_cpu_hz_id(void)
-{
- uint64_t hz;
- odp_cpumask_t mask;
- int i, num, cpu;
-
- num = odp_cpumask_all_available(&mask);
- cpu = odp_cpumask_first(&mask);
-
- for (i = 0; i < num; i++) {
- hz = odp_cpu_hz_id(cpu);
- if (hz == 0) {
- printf("odp_cpu_hz_id() is not supported by CPU %d, skipping test\n", cpu);
- return ODP_TEST_INACTIVE;
- }
- cpu = odp_cpumask_next(&mask, cpu);
- }
-
- return ODP_TEST_ACTIVE;
-}
-
-static void system_test_odp_cpu_hz_id(void)
-{
- uint64_t hz;
- odp_cpumask_t mask;
- int i, num, cpu;
-
- num = odp_cpumask_all_available(&mask);
- cpu = odp_cpumask_first(&mask);
-
- for (i = 0; i < num; i++) {
- hz = odp_cpu_hz_id(cpu);
- /* Test value sanity: less than 10GHz */
- CU_ASSERT(hz < 10 * GIGA_HZ);
- /* larger than 1kHz */
- CU_ASSERT(hz > 1 * KILO_HZ);
- cpu = odp_cpumask_next(&mask, cpu);
- }
-}
-
-static int system_check_odp_cpu_hz_max(void)
-{
- if (odp_cpu_hz_max() == 0) {
- printf("odp_cpu_hz_max() is not supported, skipping test\n");
- return ODP_TEST_INACTIVE;
- }
- return ODP_TEST_ACTIVE;
-}
-
-static void system_test_odp_cpu_hz_max(void)
-{
- uint64_t hz = odp_cpu_hz_max();
-
- /* Sanity check value */
- CU_ASSERT(hz > 1 * KILO_HZ);
- CU_ASSERT(hz < 20 * GIGA_HZ);
-}
-
-static int system_check_odp_cpu_hz_max_id(void)
-{
- uint64_t hz;
- odp_cpumask_t mask;
- int i, num, cpu;
-
- num = odp_cpumask_all_available(&mask);
- cpu = odp_cpumask_first(&mask);
-
- for (i = 0; i < num; i++) {
- hz = odp_cpu_hz_max_id(cpu);
- if (hz == 0) {
- printf("odp_cpu_hz_max_id() is not supported by CPU %d, skipping test\n",
- cpu);
- return ODP_TEST_INACTIVE;
- }
- cpu = odp_cpumask_next(&mask, cpu);
- }
-
- return ODP_TEST_ACTIVE;
-}
-
-static void system_test_odp_cpu_hz_max_id(void)
-{
- uint64_t hz;
- odp_cpumask_t mask;
- int i, num, cpu;
-
- num = odp_cpumask_all_available(&mask);
- cpu = odp_cpumask_first(&mask);
-
- for (i = 0; i < num; i++) {
- hz = odp_cpu_hz_max_id(cpu);
- /* Sanity check value */
- CU_ASSERT(hz > 1 * KILO_HZ);
- CU_ASSERT(hz < 20 * GIGA_HZ);
- cpu = odp_cpumask_next(&mask, cpu);
- }
-}
-
static void system_test_info_print(void)
{
printf("\n\nCalling system info print...\n");
@@ -645,31 +308,10 @@ odp_testinfo_t system_suite[] = {
ODP_TEST_INFO(test_version_api_str),
ODP_TEST_INFO(test_version_str),
ODP_TEST_INFO(test_version_macro),
- ODP_TEST_INFO(system_test_odp_cpu_count),
ODP_TEST_INFO(system_test_odp_sys_cache_line_size),
- ODP_TEST_INFO(system_test_odp_cpu_model_str),
- ODP_TEST_INFO(system_test_odp_cpu_model_str_id),
ODP_TEST_INFO(system_test_odp_sys_page_size),
ODP_TEST_INFO(system_test_odp_sys_huge_page_size),
ODP_TEST_INFO(system_test_odp_sys_huge_page_size_all),
- ODP_TEST_INFO_CONDITIONAL(system_test_odp_cpu_hz,
- system_check_odp_cpu_hz),
- ODP_TEST_INFO_CONDITIONAL(system_test_odp_cpu_hz_id,
- system_check_odp_cpu_hz_id),
- ODP_TEST_INFO_CONDITIONAL(system_test_odp_cpu_hz_max,
- system_check_odp_cpu_hz_max),
- ODP_TEST_INFO_CONDITIONAL(system_test_odp_cpu_hz_max_id,
- system_check_odp_cpu_hz_max_id),
- ODP_TEST_INFO_CONDITIONAL(system_test_cpu_cycles,
- system_check_cycle_counter),
- ODP_TEST_INFO_CONDITIONAL(system_test_cpu_cycles_max,
- system_check_cycle_counter),
- ODP_TEST_INFO_CONDITIONAL(system_test_cpu_cycles_resolution,
- system_check_cycle_counter),
- ODP_TEST_INFO_CONDITIONAL(system_test_cpu_cycles_diff,
- system_check_cycle_counter),
- ODP_TEST_INFO_CONDITIONAL(system_test_cpu_cycles_long_period,
- system_check_cycle_counter),
ODP_TEST_INFO(system_test_info),
ODP_TEST_INFO(system_test_meminfo),
ODP_TEST_INFO(system_test_info_print),
diff --git a/test/validation/api/thread/thread.c b/test/validation/api/thread/thread.c
index ad9ffa745..778e51b07 100644
--- a/test/validation/api/thread/thread.c
+++ b/test/validation/api/thread/thread.c
@@ -1,8 +1,6 @@
-/* Copyright (c) 2015-2018, Linaro Limited
- * Copyright (c) 2022, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Linaro Limited
+ * Copyright (c) 2022 Nokia
*/
#include <odp_api.h>
@@ -83,11 +81,6 @@ static int thread_global_term(odp_instance_t inst)
return 0;
}
-static void thread_test_odp_cpu_id(void)
-{
- CU_ASSERT(odp_cpu_id() >= 0);
-}
-
static void thread_test_odp_thread_id(void)
{
int id = odp_thread_id();
@@ -222,7 +215,6 @@ static void thread_test_odp_thrmask_control(void)
}
odp_testinfo_t thread_suite[] = {
- ODP_TEST_INFO(thread_test_odp_cpu_id),
ODP_TEST_INFO(thread_test_odp_thread_id),
ODP_TEST_INFO(thread_test_odp_thread_count),
ODP_TEST_INFO(thread_test_odp_thread_count_max),
diff --git a/test/validation/api/time/time.c b/test/validation/api/time/time.c
index cfef7f619..67eae7190 100644
--- a/test/validation/api/time/time.c
+++ b/test/validation/api/time/time.c
@@ -1,8 +1,6 @@
-/* Copyright (c) 2015-2018, Linaro Limited
- * Copyright (c) 2019-2024, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Linaro Limited
+ * Copyright (c) 2019-2024 Nokia
*/
#ifndef _GNU_SOURCE
@@ -19,14 +17,15 @@
#define MIN_TIME_RATE 32000
#define MAX_TIME_RATE 15000000000
#define DELAY_TOLERANCE 40000000 /* deviation for delay */
-#define WAIT_SECONDS 3
+#define WAIT_SECONDS 3
#define MAX_WORKERS 32
+#define TEST_ROUNDS 1024
#define TIME_SAMPLES 2
#define TIME_TOLERANCE_NS 1000000
#define TIME_TOLERANCE_CI_NS 40000000
#define TIME_TOLERANCE_1CPU_NS 40000000
#define GLOBAL_SHM_NAME "GlobalTimeTest"
-#define YEAR_IN_NS (365 * 24 * ODP_TIME_HOUR_IN_NS)
+#define YEAR_IN_NS (365 * 24 * ODP_TIME_HOUR_IN_NS)
static uint64_t local_res;
static uint64_t global_res;
@@ -40,6 +39,9 @@ typedef struct {
uint32_t num_threads;
odp_barrier_t test_barrier;
odp_time_t time[MAX_WORKERS + 1][TIME_SAMPLES];
+ odp_queue_t queue[MAX_WORKERS];
+ uint32_t num_queues;
+ odp_atomic_u32_t event_count;
} global_shared_mem_t;
static global_shared_mem_t *global_mem;
@@ -924,7 +926,14 @@ static void time_test_global_sync(const int ctrl)
cpu = odp_cpumask_next(&cpumask, cpu);
}
- CU_ASSERT(odph_thread_join(thread_tbl, num) == num);
+ odph_thread_join_result_t res[num];
+
+ int ret = odph_thread_join_result(thread_tbl, res, num);
+
+ CU_ASSERT(ret == num);
+
+ for (int i = 0; i < num; i++)
+ CU_ASSERT(!res[i].is_sig && res[i].ret == 0);
for (int s = 0; s < TIME_SAMPLES; s++) {
int min_idx = 0, max_idx = 0;
@@ -975,6 +984,179 @@ static void time_test_global_sync_control(void)
time_test_global_sync(1);
}
+static odp_queue_t select_dst_queue(int thread_id, const odp_queue_t queue[], uint32_t num)
+{
+ uint8_t rand_u8;
+ int rand_id = 0;
+
+ if (num == 1)
+ return queue[0];
+
+ do {
+ odp_random_data(&rand_u8, 1, ODP_RANDOM_BASIC);
+ rand_id = rand_u8 % num;
+ } while (rand_id == thread_id);
+
+ return queue[rand_id];
+}
+
+static int run_time_global_thread(void *arg)
+{
+ global_shared_mem_t *gbl = arg;
+ const int thread_id = odp_thread_id();
+ const odp_queue_t src_queue = gbl->queue[thread_id % gbl->num_queues];
+ const odp_queue_t *queues = gbl->queue;
+ const uint32_t num_queues = gbl->num_queues;
+ odp_atomic_u32_t *event_count = &gbl->event_count;
+
+ odp_barrier_wait(&gbl->test_barrier);
+
+ while (odp_atomic_load_u32(event_count) < TEST_ROUNDS) {
+ odp_time_t *ts;
+ odp_time_t cur_time;
+ odp_buffer_t buf;
+ odp_queue_t dst_queue;
+ odp_event_t ev = odp_queue_deq(src_queue);
+
+ if (ev == ODP_EVENT_INVALID) {
+ odp_cpu_pause();
+ continue;
+ }
+
+ cur_time = odp_time_global();
+
+ buf = odp_buffer_from_event(ev);
+ ts = odp_buffer_addr(buf);
+
+ CU_ASSERT(odp_time_cmp(cur_time, *ts) >= 0);
+
+ *ts = cur_time;
+
+ dst_queue = select_dst_queue(thread_id, queues, num_queues);
+
+ CU_ASSERT_FATAL(odp_queue_enq(dst_queue, ev) == 0);
+
+ odp_atomic_inc_u32(event_count);
+ }
+ return 0;
+}
+
+static void time_test_global_mt(void)
+{
+ odp_cpumask_t cpumask;
+ odp_pool_t pool;
+ odp_pool_param_t pool_param;
+ odp_pool_capability_t pool_capa;
+ odp_queue_param_t queue_param;
+ odp_queue_capability_t queue_capa;
+ odph_thread_t thread_tbl[MAX_WORKERS];
+ odph_thread_common_param_t thr_common;
+ odph_thread_param_t thr_param;
+ odp_time_t cur_time;
+ uint32_t i;
+ int num_workers = odp_cpumask_default_worker(&cpumask, global_mem->num_threads);
+ uint32_t num_events = num_workers;
+ uint32_t num_queues = num_workers;
+
+ CU_ASSERT_FATAL(odp_pool_capability(&pool_capa) == 0);
+ CU_ASSERT_FATAL(odp_queue_capability(&queue_capa) == 0);
+
+ if (pool_capa.buf.max_num && num_events > pool_capa.buf.max_num)
+ num_events = pool_capa.buf.max_num;
+
+ if (queue_capa.plain.max_size && num_events > queue_capa.plain.max_size)
+ num_events = queue_capa.plain.max_size;
+
+ if (queue_capa.plain.max_num < num_queues)
+ num_queues = queue_capa.plain.max_num;
+ CU_ASSERT_FATAL(num_queues > 0);
+
+ odp_pool_param_init(&pool_param);
+ pool_param.buf.size = sizeof(odp_time_t);
+ pool_param.buf.num = num_events;
+ pool_param.type = ODP_POOL_BUFFER;
+
+ pool = odp_pool_create("test event pool", &pool_param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ odp_queue_param_init(&queue_param);
+ queue_param.size = num_events;
+ queue_param.type = ODP_QUEUE_TYPE_PLAIN;
+
+ for (i = 0; i < num_queues; i++) {
+ global_mem->queue[i] = odp_queue_create(NULL, &queue_param);
+ CU_ASSERT_FATAL(global_mem->queue[i] != ODP_QUEUE_INVALID);
+ }
+ global_mem->num_queues = num_queues;
+
+ odp_atomic_init_u32(&global_mem->event_count, 0);
+
+ for (i = 0; i < num_events; i++) {
+ odp_time_t *ts;
+ odp_buffer_t buf = odp_buffer_alloc(pool);
+
+ if (buf == ODP_BUFFER_INVALID)
+ break;
+
+ ts = odp_buffer_addr(buf);
+ *ts = odp_time_global();
+
+ CU_ASSERT_FATAL(odp_queue_enq(global_mem->queue[i % num_queues],
+ odp_buffer_to_event(buf)) == 0);
+ }
+ CU_ASSERT_FATAL(i > 0);
+ CU_ASSERT(i == num_events);
+
+ odp_barrier_init(&global_mem->test_barrier, num_workers);
+
+ odph_thread_param_init(&thr_param);
+ thr_param.start = run_time_global_thread;
+ thr_param.arg = global_mem;
+ thr_param.thr_type = ODP_THREAD_WORKER;
+
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = *instance;
+ thr_common.cpumask = &cpumask;
+ thr_common.share_param = 1;
+
+ CU_ASSERT_FATAL(odph_thread_create(thread_tbl, &thr_common, &thr_param, num_workers) ==
+ num_workers);
+
+ odph_thread_join_result_t res[num_workers];
+
+ int ret = odph_thread_join_result(thread_tbl, res, num_workers);
+
+ CU_ASSERT(ret == num_workers);
+
+ for (i = 0; i < (uint32_t)num_workers; i++)
+ CU_ASSERT(!res[i].is_sig && res[i].ret == 0);
+
+ cur_time = odp_time_global_strict();
+
+ for (i = 0; i < num_queues; i++) {
+ odp_queue_t queue = global_mem->queue[i];
+
+ while (1) {
+ odp_buffer_t buf;
+ odp_time_t *ts;
+ odp_event_t ev = odp_queue_deq(queue);
+
+ if (ev == ODP_EVENT_INVALID)
+ break;
+
+ buf = odp_buffer_from_event(ev);
+ ts = odp_buffer_addr(buf);
+
+ CU_ASSERT(odp_time_cmp(cur_time, *ts) >= 0);
+ odp_buffer_free(buf);
+ }

+
+ CU_ASSERT(odp_queue_destroy(queue) == 0);
+ }
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
odp_testinfo_t time_suite_time[] = {
ODP_TEST_INFO(time_test_constants),
ODP_TEST_INFO(time_test_startup_time),
@@ -983,6 +1165,7 @@ odp_testinfo_t time_suite_time[] = {
ODP_TEST_INFO(time_test_local_cmp),
ODP_TEST_INFO(time_test_local_diff),
ODP_TEST_INFO(time_test_local_sum),
+ ODP_TEST_INFO(time_test_global_mt),
ODP_TEST_INFO(time_test_global_res),
ODP_TEST_INFO(time_test_global_conversion),
ODP_TEST_INFO(time_test_global_cmp),
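Both time_test_global_sync() and the new time_test_global_mt() now join workers with odph_thread_join_result(), which reports a per-thread result instead of a single join count. The checking pattern is roughly the following sketch (assumes `num` workers were created into `thread_tbl` and <odp/helper/odph_api.h> is included):

	odph_thread_join_result_t res[num];

	if (odph_thread_join_result(thread_tbl, res, num) != num)
		ODPH_ERR("Joining worker threads failed\n");

	for (int i = 0; i < num; i++) {
		if (res[i].is_sig)
			ODPH_ERR("Worker %d was terminated by a signal\n", i);
		else if (res[i].ret != 0)
			ODPH_ERR("Worker %d returned %d\n", i, res[i].ret);
	}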
diff --git a/test/validation/api/timer/timer.c b/test/validation/api/timer/timer.c
index 3678d0cb2..98637b2e1 100644
--- a/test/validation/api/timer/timer.c
+++ b/test/validation/api/timer/timer.c
@@ -1,8 +1,6 @@
-/* Copyright (c) 2015-2018, Linaro Limited
- * Copyright (c) 2019-2023, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Linaro Limited
+ * Copyright (c) 2019-2023 Nokia
*/
/* For rand_r and nanosleep */
@@ -650,6 +648,7 @@ static void timer_test_timeout_user_area(void)
CU_ASSERT(prev != addr);
ev = odp_timeout_to_event(tmo[i]);
+ odp_event_user_flag_set(ev, 1);
CU_ASSERT(odp_event_user_area(ev) == addr);
CU_ASSERT(odp_event_user_area_and_flag(ev, &flag) == addr);
CU_ASSERT(flag < 0);
@@ -748,6 +747,32 @@ static void timer_pool_create_destroy(void)
CU_ASSERT(odp_queue_destroy(queue) == 0);
}
+static void timer_pool_long_name(void)
+{
+ odp_timer_pool_param_t tparam;
+ odp_timer_pool_info_t info;
+ odp_timer_pool_t pool;
+ odp_timer_clk_src_t clk_src = test_global->clk_src;
+ char name[ODP_TIMER_POOL_NAME_LEN];
+
+ memset(name, 'a', sizeof(name));
+ name[sizeof(name) - 1] = 0;
+
+ odp_timer_pool_param_init(&tparam);
+ tparam.res_ns = global_mem->param.res_ns;
+ tparam.min_tmo = global_mem->param.min_tmo;
+ tparam.max_tmo = global_mem->param.max_tmo;
+ tparam.num_timers = 100;
+ tparam.priv = 0;
+ tparam.clk_src = clk_src;
+
+ pool = odp_timer_pool_create(name, &tparam);
+ CU_ASSERT(pool != ODP_TIMER_POOL_INVALID);
+ CU_ASSERT(odp_timer_pool_info(pool, &info) == 0);
+ CU_ASSERT(!strcmp(name, info.name));
+ odp_timer_pool_destroy(pool);
+}
+
static void timer_pool_create_max(void)
{
odp_timer_capability_t capa;
@@ -3160,6 +3185,7 @@ odp_suiteinfo_t timer_general_suites[] = {
odp_testinfo_t timer_suite[] = {
ODP_TEST_INFO(timer_test_capa),
ODP_TEST_INFO(timer_pool_create_destroy),
+ ODP_TEST_INFO(timer_pool_long_name),
ODP_TEST_INFO(timer_pool_create_max),
ODP_TEST_INFO(timer_pool_max_res),
ODP_TEST_INFO(timer_pool_current_tick),
diff --git a/test/validation/api/traffic_mngr/traffic_mngr.c b/test/validation/api/traffic_mngr/traffic_mngr.c
index b7f546dcd..dcf21b820 100644
--- a/test/validation/api/traffic_mngr/traffic_mngr.c
+++ b/test/validation/api/traffic_mngr/traffic_mngr.c
@@ -1,9 +1,7 @@
-/* Copyright (c) 2015-2018, Linaro Limited
- * Copyright (c) 2022, Marvell
- * Copyright (c) 2022, Nokia
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Linaro Limited
+ * Copyright (c) 2022 Marvell
+ * Copyright (c) 2022 Nokia
*/
#ifndef _GNU_SOURCE
@@ -67,7 +65,8 @@
#define MAX_PKTS 1000u
#define PKT_BUF_SIZE 1460
-#define MAX_PAYLOAD 1400
+#define MIN_HDR_LEN (ODPH_ETHHDR_LEN + ODPH_UDPHDR_LEN + ODPH_IPV4HDR_LEN)
+#define MAX_PAYLOAD (PKT_BUF_SIZE - MIN_HDR_LEN)
#define USE_IPV4 false
#define USE_IPV6 true
#define USE_UDP false
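With the ODP helper header sizes (ODPH_ETHHDR_LEN 14, ODPH_UDPHDR_LEN 8, ODPH_IPV4HDR_LEN 20 bytes), MIN_HDR_LEN works out to 42 bytes, so MAX_PAYLOAD is now derived as 1460 - 42 = 1418 bytes instead of the previous hard-coded 1400, and make_pkt() below rejects payload lengths that exceed it.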
@@ -103,7 +102,6 @@
#define ETHERNET_OVHD_LEN (ETHERNET_IFG + ETHERNET_PREAMBLE)
#define CRC_LEN 4
#define SHAPER_LEN_ADJ ETHERNET_OVHD_LEN
-#define TM_NAME_LEN 32
#define BILLION 1000000000ULL
#define MS 1000000 /* Millisecond in units of NS */
#define MBPS 1000000
@@ -801,6 +799,12 @@ static odp_packet_t make_pkt(odp_pool_t pkt_pool,
uint8_t *buf, *pkt_class_ptr, next_hdr;
int rc;
+ if (payload_len > MAX_PAYLOAD) {
+ ODPH_ERR("packet payload length of %u exceeds MAX_PAYLOAD of %u\n",
+ payload_len, MAX_PAYLOAD);
+ return ODP_PACKET_INVALID;
+ }
+
l4_hdr_len = pkt_info->use_tcp ? ODPH_TCPHDR_LEN : ODPH_UDPHDR_LEN;
l3_hdr_len = pkt_info->use_ipv6 ? ODPH_IPV6HDR_LEN : ODPH_IPV4HDR_LEN;
vlan_hdr_len = pkt_info->use_vlan ? ODPH_VLANHDR_LEN : 0;
@@ -1570,7 +1574,7 @@ static tm_node_desc_t *create_tm_node(odp_tm_t odp_tm,
odp_tm_wred_t green_profile, yellow_profile, red_profile;
odp_tm_node_t tm_node, parent_node;
uint32_t node_desc_size, queue_desc_size, priority;
- char node_name[TM_NAME_LEN];
+ char node_name[ODP_TM_NAME_LEN];
int rc;
odp_tm_node_params_init(&node_params);
@@ -1731,7 +1735,7 @@ static tm_node_desc_t *find_node_desc(uint8_t tm_system_idx,
name_ptr++;
while (node_desc != NULL) {
- if (strncmp(node_desc->node_name, node_name, TM_NAME_LEN) == 0)
+ if (strncmp(node_desc->node_name, node_name, ODP_TM_NAME_LEN) == 0)
return node_desc;
if (name_ptr == NULL)
@@ -1866,7 +1870,7 @@ static int create_tm_system(void)
tm_node_desc_t *root_node_desc;
uint32_t level, max_nodes[ODP_TM_MAX_LEVELS];
odp_tm_t odp_tm, found_odp_tm;
- char tm_name[TM_NAME_LEN];
+ char tm_name[ODP_TM_NAME_LEN];
int rc;
odp_tm_requirements_init(&requirements);
@@ -2546,7 +2550,7 @@ static void traffic_mngr_test_shaper_profile(void)
odp_tm_shaper_params_t shaper_params;
odp_tm_shaper_t profile;
uint32_t idx, shaper_idx, i;
- char shaper_name[TM_NAME_LEN];
+ char shaper_name[ODP_TM_NAME_LEN];
odp_tm_shaper_params_init(&shaper_params);
shaper_params.shaper_len_adjust = SHAPER_LEN_ADJ;
@@ -2610,7 +2614,7 @@ static void traffic_mngr_test_sched_profile(void)
odp_tm_sched_params_t sched_params;
odp_tm_sched_t profile;
uint32_t idx, priority, sched_idx, i;
- char sched_name[TM_NAME_LEN];
+ char sched_name[ODP_TM_NAME_LEN];
odp_tm_sched_params_init(&sched_params);
@@ -2683,7 +2687,7 @@ static void traffic_mngr_test_threshold_profile(threshold_type_t threshold)
odp_tm_threshold_params_t threshold_params;
odp_tm_threshold_t profile;
uint32_t idx, threshold_idx, i;
- char threshold_name[TM_NAME_LEN];
+ char threshold_name[ODP_TM_NAME_LEN];
odp_tm_threshold_params_init(&threshold_params);
@@ -2778,7 +2782,7 @@ static void traffic_mngr_test_wred_profile(void)
odp_tm_wred_params_t wred_params;
odp_tm_wred_t profile;
uint32_t idx, color, wred_idx, i, c;
- char wred_name[TM_NAME_LEN];
+ char wred_name[ODP_TM_NAME_LEN];
odp_tm_wred_params_init(&wred_params);
wred_params.enable_wred = 1;
@@ -3068,7 +3072,7 @@ static int set_sched_fanin(const char *node_name,
odp_tm_node_t tm_node, fanin_node;
uint32_t fanin_cnt, fanin, priority;
uint8_t sched_weight;
- char sched_name[TM_NAME_LEN];
+ char sched_name[ODP_TM_NAME_LEN];
int rc;
node_desc = find_node_desc(0, node_name);
@@ -4456,7 +4460,7 @@ static void test_defaults(uint8_t fill)
memset(&req, fill, sizeof(req));
odp_tm_requirements_init(&req);
- CU_ASSERT_EQUAL(req.num_levels, 0);
+ CU_ASSERT(req.num_levels == 0);
CU_ASSERT(!req.tm_queue_shaper_needed);
CU_ASSERT(!req.tm_queue_wred_needed);
CU_ASSERT(!req.tm_queue_dual_slope_needed);
@@ -4466,7 +4470,7 @@ static void test_defaults(uint8_t fill)
CU_ASSERT(!req.drop_prec_marking_needed);
for (n = 0; n < ODP_NUM_PACKET_COLORS; n++)
CU_ASSERT(!req.marking_colors_needed[n]);
- CU_ASSERT_EQUAL(req.pkt_prio_mode, ODP_TM_PKT_PRIO_MODE_PRESERVE);
+ CU_ASSERT(req.pkt_prio_mode == ODP_TM_PKT_PRIO_MODE_PRESERVE);
for (n = 0; n < ODP_TM_MAX_LEVELS; n++) {
odp_tm_level_requirements_t *l_req = &req.per_level[n];
@@ -4481,14 +4485,14 @@ static void test_defaults(uint8_t fill)
memset(&shaper, fill, sizeof(shaper));
odp_tm_shaper_params_init(&shaper);
CU_ASSERT(shaper.packet_mode == ODP_TM_SHAPER_RATE_SHAPE);
- CU_ASSERT_EQUAL(shaper.shaper_len_adjust, 0);
+ CU_ASSERT(shaper.shaper_len_adjust == 0);
CU_ASSERT(!shaper.dual_rate);
CU_ASSERT(!shaper.packet_mode);
memset(&sched, 0xff, sizeof(sched));
odp_tm_sched_params_init(&sched);
for (n = 0; n < ODP_TM_MAX_PRIORITIES; n++)
- CU_ASSERT_EQUAL(sched.sched_modes[n], ODP_TM_BYTE_BASED_WEIGHTS);
+ CU_ASSERT(sched.sched_modes[n] == ODP_TM_BYTE_BASED_WEIGHTS);
memset(&threshold, fill, sizeof(threshold));
odp_tm_threshold_params_init(&threshold);
@@ -4502,18 +4506,18 @@ static void test_defaults(uint8_t fill)
memset(&node, fill, sizeof(node));
odp_tm_node_params_init(&node);
- CU_ASSERT_EQUAL(node.shaper_profile, ODP_TM_INVALID);
- CU_ASSERT_EQUAL(node.threshold_profile, ODP_TM_INVALID);
+ CU_ASSERT(node.shaper_profile == ODP_TM_INVALID);
+ CU_ASSERT(node.threshold_profile == ODP_TM_INVALID);
for (n = 0; n < ODP_NUM_PACKET_COLORS; n++)
- CU_ASSERT_EQUAL(node.wred_profile[n], ODP_TM_INVALID);
+ CU_ASSERT(node.wred_profile[n] == ODP_TM_INVALID);
memset(&queue, fill, sizeof(queue));
odp_tm_queue_params_init(&queue);
- CU_ASSERT_EQUAL(queue.shaper_profile, ODP_TM_INVALID);
- CU_ASSERT_EQUAL(queue.threshold_profile, ODP_TM_INVALID);
+ CU_ASSERT(queue.shaper_profile == ODP_TM_INVALID);
+ CU_ASSERT(queue.threshold_profile == ODP_TM_INVALID);
for (n = 0; n < ODP_NUM_PACKET_COLORS; n++)
- CU_ASSERT_EQUAL(queue.wred_profile[n], ODP_TM_INVALID);
- CU_ASSERT_EQUAL(queue.priority, 0);
+ CU_ASSERT(queue.wred_profile[n] == ODP_TM_INVALID);
+ CU_ASSERT(queue.priority == 0);
CU_ASSERT(queue.ordered_enqueue);
}
@@ -4961,6 +4965,92 @@ static void traffic_mngr_test_lso_ipv4(void)
CU_ASSERT(odp_tm_is_idle(odp_tm_systems[0]));
}
+static void traffic_mngr_test_node_long_name(void)
+{
+ odp_tm_node_params_t node_params;
+ odp_tm_node_t tm_node;
+ char name[ODP_TM_NAME_LEN];
+
+ memset(name, 'a', sizeof(name));
+ name[sizeof(name) - 1] = 0;
+
+ odp_tm_node_params_init(&node_params);
+
+ tm_node = odp_tm_node_create(odp_tm_systems[0], name, &node_params);
+ CU_ASSERT(tm_node != ODP_TM_INVALID);
+ CU_ASSERT(tm_node == odp_tm_node_lookup(odp_tm_systems[0], name));
+ CU_ASSERT(!odp_tm_node_destroy(tm_node));
+}
+
+static void traffic_mngr_test_shaper_long_name(void)
+{
+ odp_tm_shaper_params_t shaper_params;
+ odp_tm_shaper_t profile;
+ char name[ODP_TM_NAME_LEN];
+
+ memset(name, 'a', sizeof(name));
+ name[sizeof(name) - 1] = 0;
+
+ odp_tm_shaper_params_init(&shaper_params);
+ profile = odp_tm_shaper_create(name, &shaper_params);
+
+ CU_ASSERT(profile != ODP_TM_INVALID);
+ CU_ASSERT(profile == odp_tm_shaper_lookup(name));
+ CU_ASSERT(!odp_tm_shaper_destroy(profile));
+}
+
+static void traffic_mngr_test_sched_long_name(void)
+{
+ odp_tm_sched_params_t sched_params;
+ odp_tm_sched_t profile;
+ char name[ODP_TM_NAME_LEN];
+
+ memset(name, 'a', sizeof(name));
+ name[sizeof(name) - 1] = 0;
+
+ odp_tm_sched_params_init(&sched_params);
+ profile = odp_tm_sched_create(name, &sched_params);
+
+ CU_ASSERT(profile != ODP_TM_INVALID);
+ CU_ASSERT(profile == odp_tm_sched_lookup(name));
+ CU_ASSERT(!odp_tm_sched_destroy(profile));
+}
+
+static void traffic_mngr_test_threshold_long_name(void)
+{
+ odp_tm_threshold_params_t threshold_params;
+ odp_tm_threshold_t profile;
+ char name[ODP_TM_NAME_LEN];
+
+ memset(name, 'a', sizeof(name));
+ name[sizeof(name) - 1] = 0;
+
+ odp_tm_threshold_params_init(&threshold_params);
+ threshold_params.enable_max_bytes = true;
+ profile = odp_tm_threshold_create(name, &threshold_params);
+
+ CU_ASSERT(profile != ODP_TM_INVALID);
+ CU_ASSERT(profile == odp_tm_thresholds_lookup(name));
+ CU_ASSERT(!odp_tm_threshold_destroy(profile));
+}
+
+static void traffic_mngr_test_wred_long_name(void)
+{
+ odp_tm_wred_params_t wred_params;
+ odp_tm_wred_t profile;
+ char name[ODP_TM_NAME_LEN];
+
+ memset(name, 'a', sizeof(name));
+ name[sizeof(name) - 1] = 0;
+
+ odp_tm_wred_params_init(&wred_params);
+ profile = odp_tm_wred_create(name, &wred_params);
+
+ CU_ASSERT(profile != ODP_TM_INVALID);
+ CU_ASSERT(profile == odp_tm_wred_lookup(name));
+ CU_ASSERT(!odp_tm_wred_destroy(profile));
+}
+
static void traffic_mngr_test_destroy(void)
{
CU_ASSERT(destroy_tm_systems() == 0);
@@ -5012,6 +5102,15 @@ odp_testinfo_t traffic_mngr_suite[] = {
traffic_mngr_check_tx_aging),
ODP_TEST_INFO(traffic_mngr_test_fanin_info),
ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_lso_ipv4, traffic_mngr_check_lso_ipv4),
+ ODP_TEST_INFO(traffic_mngr_test_node_long_name),
+ ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_shaper_long_name,
+ traffic_mngr_check_shaper),
+ ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_sched_long_name,
+ traffic_mngr_check_scheduler),
+ ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_threshold_long_name,
+ traffic_mngr_check_thresholds_byte),
+ ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_wred_long_name,
+ traffic_mngr_check_wred),
ODP_TEST_INFO(traffic_mngr_test_destroy),
ODP_TEST_INFO_NULL,
};