aboutsummaryrefslogtreecommitdiff
path: root/test/common_plat/validation/api
diff options
context:
space:
mode:
Diffstat (limited to 'test/common_plat/validation/api')
-rw-r--r--test/common_plat/validation/api/.gitignore2
-rw-r--r--test/common_plat/validation/api/Makefile.am28
-rw-r--r--test/common_plat/validation/api/Makefile.inc16
-rw-r--r--test/common_plat/validation/api/README35
-rw-r--r--test/common_plat/validation/api/atomic/.gitignore1
-rw-r--r--test/common_plat/validation/api/atomic/Makefile.am10
-rw-r--r--test/common_plat/validation/api/atomic/atomic.c885
-rw-r--r--test/common_plat/validation/api/atomic/atomic.h38
-rw-r--r--test/common_plat/validation/api/atomic/atomic_main.c12
-rw-r--r--test/common_plat/validation/api/barrier/.gitignore1
-rw-r--r--test/common_plat/validation/api/barrier/Makefile.am10
-rw-r--r--test/common_plat/validation/api/barrier/barrier.c397
-rw-r--r--test/common_plat/validation/api/barrier/barrier.h29
-rw-r--r--test/common_plat/validation/api/barrier/barrier_main.c12
-rw-r--r--test/common_plat/validation/api/buffer/.gitignore1
-rw-r--r--test/common_plat/validation/api/buffer/Makefile.am10
-rw-r--r--test/common_plat/validation/api/buffer/buffer.c274
-rw-r--r--test/common_plat/validation/api/buffer/buffer.h32
-rw-r--r--test/common_plat/validation/api/buffer/buffer_main.c11
-rw-r--r--test/common_plat/validation/api/classification/.gitignore1
-rw-r--r--test/common_plat/validation/api/classification/Makefile.am14
-rw-r--r--test/common_plat/validation/api/classification/classification.c43
-rw-r--r--test/common_plat/validation/api/classification/classification.h95
-rw-r--r--test/common_plat/validation/api/classification/classification_main.c12
-rw-r--r--test/common_plat/validation/api/classification/odp_classification_basic.c332
-rw-r--r--test/common_plat/validation/api/classification/odp_classification_common.c388
-rw-r--r--test/common_plat/validation/api/classification/odp_classification_test_pmr.c1162
-rw-r--r--test/common_plat/validation/api/classification/odp_classification_tests.c699
-rw-r--r--test/common_plat/validation/api/classification/odp_classification_testsuites.h55
-rw-r--r--test/common_plat/validation/api/cpumask/.gitignore1
-rw-r--r--test/common_plat/validation/api/cpumask/Makefile.am11
-rw-r--r--test/common_plat/validation/api/cpumask/cpumask.c116
-rw-r--r--test/common_plat/validation/api/cpumask/cpumask.h28
-rw-r--r--test/common_plat/validation/api/cpumask/cpumask_main.c11
-rw-r--r--test/common_plat/validation/api/crypto/.gitignore1
-rw-r--r--test/common_plat/validation/api/crypto/Makefile.am11
-rw-r--r--test/common_plat/validation/api/crypto/crypto.c121
-rw-r--r--test/common_plat/validation/api/crypto/crypto.h45
-rw-r--r--test/common_plat/validation/api/crypto/crypto_main.c12
-rw-r--r--test/common_plat/validation/api/crypto/odp_crypto_test_inp.c726
-rw-r--r--test/common_plat/validation/api/crypto/odp_crypto_test_inp.h21
-rw-r--r--test/common_plat/validation/api/crypto/test_vectors.h353
-rw-r--r--test/common_plat/validation/api/crypto/test_vectors_len.h38
-rw-r--r--test/common_plat/validation/api/errno/.gitignore1
-rw-r--r--test/common_plat/validation/api/errno/Makefile.am10
-rw-r--r--test/common_plat/validation/api/errno/errno.c46
-rw-r--r--test/common_plat/validation/api/errno/errno.h24
-rw-r--r--test/common_plat/validation/api/errno/errno_main.c12
-rw-r--r--test/common_plat/validation/api/hash/.gitignore1
-rw-r--r--test/common_plat/validation/api/hash/Makefile.am10
-rw-r--r--test/common_plat/validation/api/hash/hash.c54
-rw-r--r--test/common_plat/validation/api/hash/hash.h24
-rw-r--r--test/common_plat/validation/api/hash/hash_main.c12
-rw-r--r--test/common_plat/validation/api/init/.gitignore3
-rw-r--r--test/common_plat/validation/api/init/Makefile.am16
-rw-r--r--test/common_plat/validation/api/init/init.c188
-rw-r--r--test/common_plat/validation/api/init/init.h32
-rw-r--r--test/common_plat/validation/api/init/init_main_abort.c11
-rw-r--r--test/common_plat/validation/api/init/init_main_log.c11
-rw-r--r--test/common_plat/validation/api/init/init_main_ok.c11
-rw-r--r--test/common_plat/validation/api/lock/.gitignore1
-rw-r--r--test/common_plat/validation/api/lock/Makefile.am10
-rw-r--r--test/common_plat/validation/api/lock/lock.c1224
-rw-r--r--test/common_plat/validation/api/lock/lock.h45
-rw-r--r--test/common_plat/validation/api/lock/lock_main.c12
-rw-r--r--test/common_plat/validation/api/packet/.gitignore1
-rw-r--r--test/common_plat/validation/api/packet/Makefile.am10
-rw-r--r--test/common_plat/validation/api/packet/packet.c1369
-rw-r--r--test/common_plat/validation/api/packet/packet.h49
-rw-r--r--test/common_plat/validation/api/packet/packet_main.c12
-rw-r--r--test/common_plat/validation/api/pktio/.gitignore1
-rw-r--r--test/common_plat/validation/api/pktio/Makefile.am10
-rw-r--r--test/common_plat/validation/api/pktio/pktio.c2170
-rw-r--r--test/common_plat/validation/api/pktio/pktio.h64
-rw-r--r--test/common_plat/validation/api/pktio/pktio_main.c12
-rw-r--r--test/common_plat/validation/api/pool/.gitignore1
-rw-r--r--test/common_plat/validation/api/pool/Makefile.am10
-rw-r--r--test/common_plat/validation/api/pool/pool.c131
-rw-r--r--test/common_plat/validation/api/pool/pool.h28
-rw-r--r--test/common_plat/validation/api/pool/pool_main.c12
-rw-r--r--test/common_plat/validation/api/queue/.gitignore1
-rw-r--r--test/common_plat/validation/api/queue/Makefile.am10
-rw-r--r--test/common_plat/validation/api/queue/queue.c321
-rw-r--r--test/common_plat/validation/api/queue/queue.h31
-rw-r--r--test/common_plat/validation/api/queue/queue_main.c12
-rw-r--r--test/common_plat/validation/api/random/.gitignore1
-rw-r--r--test/common_plat/validation/api/random/Makefile.am10
-rw-r--r--test/common_plat/validation/api/random/random.c44
-rw-r--r--test/common_plat/validation/api/random/random.h24
-rw-r--r--test/common_plat/validation/api/random/random_main.c12
-rw-r--r--test/common_plat/validation/api/scheduler/.gitignore1
-rw-r--r--test/common_plat/validation/api/scheduler/Makefile.am10
-rw-r--r--test/common_plat/validation/api/scheduler/scheduler.c1653
-rw-r--r--test/common_plat/validation/api/scheduler/scheduler.h62
-rw-r--r--test/common_plat/validation/api/scheduler/scheduler_main.c12
-rw-r--r--test/common_plat/validation/api/shmem/.gitignore1
-rw-r--r--test/common_plat/validation/api/shmem/Makefile.am10
-rw-r--r--test/common_plat/validation/api/shmem/shmem.c108
-rw-r--r--test/common_plat/validation/api/shmem/shmem.h24
-rw-r--r--test/common_plat/validation/api/shmem/shmem_main.c12
-rw-r--r--test/common_plat/validation/api/std_clib/.gitignore1
-rw-r--r--test/common_plat/validation/api/std_clib/Makefile.am10
-rw-r--r--test/common_plat/validation/api/std_clib/std_clib.c110
-rw-r--r--test/common_plat/validation/api/std_clib/std_clib.h21
-rw-r--r--test/common_plat/validation/api/std_clib/std_clib_main.c12
-rw-r--r--test/common_plat/validation/api/system/.gitignore1
-rw-r--r--test/common_plat/validation/api/system/Makefile.am10
-rw-r--r--test/common_plat/validation/api/system/system.c344
-rw-r--r--test/common_plat/validation/api/system/system.h43
-rw-r--r--test/common_plat/validation/api/system/system_main.c12
-rw-r--r--test/common_plat/validation/api/thread/.gitignore1
-rw-r--r--test/common_plat/validation/api/thread/Makefile.am12
-rw-r--r--test/common_plat/validation/api/thread/thread.c140
-rw-r--r--test/common_plat/validation/api/thread/thread.h33
-rw-r--r--test/common_plat/validation/api/thread/thread_main.c12
-rw-r--r--test/common_plat/validation/api/time/.gitignore1
-rw-r--r--test/common_plat/validation/api/time/Makefile.am10
-rw-r--r--test/common_plat/validation/api/time/time.c476
-rw-r--r--test/common_plat/validation/api/time/time.h40
-rw-r--r--test/common_plat/validation/api/time/time_main.c12
-rw-r--r--test/common_plat/validation/api/timer/.gitignore1
-rw-r--r--test/common_plat/validation/api/timer/Makefile.am10
-rw-r--r--test/common_plat/validation/api/timer/timer.c605
-rw-r--r--test/common_plat/validation/api/timer/timer.h27
-rw-r--r--test/common_plat/validation/api/timer/timer_main.c12
-rw-r--r--test/common_plat/validation/api/traffic_mngr/.gitignore1
-rw-r--r--test/common_plat/validation/api/traffic_mngr/Makefile.am10
-rw-r--r--test/common_plat/validation/api/traffic_mngr/traffic_mngr.c4009
-rw-r--r--test/common_plat/validation/api/traffic_mngr/traffic_mngr.h45
-rw-r--r--test/common_plat/validation/api/traffic_mngr/traffic_mngr_main.c12
130 files changed, 20130 insertions, 0 deletions
diff --git a/test/common_plat/validation/api/.gitignore b/test/common_plat/validation/api/.gitignore
new file mode 100644
index 000000000..7e563b8b3
--- /dev/null
+++ b/test/common_plat/validation/api/.gitignore
@@ -0,0 +1,2 @@
+*.log
+*.trs
diff --git a/test/common_plat/validation/api/Makefile.am b/test/common_plat/validation/api/Makefile.am
new file mode 100644
index 000000000..e2d30a673
--- /dev/null
+++ b/test/common_plat/validation/api/Makefile.am
@@ -0,0 +1,28 @@
+ODP_MODULES = atomic \
+ barrier \
+ buffer \
+ classification \
+ cpumask \
+ crypto \
+ errno \
+ hash \
+ init \
+ lock \
+ queue \
+ packet \
+ pktio \
+ pool \
+ random \
+ scheduler \
+ std_clib \
+ thread \
+ time \
+ timer \
+ traffic_mngr \
+ shmem \
+ system
+
+SUBDIRS = $(ODP_MODULES)
+
+#The tests will need to retain the deprecated test implementation
+AM_CFLAGS += -Wno-deprecated-declarations
diff --git a/test/common_plat/validation/api/Makefile.inc b/test/common_plat/validation/api/Makefile.inc
new file mode 100644
index 000000000..ffba62013
--- /dev/null
+++ b/test/common_plat/validation/api/Makefile.inc
@@ -0,0 +1,16 @@
+include $(top_srcdir)/test/Makefile.inc
+
+COMMON_DIR = $(top_builddir)/test/common_plat/common
+
+#The following option ensures that option '-I.' is not passed to gcc,
+#therefore distinguishing between '#include "X"' and '#include <X>'.
+#It allows common filenames (such as 'errno.h') to be used locally.
+AUTOMAKE_OPTIONS = nostdinc
+
+AM_CFLAGS += -I$(top_srcdir)/test/common_plat/common
+AM_LDFLAGS += -static
+
+LIBCUNIT_COMMON = $(COMMON_DIR)/libcunit_common.la
+LIBCPUMASK_COMMON = $(COMMON_DIR)/libcpumask_common.la
+LIBTHRMASK_COMMON = $(COMMON_DIR)/libthrmask_common.la
+LIBODP = $(LIB)/libodphelper-linux.la $(LIB)/libodp-linux.la
diff --git a/test/common_plat/validation/api/README b/test/common_plat/validation/api/README
new file mode 100644
index 000000000..1baebaafc
--- /dev/null
+++ b/test/common_plat/validation/api/README
@@ -0,0 +1,35 @@
+Copyright (c) 2015, Linaro Limited
+All rights reserved.
+
+SPDX-License-Identifier: BSD-3-Clause
+
+
+To add tests in here, please observe the rules listed below. This list
+is a brief overview; for a more detailed explanation of the test
+framework, refer to the ODP Implementers' Guide, which can be built as
+follows:
+
+ ./configure --enable-user-guides
+ make
+
+Output will be in doc/output/. If this fails, check the documentation
+section of the DEPENDENCIES file.
+
+Rules for all tests under this tree:
+
+1. Tests must be placed in the directory of the module they belong to.
+
+2. Tests must be platform agnostic, i.e.
+
+ - should be written in plain C only.
+ - may only use C standard library functions, CUnit functions and of
+ course ODP functions
+ - should be expected to pass on all ODP implementations
+
+ Tests that do not follow these rules should be placed in the platform
+ specific test area (currently platform/<platform>/test/).
+
+3. If a new ODP API module is created, please update the Makefile.am.
+
+4. Symbols exported from test libraries must respect the naming
+ convention detailed in the ODP Implementers' Guide.
diff --git a/test/common_plat/validation/api/atomic/.gitignore b/test/common_plat/validation/api/atomic/.gitignore
new file mode 100644
index 000000000..610ffeab0
--- /dev/null
+++ b/test/common_plat/validation/api/atomic/.gitignore
@@ -0,0 +1 @@
+atomic_main
diff --git a/test/common_plat/validation/api/atomic/Makefile.am b/test/common_plat/validation/api/atomic/Makefile.am
new file mode 100644
index 000000000..9b6bd6315
--- /dev/null
+++ b/test/common_plat/validation/api/atomic/Makefile.am
@@ -0,0 +1,10 @@
+include ../Makefile.inc
+
+noinst_LTLIBRARIES = libtestatomic.la
+libtestatomic_la_SOURCES = atomic.c
+
+test_PROGRAMS = atomic_main$(EXEEXT)
+dist_atomic_main_SOURCES = atomic_main.c
+atomic_main_LDADD = libtestatomic.la $(LIBCUNIT_COMMON) $(LIBODP)
+
+EXTRA_DIST = atomic.h
diff --git a/test/common_plat/validation/api/atomic/atomic.c b/test/common_plat/validation/api/atomic/atomic.c
new file mode 100644
index 000000000..c4e934525
--- /dev/null
+++ b/test/common_plat/validation/api/atomic/atomic.c
@@ -0,0 +1,885 @@
+/* Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <malloc.h>
+#include <odp_api.h>
+#include <CUnit/Basic.h>
+#include <odp_cunit_common.h>
+#include <unistd.h>
+#include "atomic.h"
+
+#define VERBOSE 0
+#define MAX_ITERATIONS 1000
+
+#define ADD_SUB_CNT 5
+
+#define CNT 10
+#define U32_INIT_VAL (1UL << 10)
+#define U64_INIT_VAL (1ULL << 33)
+#define U32_MAGIC 0xa23f65b2
+#define U64_MAGIC 0xf2e1c5430cb6a52e
+
+#define GLOBAL_SHM_NAME "GlobalLockTest"
+
+#define UNUSED __attribute__((__unused__))
+
+#define CHECK_MAX_MIN (1 << 0)
+#define CHECK_XCHG (1 << 2)
+
+static odp_atomic_u32_t a32u;
+static odp_atomic_u64_t a64u;
+static odp_atomic_u32_t a32u_min;
+static odp_atomic_u32_t a32u_max;
+static odp_atomic_u64_t a64u_min;
+static odp_atomic_u64_t a64u_max;
+static odp_atomic_u32_t a32u_xchg;
+static odp_atomic_u64_t a64u_xchg;
+
+typedef __volatile uint32_t volatile_u32_t;
+typedef __volatile uint64_t volatile_u64_t;
+
+typedef struct {
+ /* Global variables */
+ uint32_t g_num_threads;
+ uint32_t g_iterations;
+ uint32_t g_verbose;
+ uint32_t g_max_num_cores;
+
+ volatile_u32_t global_lock_owner;
+} global_shared_mem_t;
+
+/* Per-thread memory */
+typedef struct {
+ global_shared_mem_t *global_mem;
+
+ int thread_id;
+ int thread_core;
+
+ volatile_u64_t delay_counter;
+} per_thread_mem_t;
+
+static odp_shm_t global_shm;
+static global_shared_mem_t *global_mem;
+
+/* Initialise per-thread memory */
+static per_thread_mem_t *thread_init(void)
+{
+ global_shared_mem_t *global_mem;
+ per_thread_mem_t *per_thread_mem;
+ odp_shm_t global_shm;
+ uint32_t per_thread_mem_len;
+
+ per_thread_mem_len = sizeof(per_thread_mem_t);
+ per_thread_mem = malloc(per_thread_mem_len);
+ memset(per_thread_mem, 0, per_thread_mem_len);
+
+ per_thread_mem->delay_counter = 1;
+
+ per_thread_mem->thread_id = odp_thread_id();
+ per_thread_mem->thread_core = odp_cpu_id();
+
+ global_shm = odp_shm_lookup(GLOBAL_SHM_NAME);
+ global_mem = odp_shm_addr(global_shm);
+ CU_ASSERT_PTR_NOT_NULL(global_mem);
+
+ per_thread_mem->global_mem = global_mem;
+
+ return per_thread_mem;
+}
+
+static void thread_finalize(per_thread_mem_t *per_thread_mem)
+{
+ free(per_thread_mem);
+}
+
+static void test_atomic_inc_32(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_inc_u32(&a32u);
+}
+
+static void test_atomic_inc_64(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_inc_u64(&a64u);
+}
+
+static void test_atomic_dec_32(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_dec_u32(&a32u);
+}
+
+static void test_atomic_dec_64(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_dec_u64(&a64u);
+}
+
+static void test_atomic_fetch_inc_32(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_inc_u32(&a32u);
+}
+
+static void test_atomic_fetch_inc_64(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_inc_u64(&a64u);
+}
+
+static void test_atomic_fetch_dec_32(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_dec_u32(&a32u);
+}
+
+static void test_atomic_fetch_dec_64(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_dec_u64(&a64u);
+}
+
+static void test_atomic_add_32(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_add_u32(&a32u, ADD_SUB_CNT);
+}
+
+static void test_atomic_add_64(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_add_u64(&a64u, ADD_SUB_CNT);
+}
+
+static void test_atomic_sub_32(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_sub_u32(&a32u, ADD_SUB_CNT);
+}
+
+static void test_atomic_sub_64(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_sub_u64(&a64u, ADD_SUB_CNT);
+}
+
+static void test_atomic_fetch_add_32(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_add_u32(&a32u, ADD_SUB_CNT);
+}
+
+static void test_atomic_fetch_add_64(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_add_u64(&a64u, ADD_SUB_CNT);
+}
+
+static void test_atomic_fetch_sub_32(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_sub_u32(&a32u, ADD_SUB_CNT);
+}
+
+static void test_atomic_fetch_sub_64(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_sub_u64(&a64u, ADD_SUB_CNT);
+}
+
+static void test_atomic_min_32(void)
+{
+ int i;
+ uint32_t tmp;
+
+ for (i = 0; i < CNT; i++) {
+ tmp = odp_atomic_fetch_dec_u32(&a32u);
+ odp_atomic_min_u32(&a32u_min, tmp);
+ }
+}
+
+static void test_atomic_min_64(void)
+{
+ int i;
+ uint64_t tmp;
+
+ for (i = 0; i < CNT; i++) {
+ tmp = odp_atomic_fetch_dec_u64(&a64u);
+ odp_atomic_min_u64(&a64u_min, tmp);
+ }
+}
+
+static void test_atomic_max_32(void)
+{
+ int i;
+ uint32_t tmp;
+
+ for (i = 0; i < CNT; i++) {
+ tmp = odp_atomic_fetch_inc_u32(&a32u);
+ odp_atomic_max_u32(&a32u_max, tmp);
+ }
+}
+
+static void test_atomic_max_64(void)
+{
+ int i;
+ uint64_t tmp;
+
+ for (i = 0; i < CNT; i++) {
+ tmp = odp_atomic_fetch_inc_u64(&a64u);
+ odp_atomic_max_u64(&a64u_max, tmp);
+ }
+}
+
+static void test_atomic_cas_inc_32(void)
+{
+ int i;
+ uint32_t old;
+
+ for (i = 0; i < CNT; i++) {
+ old = odp_atomic_load_u32(&a32u);
+
+ while (odp_atomic_cas_u32(&a32u, &old, old + 1) == 0)
+ ;
+ }
+}
+
+static void test_atomic_cas_dec_32(void)
+{
+ int i;
+ uint32_t old;
+
+ for (i = 0; i < CNT; i++) {
+ old = odp_atomic_load_u32(&a32u);
+
+ while (odp_atomic_cas_u32(&a32u, &old, old - 1) == 0)
+ ;
+ }
+}
+
+static void test_atomic_cas_inc_64(void)
+{
+ int i;
+ uint64_t old;
+
+ for (i = 0; i < CNT; i++) {
+ old = odp_atomic_load_u64(&a64u);
+
+ while (odp_atomic_cas_u64(&a64u, &old, old + 1) == 0)
+ ;
+ }
+}
+
+static void test_atomic_cas_dec_64(void)
+{
+ int i;
+ uint64_t old;
+
+ for (i = 0; i < CNT; i++) {
+ old = odp_atomic_load_u64(&a64u);
+
+ while (odp_atomic_cas_u64(&a64u, &old, old - 1) == 0)
+ ;
+ }
+}
+
+static void test_atomic_xchg_32(void)
+{
+ uint32_t old, new;
+ int i;
+
+ for (i = 0; i < CNT; i++) {
+ new = odp_atomic_fetch_inc_u32(&a32u);
+ old = odp_atomic_xchg_u32(&a32u_xchg, new);
+
+ if (old & 0x1)
+ odp_atomic_xchg_u32(&a32u_xchg, 0);
+ else
+ odp_atomic_xchg_u32(&a32u_xchg, 1);
+ }
+
+ odp_atomic_sub_u32(&a32u, CNT);
+ odp_atomic_xchg_u32(&a32u_xchg, U32_MAGIC);
+}
+
+static void test_atomic_xchg_64(void)
+{
+ uint64_t old, new;
+ int i;
+
+ for (i = 0; i < CNT; i++) {
+ new = odp_atomic_fetch_inc_u64(&a64u);
+ old = odp_atomic_xchg_u64(&a64u_xchg, new);
+
+ if (old & 0x1)
+ odp_atomic_xchg_u64(&a64u_xchg, 0);
+ else
+ odp_atomic_xchg_u64(&a64u_xchg, 1);
+ }
+
+ odp_atomic_sub_u64(&a64u, CNT);
+ odp_atomic_xchg_u64(&a64u_xchg, U64_MAGIC);
+}
+
+static void test_atomic_non_relaxed_32(void)
+{
+ int i;
+ uint32_t tmp;
+
+ for (i = 0; i < CNT; i++) {
+ tmp = odp_atomic_load_acq_u32(&a32u);
+ odp_atomic_store_rel_u32(&a32u, tmp);
+
+ tmp = odp_atomic_load_acq_u32(&a32u_max);
+ odp_atomic_add_rel_u32(&a32u_max, 1);
+
+ tmp = odp_atomic_load_acq_u32(&a32u_min);
+ odp_atomic_sub_rel_u32(&a32u_min, 1);
+
+ tmp = odp_atomic_load_u32(&a32u_xchg);
+ while (odp_atomic_cas_acq_u32(&a32u_xchg, &tmp, tmp + 1) == 0)
+ ;
+
+ tmp = odp_atomic_load_u32(&a32u_xchg);
+ while (odp_atomic_cas_rel_u32(&a32u_xchg, &tmp, tmp + 1) == 0)
+ ;
+
+ tmp = odp_atomic_load_u32(&a32u_xchg);
+ /* finally set value for validation */
+ while (odp_atomic_cas_acq_rel_u32(&a32u_xchg, &tmp, U32_MAGIC)
+ == 0)
+ ;
+ }
+}
+
+static void test_atomic_non_relaxed_64(void)
+{
+ int i;
+ uint64_t tmp;
+
+ for (i = 0; i < CNT; i++) {
+ tmp = odp_atomic_load_acq_u64(&a64u);
+ odp_atomic_store_rel_u64(&a64u, tmp);
+
+ tmp = odp_atomic_load_acq_u64(&a64u_max);
+ odp_atomic_add_rel_u64(&a64u_max, 1);
+
+ tmp = odp_atomic_load_acq_u64(&a64u_min);
+ odp_atomic_sub_rel_u64(&a64u_min, 1);
+
+ tmp = odp_atomic_load_u64(&a64u_xchg);
+ while (odp_atomic_cas_acq_u64(&a64u_xchg, &tmp, tmp + 1) == 0)
+ ;
+
+ tmp = odp_atomic_load_u64(&a64u_xchg);
+ while (odp_atomic_cas_rel_u64(&a64u_xchg, &tmp, tmp + 1) == 0)
+ ;
+
+ tmp = odp_atomic_load_u64(&a64u_xchg);
+ /* finally set value for validation */
+ while (odp_atomic_cas_acq_rel_u64(&a64u_xchg, &tmp, U64_MAGIC)
+ == 0)
+ ;
+ }
+}
+
+static void test_atomic_inc_dec_32(void)
+{
+ test_atomic_inc_32();
+ test_atomic_dec_32();
+}
+
+static void test_atomic_inc_dec_64(void)
+{
+ test_atomic_inc_64();
+ test_atomic_dec_64();
+}
+
+static void test_atomic_fetch_inc_dec_32(void)
+{
+ test_atomic_fetch_inc_32();
+ test_atomic_fetch_dec_32();
+}
+
+static void test_atomic_fetch_inc_dec_64(void)
+{
+ test_atomic_fetch_inc_64();
+ test_atomic_fetch_dec_64();
+}
+
+static void test_atomic_add_sub_32(void)
+{
+ test_atomic_add_32();
+ test_atomic_sub_32();
+}
+
+static void test_atomic_add_sub_64(void)
+{
+ test_atomic_add_64();
+ test_atomic_sub_64();
+}
+
+static void test_atomic_fetch_add_sub_32(void)
+{
+ test_atomic_fetch_add_32();
+ test_atomic_fetch_sub_32();
+}
+
+static void test_atomic_fetch_add_sub_64(void)
+{
+ test_atomic_fetch_add_64();
+ test_atomic_fetch_sub_64();
+}
+
+static void test_atomic_max_min_32(void)
+{
+ test_atomic_max_32();
+ test_atomic_min_32();
+}
+
+static void test_atomic_max_min_64(void)
+{
+ test_atomic_max_64();
+ test_atomic_min_64();
+}
+
+static void test_atomic_cas_inc_dec_32(void)
+{
+ test_atomic_cas_inc_32();
+ test_atomic_cas_dec_32();
+}
+
+static void test_atomic_cas_inc_dec_64(void)
+{
+ test_atomic_cas_inc_64();
+ test_atomic_cas_dec_64();
+}
+
+static void test_atomic_init(void)
+{
+ odp_atomic_init_u32(&a32u, 0);
+ odp_atomic_init_u64(&a64u, 0);
+ odp_atomic_init_u32(&a32u_min, 0);
+ odp_atomic_init_u32(&a32u_max, 0);
+ odp_atomic_init_u64(&a64u_min, 0);
+ odp_atomic_init_u64(&a64u_max, 0);
+ odp_atomic_init_u32(&a32u_xchg, 0);
+ odp_atomic_init_u64(&a64u_xchg, 0);
+}
+
+static void test_atomic_store(void)
+{
+ odp_atomic_store_u32(&a32u, U32_INIT_VAL);
+ odp_atomic_store_u64(&a64u, U64_INIT_VAL);
+ odp_atomic_store_u32(&a32u_min, U32_INIT_VAL);
+ odp_atomic_store_u32(&a32u_max, U32_INIT_VAL);
+ odp_atomic_store_u64(&a64u_min, U64_INIT_VAL);
+ odp_atomic_store_u64(&a64u_max, U64_INIT_VAL);
+ odp_atomic_store_u32(&a32u_xchg, U32_INIT_VAL);
+ odp_atomic_store_u64(&a64u_xchg, U64_INIT_VAL);
+}
+
+static void test_atomic_validate(int check)
+{
+ CU_ASSERT(U32_INIT_VAL == odp_atomic_load_u32(&a32u));
+ CU_ASSERT(U64_INIT_VAL == odp_atomic_load_u64(&a64u));
+
+ if (check & CHECK_MAX_MIN) {
+ CU_ASSERT(odp_atomic_load_u32(&a32u_max) >
+ odp_atomic_load_u32(&a32u_min));
+
+ CU_ASSERT(odp_atomic_load_u64(&a64u_max) >
+ odp_atomic_load_u64(&a64u_min));
+ }
+
+ if (check & CHECK_XCHG) {
+ CU_ASSERT(odp_atomic_load_u32(&a32u_xchg) == U32_MAGIC);
+ CU_ASSERT(odp_atomic_load_u64(&a64u_xchg) == U64_MAGIC);
+ }
+}
+
+int atomic_init(odp_instance_t *inst)
+{
+ uint32_t workers_count, max_threads;
+ int ret = 0;
+ odp_cpumask_t mask;
+
+ if (0 != odp_init_global(inst, NULL, NULL)) {
+ fprintf(stderr, "error: odp_init_global() failed.\n");
+ return -1;
+ }
+ if (0 != odp_init_local(*inst, ODP_THREAD_CONTROL)) {
+ fprintf(stderr, "error: odp_init_local() failed.\n");
+ return -1;
+ }
+
+ global_shm = odp_shm_reserve(GLOBAL_SHM_NAME,
+ sizeof(global_shared_mem_t), 64,
+ ODP_SHM_SW_ONLY);
+ if (ODP_SHM_INVALID == global_shm) {
+ fprintf(stderr, "Unable reserve memory for global_shm\n");
+ return -1;
+ }
+
+ global_mem = odp_shm_addr(global_shm);
+ memset(global_mem, 0, sizeof(global_shared_mem_t));
+
+ global_mem->g_num_threads = MAX_WORKERS;
+ global_mem->g_iterations = MAX_ITERATIONS;
+ global_mem->g_verbose = VERBOSE;
+
+ workers_count = odp_cpumask_default_worker(&mask, 0);
+
+ max_threads = (workers_count >= MAX_WORKERS) ?
+ MAX_WORKERS : workers_count;
+
+ if (max_threads < global_mem->g_num_threads) {
+ printf("Requested num of threads is too large\n");
+ printf("reducing from %" PRIu32 " to %" PRIu32 "\n",
+ global_mem->g_num_threads,
+ max_threads);
+ global_mem->g_num_threads = max_threads;
+ }
+
+ printf("Num of threads used = %" PRIu32 "\n",
+ global_mem->g_num_threads);
+
+ return ret;
+}
+
+/* Atomic tests */
+static int test_atomic_inc_dec_thread(void *arg UNUSED)
+{
+ per_thread_mem_t *per_thread_mem;
+
+ per_thread_mem = thread_init();
+ test_atomic_inc_dec_32();
+ test_atomic_inc_dec_64();
+
+ thread_finalize(per_thread_mem);
+
+ return CU_get_number_of_failures();
+}
+
+static int test_atomic_add_sub_thread(void *arg UNUSED)
+{
+ per_thread_mem_t *per_thread_mem;
+
+ per_thread_mem = thread_init();
+ test_atomic_add_sub_32();
+ test_atomic_add_sub_64();
+
+ thread_finalize(per_thread_mem);
+
+ return CU_get_number_of_failures();
+}
+
+static int test_atomic_fetch_inc_dec_thread(void *arg UNUSED)
+{
+ per_thread_mem_t *per_thread_mem;
+
+ per_thread_mem = thread_init();
+ test_atomic_fetch_inc_dec_32();
+ test_atomic_fetch_inc_dec_64();
+
+ thread_finalize(per_thread_mem);
+
+ return CU_get_number_of_failures();
+}
+
+static int test_atomic_fetch_add_sub_thread(void *arg UNUSED)
+{
+ per_thread_mem_t *per_thread_mem;
+
+ per_thread_mem = thread_init();
+ test_atomic_fetch_add_sub_32();
+ test_atomic_fetch_add_sub_64();
+
+ thread_finalize(per_thread_mem);
+
+ return CU_get_number_of_failures();
+}
+
+static int test_atomic_max_min_thread(void *arg UNUSED)
+{
+ per_thread_mem_t *per_thread_mem;
+
+ per_thread_mem = thread_init();
+ test_atomic_max_min_32();
+ test_atomic_max_min_64();
+
+ thread_finalize(per_thread_mem);
+
+ return CU_get_number_of_failures();
+}
+
+static int test_atomic_cas_inc_dec_thread(void *arg UNUSED)
+{
+ per_thread_mem_t *per_thread_mem;
+
+ per_thread_mem = thread_init();
+ test_atomic_cas_inc_dec_32();
+ test_atomic_cas_inc_dec_64();
+
+ thread_finalize(per_thread_mem);
+
+ return CU_get_number_of_failures();
+}
+
+static int test_atomic_xchg_thread(void *arg UNUSED)
+{
+ per_thread_mem_t *per_thread_mem;
+
+ per_thread_mem = thread_init();
+ test_atomic_xchg_32();
+ test_atomic_xchg_64();
+
+ thread_finalize(per_thread_mem);
+
+ return CU_get_number_of_failures();
+}
+
+static int test_atomic_non_relaxed_thread(void *arg UNUSED)
+{
+ per_thread_mem_t *per_thread_mem;
+
+ per_thread_mem = thread_init();
+ test_atomic_non_relaxed_32();
+ test_atomic_non_relaxed_64();
+
+ thread_finalize(per_thread_mem);
+
+ return CU_get_number_of_failures();
+}
+
+static void test_atomic_functional(int func_ptr(void *), int check)
+{
+ pthrd_arg arg;
+
+ arg.numthrds = global_mem->g_num_threads;
+ test_atomic_init();
+ test_atomic_store();
+ odp_cunit_thread_create(func_ptr, &arg);
+ odp_cunit_thread_exit(&arg);
+ test_atomic_validate(check);
+}
+
+void atomic_test_atomic_inc_dec(void)
+{
+ test_atomic_functional(test_atomic_inc_dec_thread, 0);
+}
+
+void atomic_test_atomic_add_sub(void)
+{
+ test_atomic_functional(test_atomic_add_sub_thread, 0);
+}
+
+void atomic_test_atomic_fetch_inc_dec(void)
+{
+ test_atomic_functional(test_atomic_fetch_inc_dec_thread, 0);
+}
+
+void atomic_test_atomic_fetch_add_sub(void)
+{
+ test_atomic_functional(test_atomic_fetch_add_sub_thread, 0);
+}
+
+void atomic_test_atomic_max_min(void)
+{
+ test_atomic_functional(test_atomic_max_min_thread, CHECK_MAX_MIN);
+}
+
+void atomic_test_atomic_cas_inc_dec(void)
+{
+ test_atomic_functional(test_atomic_cas_inc_dec_thread, 0);
+}
+
+void atomic_test_atomic_xchg(void)
+{
+ test_atomic_functional(test_atomic_xchg_thread, CHECK_XCHG);
+}
+
+void atomic_test_atomic_non_relaxed(void)
+{
+ test_atomic_functional(test_atomic_non_relaxed_thread,
+ CHECK_MAX_MIN | CHECK_XCHG);
+}
+
+void atomic_test_atomic_op_lock_free(void)
+{
+ odp_atomic_op_t atomic_op;
+ int ret_null, ret;
+
+ memset(&atomic_op, 0xff, sizeof(odp_atomic_op_t));
+ atomic_op.all_bits = 0;
+
+ CU_ASSERT(atomic_op.all_bits == 0);
+ CU_ASSERT(atomic_op.op.init == 0);
+ CU_ASSERT(atomic_op.op.load == 0);
+ CU_ASSERT(atomic_op.op.store == 0);
+ CU_ASSERT(atomic_op.op.fetch_add == 0);
+ CU_ASSERT(atomic_op.op.add == 0);
+ CU_ASSERT(atomic_op.op.fetch_sub == 0);
+ CU_ASSERT(atomic_op.op.sub == 0);
+ CU_ASSERT(atomic_op.op.fetch_inc == 0);
+ CU_ASSERT(atomic_op.op.inc == 0);
+ CU_ASSERT(atomic_op.op.fetch_dec == 0);
+ CU_ASSERT(atomic_op.op.dec == 0);
+ CU_ASSERT(atomic_op.op.min == 0);
+ CU_ASSERT(atomic_op.op.max == 0);
+ CU_ASSERT(atomic_op.op.cas == 0);
+ CU_ASSERT(atomic_op.op.xchg == 0);
+
+ /* Test setting first, last and couple of other bits */
+ atomic_op.op.init = 1;
+ CU_ASSERT(atomic_op.op.init == 1);
+ CU_ASSERT(atomic_op.all_bits != 0);
+ atomic_op.op.init = 0;
+ CU_ASSERT(atomic_op.all_bits == 0);
+
+ atomic_op.op.xchg = 1;
+ CU_ASSERT(atomic_op.op.xchg == 1);
+ CU_ASSERT(atomic_op.all_bits != 0);
+ atomic_op.op.xchg = 0;
+ CU_ASSERT(atomic_op.all_bits == 0);
+
+ atomic_op.op.add = 1;
+ CU_ASSERT(atomic_op.op.add == 1);
+ CU_ASSERT(atomic_op.all_bits != 0);
+ atomic_op.op.add = 0;
+ CU_ASSERT(atomic_op.all_bits == 0);
+
+ atomic_op.op.dec = 1;
+ CU_ASSERT(atomic_op.op.dec == 1);
+ CU_ASSERT(atomic_op.all_bits != 0);
+ atomic_op.op.dec = 0;
+ CU_ASSERT(atomic_op.all_bits == 0);
+
+ memset(&atomic_op, 0xff, sizeof(odp_atomic_op_t));
+ ret = odp_atomic_lock_free_u64(&atomic_op);
+ ret_null = odp_atomic_lock_free_u64(NULL);
+
+ CU_ASSERT(ret == ret_null);
+
+ /* Init operation is not atomic by the spec. Call to
+ * odp_atomic_lock_free_u64() zeros it but never sets it. */
+
+ if (ret == 0) {
+ /* none are lock free */
+ CU_ASSERT(atomic_op.all_bits == 0);
+ CU_ASSERT(atomic_op.op.init == 0);
+ CU_ASSERT(atomic_op.op.load == 0);
+ CU_ASSERT(atomic_op.op.store == 0);
+ CU_ASSERT(atomic_op.op.fetch_add == 0);
+ CU_ASSERT(atomic_op.op.add == 0);
+ CU_ASSERT(atomic_op.op.fetch_sub == 0);
+ CU_ASSERT(atomic_op.op.sub == 0);
+ CU_ASSERT(atomic_op.op.fetch_inc == 0);
+ CU_ASSERT(atomic_op.op.inc == 0);
+ CU_ASSERT(atomic_op.op.fetch_dec == 0);
+ CU_ASSERT(atomic_op.op.dec == 0);
+ CU_ASSERT(atomic_op.op.min == 0);
+ CU_ASSERT(atomic_op.op.max == 0);
+ CU_ASSERT(atomic_op.op.cas == 0);
+ CU_ASSERT(atomic_op.op.xchg == 0);
+ }
+
+ if (ret == 1) {
+ /* some are lock free */
+ CU_ASSERT(atomic_op.all_bits != 0);
+ CU_ASSERT(atomic_op.op.init == 0);
+ }
+
+ if (ret == 2) {
+ /* all are lock free */
+ CU_ASSERT(atomic_op.all_bits != 0);
+ CU_ASSERT(atomic_op.op.init == 0);
+ CU_ASSERT(atomic_op.op.load == 1);
+ CU_ASSERT(atomic_op.op.store == 1);
+ CU_ASSERT(atomic_op.op.fetch_add == 1);
+ CU_ASSERT(atomic_op.op.add == 1);
+ CU_ASSERT(atomic_op.op.fetch_sub == 1);
+ CU_ASSERT(atomic_op.op.sub == 1);
+ CU_ASSERT(atomic_op.op.fetch_inc == 1);
+ CU_ASSERT(atomic_op.op.inc == 1);
+ CU_ASSERT(atomic_op.op.fetch_dec == 1);
+ CU_ASSERT(atomic_op.op.dec == 1);
+ CU_ASSERT(atomic_op.op.min == 1);
+ CU_ASSERT(atomic_op.op.max == 1);
+ CU_ASSERT(atomic_op.op.cas == 1);
+ CU_ASSERT(atomic_op.op.xchg == 1);
+ }
+}
+
+odp_testinfo_t atomic_suite_atomic[] = {
+ ODP_TEST_INFO(atomic_test_atomic_inc_dec),
+ ODP_TEST_INFO(atomic_test_atomic_add_sub),
+ ODP_TEST_INFO(atomic_test_atomic_fetch_inc_dec),
+ ODP_TEST_INFO(atomic_test_atomic_fetch_add_sub),
+ ODP_TEST_INFO(atomic_test_atomic_max_min),
+ ODP_TEST_INFO(atomic_test_atomic_cas_inc_dec),
+ ODP_TEST_INFO(atomic_test_atomic_xchg),
+ ODP_TEST_INFO(atomic_test_atomic_non_relaxed),
+ ODP_TEST_INFO(atomic_test_atomic_op_lock_free),
+ ODP_TEST_INFO_NULL,
+};
+
+odp_suiteinfo_t atomic_suites[] = {
+ {"atomic", NULL, NULL,
+ atomic_suite_atomic},
+ ODP_SUITE_INFO_NULL
+};
+
+int atomic_main(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(argc, argv))
+ return -1;
+
+ odp_cunit_register_global_init(atomic_init);
+
+ ret = odp_cunit_register(atomic_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/common_plat/validation/api/atomic/atomic.h b/test/common_plat/validation/api/atomic/atomic.h
new file mode 100644
index 000000000..4ea837b7a
--- /dev/null
+++ b/test/common_plat/validation/api/atomic/atomic.h
@@ -0,0 +1,38 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _ODP_TEST_ATOMIC_H_
+#define _ODP_TEST_ATOMIC_H_
+
+#include <odp_cunit_common.h>
+
+/* test functions (one per atomic API group): */
+void atomic_test_atomic_inc_dec(void);
+void atomic_test_atomic_add_sub(void);
+void atomic_test_atomic_fetch_inc_dec(void);
+void atomic_test_atomic_fetch_add_sub(void);
+void atomic_test_atomic_max_min(void);
+void atomic_test_atomic_cas_inc_dec(void);
+void atomic_test_atomic_xchg(void);
+void atomic_test_atomic_non_relaxed(void);
+void atomic_test_atomic_op_lock_free(void);
+
+/* test arrays: */
+extern odp_testinfo_t atomic_suite_atomic[];
+
+/* test array init/term functions: */
+/* NOTE(review): declared here but the suite registry passes NULL init;
+ * confirm whether atomic_suite_init still has a definition/caller. */
+int atomic_suite_init(void);
+
+/* test registry: */
+extern odp_suiteinfo_t atomic_suites[];
+
+/* executable init/term functions: */
+int atomic_init(odp_instance_t *inst);
+
+/* main test program: */
+int atomic_main(int argc, char *argv[]);
+
+#endif
diff --git a/test/common_plat/validation/api/atomic/atomic_main.c b/test/common_plat/validation/api/atomic/atomic_main.c
new file mode 100644
index 000000000..db035373e
--- /dev/null
+++ b/test/common_plat/validation/api/atomic/atomic_main.c
@@ -0,0 +1,12 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "atomic.h"
+
+/* Stand-alone entry point: delegate to the shared atomic test driver. */
+int main(int argc, char *argv[])
+{
+	return atomic_main(argc, argv);
+}
diff --git a/test/common_plat/validation/api/barrier/.gitignore b/test/common_plat/validation/api/barrier/.gitignore
new file mode 100644
index 000000000..2e0ee7ade
--- /dev/null
+++ b/test/common_plat/validation/api/barrier/.gitignore
@@ -0,0 +1 @@
+barrier_main
diff --git a/test/common_plat/validation/api/barrier/Makefile.am b/test/common_plat/validation/api/barrier/Makefile.am
new file mode 100644
index 000000000..8fc632c27
--- /dev/null
+++ b/test/common_plat/validation/api/barrier/Makefile.am
@@ -0,0 +1,10 @@
+include ../Makefile.inc
+
+noinst_LTLIBRARIES = libtestbarrier.la
+libtestbarrier_la_SOURCES = barrier.c
+
+test_PROGRAMS = barrier_main$(EXEEXT)
+dist_barrier_main_SOURCES = barrier_main.c
+barrier_main_LDADD = libtestbarrier.la $(LIBCUNIT_COMMON) $(LIBODP)
+
+EXTRA_DIST = barrier.h
diff --git a/test/common_plat/validation/api/barrier/barrier.c b/test/common_plat/validation/api/barrier/barrier.c
new file mode 100644
index 000000000..d4583884a
--- /dev/null
+++ b/test/common_plat/validation/api/barrier/barrier.c
@@ -0,0 +1,397 @@
+/* Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <malloc.h>
+#include <odp_api.h>
+#include <CUnit/Basic.h>
+#include <odp_cunit_common.h>
+#include <unistd.h>
+#include "barrier.h"
+
+#define VERBOSE 0
+#define MAX_ITERATIONS 1000
+#define BARRIER_ITERATIONS 64
+
+#define SLOW_BARRIER_DELAY 400
+#define BASE_DELAY 6
+
+#define NUM_TEST_BARRIERS BARRIER_ITERATIONS
+#define NUM_RESYNC_BARRIERS 100
+
+#define BARRIER_DELAY 10
+
+#define GLOBAL_SHM_NAME "GlobalLockTest"
+
+#define UNUSED __attribute__((__unused__))
+
+static volatile int temp_result;
+
+typedef __volatile uint32_t volatile_u32_t;
+typedef __volatile uint64_t volatile_u64_t;
+
+typedef struct {
+ odp_atomic_u32_t wait_cnt;
+} custom_barrier_t;
+
+typedef struct {
+ /* Global variables */
+ uint32_t g_num_threads;
+ uint32_t g_iterations;
+ uint32_t g_verbose;
+ uint32_t g_max_num_cores;
+
+ odp_barrier_t test_barriers[NUM_TEST_BARRIERS];
+ custom_barrier_t custom_barrier1[NUM_TEST_BARRIERS];
+ custom_barrier_t custom_barrier2[NUM_TEST_BARRIERS];
+ volatile_u32_t slow_thread_num;
+ volatile_u32_t barrier_cnt1;
+ volatile_u32_t barrier_cnt2;
+ odp_barrier_t global_barrier;
+
+} global_shared_mem_t;
+
+/* Per-thread memory */
+typedef struct {
+ global_shared_mem_t *global_mem;
+
+ int thread_id;
+ int thread_core;
+
+ volatile_u64_t delay_counter;
+} per_thread_mem_t;
+
+static odp_shm_t global_shm;
+static global_shared_mem_t *global_mem;
+
+/*
+* Delay a consistent amount of time. Ideally the amount of CPU time taken
+* is linearly proportional to "iterations". The goal is to try to do some
+* work that the compiler optimizer won't optimize away, and also to
+* minimize loads and stores (at least to different memory addresses)
+* so as to not affect or be affected by caching issues. This does NOT have to
+* correlate to a specific number of cpu cycles or be consistent across
+* CPU architectures.
+*/
+/* Burn a roughly constant amount of CPU time proportional to
+ * "iterations".  The volatile per-thread counter keeps the compiler
+ * from optimizing the busy-work away while touching only one address. */
+static void thread_delay(per_thread_mem_t *per_thread_mem, uint32_t iterations)
+{
+	volatile_u64_t *cnt_ptr = &per_thread_mem->delay_counter;
+	uint32_t done = 0;
+
+	while (done++ < iterations)
+		(*cnt_ptr)++;
+}
+
+/* Initialise per-thread memory */
+static per_thread_mem_t *thread_init(void)
+{
+ global_shared_mem_t *global_mem;
+ per_thread_mem_t *per_thread_mem;
+ odp_shm_t global_shm;
+ uint32_t per_thread_mem_len;
+
+ per_thread_mem_len = sizeof(per_thread_mem_t);
+ per_thread_mem = malloc(per_thread_mem_len);
+ memset(per_thread_mem, 0, per_thread_mem_len);
+
+ per_thread_mem->delay_counter = 1;
+
+ per_thread_mem->thread_id = odp_thread_id();
+ per_thread_mem->thread_core = odp_cpu_id();
+
+ global_shm = odp_shm_lookup(GLOBAL_SHM_NAME);
+ global_mem = odp_shm_addr(global_shm);
+ CU_ASSERT_PTR_NOT_NULL(global_mem);
+
+ per_thread_mem->global_mem = global_mem;
+
+ return per_thread_mem;
+}
+
+/* Release the per-thread state allocated by thread_init() */
+static void thread_finalize(per_thread_mem_t *per_thread_mem)
+{
+	free(per_thread_mem);
+}
+
+/* Arm the home-grown barrier: the wait counter starts at the number of
+ * participating threads and is decremented by each arrival. */
+static void custom_barrier_init(custom_barrier_t *custom_barrier,
+				uint32_t num_threads)
+{
+	odp_atomic_init_u32(&custom_barrier->wait_cnt, num_threads);
+}
+
+/* Block (spin) until every thread has arrived at this barrier.  Used to
+ * line the threads up independently of the odp_barrier_t under test. */
+static void custom_barrier_wait(custom_barrier_t *custom_barrier)
+{
+	volatile_u64_t dummy = 1;
+	uint32_t idx;
+
+	/* Announce this thread's arrival */
+	odp_atomic_sub_u32(&custom_barrier->wait_cnt, 1);
+
+	/* Poll the arrival counter, with a short busy delay between
+	 * loads to reduce cache-line contention */
+	do {
+		for (idx = 1; idx <= BARRIER_DELAY; idx++)
+			dummy++;
+	} while (odp_atomic_load_u32(&custom_barrier->wait_cnt) != 0);
+}
+
+/*
+ * Core barrier trial loop, executed by every worker thread.  Each
+ * iteration designates one "slow" thread that delays before reaching
+ * odp_barrier_wait(); if the barrier works, no fast thread may observe
+ * barrier_cnt1/2 advancing early.  With no_barrier_test != 0 the
+ * odp_barrier_wait() call is skipped, so errors are EXPECTED.
+ * Returns the number of synchronization errors this thread observed.
+ */
+static uint32_t barrier_test(per_thread_mem_t *per_thread_mem,
+			     odp_bool_t no_barrier_test)
+{
+	global_shared_mem_t *global_mem;
+	uint32_t barrier_errs, iterations, cnt, i_am_slow_thread;
+	uint32_t thread_num, slow_thread_num, next_slow_thread, num_threads;
+	uint32_t lock_owner_delay, barrier_cnt1, barrier_cnt2;
+
+	thread_num = odp_thread_id();
+	global_mem = per_thread_mem->global_mem;
+	num_threads = global_mem->g_num_threads;
+	iterations = BARRIER_ITERATIONS;
+
+	barrier_errs = 0;
+	lock_owner_delay = SLOW_BARRIER_DELAY;
+
+	for (cnt = 1; cnt < iterations; cnt++) {
+		/* Wait here until all of the threads reach this point */
+		custom_barrier_wait(&global_mem->custom_barrier1[cnt]);
+
+		barrier_cnt1 = global_mem->barrier_cnt1;
+		barrier_cnt2 = global_mem->barrier_cnt2;
+
+		/* Both counters must equal the trial number on entry */
+		if ((barrier_cnt1 != cnt) || (barrier_cnt2 != cnt)) {
+			printf("thread_num=%" PRIu32 " barrier_cnts of %" PRIu32
+			       " %" PRIu32 " cnt=%" PRIu32 "\n",
+			       thread_num, barrier_cnt1, barrier_cnt2, cnt);
+			barrier_errs++;
+		}
+
+		/* Wait here until all of the threads reach this point */
+		custom_barrier_wait(&global_mem->custom_barrier2[cnt]);
+
+		/* Rotate the "slow" role round-robin over the threads */
+		slow_thread_num = global_mem->slow_thread_num;
+		i_am_slow_thread = thread_num == slow_thread_num;
+		next_slow_thread = slow_thread_num + 1;
+		if (num_threads < next_slow_thread)
+			next_slow_thread = 1;
+
+		/*
+		 * Now run the test, which involves having all but one thread
+		 * immediately calling odp_barrier_wait(), and one thread wait a
+		 * moderate amount of time and then calling odp_barrier_wait().
+		 * The test fails if any of the first group of threads
+		 * has not waited for the "slow" thread. The "slow" thread is
+		 * responsible for re-initializing the barrier for next trial.
+		 */
+		if (i_am_slow_thread) {
+			thread_delay(per_thread_mem, lock_owner_delay);
+			lock_owner_delay += BASE_DELAY;
+			if ((global_mem->barrier_cnt1 != cnt) ||
+			    (global_mem->barrier_cnt2 != cnt) ||
+			    (global_mem->slow_thread_num
+					!= slow_thread_num))
+				barrier_errs++;
+		}
+
+		if (no_barrier_test == 0)
+			odp_barrier_wait(&global_mem->test_barriers[cnt]);
+
+		global_mem->barrier_cnt1 = cnt + 1;
+		odp_mb_full();
+
+		if (i_am_slow_thread) {
+			global_mem->slow_thread_num = next_slow_thread;
+			global_mem->barrier_cnt2 = cnt + 1;
+			odp_mb_full();
+		} else {
+			/* Fast threads wait for the slow thread to publish
+			 * barrier_cnt2 before starting the next trial */
+			while (global_mem->barrier_cnt2 != (cnt + 1))
+				thread_delay(per_thread_mem, BASE_DELAY);
+		}
+	}
+
+	if ((global_mem->g_verbose) && (barrier_errs != 0))
+		printf("\nThread %" PRIu32 " (id=%d core=%d) had %" PRIu32
+		       " barrier_errs in %" PRIu32 " iterations\n", thread_num,
+		       per_thread_mem->thread_id,
+		       per_thread_mem->thread_core, barrier_errs, iterations);
+
+	return barrier_errs;
+}
+
+/* Worker entry: run the trial loop WITHOUT odp_barrier_wait() to prove
+ * the test methodology can detect missing synchronization. */
+static int no_barrier_functional_test(void *arg UNUSED)
+{
+	per_thread_mem_t *thr_mem = thread_init();
+	uint32_t errs = barrier_test(thr_mem, 1);
+
+	/*
+	 * This assertion MAY appear inverted: with the barrier calls
+	 * skipped the threads are expected to fall out of sync, so the
+	 * test PASSES only when errors are seen — unless a single worker
+	 * was used, in which case no errors can occur.  Zero errors with
+	 * multiple workers would indicate a broken methodology or ODP
+	 * thread implementation.
+	 */
+	CU_ASSERT(errs != 0 || global_mem->g_num_threads == 1);
+
+	thread_finalize(thr_mem);
+
+	return CU_get_number_of_failures();
+}
+
+/* Worker entry: run the trial loop WITH odp_barrier_wait(); a working
+ * barrier implementation must produce zero synchronization errors. */
+static int barrier_functional_test(void *arg UNUSED)
+{
+	per_thread_mem_t *thr_mem = thread_init();
+	uint32_t errs = barrier_test(thr_mem, 0);
+
+	CU_ASSERT(errs == 0);
+
+	thread_finalize(thr_mem);
+
+	return CU_get_number_of_failures();
+}
+
+/* (Re)arm every ODP and custom barrier and reset the shared trial
+ * counters before a test run is started. */
+static void barrier_test_init(void)
+{
+	uint32_t thr_cnt = global_mem->g_num_threads;
+	uint32_t i;
+
+	for (i = 0; i < NUM_TEST_BARRIERS; i++) {
+		odp_barrier_init(&global_mem->test_barriers[i], thr_cnt);
+		custom_barrier_init(&global_mem->custom_barrier1[i], thr_cnt);
+		custom_barrier_init(&global_mem->custom_barrier2[i], thr_cnt);
+	}
+
+	/* Trial numbering starts at 1, not 0 */
+	global_mem->slow_thread_num = 1;
+	global_mem->barrier_cnt1 = 1;
+	global_mem->barrier_cnt2 = 1;
+}
+
+/* Barrier tests */
+/* Smoke-test that all three memory barrier flavours are implemented and
+ * callable; the volatile stores around them cannot be elided or fused,
+ * so each barrier is actually exercised.  No ordering property is (or
+ * can be) asserted here — this only verifies the calls exist. */
+void barrier_test_memory_barrier(void)
+{
+	volatile int a = 0;
+	volatile int b = 0;
+	volatile int c = 0;
+	volatile int d = 0;
+
+	/* Call all memory barriers to verify that those are implemented */
+	a = 1;
+	odp_mb_release();
+	b = 1;
+	odp_mb_acquire();
+	c = 1;
+	odp_mb_full();
+	d = 1;
+
+	/* Avoid "variable set but not used" warning */
+	temp_result = a + b + c + d;
+}
+
+/* Test case: spawn the workers running the barrier-less trial loop. */
+void barrier_test_no_barrier_functional(void)
+{
+	pthrd_arg thr_arg;
+
+	/* Re-arm all barriers before the workers start */
+	barrier_test_init();
+
+	thr_arg.numthrds = global_mem->g_num_threads;
+	odp_cunit_thread_create(no_barrier_functional_test, &thr_arg);
+	odp_cunit_thread_exit(&thr_arg);
+}
+
+/* Test case: spawn the workers running the real barrier trial loop. */
+void barrier_test_barrier_functional(void)
+{
+	pthrd_arg thr_arg;
+
+	/* Re-arm all barriers before the workers start */
+	barrier_test_init();
+
+	thr_arg.numthrds = global_mem->g_num_threads;
+	odp_cunit_thread_create(barrier_functional_test, &thr_arg);
+	odp_cunit_thread_exit(&thr_arg);
+}
+
+/* All barrier test cases; terminated by ODP_TEST_INFO_NULL. */
+odp_testinfo_t barrier_suite_barrier[] = {
+	ODP_TEST_INFO(barrier_test_memory_barrier),
+	ODP_TEST_INFO(barrier_test_no_barrier_functional),
+	ODP_TEST_INFO(barrier_test_barrier_functional),
+	ODP_TEST_INFO_NULL
+};
+
+/* Executable init: bring up the ODP instance, reserve the shared memory
+ * region used by all barrier tests, and size the worker thread count to
+ * the available cores (capped at MAX_WORKERS).  Returns 0 on success,
+ * -1 on failure.  Called once by the CUnit wrapper before any suite. */
+int barrier_init(odp_instance_t *inst)
+{
+	uint32_t workers_count, max_threads;
+	odp_cpumask_t mask;
+
+	if (0 != odp_init_global(inst, NULL, NULL)) {
+		fprintf(stderr, "error: odp_init_global() failed.\n");
+		return -1;
+	}
+	if (0 != odp_init_local(*inst, ODP_THREAD_CONTROL)) {
+		fprintf(stderr, "error: odp_init_local() failed.\n");
+		return -1;
+	}
+
+	global_shm = odp_shm_reserve(GLOBAL_SHM_NAME,
+				     sizeof(global_shared_mem_t), 64,
+				     ODP_SHM_SW_ONLY);
+	if (ODP_SHM_INVALID == global_shm) {
+		fprintf(stderr, "Unable reserve memory for global_shm\n");
+		return -1;
+	}
+
+	global_mem = odp_shm_addr(global_shm);
+	memset(global_mem, 0, sizeof(global_shared_mem_t));
+
+	global_mem->g_num_threads = MAX_WORKERS;
+	global_mem->g_iterations = MAX_ITERATIONS;
+	global_mem->g_verbose = VERBOSE;
+
+	/* Cap the thread count to the number of available worker cores */
+	workers_count = odp_cpumask_default_worker(&mask, 0);
+
+	max_threads = (workers_count >= MAX_WORKERS) ?
+			MAX_WORKERS : workers_count;
+
+	if (max_threads < global_mem->g_num_threads) {
+		printf("Requested num of threads is too large\n");
+		printf("reducing from %" PRIu32 " to %" PRIu32 "\n",
+		       global_mem->g_num_threads,
+		       max_threads);
+		global_mem->g_num_threads = max_threads;
+	}
+
+	printf("Num of threads used = %" PRIu32 "\n",
+	       global_mem->g_num_threads);
+
+	return 0;
+}
+
+/* Suite registry: a single "barrier" suite with no per-suite init/term. */
+odp_suiteinfo_t barrier_suites[] = {
+	{"barrier", NULL, NULL,
+	 barrier_suite_barrier},
+	ODP_SUITE_INFO_NULL
+};
+
+/* Shared test driver: parse the common options, register the barrier
+ * suites with the custom global initializer, then run them.  Returns 0
+ * on success, non-zero on parse/registration/test failure. */
+int barrier_main(int argc, char *argv[])
+{
+	int status;
+
+	/* Let the CUnit wrapper consume the common command line options */
+	if (odp_cunit_parse_options(argc, argv))
+		return -1;
+
+	/* This executable needs its own ODP instance initializer */
+	odp_cunit_register_global_init(barrier_init);
+
+	status = odp_cunit_register(barrier_suites);
+	if (status != 0)
+		return status;
+
+	return odp_cunit_run();
+}
diff --git a/test/common_plat/validation/api/barrier/barrier.h b/test/common_plat/validation/api/barrier/barrier.h
new file mode 100644
index 000000000..e4890e0f4
--- /dev/null
+++ b/test/common_plat/validation/api/barrier/barrier.h
@@ -0,0 +1,29 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _ODP_TEST_BARRIER_H_
+#define _ODP_TEST_BARRIER_H_
+
+#include <odp_cunit_common.h>
+
+/* test functions (memory barriers and thread barrier trials): */
+void barrier_test_memory_barrier(void);
+void barrier_test_no_barrier_functional(void);
+void barrier_test_barrier_functional(void);
+
+/* test arrays: */
+extern odp_testinfo_t barrier_suite_barrier[];
+
+/* test registry: */
+extern odp_suiteinfo_t barrier_suites[];
+
+/* executable init/term functions: */
+int barrier_init(odp_instance_t *inst);
+
+/* main test program: */
+int barrier_main(int argc, char *argv[]);
+
+#endif
diff --git a/test/common_plat/validation/api/barrier/barrier_main.c b/test/common_plat/validation/api/barrier/barrier_main.c
new file mode 100644
index 000000000..064decf6c
--- /dev/null
+++ b/test/common_plat/validation/api/barrier/barrier_main.c
@@ -0,0 +1,12 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "barrier.h"
+
+/* Stand-alone entry point: delegate to the shared barrier test driver. */
+int main(int argc, char *argv[])
+{
+	return barrier_main(argc, argv);
+}
diff --git a/test/common_plat/validation/api/buffer/.gitignore b/test/common_plat/validation/api/buffer/.gitignore
new file mode 100644
index 000000000..0e8ac15c1
--- /dev/null
+++ b/test/common_plat/validation/api/buffer/.gitignore
@@ -0,0 +1 @@
+buffer_main
diff --git a/test/common_plat/validation/api/buffer/Makefile.am b/test/common_plat/validation/api/buffer/Makefile.am
new file mode 100644
index 000000000..add2a3419
--- /dev/null
+++ b/test/common_plat/validation/api/buffer/Makefile.am
@@ -0,0 +1,10 @@
+include ../Makefile.inc
+
+noinst_LTLIBRARIES = libtestbuffer.la
+libtestbuffer_la_SOURCES = buffer.c
+
+test_PROGRAMS = buffer_main$(EXEEXT)
+dist_buffer_main_SOURCES = buffer_main.c
+buffer_main_LDADD = libtestbuffer.la $(LIBCUNIT_COMMON) $(LIBODP)
+
+EXTRA_DIST = buffer.h
diff --git a/test/common_plat/validation/api/buffer/buffer.c b/test/common_plat/validation/api/buffer/buffer.c
new file mode 100644
index 000000000..d26d5e82e
--- /dev/null
+++ b/test/common_plat/validation/api/buffer/buffer.c
@@ -0,0 +1,274 @@
+/* Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+#include "odp_cunit_common.h"
+#include "buffer.h"
+
+static odp_pool_t raw_pool;
+static odp_buffer_t raw_buffer = ODP_BUFFER_INVALID;
+static const size_t raw_buffer_size = 1500;
+
+/* Suite init: create the pool and the buffer shared by the management
+ * test.  Returns 0 on success, -1 on failure.  On a partial failure the
+ * pool is destroyed here, because CUnit does not run the suite cleanup
+ * when init fails. */
+int buffer_suite_init(void)
+{
+	odp_pool_param_t params = {
+		.buf = {
+			.size  = raw_buffer_size,
+			.align = ODP_CACHE_LINE_SIZE,
+			.num   = 100,
+		},
+		.type  = ODP_POOL_BUFFER,
+	};
+
+	raw_pool = odp_pool_create("raw_pool", &params);
+	if (raw_pool == ODP_POOL_INVALID)
+		return -1;
+	raw_buffer = odp_buffer_alloc(raw_pool);
+	if (raw_buffer == ODP_BUFFER_INVALID) {
+		/* Avoid leaking the pool: suite term will not be called */
+		odp_pool_destroy(raw_pool);
+		return -1;
+	}
+	return 0;
+}
+
+/* Suite term: return the shared buffer, then tear the pool down (a pool
+ * with outstanding buffers cannot be destroyed).  Returns 0 on success,
+ * -1 if the pool destroy fails. */
+int buffer_suite_term(void)
+{
+	odp_buffer_free(raw_buffer);
+
+	return (odp_pool_destroy(raw_pool) == 0) ? 0 : -1;
+}
+
+/* Allocate buffers one at a time until the pool is exhausted and verify
+ * the pool holds at least "num" buffers of the right type and size. */
+void buffer_test_pool_alloc(void)
+{
+	odp_pool_t pool;
+	const int num = 3;
+	const size_t size = 1500;
+	odp_buffer_t buffer[num];
+	odp_event_t ev;
+	int index;
+	char wrong_type = 0, wrong_size = 0;
+	odp_pool_param_t params = {
+		.buf = {
+			.size  = size,
+			.align = ODP_CACHE_LINE_SIZE,
+			.num   = num,
+		},
+		.type  = ODP_POOL_BUFFER,
+	};
+
+	pool = odp_pool_create("buffer_pool_alloc", &params);
+	/* Creation must be checked before the pool is used (consistent
+	 * with buffer_test_pool_free_multi) */
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+	odp_pool_print(pool);
+
+	/* Try to allocate num items from the pool */
+	for (index = 0; index < num; index++) {
+		buffer[index] = odp_buffer_alloc(pool);
+
+		if (buffer[index] == ODP_BUFFER_INVALID)
+			break;
+
+		ev = odp_buffer_to_event(buffer[index]);
+		if (odp_event_type(ev) != ODP_EVENT_BUFFER)
+			wrong_type = 1;
+		if (odp_buffer_size(buffer[index]) < size)
+			wrong_size = 1;
+		if (wrong_type || wrong_size)
+			odp_buffer_print(buffer[index]);
+	}
+
+	/* Check that the pool had at least num items */
+	CU_ASSERT(index == num);
+	/* index points out of buffer[] or it points to an invalid buffer */
+	index--;
+
+	/* Check that the pool had correct buffers */
+	CU_ASSERT(wrong_type == 0);
+	CU_ASSERT(wrong_size == 0);
+
+	for (; index >= 0; index--)
+		odp_buffer_free(buffer[index]);
+
+	CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Wrapper to call odp_buffer_alloc_multi() repeatedly until either no
+ * more buffers are returned, or num buffers were allocated.  Returns
+ * the total number of buffers allocated (<= num). */
+static int buffer_alloc_multi(odp_pool_t pool, odp_buffer_t buffer[], int num)
+{
+	int ret, total = 0;
+
+	do {
+		ret = odp_buffer_alloc_multi(pool, buffer + total, num - total);
+		/* A negative return is an API error, not pool exhaustion */
+		CU_ASSERT(ret >= 0);
+		CU_ASSERT(ret <= num - total);
+		total += ret;
+	} while (total < num && ret);
+
+	return total;
+}
+
+/* Same as buffer_test_pool_alloc() but using the multi-buffer alloc
+ * API; requesting num + 1 buffers must yield exactly num. */
+void buffer_test_pool_alloc_multi(void)
+{
+	odp_pool_t pool;
+	const int num = 3;
+	const size_t size = 1500;
+	odp_buffer_t buffer[num + 1];
+	odp_event_t ev;
+	int index;
+	char wrong_type = 0, wrong_size = 0;
+	odp_pool_param_t params = {
+		.buf = {
+			.size  = size,
+			.align = ODP_CACHE_LINE_SIZE,
+			.num   = num,
+		},
+		.type  = ODP_POOL_BUFFER,
+	};
+
+	pool = odp_pool_create("buffer_pool_alloc_multi", &params);
+	/* Creation must be checked before the pool is used (consistent
+	 * with buffer_test_pool_free_multi) */
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+	odp_pool_print(pool);
+
+	/* Try to allocate num + 1 items from the pool */
+	CU_ASSERT_FATAL(buffer_alloc_multi(pool, buffer, num + 1) == num);
+
+	for (index = 0; index < num; index++) {
+		if (buffer[index] == ODP_BUFFER_INVALID)
+			break;
+
+		ev = odp_buffer_to_event(buffer[index]);
+		if (odp_event_type(ev) != ODP_EVENT_BUFFER)
+			wrong_type = 1;
+		if (odp_buffer_size(buffer[index]) < size)
+			wrong_size = 1;
+		if (wrong_type || wrong_size)
+			odp_buffer_print(buffer[index]);
+	}
+
+	/* Check that the pool had at least num items */
+	CU_ASSERT(index == num);
+
+	/* Check that the pool had correct buffers */
+	CU_ASSERT(wrong_type == 0);
+	CU_ASSERT(wrong_size == 0);
+
+	odp_buffer_free_multi(buffer, num);
+
+	CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Verify that a freed buffer is returned to its pool: a one-buffer pool
+ * must fail a second alloc, then succeed again after the free. */
+void buffer_test_pool_free(void)
+{
+	odp_pool_t pool;
+	odp_buffer_t buffer;
+	odp_pool_param_t params = {
+		.buf = {
+			.size  = 64,
+			.align = ODP_CACHE_LINE_SIZE,
+			.num   = 1,
+		},
+		.type  = ODP_POOL_BUFFER,
+	};
+
+	pool = odp_pool_create("buffer_pool_free", &params);
+	/* Creation must be checked before the pool is used */
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+	/* Allocate the only buffer from the pool */
+	buffer = odp_buffer_alloc(pool);
+	CU_ASSERT_FATAL(buffer != ODP_BUFFER_INVALID);
+
+	/* Pool should have only one buffer.  (Original line lacked the
+	 * terminating ';' and compiled only because CU_ASSERT_FATAL
+	 * expands to a block statement.) */
+	CU_ASSERT_FATAL(odp_buffer_alloc(pool) == ODP_BUFFER_INVALID);
+
+	odp_buffer_free(buffer);
+
+	/* Check that the buffer was returned back to the pool */
+	buffer = odp_buffer_alloc(pool);
+	CU_ASSERT_FATAL(buffer != ODP_BUFFER_INVALID);
+
+	odp_buffer_free(buffer);
+	CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Verify that odp_buffer_free_multi() can return buffers belonging to
+ * two different pools in a single call, and that all buffers end up
+ * back in their own pools. */
+void buffer_test_pool_free_multi(void)
+{
+	odp_pool_t pool[2];
+	odp_buffer_t buffer[4];
+	odp_buffer_t buf_inval[2];
+	odp_pool_param_t params = {
+		.buf = {
+			.size  = 64,
+			.align = ODP_CACHE_LINE_SIZE,
+			.num   = 2,
+		},
+		.type  = ODP_POOL_BUFFER,
+	};
+
+	pool[0] = odp_pool_create("buffer_pool_free_multi_0", &params);
+	pool[1] = odp_pool_create("buffer_pool_free_multi_1", &params);
+	CU_ASSERT_FATAL(pool[0] != ODP_POOL_INVALID);
+	CU_ASSERT_FATAL(pool[1] != ODP_POOL_INVALID);
+
+	/* Allocate all the buffers from the pools */
+	CU_ASSERT_FATAL(buffer_alloc_multi(pool[0], &buffer[0], 2) == 2);
+	CU_ASSERT_FATAL(buffer_alloc_multi(pool[1], &buffer[2], 2) == 2);
+
+	/* Pools should have no more buffer */
+	CU_ASSERT(odp_buffer_alloc_multi(pool[0], buf_inval, 2) == 0);
+	CU_ASSERT(odp_buffer_alloc_multi(pool[1], buf_inval, 2) == 0);
+
+	/* Try to free both buffers from both pools at once */
+	odp_buffer_free_multi(buffer, 4);
+
+	/* Check that all buffers were returned back to the pools */
+	CU_ASSERT_FATAL(buffer_alloc_multi(pool[0], &buffer[0], 2) == 2);
+	CU_ASSERT_FATAL(buffer_alloc_multi(pool[1], &buffer[2], 2) == 2);
+
+	odp_buffer_free_multi(buffer, 4);
+	CU_ASSERT(odp_pool_destroy(pool[0]) == 0);
+	CU_ASSERT(odp_pool_destroy(pool[1]) == 0);
+}
+
+/* Exercise the basic buffer accessors on the buffer allocated by
+ * buffer_suite_init(): validity, pool/event mapping, size, address and
+ * the debug u64 conversions. */
+void buffer_test_management_basic(void)
+{
+	odp_event_t ev = odp_buffer_to_event(raw_buffer);
+
+	CU_ASSERT(odp_buffer_is_valid(raw_buffer) == 1);
+	CU_ASSERT(odp_buffer_pool(raw_buffer) != ODP_POOL_INVALID);
+	CU_ASSERT(odp_event_type(ev) == ODP_EVENT_BUFFER);
+	CU_ASSERT(odp_buffer_size(raw_buffer) >= raw_buffer_size);
+	CU_ASSERT(odp_buffer_addr(raw_buffer) != NULL);
+	odp_buffer_print(raw_buffer);
+	/* The u64 forms must distinguish valid handles from invalid ones */
+	CU_ASSERT(odp_buffer_to_u64(raw_buffer) !=
+		  odp_buffer_to_u64(ODP_BUFFER_INVALID));
+	CU_ASSERT(odp_event_to_u64(ev) != odp_event_to_u64(ODP_EVENT_INVALID));
+}
+
+/* All buffer test cases; terminated by ODP_TEST_INFO_NULL. */
+odp_testinfo_t buffer_suite[] = {
+	ODP_TEST_INFO(buffer_test_pool_alloc),
+	ODP_TEST_INFO(buffer_test_pool_free),
+	ODP_TEST_INFO(buffer_test_pool_alloc_multi),
+	ODP_TEST_INFO(buffer_test_pool_free_multi),
+	ODP_TEST_INFO(buffer_test_management_basic),
+	ODP_TEST_INFO_NULL,
+};
+
+/* Suite registry: one "buffer tests" suite with per-suite init/term
+ * managing the shared raw pool and buffer. */
+odp_suiteinfo_t buffer_suites[] = {
+	{"buffer tests", buffer_suite_init, buffer_suite_term, buffer_suite},
+	ODP_SUITE_INFO_NULL,
+};
+
+/* Shared test driver: parse the common options, register the buffer
+ * suites, then run them.  Returns 0 on success, non-zero on failure. */
+int buffer_main(int argc, char *argv[])
+{
+	int ret;
+
+	/* parse common options: */
+	if (odp_cunit_parse_options(argc, argv))
+		return -1;
+
+	ret = odp_cunit_register(buffer_suites);
+
+	/* Capture the run result so test failures propagate to the exit
+	 * code (the original discarded odp_cunit_run()'s return value,
+	 * unlike atomic_main/barrier_main). */
+	if (ret == 0)
+		ret = odp_cunit_run();
+
+	return ret;
+}
diff --git a/test/common_plat/validation/api/buffer/buffer.h b/test/common_plat/validation/api/buffer/buffer.h
new file mode 100644
index 000000000..48331e3f1
--- /dev/null
+++ b/test/common_plat/validation/api/buffer/buffer.h
@@ -0,0 +1,32 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _ODP_TEST_BUFFER_H_
+#define _ODP_TEST_BUFFER_H_
+
+#include <odp_cunit_common.h>
+
+/* test functions (pool alloc/free, single and multi, plus accessors): */
+void buffer_test_pool_alloc(void);
+void buffer_test_pool_free(void);
+void buffer_test_pool_alloc_multi(void);
+void buffer_test_pool_free_multi(void);
+void buffer_test_management_basic(void);
+
+/* test arrays: */
+extern odp_testinfo_t buffer_suite[];
+
+/* test array init/term functions: */
+int buffer_suite_init(void);
+int buffer_suite_term(void);
+
+/* test registry: */
+extern odp_suiteinfo_t buffer_suites[];
+
+/* main test program: */
+int buffer_main(int argc, char *argv[]);
+
+#endif
diff --git a/test/common_plat/validation/api/buffer/buffer_main.c b/test/common_plat/validation/api/buffer/buffer_main.c
new file mode 100644
index 000000000..47168f8b9
--- /dev/null
+++ b/test/common_plat/validation/api/buffer/buffer_main.c
@@ -0,0 +1,11 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include "buffer.h"
+
+/* Stand-alone entry point: delegate to the shared buffer test driver. */
+int main(int argc, char *argv[])
+{
+	return buffer_main(argc, argv);
+}
diff --git a/test/common_plat/validation/api/classification/.gitignore b/test/common_plat/validation/api/classification/.gitignore
new file mode 100644
index 000000000..e2cdfefe1
--- /dev/null
+++ b/test/common_plat/validation/api/classification/.gitignore
@@ -0,0 +1 @@
+classification_main
diff --git a/test/common_plat/validation/api/classification/Makefile.am b/test/common_plat/validation/api/classification/Makefile.am
new file mode 100644
index 000000000..df382c51f
--- /dev/null
+++ b/test/common_plat/validation/api/classification/Makefile.am
@@ -0,0 +1,14 @@
+include ../Makefile.inc
+
+noinst_LTLIBRARIES = libtestclassification.la
+libtestclassification_la_SOURCES = odp_classification_basic.c \
+ odp_classification_tests.c \
+ odp_classification_test_pmr.c \
+ odp_classification_common.c \
+ classification.c
+
+test_PROGRAMS = classification_main$(EXEEXT)
+dist_classification_main_SOURCES = classification_main.c
+classification_main_LDADD = libtestclassification.la $(LIBCUNIT_COMMON) $(LIBODP)
+
+EXTRA_DIST = classification.h odp_classification_testsuites.h
diff --git a/test/common_plat/validation/api/classification/classification.c b/test/common_plat/validation/api/classification/classification.c
new file mode 100644
index 000000000..1032e7f1f
--- /dev/null
+++ b/test/common_plat/validation/api/classification/classification.c
@@ -0,0 +1,43 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+#include <odp_cunit_common.h>
+#include "odp_classification_testsuites.h"
+#include "classification.h"
+
+/* Suite registry: three classification suites.  The basic suite needs
+ * no init/term; the PMR and full suites manage their own pktio/pool
+ * resources via their init/term hooks. */
+odp_suiteinfo_t classification_suites[] = {
+	{ .pName = "classification basic",
+			.pTests = classification_suite_basic,
+	},
+	{ .pName = "classification pmr tests",
+			.pTests = classification_suite_pmr,
+			.pInitFunc = classification_suite_pmr_init,
+			.pCleanupFunc = classification_suite_pmr_term,
+	},
+	{ .pName = "classification tests",
+			.pTests = classification_suite,
+			.pInitFunc = classification_suite_init,
+			.pCleanupFunc = classification_suite_term,
+	},
+	ODP_SUITE_INFO_NULL,
+};
+
+/* Shared test driver: parse the common options, register the
+ * classification suites, then run them.  Returns 0 on success,
+ * non-zero on parse/registration/test failure. */
+int classification_main(int argc, char *argv[])
+{
+	int status;
+
+	/* Let the CUnit wrapper consume the common command line options */
+	if (odp_cunit_parse_options(argc, argv))
+		return -1;
+
+	status = odp_cunit_register(classification_suites);
+	if (status != 0)
+		return status;
+
+	return odp_cunit_run();
+}
diff --git a/test/common_plat/validation/api/classification/classification.h b/test/common_plat/validation/api/classification/classification.h
new file mode 100644
index 000000000..d73c82161
--- /dev/null
+++ b/test/common_plat/validation/api/classification/classification.h
@@ -0,0 +1,95 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _ODP_TEST_CLASSIFICATION_H_
+#define _ODP_TEST_CLASSIFICATION_H_
+
+#include <odp_cunit_common.h>
+
+/* Shared packet pool dimensions used by the classification suites */
+#define SHM_PKT_NUM_BUFS 32
+#define SHM_PKT_BUF_SIZE 1024
+
+/* Config values for Default CoS */
+#define TEST_DEFAULT 1
+#define CLS_DEFAULT 0
+#define CLS_DEFAULT_SADDR "10.0.0.1/32"
+#define CLS_DEFAULT_DADDR "10.0.0.100/32"
+#define CLS_DEFAULT_SPORT 1024
+#define CLS_DEFAULT_DPORT 2048
+#define CLS_DEFAULT_DMAC 0x010203040506
+#define CLS_DEFAULT_SMAC 0x060504030201
+
+/* Config values for Error CoS */
+#define TEST_ERROR 1
+#define CLS_ERROR 1
+
+/* Config values for PMR_CHAIN */
+#define TEST_PMR_CHAIN 1
+#define CLS_PMR_CHAIN_SRC 2
+#define CLS_PMR_CHAIN_DST 3
+#define CLS_PMR_CHAIN_SADDR "10.0.0.5/32"
+#define CLS_PMR_CHAIN_PORT 3000
+
+/* Config values for PMR */
+#define TEST_PMR 1
+#define CLS_PMR 4
+#define CLS_PMR_PORT 4000
+
+/* Config values for PMR SET */
+#define TEST_PMR_SET 1
+#define CLS_PMR_SET 5
+#define CLS_PMR_SET_SADDR "10.0.0.6/32"
+#define CLS_PMR_SET_PORT 5000
+
+/* Config values for CoS L2 Priority */
+#define TEST_L2_QOS 1
+#define CLS_L2_QOS_0 6
+#define CLS_L2_QOS_MAX 5
+
+/* Total number of CoS entries across all the groups above */
+#define CLS_ENTRIES (CLS_L2_QOS_0 + CLS_L2_QOS_MAX)
+
+/* Test Packet values */
+#define DATA_MAGIC 0x01020304
+#define TEST_SEQ_INVALID ((uint32_t)~0)
+
+/* test functions: */
+void classification_test_create_cos(void);
+void classification_test_destroy_cos(void);
+void classification_test_create_pmr_match(void);
+void classification_test_cos_set_queue(void);
+void classification_test_cos_set_pool(void);
+void classification_test_cos_set_drop(void);
+void classification_test_pmr_composite_create(void);
+void classification_test_pmr_composite_destroy(void);
+
+void classification_test_pktio_set_skip(void);
+void classification_test_pktio_set_headroom(void);
+void classification_test_pktio_configure(void);
+void classification_test_pktio_test(void);
+
+void classification_test_pmr_term_tcp_dport(void);
+void classification_test_pmr_term_tcp_sport(void);
+void classification_test_pmr_term_udp_dport(void);
+void classification_test_pmr_term_udp_sport(void);
+void classification_test_pmr_term_ipproto(void);
+void classification_test_pmr_term_dmac(void);
+void classification_test_pmr_term_packet_len(void);
+
+/* test arrays: */
+extern odp_testinfo_t classification_suite_basic[];
+extern odp_testinfo_t classification_suite[];
+
+/* test array init/term functions: */
+int classification_suite_init(void);
+int classification_suite_term(void);
+
+/* test registry: */
+extern odp_suiteinfo_t classification_suites[];
+
+/* main test program: */
+int classification_main(int argc, char *argv[]);
+
+#endif
diff --git a/test/common_plat/validation/api/classification/classification_main.c b/test/common_plat/validation/api/classification/classification_main.c
new file mode 100644
index 000000000..8902463c2
--- /dev/null
+++ b/test/common_plat/validation/api/classification/classification_main.c
@@ -0,0 +1,12 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "classification.h"
+
+/* Stand-alone entry point: delegate to the shared classification test
+ * driver. */
+int main(int argc, char *argv[])
+{
+	return classification_main(argc, argv);
+}
diff --git a/test/common_plat/validation/api/classification/odp_classification_basic.c b/test/common_plat/validation/api/classification/odp_classification_basic.c
new file mode 100644
index 000000000..372377d85
--- /dev/null
+++ b/test/common_plat/validation/api/classification/odp_classification_basic.c
@@ -0,0 +1,332 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_cunit_common.h>
+#include "odp_classification_testsuites.h"
+#include "classification.h"
+
+#define PMR_SET_NUM 5
+
+/* Verify that a class-of-service can be created with an explicit
+ * pool, queue and drop policy, and that the returned handle is valid
+ * (checked via its u64 printable form). Resources are torn down at
+ * the end; destroy return codes are intentionally not asserted here. */
+void classification_test_create_cos(void)
+{
+ odp_cos_t cos;
+ odp_cls_cos_param_t cls_param;
+ odp_pool_t pool;
+ odp_queue_t queue;
+ char cosname[ODP_COS_NAME_LEN];
+
+ pool = pool_create("cls_basic_pool");
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ queue = queue_create("cls_basic_queue", true);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ sprintf(cosname, "ClassOfService");
+ odp_cls_cos_param_init(&cls_param);
+ cls_param.pool = pool;
+ cls_param.queue = queue;
+ cls_param.drop_policy = ODP_COS_DROP_POOL;
+
+ cos = odp_cls_cos_create(cosname, &cls_param);
+ CU_ASSERT(odp_cos_to_u64(cos) != odp_cos_to_u64(ODP_COS_INVALID));
+ odp_cos_destroy(cos);
+ odp_pool_destroy(pool);
+ odp_queue_destroy(queue);
+}
+
+/* Verify odp_cos_destroy(): succeeds (returns 0) on a valid CoS and
+ * fails (returns negative) when handed ODP_COS_INVALID. */
+void classification_test_destroy_cos(void)
+{
+ odp_cos_t cos;
+ char name[ODP_COS_NAME_LEN];
+ odp_pool_t pool;
+ odp_queue_t queue;
+ odp_cls_cos_param_t cls_param;
+ int retval;
+
+ pool = pool_create("cls_basic_pool");
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ queue = queue_create("cls_basic_queue", true);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ sprintf(name, "ClassOfService");
+ odp_cls_cos_param_init(&cls_param);
+ cls_param.pool = pool;
+ cls_param.queue = queue;
+ cls_param.drop_policy = ODP_COS_DROP_POOL;
+
+ cos = odp_cls_cos_create(name, &cls_param);
+ CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
+ retval = odp_cos_destroy(cos);
+ CU_ASSERT(retval == 0);
+ /* Destroying an invalid handle must be rejected. */
+ retval = odp_cos_destroy(ODP_COS_INVALID);
+ CU_ASSERT(retval < 0);
+
+ odp_pool_destroy(pool);
+ odp_queue_destroy(queue);
+}
+
+/* Verify odp_cls_pmr_create()/odp_cls_pmr_destroy(): create a single
+ * match-rule PMR between a default CoS and a target CoS, destroy it,
+ * then check that destroying ODP_PMR_INVAL fails. No traffic is sent;
+ * this exercises rule lifetime only. */
+void classification_test_create_pmr_match(void)
+{
+ odp_pmr_t pmr;
+ uint16_t val;
+ uint16_t mask;
+ int retval;
+ odp_pmr_param_t pmr_param;
+ odp_cos_t default_cos;
+ odp_cos_t cos;
+ odp_queue_t default_queue;
+ odp_queue_t queue;
+ odp_pool_t default_pool;
+ odp_pool_t pool;
+ odp_pool_t pkt_pool;
+ odp_cls_cos_param_t cls_param;
+ odp_pktio_t pktio;
+
+ pkt_pool = pool_create("pkt_pool");
+ CU_ASSERT_FATAL(pkt_pool != ODP_POOL_INVALID);
+
+ pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+
+ configure_default_cos(pktio, &default_cos,
+ &default_queue, &default_pool);
+
+ /* NOTE(review): non-fatal assert, but queue/cos are used
+ * unconditionally below; should arguably be CU_ASSERT_FATAL
+ * like the pool check, for consistency. */
+ queue = queue_create("pmr_match", true);
+ CU_ASSERT(queue != ODP_QUEUE_INVALID);
+
+ pool = pool_create("pmr_match");
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ odp_cls_cos_param_init(&cls_param);
+ cls_param.pool = pool;
+ cls_param.queue = queue;
+ cls_param.drop_policy = ODP_COS_DROP_POOL;
+
+ cos = odp_cls_cos_create("pmr_match", &cls_param);
+ CU_ASSERT(cos != ODP_COS_INVALID);
+
+ /* Match term chosen from implementation capability so the
+ * create call is not rejected for an unsupported term. */
+ val = 1024;
+ mask = 0xffff;
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = find_first_supported_l3_pmr();
+ pmr_param.range_term = false;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
+ CU_ASSERT(pmr != ODP_PMR_INVAL);
+ CU_ASSERT(odp_pmr_to_u64(pmr) != odp_pmr_to_u64(ODP_PMR_INVAL));
+ /* destroy the created PMR */
+ retval = odp_cls_pmr_destroy(pmr);
+ CU_ASSERT(retval == 0);
+
+ /* destroy an INVALID PMR */
+ retval = odp_cls_pmr_destroy(ODP_PMR_INVAL);
+ CU_ASSERT(retval < 0);
+
+ /* NOTE(review): queue/pool are destroyed before the CoS that
+ * still references them; other tests in this file destroy the
+ * CoS first — verify the ordering is harmless here. */
+ odp_queue_destroy(queue);
+ odp_pool_destroy(pool);
+ odp_pool_destroy(pkt_pool);
+ odp_cos_destroy(cos);
+ odp_queue_destroy(default_queue);
+ odp_pool_destroy(default_pool);
+ odp_cos_destroy(default_cos);
+ odp_pktio_close(pktio);
+}
+
+/* Verify odp_cos_queue_set(): assign a new queue to an existing CoS
+ * and confirm odp_cos_queue() reads back the same handle. */
+void classification_test_cos_set_queue(void)
+{
+ int retval;
+ char cosname[ODP_COS_NAME_LEN];
+ odp_cls_cos_param_t cls_param;
+ odp_pool_t pool;
+ odp_queue_t queue;
+ odp_queue_t queue_cos;
+ odp_cos_t cos_queue;
+ odp_queue_t recvqueue;
+
+ pool = pool_create("cls_basic_pool");
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ queue = queue_create("cls_basic_queue", true);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ sprintf(cosname, "CoSQueue");
+ odp_cls_cos_param_init(&cls_param);
+ cls_param.pool = pool;
+ cls_param.queue = queue;
+ cls_param.drop_policy = ODP_COS_DROP_POOL;
+ cos_queue = odp_cls_cos_create(cosname, &cls_param);
+ CU_ASSERT_FATAL(cos_queue != ODP_COS_INVALID);
+
+ /* Second queue to re-target the CoS at. */
+ queue_cos = queue_create("QueueCoS", true);
+ CU_ASSERT_FATAL(queue_cos != ODP_QUEUE_INVALID);
+
+ retval = odp_cos_queue_set(cos_queue, queue_cos);
+ CU_ASSERT(retval == 0);
+ recvqueue = odp_cos_queue(cos_queue);
+ CU_ASSERT(recvqueue == queue_cos);
+
+ odp_cos_destroy(cos_queue);
+ odp_queue_destroy(queue_cos);
+ odp_queue_destroy(queue);
+ odp_pool_destroy(pool);
+}
+
+/* Verify odp_cls_cos_pool_set(): assign a new pool to an existing CoS
+ * and confirm odp_cls_cos_pool() reads back the same handle. */
+void classification_test_cos_set_pool(void)
+{
+ int retval;
+ char cosname[ODP_COS_NAME_LEN];
+ odp_cls_cos_param_t cls_param;
+ odp_pool_t pool;
+ odp_queue_t queue;
+ odp_pool_t cos_pool;
+ odp_cos_t cos;
+ odp_pool_t recvpool;
+
+ pool = pool_create("cls_basic_pool");
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ queue = queue_create("cls_basic_queue", true);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ sprintf(cosname, "CoSQueue");
+ odp_cls_cos_param_init(&cls_param);
+ cls_param.pool = pool;
+ cls_param.queue = queue;
+ cls_param.drop_policy = ODP_COS_DROP_POOL;
+ cos = odp_cls_cos_create(cosname, &cls_param);
+ CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
+
+ /* Second pool to re-target the CoS at. */
+ cos_pool = pool_create("PoolCoS");
+ CU_ASSERT_FATAL(cos_pool != ODP_POOL_INVALID);
+
+ retval = odp_cls_cos_pool_set(cos, cos_pool);
+ CU_ASSERT(retval == 0);
+ recvpool = odp_cls_cos_pool(cos);
+ CU_ASSERT(recvpool == cos_pool);
+
+ odp_cos_destroy(cos);
+ odp_queue_destroy(queue);
+ odp_pool_destroy(pool);
+ odp_pool_destroy(cos_pool);
+}
+
+/* Verify odp_cos_drop_set()/odp_cos_drop() round-trips for both
+ * supported drop policies (DROP_POOL and DROP_NEVER). */
+void classification_test_cos_set_drop(void)
+{
+ int retval;
+ char cosname[ODP_COS_NAME_LEN];
+ odp_cos_t cos_drop;
+ odp_queue_t queue;
+ odp_pool_t pool;
+ odp_cls_cos_param_t cls_param;
+
+ pool = pool_create("cls_basic_pool");
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ queue = queue_create("cls_basic_queue", true);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ sprintf(cosname, "CoSDrop");
+ odp_cls_cos_param_init(&cls_param);
+ cls_param.pool = pool;
+ cls_param.queue = queue;
+ cls_param.drop_policy = ODP_COS_DROP_POOL;
+ cos_drop = odp_cls_cos_create(cosname, &cls_param);
+ CU_ASSERT_FATAL(cos_drop != ODP_COS_INVALID);
+
+ retval = odp_cos_drop_set(cos_drop, ODP_COS_DROP_POOL);
+ CU_ASSERT(retval == 0);
+ CU_ASSERT(ODP_COS_DROP_POOL == odp_cos_drop(cos_drop));
+
+ retval = odp_cos_drop_set(cos_drop, ODP_COS_DROP_NEVER);
+ CU_ASSERT(retval == 0);
+ CU_ASSERT(ODP_COS_DROP_NEVER == odp_cos_drop(cos_drop));
+ odp_cos_destroy(cos_drop);
+ odp_pool_destroy(pool);
+ odp_queue_destroy(queue);
+}
+
+/* Verify that a composite PMR (PMR_SET_NUM identical terms passed in
+ * one odp_cls_pmr_create() call) can be created and destroyed. Rule
+ * lifetime only; no packets are classified. */
+void classification_test_pmr_composite_create(void)
+{
+ odp_pmr_t pmr_composite;
+ int retval;
+ odp_pmr_param_t pmr_terms[PMR_SET_NUM];
+ odp_cos_t default_cos;
+ odp_cos_t cos;
+ odp_queue_t default_queue;
+ odp_queue_t queue;
+ odp_pool_t default_pool;
+ odp_pool_t pool;
+ odp_pool_t pkt_pool;
+ odp_cls_cos_param_t cls_param;
+ odp_pktio_t pktio;
+ uint16_t val = 1024;
+ uint16_t mask = 0xffff;
+ int i;
+
+ pkt_pool = pool_create("pkt_pool");
+ CU_ASSERT_FATAL(pkt_pool != ODP_POOL_INVALID);
+
+ pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+
+ configure_default_cos(pktio, &default_cos,
+ &default_queue, &default_pool);
+
+ /* NOTE(review): non-fatal assert but queue is used below;
+ * consider CU_ASSERT_FATAL for consistency with pool. */
+ queue = queue_create("pmr_match", true);
+ CU_ASSERT(queue != ODP_QUEUE_INVALID);
+
+ pool = pool_create("pmr_match");
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ odp_cls_cos_param_init(&cls_param);
+ cls_param.pool = pool;
+ cls_param.queue = queue;
+ cls_param.drop_policy = ODP_COS_DROP_POOL;
+
+ cos = odp_cls_cos_create("pmr_match", &cls_param);
+ CU_ASSERT(cos != ODP_COS_INVALID);
+
+ /* NOTE(review): term is hard-coded to ODP_PMR_TCP_DPORT here,
+ * unlike classification_test_create_pmr_match() which queries
+ * find_first_supported_l3_pmr(); may fail on implementations
+ * without TCP_DPORT support. All terms share val/mask storage,
+ * which is fine since creation copies the match data. */
+ for (i = 0; i < PMR_SET_NUM; i++) {
+ odp_cls_pmr_param_init(&pmr_terms[i]);
+ pmr_terms[i].term = ODP_PMR_TCP_DPORT;
+ pmr_terms[i].match.value = &val;
+ pmr_terms[i].range_term = false;
+ pmr_terms[i].match.mask = &mask;
+ pmr_terms[i].val_sz = sizeof(val);
+ }
+
+ pmr_composite = odp_cls_pmr_create(pmr_terms, PMR_SET_NUM,
+ default_cos, cos);
+ CU_ASSERT(odp_pmr_to_u64(pmr_composite) !=
+ odp_pmr_to_u64(ODP_PMR_INVAL));
+
+ retval = odp_cls_pmr_destroy(pmr_composite);
+ CU_ASSERT(retval == 0);
+
+ odp_queue_destroy(queue);
+ odp_pool_destroy(pool);
+ odp_pool_destroy(pkt_pool);
+ odp_cos_destroy(cos);
+ odp_queue_destroy(default_queue);
+ odp_pool_destroy(default_pool);
+ odp_cos_destroy(default_cos);
+ odp_pktio_close(pktio);
+}
+
+/* Registration table for the basic classification suite; order is the
+ * execution order. Must stay NULL-terminated (ODP_TEST_INFO_NULL). */
+odp_testinfo_t classification_suite_basic[] = {
+ ODP_TEST_INFO(classification_test_create_cos),
+ ODP_TEST_INFO(classification_test_destroy_cos),
+ ODP_TEST_INFO(classification_test_create_pmr_match),
+ ODP_TEST_INFO(classification_test_cos_set_queue),
+ ODP_TEST_INFO(classification_test_cos_set_drop),
+ ODP_TEST_INFO(classification_test_cos_set_pool),
+ ODP_TEST_INFO(classification_test_pmr_composite_create),
+ ODP_TEST_INFO_NULL,
+};
diff --git a/test/common_plat/validation/api/classification/odp_classification_common.c b/test/common_plat/validation/api/classification/odp_classification_common.c
new file mode 100644
index 000000000..7a42ac745
--- /dev/null
+++ b/test/common_plat/validation/api/classification/odp_classification_common.c
@@ -0,0 +1,388 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "odp_classification_testsuites.h"
+#include "classification.h"
+#include <odp_cunit_common.h>
+#include <odp/helper/eth.h>
+#include <odp/helper/ip.h>
+#include <odp/helper/udp.h>
+#include <odp/helper/tcp.h>
+
+typedef struct cls_test_packet {
+ odp_u32be_t magic;
+ odp_u32be_t seq;
+} cls_test_packet_t;
+
+/* Open the "loop" pktio on the given pool and configure one input
+ * queue (plain or scheduled per q_type; scheduled queues use ATOMIC
+ * sync) plus default output queues.
+ * Returns the pktio handle, or ODP_PKTIO_INVALID on any failure.
+ * NOTE(review): on open failure the caller's pool is destroyed here
+ * (callers rely on that), but on pktin/pktout config failure the
+ * opened pktio is NOT closed — handle leak on those error paths. */
+odp_pktio_t create_pktio(odp_queue_type_t q_type, odp_pool_t pool)
+{
+ odp_pktio_t pktio;
+ odp_pktio_param_t pktio_param;
+ odp_pktin_queue_param_t pktin_param;
+ int ret;
+
+ if (pool == ODP_POOL_INVALID)
+ return ODP_PKTIO_INVALID;
+
+ odp_pktio_param_init(&pktio_param);
+ if (q_type == ODP_QUEUE_TYPE_PLAIN)
+ pktio_param.in_mode = ODP_PKTIN_MODE_QUEUE;
+ else
+ pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
+
+ pktio = odp_pktio_open("loop", pool, &pktio_param);
+ if (pktio == ODP_PKTIO_INVALID) {
+ ret = odp_pool_destroy(pool);
+ if (ret)
+ fprintf(stderr, "unable to destroy pool.\n");
+ return ODP_PKTIO_INVALID;
+ }
+
+ odp_pktin_queue_param_init(&pktin_param);
+ pktin_param.queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+
+ if (odp_pktin_queue_config(pktio, &pktin_param)) {
+ fprintf(stderr, "pktin queue config failed.\n");
+ return ODP_PKTIO_INVALID;
+ }
+
+ if (odp_pktout_queue_config(pktio, NULL)) {
+ fprintf(stderr, "pktout queue config failed.\n");
+ return ODP_PKTIO_INVALID;
+ }
+
+ return pktio;
+}
+
+/* Stop the pktio and drain any events still queued in the scheduler,
+ * freeing each one, so the next test starts from an empty scheduler.
+ * Returns 0 on success, -1 if the stop call itself fails. */
+int stop_pktio(odp_pktio_t pktio)
+{
+ odp_event_t ev;
+
+ if (odp_pktio_stop(pktio)) {
+ fprintf(stderr, "pktio stop failed.\n");
+ return -1;
+ }
+
+ /* Drain: keep scheduling with no wait until nothing is left. */
+ while (1) {
+ ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+
+ if (ev != ODP_EVENT_INVALID)
+ odp_event_free(ev);
+ else
+ break;
+ }
+
+ return 0;
+}
+
+/* Stamp a magic+sequence-number payload (cls_test_packet_t) right
+ * after the L4 header of an already-built test packet, so the
+ * receiver can match packets to senders via cls_pkt_get_seq().
+ * Returns the odp_packet_copy_from_mem() status (0 on success).
+ * NOTE(review): unlike cls_pkt_get_seq(), ip is dereferenced without
+ * a NULL check — assumes the caller built a valid IPv4 packet.
+ * NOTE(review): the static counter makes this non-thread-safe;
+ * callers appear to be single-threaded test code. */
+int cls_pkt_set_seq(odp_packet_t pkt)
+{
+ static uint32_t seq;
+ cls_test_packet_t data;
+ uint32_t offset;
+ odph_ipv4hdr_t *ip;
+ odph_tcphdr_t *tcp;
+ int status;
+
+ data.magic = DATA_MAGIC;
+ data.seq = ++seq;
+
+ ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, NULL);
+ offset = odp_packet_l4_offset(pkt);
+ CU_ASSERT_FATAL(offset != ODP_PACKET_OFFSET_INVALID);
+
+ /* Payload starts after UDP header, or after the TCP header
+ * whose length comes from the header-length field (hl * 4). */
+ if (ip->proto == ODPH_IPPROTO_UDP)
+ status = odp_packet_copy_from_mem(pkt, offset + ODPH_UDPHDR_LEN,
+ sizeof(data), &data);
+ else {
+ tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ status = odp_packet_copy_from_mem(pkt, offset + tcp->hl * 4,
+ sizeof(data), &data);
+ }
+
+ return status;
+}
+
+/* Read back the sequence number stamped by cls_pkt_set_seq().
+ * Returns the sequence number, or TEST_SEQ_INVALID when the packet
+ * has no valid L3/L4 offsets or the magic value does not match. */
+uint32_t cls_pkt_get_seq(odp_packet_t pkt)
+{
+ uint32_t offset;
+ cls_test_packet_t data;
+ odph_ipv4hdr_t *ip;
+ odph_tcphdr_t *tcp;
+
+ ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, NULL);
+ offset = odp_packet_l4_offset(pkt);
+
+ if (offset == ODP_PACKET_OFFSET_INVALID || ip == NULL)
+ return TEST_SEQ_INVALID;
+
+ /* Mirror of the placement logic in cls_pkt_set_seq(). */
+ if (ip->proto == ODPH_IPPROTO_UDP)
+ odp_packet_copy_to_mem(pkt, offset + ODPH_UDPHDR_LEN,
+ sizeof(data), &data);
+ else {
+ tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ odp_packet_copy_to_mem(pkt, offset + tcp->hl * 4,
+ sizeof(data), &data);
+ }
+
+ if (data.magic == DATA_MAGIC)
+ return data.seq;
+
+ return TEST_SEQ_INVALID;
+}
+
+/* Parse dotted-quad "a.b.c.d" or CIDR "a.b.c.d/n" into a host-order
+ * address (*addr, first octet in the most significant byte) and,
+ * when mask is non-NULL, the prefix netmask. Without "/n" the mask
+ * defaults to /32. Returns 0 on success, -1 on malformed input.
+ * NOTE(review): %d accepts negative numbers; negative octets and a
+ * negative qualifier are not rejected (only >255 / >32 are), and a
+ * negative qualifier makes the shift below undefined behavior. */
+int parse_ipv4_string(const char *ipaddress, uint32_t *addr, uint32_t *mask)
+{
+ int b[4];
+ int qualifier = 32;
+ int converted;
+
+ if (strchr(ipaddress, '/')) {
+ converted = sscanf(ipaddress, "%d.%d.%d.%d/%d",
+ &b[3], &b[2], &b[1], &b[0],
+ &qualifier);
+ if (5 != converted)
+ return -1;
+ } else {
+ converted = sscanf(ipaddress, "%d.%d.%d.%d",
+ &b[3], &b[2], &b[1], &b[0]);
+ if (4 != converted)
+ return -1;
+ }
+
+ if ((b[0] > 255) || (b[1] > 255) || (b[2] > 255) || (b[3] > 255))
+ return -1;
+ if (!qualifier || (qualifier > 32))
+ return -1;
+
+ /* b[3] holds the first octet, hence it lands in the MSB. */
+ *addr = b[0] | b[1] << 8 | b[2] << 16 | b[3] << 24;
+ if (mask)
+ *mask = ~(0xFFFFFFFF & ((1ULL << (32 - qualifier)) - 1));
+
+ return 0;
+}
+
+/* Transmit one packet on the interface's first output queue; asserts
+ * (via CUnit) that the queue exists and the send accepted the packet. */
+void enqueue_pktio_interface(odp_packet_t pkt, odp_pktio_t pktio)
+{
+ odp_pktout_queue_t pktout;
+
+ CU_ASSERT_FATAL(odp_pktout_queue(pktio, &pktout, 1) == 1);
+ CU_ASSERT(odp_pktout_send(pktout, &pkt, 1) == 1);
+}
+
+/* Block in the scheduler for up to ns nanoseconds and convert the
+ * received event to a packet. *queue reports which queue delivered
+ * it. On timeout odp_schedule() yields ODP_EVENT_INVALID, which maps
+ * to ODP_PACKET_INVALID for the caller to check. */
+odp_packet_t receive_packet(odp_queue_t *queue, uint64_t ns)
+{
+ odp_event_t ev;
+
+ ev = odp_schedule(queue, ns);
+ return odp_packet_from_event(ev);
+}
+
+/* Create a named queue: a highest-priority, parallel-sync scheduled
+ * queue when sched is true, otherwise a default (plain) queue.
+ * Returns the queue handle or ODP_QUEUE_INVALID on failure. */
+odp_queue_t queue_create(const char *queuename, bool sched)
+{
+ odp_queue_t queue;
+ odp_queue_param_t qparam;
+
+ if (sched) {
+ odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_SCHED;
+ qparam.sched.prio = ODP_SCHED_PRIO_HIGHEST;
+ qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
+ qparam.sched.group = ODP_SCHED_GROUP_ALL;
+
+ queue = odp_queue_create(queuename, &qparam);
+ } else {
+ queue = odp_queue_create(queuename, NULL);
+ }
+
+ return queue;
+}
+
+/* Create a packet pool with the suite-wide buffer size/count
+ * (SHM_PKT_BUF_SIZE / SHM_PKT_NUM_BUFS). Returns the pool handle or
+ * ODP_POOL_INVALID on failure; callers decide how hard to assert. */
+odp_pool_t pool_create(const char *poolname)
+{
+ odp_pool_param_t param;
+
+ odp_pool_param_init(&param);
+ param.pkt.seg_len = SHM_PKT_BUF_SIZE;
+ param.pkt.len = SHM_PKT_BUF_SIZE;
+ param.pkt.num = SHM_PKT_NUM_BUFS;
+ param.type = ODP_POOL_PACKET;
+
+ return odp_pool_create(poolname, &param);
+}
+
+/* Convenience wrapper: build a test packet with no extra payload
+ * beyond the sequence header (len = 0). See create_packet_len(). */
+odp_packet_t create_packet(odp_pool_t pool, bool vlan,
+ odp_atomic_u32_t *seq, bool flag_udp)
+{
+ return create_packet_len(pool, vlan, seq, flag_udp, 0);
+}
+
+/* Build a complete Ethernet/[VLAN]/IPv4/{UDP|TCP} test packet with
+ * `len` extra payload bytes plus the cls_test_packet_t sequence
+ * header stamped after L4 (via cls_pkt_set_seq()). The IP id field
+ * carries a value from the shared atomic counter `seq`. L2/L3/L4
+ * offsets are set so the classifier helpers can parse the packet.
+ * Returns the packet; CU_ASSERT_FATAL aborts the test on alloc
+ * failure, so the return value is never ODP_PACKET_INVALID. */
+odp_packet_t create_packet_len(odp_pool_t pool, bool vlan,
+ odp_atomic_u32_t *seq, bool flag_udp,
+ uint16_t len)
+{
+ uint32_t seqno;
+ odph_ethhdr_t *ethhdr;
+ odph_udphdr_t *udp;
+ odph_tcphdr_t *tcp;
+ odph_ipv4hdr_t *ip;
+ uint16_t payload_len;
+ uint64_t src_mac = CLS_DEFAULT_SMAC;
+ uint64_t dst_mac = CLS_DEFAULT_DMAC;
+ uint64_t dst_mac_be;
+ uint32_t addr = 0;
+ uint32_t mask;
+ int offset;
+ odp_packet_t pkt;
+ int packet_len = 0;
+
+ /* 48 bit ethernet address needs to be left shifted for proper
+ value after changing to be*/
+ dst_mac_be = odp_cpu_to_be_64(dst_mac);
+ if (dst_mac != dst_mac_be)
+ dst_mac_be = dst_mac_be >> (64 - 8 * ODPH_ETHADDR_LEN);
+
+ /* Total frame length: L2 + (optional VLAN) + IPv4 + L4 + payload. */
+ payload_len = sizeof(cls_test_packet_t) + len;
+ packet_len += ODPH_ETHHDR_LEN;
+ packet_len += ODPH_IPV4HDR_LEN;
+ if (flag_udp)
+ packet_len += ODPH_UDPHDR_LEN;
+ else
+ packet_len += ODPH_TCPHDR_LEN;
+ packet_len += payload_len;
+
+ if (vlan)
+ packet_len += ODPH_VLANHDR_LEN;
+
+ pkt = odp_packet_alloc(pool, packet_len);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ /* Ethernet Header */
+ offset = 0;
+ odp_packet_l2_offset_set(pkt, offset);
+ ethhdr = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ memcpy(ethhdr->src.addr, &src_mac, ODPH_ETHADDR_LEN);
+ memcpy(ethhdr->dst.addr, &dst_mac_be, ODPH_ETHADDR_LEN);
+ offset += sizeof(odph_ethhdr_t);
+ if (vlan) {
+ /* Default vlan header */
+ odph_vlanhdr_t *vlan_hdr;
+
+ ethhdr->type = odp_cpu_to_be_16(ODPH_ETHTYPE_VLAN);
+ vlan_hdr = (odph_vlanhdr_t *)(ethhdr + 1);
+ vlan_hdr->tci = odp_cpu_to_be_16(0);
+ vlan_hdr->type = odp_cpu_to_be_16(ODPH_ETHTYPE_IPV4);
+ offset += sizeof(odph_vlanhdr_t);
+ } else {
+ ethhdr->type = odp_cpu_to_be_16(ODPH_ETHTYPE_IPV4);
+ }
+
+ odp_packet_l3_offset_set(pkt, offset);
+
+ /* ipv4 */
+ ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, NULL);
+
+ parse_ipv4_string(CLS_DEFAULT_DADDR, &addr, &mask);
+ ip->dst_addr = odp_cpu_to_be_32(addr);
+
+ parse_ipv4_string(CLS_DEFAULT_SADDR, &addr, &mask);
+ ip->src_addr = odp_cpu_to_be_32(addr);
+ ip->ver_ihl = ODPH_IPV4 << 4 | ODPH_IPV4HDR_IHL_MIN;
+ if (flag_udp)
+ ip->tot_len = odp_cpu_to_be_16(ODPH_UDPHDR_LEN + payload_len +
+ ODPH_IPV4HDR_LEN);
+ else
+ ip->tot_len = odp_cpu_to_be_16(ODPH_TCPHDR_LEN + payload_len +
+ ODPH_IPV4HDR_LEN);
+
+ ip->ttl = 128;
+ if (flag_udp)
+ ip->proto = ODPH_IPPROTO_UDP;
+ else
+ ip->proto = ODPH_IPPROTO_TCP;
+
+ /* Unique IP id per packet, and a valid header checksum. */
+ seqno = odp_atomic_fetch_inc_u32(seq);
+ ip->id = odp_cpu_to_be_16(seqno);
+ ip->chksum = 0;
+ ip->chksum = odph_ipv4_csum_update(pkt);
+ offset += ODPH_IPV4HDR_LEN;
+
+ /* udp */
+ if (flag_udp) {
+ odp_packet_l4_offset_set(pkt, offset);
+ udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ udp->src_port = odp_cpu_to_be_16(CLS_DEFAULT_SPORT);
+ udp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT);
+ udp->length = odp_cpu_to_be_16(payload_len + ODPH_UDPHDR_LEN);
+ udp->chksum = 0;
+ } else {
+ odp_packet_l4_offset_set(pkt, offset);
+ tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ tcp->src_port = odp_cpu_to_be_16(CLS_DEFAULT_SPORT);
+ tcp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT);
+ tcp->hl = ODPH_TCPHDR_LEN / 4;
+ /* TODO: checksum field has to be updated */
+ tcp->cksm = 0;
+ }
+
+ /* set pkt sequence number */
+ cls_pkt_set_seq(pkt);
+
+ return pkt;
+}
+
+/* Query classifier capabilities and return the first supported
+ * TCP/UDP port match term, in the fixed preference order below;
+ * registers a CUnit failure (and still returns TCP_DPORT) when none
+ * is supported.
+ * NOTE(review): the name says "l3" but every term here is an L4
+ * (TCP/UDP port) term; a rename would avoid confusion.
+ * NOTE(review): odp_cls_capability()'s return value is ignored —
+ * on failure `capability` would be read uninitialized. */
+odp_cls_pmr_term_t find_first_supported_l3_pmr(void)
+{
+ odp_cls_pmr_term_t term = ODP_PMR_TCP_DPORT;
+ odp_cls_capability_t capability;
+
+ odp_cls_capability(&capability);
+
+ /* choose supported PMR */
+ if (capability.supported_terms.bit.udp_sport)
+ term = ODP_PMR_UDP_SPORT;
+ else if (capability.supported_terms.bit.udp_dport)
+ term = ODP_PMR_UDP_DPORT;
+ else if (capability.supported_terms.bit.tcp_sport)
+ term = ODP_PMR_TCP_SPORT;
+ else if (capability.supported_terms.bit.tcp_dport)
+ term = ODP_PMR_TCP_DPORT;
+ else
+ CU_FAIL("Implementations doesn't support any TCP/UDP PMR");
+
+ return term;
+}
+
+/* Write `port` (converted to big-endian) into whichever TCP/UDP port
+ * field matches the term chosen by find_first_supported_l3_pmr(), so
+ * the packet will hit a PMR built on that same term. Returns 0 on
+ * success, -1 (plus a CUnit failure) for an unexpected term.
+ * NOTE(review): assumes pkt already has a valid L4 header of the
+ * matching protocol; both pointers alias the same L4 bytes and only
+ * the one for the chosen term is written.
+ * NOTE(review): the failure message says "L3 term" although the
+ * terms are L4 port matches — see find_first_supported_l3_pmr(). */
+int set_first_supported_pmr_port(odp_packet_t pkt, uint16_t port)
+{
+ odph_udphdr_t *udp;
+ odph_tcphdr_t *tcp;
+ odp_cls_pmr_term_t term;
+
+ udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ port = odp_cpu_to_be_16(port);
+ term = find_first_supported_l3_pmr();
+ switch (term) {
+ case ODP_PMR_UDP_SPORT:
+ udp->src_port = port;
+ break;
+ case ODP_PMR_UDP_DPORT:
+ udp->dst_port = port;
+ break;
+ case ODP_PMR_TCP_DPORT:
+ tcp->dst_port = port;
+ break;
+ case ODP_PMR_TCP_SPORT:
+ tcp->src_port = port;
+ break;
+ default:
+ CU_FAIL("Unsupported L3 term");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/test/common_plat/validation/api/classification/odp_classification_test_pmr.c b/test/common_plat/validation/api/classification/odp_classification_test_pmr.c
new file mode 100644
index 000000000..c8bbf50b5
--- /dev/null
+++ b/test/common_plat/validation/api/classification/odp_classification_test_pmr.c
@@ -0,0 +1,1162 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "odp_classification_testsuites.h"
+#include "classification.h"
+#include <odp_cunit_common.h>
+#include <odp/helper/eth.h>
+#include <odp/helper/ip.h>
+#include <odp/helper/udp.h>
+#include <odp/helper/tcp.h>
+
+static odp_pool_t pkt_pool;
+
+/** sequence number of IP packets */
+odp_atomic_u32_t seq;
+
+/* Suite init: create the shared packet pool used by every PMR test
+ * and reset the shared sequence counter. Returns 0 on success, -1 if
+ * pool creation fails (the suite is then skipped by the runner). */
+int classification_suite_pmr_init(void)
+{
+ pkt_pool = pool_create("classification_pmr_pool");
+ if (ODP_POOL_INVALID == pkt_pool) {
+ fprintf(stderr, "Packet pool creation failed.\n");
+ return -1;
+ }
+
+ odp_atomic_init_u32(&seq, 0);
+ return 0;
+}
+
+/* Start the (loop) interface; returns 0 on success, -1 on failure
+ * with a diagnostic on stderr. */
+static int start_pktio(odp_pktio_t pktio)
+{
+ if (odp_pktio_start(pktio)) {
+ fprintf(stderr, "unable to start loop\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Create a default CoS (with its own pool and scheduled queue) and
+ * attach it to the pktio, so packets matching no PMR land there.
+ * Outputs the created handles through cos/queue/pool; the caller
+ * owns and must destroy all three. Asserts are non-fatal — callers
+ * proceed and rely on later asserts to catch a broken setup. */
+void configure_default_cos(odp_pktio_t pktio, odp_cos_t *cos,
+ odp_queue_t *queue, odp_pool_t *pool)
+{
+ odp_cls_cos_param_t cls_param;
+ odp_pool_t default_pool;
+ odp_cos_t default_cos;
+ odp_queue_t default_queue;
+ int retval;
+ char cosname[ODP_COS_NAME_LEN];
+
+ default_pool = pool_create("DefaultPool");
+ CU_ASSERT(default_pool != ODP_POOL_INVALID);
+
+ default_queue = queue_create("DefaultQueue", true);
+ CU_ASSERT(default_queue != ODP_QUEUE_INVALID);
+
+ sprintf(cosname, "DefaultCos");
+ odp_cls_cos_param_init(&cls_param);
+ cls_param.pool = default_pool;
+ cls_param.queue = default_queue;
+ cls_param.drop_policy = ODP_COS_DROP_POOL;
+
+ default_cos = odp_cls_cos_create(cosname, &cls_param);
+ CU_ASSERT(default_cos != ODP_COS_INVALID);
+
+ retval = odp_pktio_default_cos_set(pktio, default_cos);
+ CU_ASSERT(retval == 0);
+
+ *cos = default_cos;
+ *queue = default_queue;
+ *pool = default_pool;
+}
+
+/* Suite teardown: destroy the shared packet pool created in
+ * classification_suite_pmr_init(). Returns 0 on success, -1 when the
+ * destroy fails (e.g. packets still outstanding). */
+int classification_suite_pmr_term(void)
+{
+ int retcode = 0;
+
+ if (0 != odp_pool_destroy(pkt_pool)) {
+ fprintf(stderr, "pkt_pool destroy failed.\n");
+ retcode = -1;
+ }
+
+ return retcode;
+}
+
+/* End-to-end check of the ODP_PMR_TCP_DPORT term over the loop
+ * interface: a TCP packet whose destination port matches the PMR
+ * must arrive on the CoS queue/pool, while a packet with a
+ * non-matching port must fall through to the default CoS. Packets
+ * are matched to their senders via the stamped sequence number. */
+void classification_test_pmr_term_tcp_dport(void)
+{
+ odp_packet_t pkt;
+ odph_tcphdr_t *tcp;
+ uint32_t seqno;
+ uint16_t val;
+ uint16_t mask;
+ int retval;
+ odp_pktio_t pktio;
+ odp_queue_t queue;
+ odp_queue_t retqueue;
+ odp_queue_t default_queue;
+ odp_cos_t default_cos;
+ odp_pool_t default_pool;
+ odp_pool_t recvpool;
+ odp_pmr_t pmr;
+ odp_cos_t cos;
+ char cosname[ODP_COS_NAME_LEN];
+ odp_cls_cos_param_t cls_param;
+ odp_pool_t pool;
+ odp_pool_t pool_recv;
+ odp_pmr_param_t pmr_param;
+ odph_ethhdr_t *eth;
+
+ val = CLS_DEFAULT_DPORT;
+ mask = 0xffff;
+ seqno = 0;
+
+ pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+ retval = start_pktio(pktio);
+ CU_ASSERT(retval == 0);
+
+ configure_default_cos(pktio, &default_cos,
+ &default_queue, &default_pool);
+
+ queue = queue_create("tcp_dport1", true);
+ CU_ASSERT(queue != ODP_QUEUE_INVALID);
+
+ pool = pool_create("tcp_dport1");
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ sprintf(cosname, "tcp_dport");
+ odp_cls_cos_param_init(&cls_param);
+ cls_param.pool = pool;
+ cls_param.queue = queue;
+ cls_param.drop_policy = ODP_COS_DROP_POOL;
+
+ cos = odp_cls_cos_create(cosname, &cls_param);
+ CU_ASSERT(cos != ODP_COS_INVALID);
+
+ /* PMR: exact match (mask 0xffff) on TCP destination port. */
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_TCP_DPORT;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
+ CU_ASSERT(pmr != ODP_PMR_INVAL);
+
+ /* Matching packet: must land on the CoS queue and pool. */
+ pkt = create_packet(pkt_pool, false, &seq, false);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ seqno = cls_pkt_get_seq(pkt);
+ CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+
+ tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ tcp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT);
+
+ enqueue_pktio_interface(pkt, pktio);
+
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ pool_recv = odp_packet_pool(pkt);
+ CU_ASSERT(pool == pool_recv);
+ CU_ASSERT(retqueue == queue);
+ CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
+
+ odp_packet_free(pkt);
+
+ /* Other packets are delivered to default queue */
+ pkt = create_packet(pkt_pool, false, &seq, false);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ seqno = cls_pkt_get_seq(pkt);
+ CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+
+ tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ tcp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT + 1);
+
+ enqueue_pktio_interface(pkt, pktio);
+
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
+ CU_ASSERT(retqueue == default_queue);
+ recvpool = odp_packet_pool(pkt);
+ CU_ASSERT(recvpool == default_pool);
+
+ odp_packet_free(pkt);
+ odp_cos_destroy(cos);
+ odp_cos_destroy(default_cos);
+ odp_cls_pmr_destroy(pmr);
+ stop_pktio(pktio);
+ odp_queue_destroy(queue);
+ odp_queue_destroy(default_queue);
+ odp_pool_destroy(pool);
+ odp_pool_destroy(default_pool);
+ odp_pktio_close(pktio);
+}
+
+/* End-to-end check of the ODP_PMR_TCP_SPORT term: same structure as
+ * the tcp_dport test, but matching on the TCP source port — a
+ * matching packet goes to the CoS queue/pool, a non-matching one to
+ * the default CoS. */
+void classification_test_pmr_term_tcp_sport(void)
+{
+ odp_packet_t pkt;
+ odph_tcphdr_t *tcp;
+ uint32_t seqno;
+ uint16_t val;
+ uint16_t mask;
+ int retval;
+ odp_pktio_t pktio;
+ odp_queue_t queue;
+ odp_queue_t retqueue;
+ odp_queue_t default_queue;
+ odp_cos_t default_cos;
+ odp_pool_t default_pool;
+ odp_pool_t pool;
+ odp_pool_t recvpool;
+ odp_pmr_t pmr;
+ odp_cos_t cos;
+ char cosname[ODP_COS_NAME_LEN];
+ odp_cls_cos_param_t cls_param;
+ odp_pmr_param_t pmr_param;
+ odph_ethhdr_t *eth;
+
+ val = CLS_DEFAULT_SPORT;
+ mask = 0xffff;
+ seqno = 0;
+
+ pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+ retval = start_pktio(pktio);
+ CU_ASSERT(retval == 0);
+
+ configure_default_cos(pktio, &default_cos,
+ &default_queue, &default_pool);
+
+ queue = queue_create("tcp_sport", true);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ pool = pool_create("tcp_sport");
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ sprintf(cosname, "tcp_sport");
+ odp_cls_cos_param_init(&cls_param);
+ cls_param.pool = pool;
+ cls_param.queue = queue;
+ cls_param.drop_policy = ODP_COS_DROP_POOL;
+
+ cos = odp_cls_cos_create(cosname, &cls_param);
+ CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
+
+ /* PMR: exact match on TCP source port. */
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_TCP_SPORT;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
+ CU_ASSERT(pmr != ODP_PMR_INVAL);
+
+ /* Matching packet: expect the CoS queue and pool. */
+ pkt = create_packet(pkt_pool, false, &seq, false);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ seqno = cls_pkt_get_seq(pkt);
+ CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+
+ tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ tcp->src_port = odp_cpu_to_be_16(CLS_DEFAULT_SPORT);
+
+ enqueue_pktio_interface(pkt, pktio);
+
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
+ CU_ASSERT(retqueue == queue);
+ recvpool = odp_packet_pool(pkt);
+ CU_ASSERT(recvpool == pool);
+ odp_packet_free(pkt);
+
+ /* Non-matching source port: expect the default CoS. */
+ pkt = create_packet(pkt_pool, false, &seq, false);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ seqno = cls_pkt_get_seq(pkt);
+ CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+
+ tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ tcp->src_port = odp_cpu_to_be_16(CLS_DEFAULT_SPORT + 1);
+
+ enqueue_pktio_interface(pkt, pktio);
+
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
+ CU_ASSERT(retqueue == default_queue);
+ recvpool = odp_packet_pool(pkt);
+ CU_ASSERT(recvpool == default_pool);
+
+ odp_packet_free(pkt);
+ odp_cos_destroy(cos);
+ odp_cos_destroy(default_cos);
+ odp_cls_pmr_destroy(pmr);
+ stop_pktio(pktio);
+ odp_pool_destroy(default_pool);
+ odp_pool_destroy(pool);
+ odp_queue_destroy(queue);
+ odp_queue_destroy(default_queue);
+ odp_pktio_close(pktio);
+}
+
+/* End-to-end check of the ODP_PMR_UDP_DPORT term: UDP packets this
+ * time (create_packet(..., flag_udp=true)); a matching destination
+ * port goes to the CoS queue/pool, a non-matching one to the default
+ * CoS. */
+void classification_test_pmr_term_udp_dport(void)
+{
+ odp_packet_t pkt;
+ odph_udphdr_t *udp;
+ uint32_t seqno;
+ uint16_t val;
+ uint16_t mask;
+ int retval;
+ odp_pktio_t pktio;
+ odp_pool_t pool;
+ odp_pool_t recvpool;
+ odp_queue_t queue;
+ odp_queue_t retqueue;
+ odp_queue_t default_queue;
+ odp_cos_t default_cos;
+ odp_pool_t default_pool;
+ odp_pmr_t pmr;
+ odp_cos_t cos;
+ char cosname[ODP_COS_NAME_LEN];
+ odp_pmr_param_t pmr_param;
+ odp_cls_cos_param_t cls_param;
+ odph_ethhdr_t *eth;
+
+ val = CLS_DEFAULT_DPORT;
+ mask = 0xffff;
+ seqno = 0;
+
+ pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+ retval = start_pktio(pktio);
+ CU_ASSERT(retval == 0);
+
+ configure_default_cos(pktio, &default_cos,
+ &default_queue, &default_pool);
+
+ queue = queue_create("udp_dport", true);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ pool = pool_create("udp_dport");
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ sprintf(cosname, "udp_dport");
+ odp_cls_cos_param_init(&cls_param);
+ cls_param.pool = pool;
+ cls_param.queue = queue;
+ cls_param.drop_policy = ODP_COS_DROP_POOL;
+
+ cos = odp_cls_cos_create(cosname, &cls_param);
+ CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
+
+ /* PMR: exact match on UDP destination port. */
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_UDP_DPORT;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
+ CU_ASSERT(pmr != ODP_PMR_INVAL);
+
+ /* Matching packet: expect the CoS queue and pool. */
+ pkt = create_packet(pkt_pool, false, &seq, true);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ seqno = cls_pkt_get_seq(pkt);
+ CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+
+ udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ udp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT);
+
+ enqueue_pktio_interface(pkt, pktio);
+
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
+ CU_ASSERT(retqueue == queue);
+ recvpool = odp_packet_pool(pkt);
+ CU_ASSERT(recvpool == pool);
+ odp_packet_free(pkt);
+
+ /* Other packets received in default queue */
+ pkt = create_packet(pkt_pool, false, &seq, true);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ seqno = cls_pkt_get_seq(pkt);
+ CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+
+ udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ udp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT + 1);
+
+ enqueue_pktio_interface(pkt, pktio);
+
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
+ CU_ASSERT(retqueue == default_queue);
+ recvpool = odp_packet_pool(pkt);
+ CU_ASSERT(recvpool == default_pool);
+
+ odp_packet_free(pkt);
+ odp_cos_destroy(cos);
+ odp_cos_destroy(default_cos);
+ odp_cls_pmr_destroy(pmr);
+ stop_pktio(pktio);
+ odp_queue_destroy(queue);
+ odp_queue_destroy(default_queue);
+ odp_pool_destroy(default_pool);
+ odp_pool_destroy(pool);
+ odp_pktio_close(pktio);
+}
+
+/* ODP_PMR_UDP_SPORT: a packet whose UDP source port equals CLS_DEFAULT_SPORT
+ * must be classified to the PMR CoS queue/pool; any other source port must
+ * fall through to the default CoS.
+ */
+void classification_test_pmr_term_udp_sport(void)
+{
+	odp_pktio_t pktio;
+	odp_queue_t queue;
+	odp_queue_t default_queue;
+	odp_queue_t recv_queue;
+	odp_pool_t pool;
+	odp_pool_t default_pool;
+	odp_pool_t recv_pool;
+	odp_cos_t cos;
+	odp_cos_t default_cos;
+	odp_pmr_t pmr;
+	odp_packet_t pkt;
+	odph_ethhdr_t *eth;
+	odph_udphdr_t *udp;
+	odp_pmr_param_t pmr_param;
+	odp_cls_cos_param_t cls_param;
+	char cosname[ODP_COS_NAME_LEN];
+	uint32_t seq_no = 0;
+	uint16_t sport = CLS_DEFAULT_SPORT;
+	uint16_t sport_mask = 0xffff;
+	int ret;
+
+	pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool);
+	CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+	ret = start_pktio(pktio);
+	CU_ASSERT(ret == 0);
+
+	configure_default_cos(pktio, &default_cos,
+			      &default_queue, &default_pool);
+
+	queue = queue_create("udp_sport", true);
+	CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+	pool = pool_create("udp_sport");
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+	sprintf(cosname, "udp_sport");
+	odp_cls_cos_param_init(&cls_param);
+	cls_param.pool = pool;
+	cls_param.queue = queue;
+	cls_param.drop_policy = ODP_COS_DROP_POOL;
+	cos = odp_cls_cos_create(cosname, &cls_param);
+	CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
+
+	/* Exact-match rule on the UDP source port */
+	odp_cls_pmr_param_init(&pmr_param);
+	pmr_param.term = ODP_PMR_UDP_SPORT;
+	pmr_param.match.value = &sport;
+	pmr_param.match.mask = &sport_mask;
+	pmr_param.val_sz = sizeof(sport);
+	pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
+	CU_ASSERT(pmr != ODP_PMR_INVAL);
+
+	/* Matching packet: expect the PMR CoS queue and pool */
+	pkt = create_packet(pkt_pool, false, &seq, true);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	seq_no = cls_pkt_get_seq(pkt);
+	CU_ASSERT(seq_no != TEST_SEQ_INVALID);
+	eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+	odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+	odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+	udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+	udp->src_port = odp_cpu_to_be_16(CLS_DEFAULT_SPORT);
+
+	enqueue_pktio_interface(pkt, pktio);
+	pkt = receive_packet(&recv_queue, ODP_TIME_SEC_IN_NS);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	CU_ASSERT(seq_no == cls_pkt_get_seq(pkt));
+	CU_ASSERT(recv_queue == queue);
+	recv_pool = odp_packet_pool(pkt);
+	CU_ASSERT(recv_pool == pool);
+	odp_packet_free(pkt);
+
+	/* Non-matching source port: expect the default CoS queue and pool */
+	pkt = create_packet(pkt_pool, false, &seq, true);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	seq_no = cls_pkt_get_seq(pkt);
+	CU_ASSERT(seq_no != TEST_SEQ_INVALID);
+	eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+	odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+	odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+	udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+	udp->src_port = odp_cpu_to_be_16(CLS_DEFAULT_SPORT + 1);
+
+	enqueue_pktio_interface(pkt, pktio);
+	pkt = receive_packet(&recv_queue, ODP_TIME_SEC_IN_NS);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	CU_ASSERT(seq_no == cls_pkt_get_seq(pkt));
+	CU_ASSERT(recv_queue == default_queue);
+	recv_pool = odp_packet_pool(pkt);
+	CU_ASSERT(recv_pool == default_pool);
+	odp_packet_free(pkt);
+
+	odp_cos_destroy(cos);
+	odp_cos_destroy(default_cos);
+	odp_cls_pmr_destroy(pmr);
+	stop_pktio(pktio);
+	odp_pool_destroy(default_pool);
+	odp_pool_destroy(pool);
+	odp_queue_destroy(queue);
+	odp_queue_destroy(default_queue);
+	odp_pktio_close(pktio);
+}
+
+/* ODP_PMR_IPPROTO: UDP packets (IP protocol == ODPH_IPPROTO_UDP) must be
+ * classified to the PMR CoS; non-UDP packets must be delivered to the
+ * default CoS.
+ */
+void classification_test_pmr_term_ipproto(void)
+{
+	odp_pktio_t pktio;
+	odp_queue_t queue;
+	odp_queue_t default_queue;
+	odp_queue_t recv_queue;
+	odp_pool_t pool;
+	odp_pool_t default_pool;
+	odp_pool_t recv_pool;
+	odp_cos_t cos;
+	odp_cos_t default_cos;
+	odp_pmr_t pmr;
+	odp_packet_t pkt;
+	odph_ethhdr_t *eth;
+	odp_cls_cos_param_t cls_param;
+	odp_pmr_param_t pmr_param;
+	char cosname[ODP_COS_NAME_LEN];
+	uint32_t seq_no = 0;
+	uint8_t proto = ODPH_IPPROTO_UDP;
+	uint8_t proto_mask = 0xff;
+	int ret;
+
+	pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool);
+	CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+	ret = start_pktio(pktio);
+	CU_ASSERT(ret == 0);
+
+	configure_default_cos(pktio, &default_cos,
+			      &default_queue, &default_pool);
+
+	queue = queue_create("ipproto", true);
+	CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+	pool = pool_create("ipproto");
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+	sprintf(cosname, "ipproto");
+	odp_cls_cos_param_init(&cls_param);
+	cls_param.pool = pool;
+	cls_param.queue = queue;
+	cls_param.drop_policy = ODP_COS_DROP_POOL;
+	cos = odp_cls_cos_create(cosname, &cls_param);
+	CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
+
+	/* Exact-match rule on the IP protocol field */
+	odp_cls_pmr_param_init(&pmr_param);
+	pmr_param.term = ODP_PMR_IPPROTO;
+	pmr_param.match.value = &proto;
+	pmr_param.match.mask = &proto_mask;
+	pmr_param.val_sz = sizeof(proto);
+	pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
+	CU_ASSERT(pmr != ODP_PMR_INVAL);
+
+	/* UDP packet: expect the PMR CoS queue and pool */
+	pkt = create_packet(pkt_pool, false, &seq, true);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	seq_no = cls_pkt_get_seq(pkt);
+	CU_ASSERT(seq_no != TEST_SEQ_INVALID);
+	eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+	odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+	odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+
+	enqueue_pktio_interface(pkt, pktio);
+	pkt = receive_packet(&recv_queue, ODP_TIME_SEC_IN_NS);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	CU_ASSERT(seq_no == cls_pkt_get_seq(pkt));
+	recv_pool = odp_packet_pool(pkt);
+	CU_ASSERT(recv_pool == pool);
+	CU_ASSERT(recv_queue == queue);
+	odp_packet_free(pkt);
+
+	/* Non-UDP packet: expect the default CoS queue and pool */
+	pkt = create_packet(pkt_pool, false, &seq, false);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	seq_no = cls_pkt_get_seq(pkt);
+	CU_ASSERT(seq_no != TEST_SEQ_INVALID);
+	eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+	odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+	odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+
+	enqueue_pktio_interface(pkt, pktio);
+	pkt = receive_packet(&recv_queue, ODP_TIME_SEC_IN_NS);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	CU_ASSERT(seq_no == cls_pkt_get_seq(pkt));
+	recv_pool = odp_packet_pool(pkt);
+	CU_ASSERT(recv_pool == default_pool);
+	CU_ASSERT(recv_queue == default_queue);
+
+	odp_cos_destroy(cos);
+	odp_cos_destroy(default_cos);
+	odp_cls_pmr_destroy(pmr);
+	odp_packet_free(pkt);
+	stop_pktio(pktio);
+	odp_pool_destroy(default_pool);
+	odp_pool_destroy(pool);
+	odp_queue_destroy(queue);
+	odp_queue_destroy(default_queue);
+	odp_pktio_close(pktio);
+}
+
+/* ODP_PMR_DMAC: a packet whose destination MAC equals CLS_DEFAULT_DMAC
+ * (set by create_packet()) is classified to the PMR CoS; a packet with a
+ * zeroed destination MAC must fall back to the default CoS.
+ *
+ * Fix: validate the second packet handle (CU_ASSERT_FATAL) before
+ * dereferencing its L2 header; the original dereferenced first.
+ */
+void classification_test_pmr_term_dmac(void)
+{
+	odp_packet_t pkt;
+	uint32_t seqno;
+	uint64_t val;
+	uint64_t mask;
+	int retval;
+	odp_pktio_t pktio;
+	odp_queue_t queue;
+	odp_queue_t retqueue;
+	odp_queue_t default_queue;
+	odp_cos_t default_cos;
+	odp_pool_t default_pool;
+	odp_pool_t pool;
+	odp_pool_t recvpool;
+	odp_pmr_t pmr;
+	odp_cos_t cos;
+	char cosname[ODP_COS_NAME_LEN];
+	odp_cls_cos_param_t cls_param;
+	odp_pmr_param_t pmr_param;
+	odph_ethhdr_t *eth;
+
+	val = CLS_DEFAULT_DMAC; /* 48 bit Ethernet Mac address */
+	mask = 0xffffffffffff;
+	seqno = 0;
+
+	pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool);
+	CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+	retval = start_pktio(pktio);
+	CU_ASSERT(retval == 0);
+
+	configure_default_cos(pktio, &default_cos,
+			      &default_queue, &default_pool);
+
+	queue = queue_create("dmac", true);
+	CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+	pool = pool_create("dmac");
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+	sprintf(cosname, "dmac");
+	odp_cls_cos_param_init(&cls_param);
+	cls_param.pool = pool;
+	cls_param.queue = queue;
+	cls_param.drop_policy = ODP_COS_DROP_POOL;
+
+	cos = odp_cls_cos_create(cosname, &cls_param);
+	CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
+
+	/* Match only the 6 MAC bytes; val_sz is the address length, not
+	 * sizeof(val).  NOTE(review): matching a MAC through a uint64_t is
+	 * byte-order sensitive — presumably the implementation compares the
+	 * low 6 bytes; confirm against the platform's PMR implementation. */
+	odp_cls_pmr_param_init(&pmr_param);
+	pmr_param.term = ODP_PMR_DMAC;
+	pmr_param.match.value = &val;
+	pmr_param.match.mask = &mask;
+	pmr_param.val_sz = ODPH_ETHADDR_LEN;
+
+	pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
+	CU_ASSERT(pmr != ODP_PMR_INVAL);
+
+	/* Default destination MAC: expect the PMR CoS queue and pool */
+	pkt = create_packet(pkt_pool, false, &seq, true);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	seqno = cls_pkt_get_seq(pkt);
+	CU_ASSERT(seqno != TEST_SEQ_INVALID);
+
+	enqueue_pktio_interface(pkt, pktio);
+
+	pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
+	recvpool = odp_packet_pool(pkt);
+	CU_ASSERT(recvpool == pool);
+	CU_ASSERT(retqueue == queue);
+	odp_packet_free(pkt);
+
+	/* Other packets delivered to default queue */
+	pkt = create_packet(pkt_pool, false, &seq, false);
+	/* Validate the handle BEFORE touching the packet headers */
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+	memset(eth->dst.addr, 0, ODPH_ETHADDR_LEN);
+	seqno = cls_pkt_get_seq(pkt);
+	CU_ASSERT(seqno != TEST_SEQ_INVALID);
+
+	enqueue_pktio_interface(pkt, pktio);
+
+	pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
+	recvpool = odp_packet_pool(pkt);
+	CU_ASSERT(recvpool == default_pool);
+	CU_ASSERT(retqueue == default_queue);
+
+	odp_cos_destroy(cos);
+	odp_cos_destroy(default_cos);
+	odp_cls_pmr_destroy(pmr);
+	odp_packet_free(pkt);
+	stop_pktio(pktio);
+	odp_pool_destroy(default_pool);
+	odp_pool_destroy(pool);
+	odp_queue_destroy(queue);
+	odp_queue_destroy(default_queue);
+	odp_pktio_close(pktio);
+}
+
+/* ODP_PMR_LEN: packets whose total length matches val/mask are classified
+ * to the PMR CoS; other lengths go to the default CoS.
+ */
+void classification_test_pmr_term_packet_len(void)
+{
+	odp_packet_t pkt;
+	uint32_t seqno;
+	uint16_t val;
+	uint16_t mask;
+	int retval;
+	odp_pktio_t pktio;
+	odp_queue_t queue;
+	odp_queue_t retqueue;
+	odp_queue_t default_queue;
+	odp_cos_t default_cos;
+	odp_pool_t default_pool;
+	odp_pool_t pool;
+	odp_pool_t recvpool;
+	odp_pmr_t pmr;
+	odp_cos_t cos;
+	char cosname[ODP_COS_NAME_LEN];
+	odp_cls_cos_param_t cls_param;
+	odp_pmr_param_t pmr_param;
+	odph_ethhdr_t *eth;
+
+	val = 1024;
+	/* Mask 0xff00 keeps only the high byte, so any length with the same
+	 * high byte as 1024 (0x0400) matches: 1024..1279 (0x0400..0x04ff).
+	 * (The original comment claiming 1000-1099 was incorrect.) */
+	mask = 0xff00;
+	seqno = 0;
+
+	pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool);
+	CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+	retval = start_pktio(pktio);
+	CU_ASSERT(retval == 0);
+
+	configure_default_cos(pktio, &default_cos,
+			      &default_queue, &default_pool);
+
+	queue = queue_create("packet_len", true);
+	CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+	pool = pool_create("packet_len");
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+	sprintf(cosname, "packet_len");
+	odp_cls_cos_param_init(&cls_param);
+	cls_param.pool = pool;
+	cls_param.queue = queue;
+	cls_param.drop_policy = ODP_COS_DROP_POOL;
+
+	cos = odp_cls_cos_create(cosname, &cls_param);
+	CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
+
+	odp_cls_pmr_param_init(&pmr_param);
+	pmr_param.term = ODP_PMR_LEN;
+	pmr_param.match.value = &val;
+	pmr_param.match.mask = &mask;
+	pmr_param.val_sz = sizeof(val);
+
+	pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
+	CU_ASSERT(pmr != ODP_PMR_INVAL);
+
+	/* create packet of payload length 1024 */
+	pkt = create_packet_len(pkt_pool, false, &seq, true, 1024);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	seqno = cls_pkt_get_seq(pkt);
+	CU_ASSERT(seqno != TEST_SEQ_INVALID);
+	eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+	odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+	odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+
+	enqueue_pktio_interface(pkt, pktio);
+
+	/* Matching length: expect the PMR CoS queue and pool */
+	pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
+	recvpool = odp_packet_pool(pkt);
+	CU_ASSERT(recvpool == pool);
+	CU_ASSERT(retqueue == queue);
+	odp_packet_free(pkt);
+
+	/* Other packets delivered to default queue */
+	pkt = create_packet(pkt_pool, false, &seq, false);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	seqno = cls_pkt_get_seq(pkt);
+	CU_ASSERT(seqno != TEST_SEQ_INVALID);
+	eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+	odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+	odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+
+	enqueue_pktio_interface(pkt, pktio);
+
+	pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
+	recvpool = odp_packet_pool(pkt);
+	CU_ASSERT(recvpool == default_pool);
+	CU_ASSERT(retqueue == default_queue);
+
+	odp_cos_destroy(cos);
+	odp_cos_destroy(default_cos);
+	odp_cls_pmr_destroy(pmr);
+	odp_packet_free(pkt);
+	stop_pktio(pktio);
+	odp_pool_destroy(default_pool);
+	odp_pool_destroy(pool);
+	odp_queue_destroy(queue);
+	odp_queue_destroy(default_queue);
+	odp_pktio_close(pktio);
+}
+
+/* odp_cls_cos_pool_set(): after swapping the CoS pool, a packet matching
+ * the PMR must be delivered from the NEW pool (the original pool stays
+ * unused).
+ */
+static void classification_test_pmr_pool_set(void)
+{
+	odp_pktio_t pktio;
+	odp_queue_t queue;
+	odp_queue_t default_queue;
+	odp_queue_t recv_queue;
+	odp_pool_t pool;
+	odp_pool_t new_pool;
+	odp_pool_t default_pool;
+	odp_pool_t recv_pool;
+	odp_cos_t cos;
+	odp_cos_t default_cos;
+	odp_pmr_t pmr;
+	odp_packet_t pkt;
+	odph_ethhdr_t *eth;
+	odp_cls_cos_param_t cls_param;
+	odp_pmr_param_t pmr_param;
+	char cosname[ODP_COS_NAME_LEN];
+	uint32_t seq_no = 0;
+	uint8_t proto = ODPH_IPPROTO_UDP;
+	uint8_t proto_mask = 0xff;
+	int ret;
+
+	pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool);
+	CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+	ret = start_pktio(pktio);
+	CU_ASSERT(ret == 0);
+
+	configure_default_cos(pktio, &default_cos,
+			      &default_queue, &default_pool);
+
+	queue = queue_create("ipproto1", true);
+	CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+	pool = pool_create("ipproto1");
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+	sprintf(cosname, "ipproto1");
+	odp_cls_cos_param_init(&cls_param);
+	cls_param.pool = pool;
+	cls_param.queue = queue;
+	cls_param.drop_policy = ODP_COS_DROP_POOL;
+	cos = odp_cls_cos_create(cosname, &cls_param);
+	CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
+
+	new_pool = pool_create("ipproto2");
+	CU_ASSERT_FATAL(new_pool != ODP_POOL_INVALID);
+
+	/* new pool is set on CoS */
+	ret = odp_cls_cos_pool_set(cos, new_pool);
+	CU_ASSERT(ret == 0);
+
+	odp_cls_pmr_param_init(&pmr_param);
+	pmr_param.term = ODP_PMR_IPPROTO;
+	pmr_param.match.value = &proto;
+	pmr_param.match.mask = &proto_mask;
+	pmr_param.val_sz = sizeof(proto);
+	pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
+	CU_ASSERT(pmr != ODP_PMR_INVAL);
+
+	/* Matching packet must arrive from the newly-assigned pool */
+	pkt = create_packet(pkt_pool, false, &seq, true);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	seq_no = cls_pkt_get_seq(pkt);
+	CU_ASSERT(seq_no != TEST_SEQ_INVALID);
+	eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+	odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+	odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+
+	enqueue_pktio_interface(pkt, pktio);
+	pkt = receive_packet(&recv_queue, ODP_TIME_SEC_IN_NS);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	CU_ASSERT(seq_no == cls_pkt_get_seq(pkt));
+	recv_pool = odp_packet_pool(pkt);
+	CU_ASSERT(recv_pool == new_pool);
+	CU_ASSERT(recv_queue == queue);
+	odp_packet_free(pkt);
+
+	odp_cos_destroy(cos);
+	odp_cos_destroy(default_cos);
+	odp_cls_pmr_destroy(pmr);
+	stop_pktio(pktio);
+	odp_pool_destroy(default_pool);
+	odp_pool_destroy(pool);
+	odp_pool_destroy(new_pool);
+	odp_queue_destroy(queue);
+	odp_queue_destroy(default_queue);
+	odp_pktio_close(pktio);
+}
+
+/* odp_cos_queue_set(): after swapping the CoS queue, a packet matching the
+ * PMR must be delivered on the NEW queue while the pool stays unchanged.
+ */
+static void classification_test_pmr_queue_set(void)
+{
+	odp_pktio_t pktio;
+	odp_queue_t queue;
+	odp_queue_t new_queue;
+	odp_queue_t default_queue;
+	odp_queue_t recv_queue;
+	odp_pool_t pool;
+	odp_pool_t default_pool;
+	odp_pool_t recv_pool;
+	odp_cos_t cos;
+	odp_cos_t default_cos;
+	odp_pmr_t pmr;
+	odp_packet_t pkt;
+	odph_ethhdr_t *eth;
+	odp_cls_cos_param_t cls_param;
+	odp_pmr_param_t pmr_param;
+	char cosname[ODP_COS_NAME_LEN];
+	uint32_t seq_no = 0;
+	uint8_t proto = ODPH_IPPROTO_UDP;
+	uint8_t proto_mask = 0xff;
+	int ret;
+
+	pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool);
+	CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+	ret = start_pktio(pktio);
+	CU_ASSERT(ret == 0);
+
+	configure_default_cos(pktio, &default_cos,
+			      &default_queue, &default_pool);
+
+	queue = queue_create("ipproto1", true);
+	CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+	pool = pool_create("ipproto1");
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+	sprintf(cosname, "ipproto1");
+	odp_cls_cos_param_init(&cls_param);
+	cls_param.pool = pool;
+	cls_param.queue = queue;
+	cls_param.drop_policy = ODP_COS_DROP_POOL;
+	cos = odp_cls_cos_create(cosname, &cls_param);
+	CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
+
+	new_queue = queue_create("ipproto2", true);
+	CU_ASSERT_FATAL(new_queue != ODP_QUEUE_INVALID);
+
+	/* new queue is set on CoS */
+	ret = odp_cos_queue_set(cos, new_queue);
+	CU_ASSERT(ret == 0);
+
+	odp_cls_pmr_param_init(&pmr_param);
+	pmr_param.term = ODP_PMR_IPPROTO;
+	pmr_param.match.value = &proto;
+	pmr_param.match.mask = &proto_mask;
+	pmr_param.val_sz = sizeof(proto);
+	pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
+	CU_ASSERT(pmr != ODP_PMR_INVAL);
+
+	/* Matching packet must arrive on the newly-assigned queue */
+	pkt = create_packet(pkt_pool, false, &seq, true);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	seq_no = cls_pkt_get_seq(pkt);
+	CU_ASSERT(seq_no != TEST_SEQ_INVALID);
+	eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+	odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+	odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+
+	enqueue_pktio_interface(pkt, pktio);
+	pkt = receive_packet(&recv_queue, ODP_TIME_SEC_IN_NS);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	CU_ASSERT(seq_no == cls_pkt_get_seq(pkt));
+	recv_pool = odp_packet_pool(pkt);
+	CU_ASSERT(recv_pool == pool);
+	CU_ASSERT(recv_queue == new_queue);
+	odp_packet_free(pkt);
+
+	odp_cos_destroy(cos);
+	odp_cos_destroy(default_cos);
+	odp_cls_pmr_destroy(pmr);
+	stop_pktio(pktio);
+	odp_pool_destroy(default_pool);
+	odp_pool_destroy(pool);
+	odp_queue_destroy(new_queue);
+	odp_queue_destroy(queue);
+	odp_queue_destroy(default_queue);
+	odp_pktio_close(pktio);
+}
+
+/* ODP_PMR_DIP_ADDR: packets whose IPv4 destination address matches
+ * 10.0.0.99/32 are classified to the PMR CoS; others fall back to the
+ * default CoS.
+ *
+ * Fixes vs. original: assert the pktio handle after create_pktio() (all
+ * sibling tests do), and validate each packet handle BEFORE dereferencing
+ * its headers.
+ */
+static void classification_test_pmr_term_daddr(void)
+{
+	odp_packet_t pkt;
+	uint32_t seqno;
+	int retval;
+	odp_pktio_t pktio;
+	odp_queue_t queue;
+	odp_queue_t retqueue;
+	odp_queue_t default_queue;
+	odp_pool_t pool;
+	odp_pool_t default_pool;
+	odp_pmr_t pmr;
+	odp_cos_t cos;
+	odp_cos_t default_cos;
+	uint32_t addr;
+	uint32_t mask;
+	char cosname[ODP_QUEUE_NAME_LEN];
+	odp_pmr_param_t pmr_param;
+	odp_cls_cos_param_t cls_param;
+	odph_ipv4hdr_t *ip;
+	const char *dst_addr = "10.0.0.99/32";
+	odph_ethhdr_t *eth;
+
+	pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool);
+	CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+	retval = start_pktio(pktio);
+	CU_ASSERT(retval == 0);
+
+	configure_default_cos(pktio, &default_cos,
+			      &default_queue, &default_pool);
+
+	queue = queue_create("daddr", true);
+	CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+	pool = pool_create("daddr");
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+	sprintf(cosname, "daddr");
+	odp_cls_cos_param_init(&cls_param);
+	cls_param.pool = pool;
+	cls_param.queue = queue;
+	cls_param.drop_policy = ODP_COS_DROP_POOL;
+
+	cos = odp_cls_cos_create(cosname, &cls_param);
+	CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
+
+	parse_ipv4_string(dst_addr, &addr, &mask);
+	odp_cls_pmr_param_init(&pmr_param);
+	pmr_param.term = ODP_PMR_DIP_ADDR;
+	pmr_param.match.value = &addr;
+	pmr_param.match.mask = &mask;
+	pmr_param.val_sz = sizeof(addr);
+
+	pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
+	CU_ASSERT_FATAL(pmr != ODP_PMR_INVAL);
+
+	/* packet with dst ip address matching PMR rule to be
+	   received in the CoS queue*/
+	pkt = create_packet(pkt_pool, false, &seq, false);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+	odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+	odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+	ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, NULL);
+	ip->dst_addr = odp_cpu_to_be_32(addr);
+	/* re-checksum after rewriting the destination address */
+	ip->chksum = odph_ipv4_csum_update(pkt);
+
+	seqno = cls_pkt_get_seq(pkt);
+	CU_ASSERT(seqno != TEST_SEQ_INVALID);
+
+	enqueue_pktio_interface(pkt, pktio);
+
+	pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
+	CU_ASSERT(retqueue == queue);
+	odp_packet_free(pkt);
+
+	/* Other packets delivered to default queue */
+	pkt = create_packet(pkt_pool, false, &seq, false);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	seqno = cls_pkt_get_seq(pkt);
+	CU_ASSERT(seqno != TEST_SEQ_INVALID);
+	eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+	odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+	odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+
+	enqueue_pktio_interface(pkt, pktio);
+
+	pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
+	CU_ASSERT(retqueue == default_queue);
+
+	odp_cos_destroy(cos);
+	odp_cos_destroy(default_cos);
+	odp_cls_pmr_destroy(pmr);
+	odp_packet_free(pkt);
+	stop_pktio(pktio);
+	odp_pool_destroy(default_pool);
+	odp_pool_destroy(pool);
+	odp_queue_destroy(queue);
+	odp_queue_destroy(default_queue);
+	odp_pktio_close(pktio);
+}
+
+/* CUnit registration table for the PMR test suite; terminated by
+ * ODP_TEST_INFO_NULL as the suite runner requires.
+ */
+odp_testinfo_t classification_suite_pmr[] = {
+	ODP_TEST_INFO(classification_test_pmr_term_tcp_dport),
+	ODP_TEST_INFO(classification_test_pmr_term_tcp_sport),
+	ODP_TEST_INFO(classification_test_pmr_term_udp_dport),
+	ODP_TEST_INFO(classification_test_pmr_term_udp_sport),
+	ODP_TEST_INFO(classification_test_pmr_term_ipproto),
+	ODP_TEST_INFO(classification_test_pmr_term_dmac),
+	ODP_TEST_INFO(classification_test_pmr_pool_set),
+	ODP_TEST_INFO(classification_test_pmr_queue_set),
+	ODP_TEST_INFO(classification_test_pmr_term_daddr),
+	ODP_TEST_INFO(classification_test_pmr_term_packet_len),
+	ODP_TEST_INFO_NULL,
+};
diff --git a/test/common_plat/validation/api/classification/odp_classification_tests.c b/test/common_plat/validation/api/classification/odp_classification_tests.c
new file mode 100644
index 000000000..ed45518be
--- /dev/null
+++ b/test/common_plat/validation/api/classification/odp_classification_tests.c
@@ -0,0 +1,699 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "odp_classification_testsuites.h"
+#include "classification.h"
+#include <odp_cunit_common.h>
+#include <odp/helper/eth.h>
+#include <odp/helper/ip.h>
+#include <odp/helper/udp.h>
+#include <odp/helper/tcp.h>
+
+/* Per-test handle tables (one slot per CLS_ENTRIES entry); all slots are
+ * reset to the matching *_INVALID value in classification_suite_init() and
+ * destroyed unconditionally in classification_suite_term().
+ */
+static odp_cos_t cos_list[CLS_ENTRIES];
+static odp_pmr_t pmr_list[CLS_ENTRIES];
+static odp_queue_t queue_list[CLS_ENTRIES];
+static odp_pool_t pool_list[CLS_ENTRIES];
+
+/* Pool backing the loop pktio and all test packet allocations */
+static odp_pool_t pool_default;
+/* Loopback pktio interface shared by every test in this suite */
+static odp_pktio_t pktio_loop;
+
+/** sequence number of IP packets */
+odp_atomic_u32_t seq;
+
+/* Suite setup: create the default pool, open and start the "loop" pktio in
+ * scheduled input mode, and reset all per-test handle tables.
+ *
+ * Returns 0 on success, -1 on failure.  Fix vs. original: every failure
+ * path now releases the resources acquired so far (the original leaked the
+ * pool and the open pktio when queue config or start failed).
+ */
+int classification_suite_init(void)
+{
+	int i;
+	int ret;
+	odp_pktio_param_t pktio_param;
+	odp_pktin_queue_param_t pktin_param;
+
+	pool_default = pool_create("classification_pool");
+	if (ODP_POOL_INVALID == pool_default) {
+		fprintf(stderr, "Packet pool creation failed.\n");
+		return -1;
+	}
+
+	odp_pktio_param_init(&pktio_param);
+	pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
+
+	pktio_loop = odp_pktio_open("loop", pool_default, &pktio_param);
+	if (pktio_loop == ODP_PKTIO_INVALID)
+		goto destroy_pool;
+
+	odp_pktin_queue_param_init(&pktin_param);
+	pktin_param.queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+
+	if (odp_pktin_queue_config(pktio_loop, &pktin_param)) {
+		fprintf(stderr, "pktin queue config failed.\n");
+		goto close_pktio;
+	}
+
+	if (odp_pktout_queue_config(pktio_loop, NULL)) {
+		fprintf(stderr, "pktout queue config failed.\n");
+		goto close_pktio;
+	}
+
+	/* Mark all handle slots unused so the teardown can destroy them
+	 * unconditionally */
+	for (i = 0; i < CLS_ENTRIES; i++) {
+		cos_list[i] = ODP_COS_INVALID;
+		pmr_list[i] = ODP_PMR_INVAL;
+		queue_list[i] = ODP_QUEUE_INVALID;
+		pool_list[i] = ODP_POOL_INVALID;
+	}
+
+	odp_atomic_init_u32(&seq, 0);
+
+	ret = odp_pktio_start(pktio_loop);
+	if (ret) {
+		fprintf(stderr, "unable to start loop\n");
+		goto close_pktio;
+	}
+
+	return 0;
+
+close_pktio:
+	if (odp_pktio_close(pktio_loop))
+		fprintf(stderr, "unable to close pktio.\n");
+destroy_pool:
+	if (odp_pool_destroy(pool_default))
+		fprintf(stderr, "unable to destroy pool.\n");
+	return -1;
+}
+
+/* Suite teardown: stop and close the loop pktio, destroy the default pool,
+ * then destroy every per-test handle table entry.  Returns 0 on success,
+ * -1 if pktio stop/close or the pool destroy failed.
+ *
+ * NOTE(review): unused slots still hold *_INVALID handles here; destroying
+ * them presumably fails harmlessly since the return values are ignored —
+ * confirm against the platform's destroy semantics.
+ */
+int classification_suite_term(void)
+{
+	int i;
+	int retcode = 0;
+
+	if (0 > stop_pktio(pktio_loop)) {
+		fprintf(stderr, "stop pktio failed.\n");
+		retcode = -1;
+	}
+
+	if (0 > odp_pktio_close(pktio_loop)) {
+		fprintf(stderr, "pktio close failed.\n");
+		retcode = -1;
+	}
+
+	if (0 != odp_pool_destroy(pool_default)) {
+		fprintf(stderr, "pool_default destroy failed.\n");
+		retcode = -1;
+	}
+
+	/* Destroy in dependency order: CoS first, then the PMRs that
+	 * reference them, then queues and pools */
+	for (i = 0; i < CLS_ENTRIES; i++)
+		odp_cos_destroy(cos_list[i]);
+
+	for (i = 0; i < CLS_ENTRIES; i++)
+		odp_cls_pmr_destroy(pmr_list[i]);
+
+	for (i = 0; i < CLS_ENTRIES; i++)
+		odp_queue_destroy(queue_list[i]);
+
+	for (i = 0; i < CLS_ENTRIES; i++)
+		odp_pool_destroy(pool_list[i]);
+
+	return retcode;
+}
+
+/* Build a two-stage PMR chain hanging off the default CoS:
+ * default CoS --(SIP == CLS_PMR_CHAIN_SADDR)--> SRC CoS
+ *            --(L4 port == CLS_PMR_CHAIN_PORT)--> DST CoS
+ * Handles are stored in the suite-global cos/pmr/queue/pool tables so
+ * classification_suite_term() can release them.
+ */
+void configure_cls_pmr_chain(void)
+{
+	/* PKTIO --> PMR_SRC(SRC IP ADDR) --> PMR_DST (TCP SPORT) */
+
+	/* Packet matching only the SRC IP ADDR should be delivered
+	in queue[CLS_PMR_CHAIN_SRC] and a packet matching both SRC IP ADDR and
+	TCP SPORT should be delivered to queue[CLS_PMR_CHAIN_DST] */
+
+	uint16_t val;
+	uint16_t maskport;
+	char cosname[ODP_QUEUE_NAME_LEN];
+	odp_queue_param_t qparam;
+	odp_cls_cos_param_t cls_param;
+	char queuename[ODP_QUEUE_NAME_LEN];
+	char poolname[ODP_POOL_NAME_LEN];
+	uint32_t addr;
+	uint32_t mask;
+	odp_pmr_param_t pmr_param;
+	odp_queue_capability_t queue_capa;
+
+	CU_ASSERT_FATAL(odp_queue_capability(&queue_capa) == 0);
+
+	odp_queue_param_init(&qparam);
+	qparam.type = ODP_QUEUE_TYPE_SCHED;
+	qparam.sched.prio = ODP_SCHED_PRIO_NORMAL;
+	qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
+	qparam.sched.group = ODP_SCHED_GROUP_ALL;
+	/* NOTE(review): lock_count applies to ordered queues, but sync is
+	 * PARALLEL here — presumably harmless; confirm intent */
+	qparam.sched.lock_count = queue_capa.max_ordered_locks;
+	sprintf(queuename, "%s", "SrcQueue");
+
+	queue_list[CLS_PMR_CHAIN_SRC] = odp_queue_create(queuename, &qparam);
+
+	CU_ASSERT_FATAL(queue_list[CLS_PMR_CHAIN_SRC] != ODP_QUEUE_INVALID);
+
+	sprintf(poolname, "%s", "SrcPool");
+	pool_list[CLS_PMR_CHAIN_SRC] = pool_create(poolname);
+	CU_ASSERT_FATAL(pool_list[CLS_PMR_CHAIN_SRC] != ODP_POOL_INVALID);
+
+	sprintf(cosname, "SrcCos");
+	odp_cls_cos_param_init(&cls_param);
+	cls_param.pool = pool_list[CLS_PMR_CHAIN_SRC];
+	cls_param.queue = queue_list[CLS_PMR_CHAIN_SRC];
+	cls_param.drop_policy = ODP_COS_DROP_POOL;
+
+	cos_list[CLS_PMR_CHAIN_SRC] = odp_cls_cos_create(cosname, &cls_param);
+	CU_ASSERT_FATAL(cos_list[CLS_PMR_CHAIN_SRC] != ODP_COS_INVALID);
+
+	odp_queue_param_init(&qparam);
+	qparam.type = ODP_QUEUE_TYPE_SCHED;
+	qparam.sched.prio = ODP_SCHED_PRIO_NORMAL;
+	qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
+	qparam.sched.group = ODP_SCHED_GROUP_ALL;
+	sprintf(queuename, "%s", "DstQueue");
+
+	queue_list[CLS_PMR_CHAIN_DST] = odp_queue_create(queuename, &qparam);
+	CU_ASSERT_FATAL(queue_list[CLS_PMR_CHAIN_DST] != ODP_QUEUE_INVALID);
+
+	sprintf(poolname, "%s", "DstPool");
+	pool_list[CLS_PMR_CHAIN_DST] = pool_create(poolname);
+	CU_ASSERT_FATAL(pool_list[CLS_PMR_CHAIN_DST] != ODP_POOL_INVALID);
+
+	sprintf(cosname, "DstCos");
+	odp_cls_cos_param_init(&cls_param);
+	cls_param.pool = pool_list[CLS_PMR_CHAIN_DST];
+	cls_param.queue = queue_list[CLS_PMR_CHAIN_DST];
+	cls_param.drop_policy = ODP_COS_DROP_POOL;
+	cos_list[CLS_PMR_CHAIN_DST] = odp_cls_cos_create(cosname, &cls_param);
+	CU_ASSERT_FATAL(cos_list[CLS_PMR_CHAIN_DST] != ODP_COS_INVALID);
+
+	/* Stage 1: source IP address match, default CoS -> SRC CoS */
+	parse_ipv4_string(CLS_PMR_CHAIN_SADDR, &addr, &mask);
+	odp_cls_pmr_param_init(&pmr_param);
+	pmr_param.term = ODP_PMR_SIP_ADDR;
+	pmr_param.match.value = &addr;
+	pmr_param.match.mask = &mask;
+	pmr_param.val_sz = sizeof(addr);
+	pmr_list[CLS_PMR_CHAIN_SRC] =
+	odp_cls_pmr_create(&pmr_param, 1, cos_list[CLS_DEFAULT],
+			   cos_list[CLS_PMR_CHAIN_SRC]);
+	CU_ASSERT_FATAL(pmr_list[CLS_PMR_CHAIN_SRC] != ODP_PMR_INVAL);
+
+	/* Stage 2: L4 port match chained onto the SRC CoS; the term is
+	 * whatever port PMR the platform supports first */
+	val = CLS_PMR_CHAIN_PORT;
+	maskport = 0xffff;
+	odp_cls_pmr_param_init(&pmr_param);
+	pmr_param.term = find_first_supported_l3_pmr();
+	pmr_param.match.value = &val;
+	pmr_param.match.mask = &maskport;
+	pmr_param.val_sz = sizeof(val);
+	pmr_list[CLS_PMR_CHAIN_DST] =
+	odp_cls_pmr_create(&pmr_param, 1, cos_list[CLS_PMR_CHAIN_SRC],
+			   cos_list[CLS_PMR_CHAIN_DST]);
+	CU_ASSERT_FATAL(pmr_list[CLS_PMR_CHAIN_DST] != ODP_PMR_INVAL);
+}
+
+/* Exercise the chain built by configure_cls_pmr_chain(): a packet matching
+ * both the source IP and the L4 port lands on the DST CoS, while a packet
+ * matching only the source IP lands on the SRC CoS.
+ */
+void test_cls_pmr_chain(void)
+{
+	odp_packet_t pkt;
+	odph_ipv4hdr_t *ip;
+	odp_queue_t recv_queue;
+	odp_pool_t recv_pool;
+	uint32_t addr = 0;
+	uint32_t mask;
+	uint32_t seq_no = 0;
+
+	/* Packet 1: chain source address AND chain port -> DST CoS */
+	pkt = create_packet(pool_default, false, &seq, true);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	seq_no = cls_pkt_get_seq(pkt);
+	CU_ASSERT(seq_no != TEST_SEQ_INVALID);
+
+	ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, NULL);
+	parse_ipv4_string(CLS_PMR_CHAIN_SADDR, &addr, &mask);
+	ip->src_addr = odp_cpu_to_be_32(addr);
+	ip->chksum = 0;
+	ip->chksum = odph_ipv4_csum_update(pkt);
+
+	set_first_supported_pmr_port(pkt, CLS_PMR_CHAIN_PORT);
+
+	enqueue_pktio_interface(pkt, pktio_loop);
+
+	pkt = receive_packet(&recv_queue, ODP_TIME_SEC_IN_NS);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	CU_ASSERT(recv_queue == queue_list[CLS_PMR_CHAIN_DST]);
+	CU_ASSERT(seq_no == cls_pkt_get_seq(pkt));
+	recv_pool = odp_packet_pool(pkt);
+	CU_ASSERT(recv_pool == pool_list[CLS_PMR_CHAIN_DST]);
+	odp_packet_free(pkt);
+
+	/* Packet 2: chain source address only -> SRC CoS */
+	pkt = create_packet(pool_default, false, &seq, true);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	seq_no = cls_pkt_get_seq(pkt);
+	CU_ASSERT(seq_no != TEST_SEQ_INVALID);
+
+	ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, NULL);
+	parse_ipv4_string(CLS_PMR_CHAIN_SADDR, &addr, &mask);
+	ip->src_addr = odp_cpu_to_be_32(addr);
+	ip->chksum = 0;
+	ip->chksum = odph_ipv4_csum_update(pkt);
+
+	enqueue_pktio_interface(pkt, pktio_loop);
+	pkt = receive_packet(&recv_queue, ODP_TIME_SEC_IN_NS);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	CU_ASSERT(recv_queue == queue_list[CLS_PMR_CHAIN_SRC]);
+	CU_ASSERT(seq_no == cls_pkt_get_seq(pkt));
+	recv_pool = odp_packet_pool(pkt);
+	CU_ASSERT(recv_pool == pool_list[CLS_PMR_CHAIN_SRC]);
+	odp_packet_free(pkt);
+}
+
+/* Create the default CoS (queue "DefaultQueue", pool "DefaultPool") for
+ * the loop pktio and install it with odp_pktio_default_cos_set().
+ * Handles are stored in the suite-global tables under CLS_DEFAULT.
+ */
+void configure_pktio_default_cos(void)
+{
+	odp_queue_param_t qparam;
+	odp_cls_cos_param_t cls_param;
+	char cosname[ODP_COS_NAME_LEN];
+	char queuename[ODP_QUEUE_NAME_LEN];
+	char poolname[ODP_POOL_NAME_LEN];
+	int ret;
+
+	odp_queue_param_init(&qparam);
+	qparam.type = ODP_QUEUE_TYPE_SCHED;
+	qparam.sched.prio = ODP_SCHED_PRIO_DEFAULT;
+	qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
+	qparam.sched.group = ODP_SCHED_GROUP_ALL;
+	sprintf(queuename, "%s", "DefaultQueue");
+	queue_list[CLS_DEFAULT] = odp_queue_create(queuename, &qparam);
+	CU_ASSERT_FATAL(queue_list[CLS_DEFAULT] != ODP_QUEUE_INVALID);
+
+	sprintf(poolname, "DefaultPool");
+	pool_list[CLS_DEFAULT] = pool_create(poolname);
+	CU_ASSERT_FATAL(pool_list[CLS_DEFAULT] != ODP_POOL_INVALID);
+
+	sprintf(cosname, "DefaultCoS");
+	odp_cls_cos_param_init(&cls_param);
+	cls_param.pool = pool_list[CLS_DEFAULT];
+	cls_param.queue = queue_list[CLS_DEFAULT];
+	cls_param.drop_policy = ODP_COS_DROP_POOL;
+	cos_list[CLS_DEFAULT] = odp_cls_cos_create(cosname, &cls_param);
+	CU_ASSERT_FATAL(cos_list[CLS_DEFAULT] != ODP_COS_INVALID);
+
+	ret = odp_pktio_default_cos_set(pktio_loop, cos_list[CLS_DEFAULT]);
+	CU_ASSERT(ret == 0);
+}
+
+/* A packet that matches no PMR must be delivered on the default CoS queue
+ * and allocated from the default CoS pool.
+ */
+void test_pktio_default_cos(void)
+{
+	odp_packet_t pkt;
+	odp_queue_t recv_queue;
+	odp_pool_t recv_pool;
+	uint32_t seq_no = 0;
+
+	/* create a default packet */
+	pkt = create_packet(pool_default, false, &seq, true);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	seq_no = cls_pkt_get_seq(pkt);
+	CU_ASSERT(seq_no != TEST_SEQ_INVALID);
+
+	enqueue_pktio_interface(pkt, pktio_loop);
+
+	pkt = receive_packet(&recv_queue, ODP_TIME_SEC_IN_NS);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	/* Default packet should be received in default queue */
+	CU_ASSERT(recv_queue == queue_list[CLS_DEFAULT]);
+	CU_ASSERT(seq_no == cls_pkt_get_seq(pkt));
+	recv_pool = odp_packet_pool(pkt);
+	CU_ASSERT(recv_pool == pool_list[CLS_DEFAULT]);
+
+	odp_packet_free(pkt);
+}
+
+/* Create the error CoS at lowest scheduling priority and install it with
+ * odp_pktio_error_cos_set(); malformed packets land here.  Handles are
+ * stored in the suite-global tables under CLS_ERROR.
+ */
+void configure_pktio_error_cos(void)
+{
+	int retval;
+	odp_queue_param_t qparam;
+	odp_cls_cos_param_t cls_param;
+	char queuename[ODP_QUEUE_NAME_LEN];
+	char cosname[ODP_COS_NAME_LEN];
+	char poolname[ODP_POOL_NAME_LEN];
+
+	odp_queue_param_init(&qparam);
+	qparam.type = ODP_QUEUE_TYPE_SCHED;
+	qparam.sched.prio = ODP_SCHED_PRIO_LOWEST;
+	qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
+	qparam.sched.group = ODP_SCHED_GROUP_ALL;
+	/* NOTE(review): the queue is named "ErrorCos" (same string as the
+	 * CoS) rather than "ErrorQueue" — queues and CoS live in separate
+	 * namespaces so this is harmless, but it looks like a paste slip */
+	sprintf(queuename, "%s", "ErrorCos");
+
+	queue_list[CLS_ERROR] = odp_queue_create(queuename, &qparam);
+	CU_ASSERT_FATAL(queue_list[CLS_ERROR] != ODP_QUEUE_INVALID);
+
+	sprintf(poolname, "ErrorPool");
+	pool_list[CLS_ERROR] = pool_create(poolname);
+	CU_ASSERT_FATAL(pool_list[CLS_ERROR] != ODP_POOL_INVALID);
+
+	sprintf(cosname, "%s", "ErrorCos");
+	odp_cls_cos_param_init(&cls_param);
+	cls_param.pool = pool_list[CLS_ERROR];
+	cls_param.queue = queue_list[CLS_ERROR];
+	cls_param.drop_policy = ODP_COS_DROP_POOL;
+	cos_list[CLS_ERROR] = odp_cls_cos_create(cosname, &cls_param);
+	CU_ASSERT_FATAL(cos_list[CLS_ERROR] != ODP_COS_INVALID);
+
+	retval = odp_pktio_error_cos_set(pktio_loop, cos_list[CLS_ERROR]);
+	CU_ASSERT(retval == 0);
+}
+
+void test_pktio_error_cos(void)
+{
+ odp_queue_t queue;
+ odp_packet_t pkt;
+ odp_pool_t pool;
+
+ /*Create an error packet */
+ pkt = create_packet(pool_default, false, &seq, true);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ odph_ipv4hdr_t *ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, NULL);
+
+ /* Incorrect IpV4 version */
+ ip->ver_ihl = 8 << 4 | ODPH_IPV4HDR_IHL_MIN;
+ ip->chksum = 0;
+ enqueue_pktio_interface(pkt, pktio_loop);
+
+ pkt = receive_packet(&queue, ODP_TIME_SEC_IN_NS);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ /* Error packet should be received in error queue */
+ CU_ASSERT(queue == queue_list[CLS_ERROR]);
+ pool = odp_packet_pool(pkt);
+ CU_ASSERT(pool == pool_list[CLS_ERROR]);
+ odp_packet_free(pkt);
+}
+
+/* Exercise odp_pktio_skip_set(): a valid pktio must accept a skip
+ * offset, an invalid pktio handle must be rejected. */
+void classification_test_pktio_set_skip(void)
+{
+ int retval;
+ size_t offset = 5;
+
+ retval = odp_pktio_skip_set(pktio_loop, offset);
+ CU_ASSERT(retval == 0);
+
+ /* Invalid handle must fail */
+ retval = odp_pktio_skip_set(ODP_PKTIO_INVALID, offset);
+ CU_ASSERT(retval < 0);
+
+ /* Reset skip value to zero: the rest of the validation suite
+  * expects the offset to be zero */
+ retval = odp_pktio_skip_set(pktio_loop, 0);
+ CU_ASSERT(retval == 0);
+}
+
+/* Exercise odp_pktio_headroom_set(): a valid pktio must accept a
+ * headroom value, an invalid pktio handle must be rejected. */
+void classification_test_pktio_set_headroom(void)
+{
+ size_t headroom;
+ int retval;
+
+ headroom = 5;
+ retval = odp_pktio_headroom_set(pktio_loop, headroom);
+ CU_ASSERT(retval == 0);
+
+ /* Invalid handle must fail */
+ retval = odp_pktio_headroom_set(ODP_PKTIO_INVALID, headroom);
+ CU_ASSERT(retval < 0);
+}
+
+/* Create one queue/pool/CoS per L2 QoS level and map the L2 (VLAN)
+ * priorities to those CoS on the loop pktio. Handles are stored in
+ * the CLS_L2_QOS_* slots of the global lists. */
+void configure_cos_with_l2_priority(void)
+{
+ uint8_t num_qos = CLS_L2_QOS_MAX;
+ odp_cos_t cos_tbl[CLS_L2_QOS_MAX];
+ odp_queue_t queue_tbl[CLS_L2_QOS_MAX];
+ odp_pool_t pool;
+ uint8_t qos_tbl[CLS_L2_QOS_MAX];
+ char cosname[ODP_COS_NAME_LEN];
+ char queuename[ODP_QUEUE_NAME_LEN];
+ char poolname[ODP_POOL_NAME_LEN];
+ int retval;
+ int i;
+ odp_queue_param_t qparam;
+ odp_cls_cos_param_t cls_param;
+
+ /* Zero-initialize the QoS mapping table */
+ for (i = 0; i < CLS_L2_QOS_MAX; i++)
+  qos_tbl[i] = 0;
+
+ odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_SCHED;
+ qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
+ qparam.sched.group = ODP_SCHED_GROUP_ALL;
+ for (i = 0; i < num_qos; i++) {
+  /* Higher QoS index gets a higher scheduling priority */
+  qparam.sched.prio = ODP_SCHED_PRIO_LOWEST - i;
+  sprintf(queuename, "%s_%d", "L2_Queue", i);
+  queue_tbl[i] = odp_queue_create(queuename, &qparam);
+  CU_ASSERT_FATAL(queue_tbl[i] != ODP_QUEUE_INVALID);
+  queue_list[CLS_L2_QOS_0 + i] = queue_tbl[i];
+
+  sprintf(poolname, "%s_%d", "L2_Pool", i);
+  pool = pool_create(poolname);
+  CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+  pool_list[CLS_L2_QOS_0 + i] = pool;
+
+  sprintf(cosname, "%s_%d", "L2_Cos", i);
+  odp_cls_cos_param_init(&cls_param);
+  cls_param.pool = pool;
+  cls_param.queue = queue_tbl[i];
+  cls_param.drop_policy = ODP_COS_DROP_POOL;
+  cos_tbl[i] = odp_cls_cos_create(cosname, &cls_param);
+  if (cos_tbl[i] == ODP_COS_INVALID)
+   break;
+
+  cos_list[CLS_L2_QOS_0 + i] = cos_tbl[i];
+  qos_tbl[i] = i;
+ }
+ /* count 'i' is passed instead of num_qos to handle the rare scenario
+ if the odp_cls_cos_create() failed in the middle*/
+ retval = odp_cos_with_l2_priority(pktio_loop, i, qos_tbl, cos_tbl);
+ CU_ASSERT(retval == 0);
+}
+
+/* For each L2 QoS level send a VLAN-tagged packet carrying that
+ * priority and verify it is delivered to the matching per-priority
+ * queue and pool configured by configure_cos_with_l2_priority(). */
+void test_cos_with_l2_priority(void)
+{
+ odp_packet_t pkt;
+ odph_ethhdr_t *ethhdr;
+ odph_vlanhdr_t *vlan;
+ odp_queue_t queue;
+ odp_pool_t pool;
+ uint32_t seqno = 0;
+ uint8_t i;
+
+ for (i = 0; i < CLS_L2_QOS_MAX; i++) {
+  /* vlan = true: packet carries an 802.1Q tag */
+  pkt = create_packet(pool_default, true, &seq, true);
+  CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+  seqno = cls_pkt_get_seq(pkt);
+  CU_ASSERT(seqno != TEST_SEQ_INVALID);
+  ethhdr = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+  vlan = (odph_vlanhdr_t *)(ethhdr + 1);
+  /* Priority (PCP) occupies the top 3 bits of the 16-bit TCI */
+  vlan->tci = odp_cpu_to_be_16(i << 13);
+  enqueue_pktio_interface(pkt, pktio_loop);
+  pkt = receive_packet(&queue, ODP_TIME_SEC_IN_NS);
+  CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+  CU_ASSERT(queue == queue_list[CLS_L2_QOS_0 + i]);
+  pool = odp_packet_pool(pkt);
+  CU_ASSERT(pool == pool_list[CLS_L2_QOS_0 + i]);
+  CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
+  odp_packet_free(pkt);
+ }
+}
+
+/* Create the queue, pool and CoS for the single-term PMR test and
+ * attach a PMR matching destination port CLS_PMR_PORT (on the first
+ * supported L4 term) from the default CoS to the new CoS. */
+void configure_pmr_cos(void)
+{
+ uint16_t val;
+ uint16_t mask;
+ odp_pmr_param_t pmr_param;
+ odp_queue_param_t qparam;
+ odp_cls_cos_param_t cls_param;
+ char cosname[ODP_COS_NAME_LEN];
+ char queuename[ODP_QUEUE_NAME_LEN];
+ char poolname[ODP_POOL_NAME_LEN];
+
+ odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_SCHED;
+ qparam.sched.prio = ODP_SCHED_PRIO_HIGHEST;
+ qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
+ qparam.sched.group = ODP_SCHED_GROUP_ALL;
+ sprintf(queuename, "%s", "PMR_CoS");
+
+ queue_list[CLS_PMR] = odp_queue_create(queuename, &qparam);
+ CU_ASSERT_FATAL(queue_list[CLS_PMR] != ODP_QUEUE_INVALID);
+
+ sprintf(poolname, "PMR_Pool");
+ pool_list[CLS_PMR] = pool_create(poolname);
+ CU_ASSERT_FATAL(pool_list[CLS_PMR] != ODP_POOL_INVALID);
+
+ sprintf(cosname, "PMR_CoS");
+ odp_cls_cos_param_init(&cls_param);
+ cls_param.pool = pool_list[CLS_PMR];
+ cls_param.queue = queue_list[CLS_PMR];
+ cls_param.drop_policy = ODP_COS_DROP_POOL;
+ cos_list[CLS_PMR] = odp_cls_cos_create(cosname, &cls_param);
+ CU_ASSERT_FATAL(cos_list[CLS_PMR] != ODP_COS_INVALID);
+
+ /* Exact match on the port value (full 16-bit mask) */
+ val = CLS_PMR_PORT;
+ mask = 0xffff;
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = find_first_supported_l3_pmr();
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ /* val/mask are only read during create, so locals are safe here */
+ pmr_list[CLS_PMR] = odp_cls_pmr_create(&pmr_param, 1,
+ cos_list[CLS_DEFAULT],
+ cos_list[CLS_PMR]);
+ CU_ASSERT_FATAL(pmr_list[CLS_PMR] != ODP_PMR_INVAL);
+}
+
+/* Send a packet with destination port CLS_PMR_PORT and verify that
+ * the PMR installed by configure_pmr_cos() steers it to the PMR
+ * queue and pool. */
+void test_pmr_cos(void)
+{
+ odp_packet_t pkt;
+ odp_queue_t queue;
+ odp_pool_t pool;
+ uint32_t seqno = 0;
+
+ pkt = create_packet(pool_default, false, &seq, true);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ seqno = cls_pkt_get_seq(pkt);
+ CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ /* Make the packet match the installed PMR term */
+ set_first_supported_pmr_port(pkt, CLS_PMR_PORT);
+ enqueue_pktio_interface(pkt, pktio_loop);
+ pkt = receive_packet(&queue, ODP_TIME_SEC_IN_NS);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(queue == queue_list[CLS_PMR]);
+ pool = odp_packet_pool(pkt);
+ CU_ASSERT(pool == pool_list[CLS_PMR]);
+ CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
+ odp_packet_free(pkt);
+}
+
+/* Create the queue, pool and CoS for the composite (multi-term) PMR
+ * test and attach a two-term PMR (source IP address AND L4 port)
+ * from the default CoS to the new CoS. */
+void configure_pktio_pmr_composite(void)
+{
+ odp_pmr_param_t pmr_params[2];
+ uint16_t val;
+ uint16_t maskport;
+ int num_terms = 2; /* one pmr for each L3 and L4 */
+ odp_queue_param_t qparam;
+ odp_cls_cos_param_t cls_param;
+ char cosname[ODP_COS_NAME_LEN];
+ char queuename[ODP_QUEUE_NAME_LEN];
+ char poolname[ODP_POOL_NAME_LEN];
+ uint32_t addr = 0;
+ uint32_t mask;
+
+ odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_SCHED;
+ qparam.sched.prio = ODP_SCHED_PRIO_HIGHEST;
+ qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
+ qparam.sched.group = ODP_SCHED_GROUP_ALL;
+ sprintf(queuename, "%s", "cos_pmr_composite_queue");
+
+ queue_list[CLS_PMR_SET] = odp_queue_create(queuename, &qparam);
+ CU_ASSERT_FATAL(queue_list[CLS_PMR_SET] != ODP_QUEUE_INVALID);
+
+ sprintf(poolname, "cos_pmr_composite_pool");
+ pool_list[CLS_PMR_SET] = pool_create(poolname);
+ CU_ASSERT_FATAL(pool_list[CLS_PMR_SET] != ODP_POOL_INVALID);
+
+ sprintf(cosname, "cos_pmr_composite");
+ odp_cls_cos_param_init(&cls_param);
+ cls_param.pool = pool_list[CLS_PMR_SET];
+ cls_param.queue = queue_list[CLS_PMR_SET];
+ cls_param.drop_policy = ODP_COS_DROP_POOL;
+ cos_list[CLS_PMR_SET] = odp_cls_cos_create(cosname, &cls_param);
+ CU_ASSERT_FATAL(cos_list[CLS_PMR_SET] != ODP_COS_INVALID);
+
+ /* Term 0: match on source IP address */
+ parse_ipv4_string(CLS_PMR_SET_SADDR, &addr, &mask);
+ odp_cls_pmr_param_init(&pmr_params[0]);
+ pmr_params[0].term = ODP_PMR_SIP_ADDR;
+ pmr_params[0].match.value = &addr;
+ pmr_params[0].match.mask = &mask;
+ pmr_params[0].val_sz = sizeof(addr);
+
+ /* Term 1: exact match on the L4 port */
+ val = CLS_PMR_SET_PORT;
+ maskport = 0xffff;
+ odp_cls_pmr_param_init(&pmr_params[1]);
+ pmr_params[1].term = find_first_supported_l3_pmr();
+ pmr_params[1].match.value = &val;
+ pmr_params[1].match.mask = &maskport;
+ pmr_params[1].range_term = false;
+ pmr_params[1].val_sz = sizeof(val);
+
+ /* Both terms must match for the packet to move to CLS_PMR_SET */
+ pmr_list[CLS_PMR_SET] = odp_cls_pmr_create(pmr_params, num_terms,
+ cos_list[CLS_DEFAULT],
+ cos_list[CLS_PMR_SET]);
+ CU_ASSERT_FATAL(pmr_list[CLS_PMR_SET] != ODP_PMR_INVAL);
+}
+
+/* Send a packet matching BOTH composite PMR terms (source IP and
+ * L4 port) and verify it is steered to the composite CoS queue and
+ * pool. */
+void test_pktio_pmr_composite_cos(void)
+{
+ uint32_t addr = 0;
+ uint32_t mask;
+ odph_ipv4hdr_t *ip;
+ odp_packet_t pkt;
+ odp_pool_t pool;
+ odp_queue_t queue;
+ uint32_t seqno = 0;
+
+ pkt = create_packet(pool_default, false, &seq, true);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ seqno = cls_pkt_get_seq(pkt);
+ CU_ASSERT(seqno != TEST_SEQ_INVALID);
+
+ /* Rewrite the source address and fix up the IPv4 checksum */
+ ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, NULL);
+ parse_ipv4_string(CLS_PMR_SET_SADDR, &addr, &mask);
+ ip->src_addr = odp_cpu_to_be_32(addr);
+ ip->chksum = 0;
+ ip->chksum = odph_ipv4_csum_update(pkt);
+
+ set_first_supported_pmr_port(pkt, CLS_PMR_SET_PORT);
+ enqueue_pktio_interface(pkt, pktio_loop);
+ pkt = receive_packet(&queue, ODP_TIME_SEC_IN_NS);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(queue == queue_list[CLS_PMR_SET]);
+ pool = odp_packet_pool(pkt);
+ CU_ASSERT(pool == pool_list[CLS_PMR_SET]);
+ CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
+ odp_packet_free(pkt);
+}
+
+/* Configure every enabled CoS scenario on the loop pktio. Must run
+ * before classification_test_pktio_test(), which consumes the
+ * queues/pools/CoS created here. */
+void classification_test_pktio_configure(void)
+{
+ /* Configure the Different CoS for the pktio interface */
+ if (TEST_DEFAULT)
+  configure_pktio_default_cos();
+ if (TEST_ERROR)
+  configure_pktio_error_cos();
+ if (TEST_PMR_CHAIN)
+  configure_cls_pmr_chain();
+ if (TEST_L2_QOS)
+  configure_cos_with_l2_priority();
+ if (TEST_PMR)
+  configure_pmr_cos();
+ if (TEST_PMR_SET)
+  configure_pktio_pmr_composite();
+}
+
+/* Run every enabled CoS scenario. Depends on the configuration done
+ * by classification_test_pktio_configure() in a previous test case. */
+void classification_test_pktio_test(void)
+{
+ /* Test Different CoS on the pktio interface */
+ if (TEST_DEFAULT)
+  test_pktio_default_cos();
+ if (TEST_ERROR)
+  test_pktio_error_cos();
+ if (TEST_PMR_CHAIN)
+  test_cls_pmr_chain();
+ if (TEST_L2_QOS)
+  test_cos_with_l2_priority();
+ if (TEST_PMR)
+  test_pmr_cos();
+ if (TEST_PMR_SET)
+  test_pktio_pmr_composite_cos();
+}
+
+/* Test registration order matters: configure must precede test */
+odp_testinfo_t classification_suite[] = {
+ ODP_TEST_INFO(classification_test_pktio_set_skip),
+ ODP_TEST_INFO(classification_test_pktio_set_headroom),
+ ODP_TEST_INFO(classification_test_pktio_configure),
+ ODP_TEST_INFO(classification_test_pktio_test),
+ ODP_TEST_INFO_NULL,
+};
diff --git a/test/common_plat/validation/api/classification/odp_classification_testsuites.h b/test/common_plat/validation/api/classification/odp_classification_testsuites.h
new file mode 100644
index 000000000..aea3de1b1
--- /dev/null
+++ b/test/common_plat/validation/api/classification/odp_classification_testsuites.h
@@ -0,0 +1,55 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_CLASSIFICATION_TESTSUITES_H_
+#define ODP_CLASSIFICATION_TESTSUITES_H_
+
+#include <odp_api.h>
+#include <odp_cunit_common.h>
+#include <stdbool.h>
+
+/* Test arrays registered by the classification test program */
+extern odp_testinfo_t classification_suite[];
+extern odp_testinfo_t classification_suite_basic[];
+extern odp_testinfo_t classification_suite_pmr[];
+
+/* Suite init/term hooks */
+int classification_suite_init(void);
+int classification_suite_term(void);
+
+int classification_suite_pmr_term(void);
+int classification_suite_pmr_init(void);
+
+/* Helpers shared across the classification suites */
+odp_packet_t create_packet(odp_pool_t pool, bool vlan,
+ odp_atomic_u32_t *seq, bool udp);
+odp_packet_t create_packet_len(odp_pool_t pool, bool vlan,
+ odp_atomic_u32_t *seq, bool flag_udp,
+ uint16_t len);
+int cls_pkt_set_seq(odp_packet_t pkt);
+uint32_t cls_pkt_get_seq(odp_packet_t pkt);
+odp_pktio_t create_pktio(odp_queue_type_t q_type, odp_pool_t pool);
+void configure_default_cos(odp_pktio_t pktio, odp_cos_t *cos,
+ odp_queue_t *queue, odp_pool_t *pool);
+int parse_ipv4_string(const char *ipaddress, uint32_t *addr, uint32_t *mask);
+void enqueue_pktio_interface(odp_packet_t pkt, odp_pktio_t pktio);
+odp_packet_t receive_packet(odp_queue_t *queue, uint64_t ns);
+odp_pool_t pool_create(const char *poolname);
+odp_queue_t queue_create(const char *queuename, bool sched);
+void configure_pktio_default_cos(void);
+void test_pktio_default_cos(void);
+void configure_pktio_error_cos(void);
+void test_pktio_error_cos(void);
+void configure_cls_pmr_chain(void);
+void test_cls_pmr_chain(void);
+void configure_cos_with_l2_priority(void);
+void test_cos_with_l2_priority(void);
+void configure_pmr_cos(void);
+void test_pmr_cos(void);
+void configure_pktio_pmr_composite(void);
+void test_pktio_pmr_composite_cos(void);
+int stop_pktio(odp_pktio_t pktio);
+odp_cls_pmr_term_t find_first_supported_l3_pmr(void);
+int set_first_supported_pmr_port(odp_packet_t pkt, uint16_t port);
+
+#endif /* ODP_CLASSIFICATION_TESTSUITES_H_ */
diff --git a/test/common_plat/validation/api/cpumask/.gitignore b/test/common_plat/validation/api/cpumask/.gitignore
new file mode 100644
index 000000000..655a1640f
--- /dev/null
+++ b/test/common_plat/validation/api/cpumask/.gitignore
@@ -0,0 +1 @@
+cpumask_main
diff --git a/test/common_plat/validation/api/cpumask/Makefile.am b/test/common_plat/validation/api/cpumask/Makefile.am
new file mode 100644
index 000000000..ec5fce338
--- /dev/null
+++ b/test/common_plat/validation/api/cpumask/Makefile.am
@@ -0,0 +1,11 @@
+include ../Makefile.inc
+
+noinst_LTLIBRARIES = libtestcpumask.la
+libtestcpumask_la_SOURCES = cpumask.c
+libtestcpumask_la_LIBADD = $(LIBCPUMASK_COMMON)
+
+test_PROGRAMS = cpumask_main$(EXEEXT)
+dist_cpumask_main_SOURCES = cpumask_main.c
+cpumask_main_LDADD = libtestcpumask.la $(LIBCUNIT_COMMON) $(LIBODP)
+
+EXTRA_DIST = cpumask.h
diff --git a/test/common_plat/validation/api/cpumask/cpumask.c b/test/common_plat/validation/api/cpumask/cpumask.c
new file mode 100644
index 000000000..a0cb559fb
--- /dev/null
+++ b/test/common_plat/validation/api/cpumask/cpumask.c
@@ -0,0 +1,116 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+
+#include "odp_cunit_common.h"
+#include "cpumask.h"
+#include "mask_common.h"
+
+/* default worker parameter to get all that may be available */
+#define ALL_AVAILABLE 0
+
+/* The default control mask must be non-empty, its population count
+ * must equal the returned CPU count, and it must fit the capacity. */
+void cpumask_test_odp_cpumask_def_control(void)
+{
+ unsigned num;
+ unsigned mask_count;
+ unsigned max_cpus = mask_capacity();
+ odp_cpumask_t mask;
+
+ num = odp_cpumask_default_control(&mask, ALL_AVAILABLE);
+ mask_count = odp_cpumask_count(&mask);
+
+ CU_ASSERT(mask_count == num);
+ CU_ASSERT(num > 0);
+ CU_ASSERT(num <= max_cpus);
+}
+
+/* The default worker mask must be non-empty, its population count
+ * must equal the returned CPU count, and it must fit the capacity. */
+void cpumask_test_odp_cpumask_def_worker(void)
+{
+ unsigned num;
+ unsigned mask_count;
+ unsigned max_cpus = mask_capacity();
+ odp_cpumask_t mask;
+
+ num = odp_cpumask_default_worker(&mask, ALL_AVAILABLE);
+ mask_count = odp_cpumask_count(&mask);
+
+ CU_ASSERT(mask_count == num);
+ CU_ASSERT(num > 0);
+ CU_ASSERT(num <= max_cpus);
+}
+
+/* Request specific worker/control CPU counts and verify the returned
+ * masks are consistent: counts match mask population, at least one
+ * control CPU, and workers do not exceed available CPUs. */
+void cpumask_test_odp_cpumask_def(void)
+{
+ unsigned mask_count;
+ unsigned num_worker;
+ unsigned num_control;
+ unsigned max_cpus = mask_capacity();
+ unsigned available_cpus = odp_cpu_count();
+ unsigned requested_cpus;
+ odp_cpumask_t mask;
+
+ CU_ASSERT(available_cpus <= max_cpus);
+
+ /* Leave one CPU for control when more than one is available */
+ if (available_cpus > 1)
+  requested_cpus = available_cpus - 1;
+ else
+  requested_cpus = available_cpus;
+ num_worker = odp_cpumask_default_worker(&mask, requested_cpus);
+ mask_count = odp_cpumask_count(&mask);
+ CU_ASSERT(mask_count == num_worker);
+
+ num_control = odp_cpumask_default_control(&mask, 1);
+ mask_count = odp_cpumask_count(&mask);
+ CU_ASSERT(mask_count == num_control);
+
+ CU_ASSERT(num_control >= 1);
+ CU_ASSERT(num_worker <= available_cpus);
+ CU_ASSERT(num_worker > 0);
+}
+
+/* Generic mask tests come from mask_common; the three _def tests
+ * below are cpumask specific. */
+odp_testinfo_t cpumask_suite[] = {
+ ODP_TEST_INFO(cpumask_test_odp_cpumask_to_from_str),
+ ODP_TEST_INFO(cpumask_test_odp_cpumask_equal),
+ ODP_TEST_INFO(cpumask_test_odp_cpumask_zero),
+ ODP_TEST_INFO(cpumask_test_odp_cpumask_set),
+ ODP_TEST_INFO(cpumask_test_odp_cpumask_clr),
+ ODP_TEST_INFO(cpumask_test_odp_cpumask_isset),
+ ODP_TEST_INFO(cpumask_test_odp_cpumask_count),
+ ODP_TEST_INFO(cpumask_test_odp_cpumask_and),
+ ODP_TEST_INFO(cpumask_test_odp_cpumask_or),
+ ODP_TEST_INFO(cpumask_test_odp_cpumask_xor),
+ ODP_TEST_INFO(cpumask_test_odp_cpumask_copy),
+ ODP_TEST_INFO(cpumask_test_odp_cpumask_first),
+ ODP_TEST_INFO(cpumask_test_odp_cpumask_last),
+ ODP_TEST_INFO(cpumask_test_odp_cpumask_next),
+ ODP_TEST_INFO(cpumask_test_odp_cpumask_setall),
+ ODP_TEST_INFO(cpumask_test_odp_cpumask_def_control),
+ ODP_TEST_INFO(cpumask_test_odp_cpumask_def_worker),
+ ODP_TEST_INFO(cpumask_test_odp_cpumask_def),
+ ODP_TEST_INFO_NULL,
+};
+
+/* Single suite, no per-suite init/term needed */
+odp_suiteinfo_t cpumask_suites[] = {
+ {"Cpumask", NULL, NULL, cpumask_suite},
+ ODP_SUITE_INFO_NULL,
+};
+
+/* Entry point: parse common CUnit options, register the cpumask
+ * suites and run them. Returns 0 on success, non-zero on failure. */
+int cpumask_main(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(argc, argv))
+  return -1;
+
+ ret = odp_cunit_register(cpumask_suites);
+
+ if (ret == 0)
+  ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/common_plat/validation/api/cpumask/cpumask.h b/test/common_plat/validation/api/cpumask/cpumask.h
new file mode 100644
index 000000000..87a4512bf
--- /dev/null
+++ b/test/common_plat/validation/api/cpumask/cpumask.h
@@ -0,0 +1,28 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _ODP_TEST_CPUMASK_H_
+#define _ODP_TEST_CPUMASK_H_
+
+#include <odp_api.h>
+#include <odp_cunit_common.h>
+
+/* test functions: */
+/* generic mask test prototypes are shared via mask_common.h */
+#include "mask_common.h"
+void cpumask_test_odp_cpumask_def_control(void);
+void cpumask_test_odp_cpumask_def_worker(void);
+void cpumask_test_odp_cpumask_def(void);
+
+/* test arrays: */
+extern odp_testinfo_t cpumask_suite[];
+
+/* test registry: */
+extern odp_suiteinfo_t cpumask_suites[];
+
+/* main test program: */
+int cpumask_main(int argc, char *argv[]);
+
+#endif
diff --git a/test/common_plat/validation/api/cpumask/cpumask_main.c b/test/common_plat/validation/api/cpumask/cpumask_main.c
new file mode 100644
index 000000000..39e3171ca
--- /dev/null
+++ b/test/common_plat/validation/api/cpumask/cpumask_main.c
@@ -0,0 +1,11 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include "cpumask.h"
+
+/* Thin wrapper: delegate to the cpumask test entry point */
+int main(int argc, char *argv[])
+{
+ return cpumask_main(argc, argv);
+}
diff --git a/test/common_plat/validation/api/crypto/.gitignore b/test/common_plat/validation/api/crypto/.gitignore
new file mode 100644
index 000000000..0ac55e35e
--- /dev/null
+++ b/test/common_plat/validation/api/crypto/.gitignore
@@ -0,0 +1 @@
+crypto_main
diff --git a/test/common_plat/validation/api/crypto/Makefile.am b/test/common_plat/validation/api/crypto/Makefile.am
new file mode 100644
index 000000000..3ea41b41f
--- /dev/null
+++ b/test/common_plat/validation/api/crypto/Makefile.am
@@ -0,0 +1,11 @@
+include ../Makefile.inc
+
+noinst_LTLIBRARIES = libtestcrypto.la
+libtestcrypto_la_SOURCES = crypto.c \
+ odp_crypto_test_inp.c
+
+test_PROGRAMS = crypto_main$(EXEEXT)
+dist_crypto_main_SOURCES = crypto_main.c
+crypto_main_LDADD = libtestcrypto.la $(LIBCUNIT_COMMON) $(LIBODP)
+
+EXTRA_DIST = crypto.h odp_crypto_test_inp.h test_vectors.h test_vectors_len.h
diff --git a/test/common_plat/validation/api/crypto/crypto.c b/test/common_plat/validation/api/crypto/crypto.c
new file mode 100644
index 000000000..8946cde62
--- /dev/null
+++ b/test/common_plat/validation/api/crypto/crypto.c
@@ -0,0 +1,121 @@
+/* Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+#include <odp_cunit_common.h>
+#include "odp_crypto_test_inp.h"
+#include "crypto.h"
+
+#define SHM_PKT_POOL_SIZE (512 * 2048 * 2)
+#define SHM_PKT_POOL_BUF_SIZE (1024 * 32)
+
+#define SHM_COMPL_POOL_SIZE (128 * 1024)
+#define SHM_COMPL_POOL_BUF_SIZE 128
+
+/* Run the same crypto test array twice: once in synchronous and once
+ * in asynchronous operation mode (selected by the suite init hook). */
+odp_suiteinfo_t crypto_suites[] = {
+ {ODP_CRYPTO_SYNC_INP, crypto_suite_sync_init, NULL, crypto_suite},
+ {ODP_CRYPTO_ASYNC_INP, crypto_suite_async_init, NULL, crypto_suite},
+ ODP_SUITE_INFO_NULL,
+};
+
+/* Global init: bring up the ODP instance and create the packet pool
+ * and completion queue shared by all crypto test cases.
+ * Returns 0 on success, -1 on failure.
+ * Fix: destroy the pool on the queue-creation error path -- the
+ * original returned -1 and leaked it (crypto_term() is not called
+ * when init fails). */
+int crypto_init(odp_instance_t *inst)
+{
+ odp_pool_param_t params;
+ odp_pool_t pool;
+ odp_queue_t out_queue;
+ odp_pool_capability_t pool_capa;
+
+ if (0 != odp_init_global(inst, NULL, NULL)) {
+  fprintf(stderr, "error: odp_init_global() failed.\n");
+  return -1;
+ }
+
+ if (0 != odp_init_local(*inst, ODP_THREAD_CONTROL)) {
+  fprintf(stderr, "error: odp_init_local() failed.\n");
+  return -1;
+ }
+
+ if (odp_pool_capability(&pool_capa) < 0) {
+  fprintf(stderr, "error: odp_pool_capability() failed.\n");
+  return -1;
+ }
+
+ memset(&params, 0, sizeof(params));
+ params.pkt.seg_len = SHM_PKT_POOL_BUF_SIZE;
+ params.pkt.len = SHM_PKT_POOL_BUF_SIZE;
+ params.pkt.num = SHM_PKT_POOL_SIZE / SHM_PKT_POOL_BUF_SIZE;
+ params.type = ODP_POOL_PACKET;
+
+ /* Clamp packet length to the implementation's capability.
+  * NOTE(review): seg_len is left unclamped -- confirm against
+  * pool_capa.pkt.max_seg_len if an implementation rejects it. */
+ if (SHM_PKT_POOL_BUF_SIZE > pool_capa.pkt.max_len)
+  params.pkt.len = pool_capa.pkt.max_len;
+
+ pool = odp_pool_create("packet_pool", &params);
+
+ if (ODP_POOL_INVALID == pool) {
+  fprintf(stderr, "Packet pool creation failed.\n");
+  return -1;
+ }
+ out_queue = odp_queue_create("crypto-out", NULL);
+ if (ODP_QUEUE_INVALID == out_queue) {
+  fprintf(stderr, "Crypto outq creation failed.\n");
+  /* don't leak the pool when init fails here */
+  odp_pool_destroy(pool);
+  return -1;
+ }
+
+ return 0;
+}
+
+/* Global term: tear down the resources created by crypto_init() by
+ * name lookup, then shut down the ODP instance. Failures to find or
+ * destroy resources are reported but do not abort termination.
+ * Returns 0 on success, -1 if ODP termination itself fails. */
+int crypto_term(odp_instance_t inst)
+{
+ odp_pool_t pool;
+ odp_queue_t out_queue;
+
+ out_queue = odp_queue_lookup("crypto-out");
+ if (ODP_QUEUE_INVALID != out_queue) {
+  if (odp_queue_destroy(out_queue))
+   fprintf(stderr, "Crypto outq destroy failed.\n");
+ } else {
+  fprintf(stderr, "Crypto outq not found.\n");
+ }
+
+ pool = odp_pool_lookup("packet_pool");
+ if (ODP_POOL_INVALID != pool) {
+  if (odp_pool_destroy(pool))
+   fprintf(stderr, "Packet pool destroy failed.\n");
+ } else {
+  fprintf(stderr, "Packet pool not found.\n");
+ }
+
+ if (0 != odp_term_local()) {
+  fprintf(stderr, "error: odp_term_local() failed.\n");
+  return -1;
+ }
+
+ if (0 != odp_term_global(inst)) {
+  fprintf(stderr, "error: odp_term_global() failed.\n");
+  return -1;
+ }
+
+ return 0;
+}
+
+/* Entry point: parse common CUnit options, install the custom global
+ * init/term hooks, register the crypto suites and run them. */
+int crypto_main(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(argc, argv))
+  return -1;
+
+ /* crypto needs its own pool/queue setup around the test run */
+ odp_cunit_register_global_init(crypto_init);
+ odp_cunit_register_global_term(crypto_term);
+
+ ret = odp_cunit_register(crypto_suites);
+
+ if (ret == 0)
+  ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/common_plat/validation/api/crypto/crypto.h b/test/common_plat/validation/api/crypto/crypto.h
new file mode 100644
index 000000000..9b909aa04
--- /dev/null
+++ b/test/common_plat/validation/api/crypto/crypto.h
@@ -0,0 +1,45 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _ODP_TEST_CRYPTO_H_
+#define _ODP_TEST_CRYPTO_H_
+
+#include "odp_cunit_common.h"
+
+/* test functions: one encrypt/decrypt case per cipher/auth variant */
+void crypto_test_enc_alg_3des_cbc(void);
+void crypto_test_enc_alg_3des_cbc_ovr_iv(void);
+void crypto_test_dec_alg_3des_cbc(void);
+void crypto_test_dec_alg_3des_cbc_ovr_iv(void);
+void crypto_test_enc_alg_aes128_cbc(void);
+void crypto_test_enc_alg_aes128_cbc_ovr_iv(void);
+void crypto_test_dec_alg_aes128_cbc(void);
+void crypto_test_dec_alg_aes128_cbc_ovr_iv(void);
+void crypto_test_enc_alg_aes128_gcm(void);
+void crypto_test_enc_alg_aes128_gcm_ovr_iv(void);
+void crypto_test_dec_alg_aes128_gcm(void);
+void crypto_test_dec_alg_aes128_gcm_ovr_iv(void);
+void crypto_test_alg_hmac_md5(void);
+void crypto_test_alg_hmac_sha256(void);
+
+/* test arrays: */
+extern odp_testinfo_t crypto_suite[];
+
+/* test array init/term functions: select sync vs async op mode */
+int crypto_suite_sync_init(void);
+int crypto_suite_async_init(void);
+
+/* test registry: */
+extern odp_suiteinfo_t crypto_suites[];
+
+/* executable init/term functions: */
+int crypto_init(odp_instance_t *inst);
+int crypto_term(odp_instance_t inst);
+
+/* main test program: */
+int crypto_main(int argc, char *argv[]);
+
+#endif
diff --git a/test/common_plat/validation/api/crypto/crypto_main.c b/test/common_plat/validation/api/crypto/crypto_main.c
new file mode 100644
index 000000000..d8c26fa25
--- /dev/null
+++ b/test/common_plat/validation/api/crypto/crypto_main.c
@@ -0,0 +1,12 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "crypto.h"
+
+/* Thin wrapper: delegate to the crypto test entry point */
+int main(int argc, char *argv[])
+{
+ return crypto_main(argc, argv);
+}
diff --git a/test/common_plat/validation/api/crypto/odp_crypto_test_inp.c b/test/common_plat/validation/api/crypto/odp_crypto_test_inp.c
new file mode 100644
index 000000000..4ac4a0700
--- /dev/null
+++ b/test/common_plat/validation/api/crypto/odp_crypto_test_inp.c
@@ -0,0 +1,726 @@
+/* Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+#include <CUnit/Basic.h>
+#include <odp_cunit_common.h>
+#include "test_vectors.h"
+#include "odp_crypto_test_inp.h"
+#include "crypto.h"
+
+struct suite_context_s {
+ odp_crypto_op_mode_t pref_mode;
+ odp_pool_t pool;
+ odp_queue_t queue;
+};
+
+static struct suite_context_s suite_context;
+
+/* Basic algorithm run function for async inplace mode.
+ * Creates a session from input parameters and runs one operation
+ * on input_vec. Checks the output of the crypto operation against
+ * output_vec. Operation completion event is dequeued polling the
+ * session output queue. Completion context pointer is retrieved
+ * and checked against the one set before the operation.
+ * Completion event can be a separate buffer or the input packet
+ * buffer can be used.
+ * */
+static void alg_test(odp_crypto_op_t op,
+ odp_cipher_alg_t cipher_alg,
+ odp_crypto_iv_t ses_iv,
+ uint8_t *op_iv_ptr,
+ odp_crypto_key_t cipher_key,
+ odp_auth_alg_t auth_alg,
+ odp_crypto_key_t auth_key,
+ odp_crypto_data_range_t *cipher_range,
+ odp_crypto_data_range_t *auth_range,
+ const uint8_t *plaintext,
+ unsigned int plaintext_len,
+ const uint8_t *ciphertext,
+ unsigned int ciphertext_len,
+ const uint8_t *digest,
+ unsigned int digest_len
+ )
+{
+ odp_crypto_session_t session;
+ odp_crypto_capability_t capability;
+ int rc;
+ odp_crypto_ses_create_err_t status;
+ odp_bool_t posted;
+ odp_event_t event;
+ odp_crypto_compl_t compl_event;
+ odp_crypto_op_result_t result;
+ odp_crypto_session_params_t ses_params;
+ odp_crypto_op_params_t op_params;
+ uint8_t *data_addr;
+ int data_off;
+
+ rc = odp_crypto_capability(&capability);
+ CU_ASSERT(!rc);
+
+ if (capability.hw_ciphers.all_bits) {
+ if (cipher_alg == ODP_CIPHER_ALG_3DES_CBC &&
+ !(capability.hw_ciphers.bit.trides_cbc))
+ rc = -1;
+ if (cipher_alg == ODP_CIPHER_ALG_AES128_CBC &&
+ !(capability.hw_ciphers.bit.aes128_cbc))
+ rc = -1;
+ if (cipher_alg == ODP_CIPHER_ALG_AES128_GCM &&
+ !(capability.hw_ciphers.bit.aes128_gcm))
+ rc = -1;
+ } else {
+ if (cipher_alg == ODP_CIPHER_ALG_3DES_CBC &&
+ !(capability.ciphers.bit.trides_cbc))
+ rc = -1;
+ if (cipher_alg == ODP_CIPHER_ALG_AES128_CBC &&
+ !(capability.ciphers.bit.aes128_cbc))
+ rc = -1;
+ if (cipher_alg == ODP_CIPHER_ALG_AES128_GCM &&
+ !(capability.ciphers.bit.aes128_gcm))
+ rc = -1;
+ }
+
+ CU_ASSERT(!rc);
+
+ if (capability.hw_auths.all_bits) {
+ if (auth_alg == ODP_AUTH_ALG_AES128_GCM &&
+ !(capability.hw_auths.bit.aes128_gcm))
+ rc = -1;
+ if (auth_alg == ODP_AUTH_ALG_NULL &&
+ !(capability.hw_auths.bit.null))
+ rc = -1;
+ } else {
+ if (auth_alg == ODP_AUTH_ALG_AES128_GCM &&
+ !(capability.auths.bit.aes128_gcm))
+ rc = -1;
+ if (auth_alg == ODP_AUTH_ALG_NULL &&
+ !(capability.auths.bit.null))
+ rc = -1;
+ }
+
+ CU_ASSERT(!rc);
+
+ /* Create a crypto session */
+ memset(&ses_params, 0, sizeof(ses_params));
+ ses_params.op = op;
+ ses_params.auth_cipher_text = false;
+ ses_params.pref_mode = suite_context.pref_mode;
+ ses_params.cipher_alg = cipher_alg;
+ ses_params.auth_alg = auth_alg;
+ ses_params.compl_queue = suite_context.queue;
+ ses_params.output_pool = suite_context.pool;
+ ses_params.cipher_key = cipher_key;
+ ses_params.iv = ses_iv;
+ ses_params.auth_key = auth_key;
+
+ rc = odp_crypto_session_create(&ses_params, &session, &status);
+ CU_ASSERT_FATAL(!rc);
+ CU_ASSERT(status == ODP_CRYPTO_SES_CREATE_ERR_NONE);
+ CU_ASSERT(odp_crypto_session_to_u64(session) !=
+ odp_crypto_session_to_u64(ODP_CRYPTO_SESSION_INVALID));
+
+ /* Prepare input data */
+ odp_packet_t pkt = odp_packet_alloc(suite_context.pool,
+ plaintext_len + digest_len);
+ CU_ASSERT(pkt != ODP_PACKET_INVALID);
+ data_addr = odp_packet_data(pkt);
+ memcpy(data_addr, plaintext, plaintext_len);
+ data_off = 0;
+
+ /* Prepare input/output params */
+ memset(&op_params, 0, sizeof(op_params));
+ op_params.session = session;
+ op_params.pkt = pkt;
+ op_params.out_pkt = pkt;
+ op_params.ctx = (void *)0xdeadbeef;
+
+ if (cipher_range) {
+ op_params.cipher_range = *cipher_range;
+ data_off = cipher_range->offset;
+ } else {
+ op_params.cipher_range.offset = data_off;
+ op_params.cipher_range.length = plaintext_len;
+ }
+ if (auth_range) {
+ op_params.auth_range = *auth_range;
+ } else {
+ op_params.auth_range.offset = data_off;
+ op_params.auth_range.length = plaintext_len;
+ }
+ if (op_iv_ptr)
+ op_params.override_iv_ptr = op_iv_ptr;
+
+ op_params.hash_result_offset = plaintext_len;
+
+ rc = odp_crypto_operation(&op_params, &posted, &result);
+ if (rc < 0) {
+ CU_FAIL("Failed odp_crypto_operation()");
+ goto cleanup;
+ }
+
+ if (posted) {
+ /* Poll completion queue for results */
+ do {
+ event = odp_queue_deq(suite_context.queue);
+ } while (event == ODP_EVENT_INVALID);
+
+ compl_event = odp_crypto_compl_from_event(event);
+ CU_ASSERT(odp_crypto_compl_to_u64(compl_event) ==
+ odp_crypto_compl_to_u64(odp_crypto_compl_from_event(event)));
+ odp_crypto_compl_result(compl_event, &result);
+ odp_crypto_compl_free(compl_event);
+ }
+
+ CU_ASSERT(result.ok);
+ CU_ASSERT(result.pkt == pkt);
+
+ if (cipher_alg != ODP_CIPHER_ALG_NULL)
+ CU_ASSERT(!memcmp(data_addr, ciphertext, ciphertext_len));
+
+ if (op == ODP_CRYPTO_OP_ENCODE && auth_alg != ODP_AUTH_ALG_NULL)
+ CU_ASSERT(!memcmp(data_addr + op_params.hash_result_offset,
+ digest, digest_len));
+
+ CU_ASSERT(result.ctx == (void *)0xdeadbeef);
+cleanup:
+ rc = odp_crypto_session_destroy(session);
+ CU_ASSERT(!rc);
+
+ odp_packet_free(pkt);
+}
+
+/* This test verifies the correctness of encode (plaintext -> ciphertext)
+ * operation for 3DES_CBC algorithm. IV for the operation is the session IV.
+ * In addition the test verifies if the implementation can use the
+ * packet buffer as completion event buffer.*/
+void crypto_test_enc_alg_3des_cbc(void)
+{
+ odp_crypto_key_t cipher_key = { .data = NULL, .length = 0 },
+ auth_key = { .data = NULL, .length = 0 };
+ odp_crypto_iv_t iv;
+ unsigned int test_vec_num = (sizeof(tdes_cbc_reference_length) /
+ sizeof(tdes_cbc_reference_length[0]));
+ unsigned int i;
+
+ for (i = 0; i < test_vec_num; i++) {
+ cipher_key.data = tdes_cbc_reference_key[i];
+ cipher_key.length = sizeof(tdes_cbc_reference_key[i]);
+ iv.data = tdes_cbc_reference_iv[i];
+ iv.length = sizeof(tdes_cbc_reference_iv[i]);
+
+ alg_test(ODP_CRYPTO_OP_ENCODE,
+ ODP_CIPHER_ALG_3DES_CBC,
+ iv,
+ NULL,
+ cipher_key,
+ ODP_AUTH_ALG_NULL,
+ auth_key,
+ NULL, NULL,
+ tdes_cbc_reference_plaintext[i],
+ tdes_cbc_reference_length[i],
+ tdes_cbc_reference_ciphertext[i],
+ tdes_cbc_reference_length[i], NULL, 0);
+ }
+}
+
+/* This test verifies the correctness of encode (plaintext -> ciphertext)
+ * operation for 3DES_CBC algorithm. IV for the operation is the operation IV.
+ * */
+void crypto_test_enc_alg_3des_cbc_ovr_iv(void)
+{
+ odp_crypto_key_t cipher_key = { .data = NULL, .length = 0 },
+ auth_key = { .data = NULL, .length = 0 };
+ odp_crypto_iv_t iv = { .data = NULL, .length = TDES_CBC_IV_LEN };
+ unsigned int test_vec_num = (sizeof(tdes_cbc_reference_length) /
+ sizeof(tdes_cbc_reference_length[0]));
+ unsigned int i;
+
+ for (i = 0; i < test_vec_num; i++) {
+ cipher_key.data = tdes_cbc_reference_key[i];
+ cipher_key.length = sizeof(tdes_cbc_reference_key[i]);
+
+ alg_test(ODP_CRYPTO_OP_ENCODE,
+ ODP_CIPHER_ALG_3DES_CBC,
+ iv,
+ tdes_cbc_reference_iv[i],
+ cipher_key,
+ ODP_AUTH_ALG_NULL,
+ auth_key,
+ NULL, NULL,
+ tdes_cbc_reference_plaintext[i],
+ tdes_cbc_reference_length[i],
+ tdes_cbc_reference_ciphertext[i],
+ tdes_cbc_reference_length[i], NULL, 0);
+ }
+}
+
+/* This test verifies the correctness of decode (ciphertext -> plaintext)
+ * operation for 3DES_CBC algorithm. IV for the operation is the session IV
+ * In addition the test verifies if the implementation can use the
+ * packet buffer as completion event buffer.
+ * */
+void crypto_test_dec_alg_3des_cbc(void)
+{
+ odp_crypto_key_t cipher_key = { .data = NULL, .length = 0 },
+ auth_key = { .data = NULL, .length = 0 };
+ odp_crypto_iv_t iv = { .data = NULL, .length = 0 };
+ unsigned int test_vec_num = (sizeof(tdes_cbc_reference_length) /
+ sizeof(tdes_cbc_reference_length[0]));
+ unsigned int i;
+
+ for (i = 0; i < test_vec_num; i++) {
+ cipher_key.data = tdes_cbc_reference_key[i];
+ cipher_key.length = sizeof(tdes_cbc_reference_key[i]);
+ iv.data = tdes_cbc_reference_iv[i];
+ iv.length = sizeof(tdes_cbc_reference_iv[i]);
+
+ alg_test(ODP_CRYPTO_OP_DECODE,
+ ODP_CIPHER_ALG_3DES_CBC,
+ iv,
+ NULL,
+ cipher_key,
+ ODP_AUTH_ALG_NULL,
+ auth_key,
+ NULL, NULL,
+ tdes_cbc_reference_ciphertext[i],
+ tdes_cbc_reference_length[i],
+ tdes_cbc_reference_plaintext[i],
+ tdes_cbc_reference_length[i], NULL, 0);
+ }
+}
+
+/* This test verifies the correctness of decode (ciphertext -> plaintext)
+ * operation for 3DES_CBC algorithm. IV for the operation is the operation IV
+ * In addition the test verifies if the implementation can use the
+ * packet buffer as completion event buffer.
+ * */
+void crypto_test_dec_alg_3des_cbc_ovr_iv(void)
+{
+ odp_crypto_key_t cipher_key = { .data = NULL, .length = 0 },
+ auth_key = { .data = NULL, .length = 0 };
+ odp_crypto_iv_t iv = { .data = NULL, .length = TDES_CBC_IV_LEN };
+ unsigned int test_vec_num = (sizeof(tdes_cbc_reference_length) /
+ sizeof(tdes_cbc_reference_length[0]));
+ unsigned int i;
+
+ for (i = 0; i < test_vec_num; i++) {
+ cipher_key.data = tdes_cbc_reference_key[i];
+ cipher_key.length = sizeof(tdes_cbc_reference_key[i]);
+
+ alg_test(ODP_CRYPTO_OP_DECODE,
+ ODP_CIPHER_ALG_3DES_CBC,
+ iv,
+ tdes_cbc_reference_iv[i],
+ cipher_key,
+ ODP_AUTH_ALG_NULL,
+ auth_key,
+ NULL, NULL,
+ tdes_cbc_reference_ciphertext[i],
+ tdes_cbc_reference_length[i],
+ tdes_cbc_reference_plaintext[i],
+ tdes_cbc_reference_length[i], NULL, 0);
+ }
+}
+
+/* This test verifies the correctness of encode (plaintext -> ciphertext)
+ * operation for AES128_GCM algorithm. IV for the operation is the session IV.
+ * In addition the test verifies if the implementation can use the
+ * packet buffer as completion event buffer.*/
+void crypto_test_enc_alg_aes128_gcm(void)
+{
+ odp_crypto_key_t cipher_key = { .data = NULL, .length = 0 },
+ auth_key = { .data = NULL, .length = 0 };
+ odp_crypto_iv_t iv = { .data = NULL, .length = AES128_GCM_IV_LEN };
+ unsigned int test_vec_num = (sizeof(aes128_gcm_reference_length) /
+ sizeof(aes128_gcm_reference_length[0]));
+ unsigned int i;
+
+ for (i = 0; i < test_vec_num; i++) {
+ cipher_key.data = aes128_gcm_reference_key[i];
+ cipher_key.length = sizeof(aes128_gcm_reference_key[i]);
+ iv.data = aes128_gcm_reference_iv[i];
+ iv.length = sizeof(aes128_gcm_reference_iv[i]);
+
+ alg_test(ODP_CRYPTO_OP_ENCODE,
+ ODP_CIPHER_ALG_AES128_GCM,
+ iv,
+ NULL,
+ cipher_key,
+ ODP_AUTH_ALG_AES128_GCM,
+ auth_key,
+ &aes128_gcm_cipher_range[i],
+ &aes128_gcm_auth_range[i],
+ aes128_gcm_reference_plaintext[i],
+ aes128_gcm_reference_length[i],
+ aes128_gcm_reference_ciphertext[i],
+ aes128_gcm_reference_length[i],
+ aes128_gcm_reference_ciphertext[i] +
+ aes128_gcm_reference_length[i],
+ AES128_GCM_CHECK_LEN);
+ }
+}
+
+/* This test verifies the correctness of encode (plaintext -> ciphertext)
+ * operation for AES128_GCM algorithm. IV for the operation is the operation IV.
+ * In addition the test verifies if the implementation can use the
+ * packet buffer as completion event buffer.*/
+void crypto_test_enc_alg_aes128_gcm_ovr_iv(void)
+{
+ odp_crypto_key_t cipher_key = { .data = NULL, .length = 0 },
+ auth_key = { .data = NULL, .length = 0 };
+ odp_crypto_iv_t iv = { .data = NULL, .length = AES128_GCM_IV_LEN };
+ unsigned int test_vec_num = (sizeof(aes128_gcm_reference_length) /
+ sizeof(aes128_gcm_reference_length[0]));
+ unsigned int i;
+
+ for (i = 0; i < test_vec_num; i++) {
+ cipher_key.data = aes128_gcm_reference_key[i];
+ cipher_key.length = sizeof(aes128_gcm_reference_key[i]);
+
+ alg_test(ODP_CRYPTO_OP_ENCODE,
+ ODP_CIPHER_ALG_AES128_GCM,
+ iv,
+ aes128_gcm_reference_iv[i],
+ cipher_key,
+ ODP_AUTH_ALG_AES128_GCM,
+ auth_key,
+ &aes128_gcm_cipher_range[i],
+ &aes128_gcm_auth_range[i],
+ aes128_gcm_reference_plaintext[i],
+ aes128_gcm_reference_length[i],
+ aes128_gcm_reference_ciphertext[i],
+ aes128_gcm_reference_length[i],
+ aes128_gcm_reference_ciphertext[i] +
+ aes128_gcm_reference_length[i],
+ AES128_GCM_CHECK_LEN);
+ }
+}
+
+/* This test verifies the correctness of decode (ciphertext -> plaintext)
+ * operation for AES128_GCM algorithm. IV for the operation is the session IV
+ * In addition the test verifies if the implementation can use the
+ * packet buffer as completion event buffer.
+ * */
+void crypto_test_dec_alg_aes128_gcm(void)
+{
+ odp_crypto_key_t cipher_key = { .data = NULL, .length = 0 },
+ auth_key = { .data = NULL, .length = 0 };
+ odp_crypto_iv_t iv = { .data = NULL, .length = AES128_GCM_IV_LEN };
+ unsigned int test_vec_num = (sizeof(aes128_gcm_reference_length) /
+ sizeof(aes128_gcm_reference_length[0]));
+ unsigned int i;
+
+ for (i = 0; i < test_vec_num; i++) {
+ cipher_key.data = aes128_gcm_reference_key[i];
+ cipher_key.length = sizeof(aes128_gcm_reference_key[i]);
+ iv.data = aes128_gcm_reference_iv[i];
+ iv.length = sizeof(aes128_gcm_reference_iv[i]);
+
+ alg_test(ODP_CRYPTO_OP_DECODE,
+ ODP_CIPHER_ALG_AES128_GCM,
+ iv,
+ NULL,
+ cipher_key,
+ ODP_AUTH_ALG_AES128_GCM,
+ auth_key,
+ &aes128_gcm_cipher_range[i],
+ &aes128_gcm_auth_range[i],
+ aes128_gcm_reference_ciphertext[i],
+ aes128_gcm_reference_length[i] + AES128_GCM_CHECK_LEN,
+ aes128_gcm_reference_plaintext[i],
+ aes128_gcm_reference_length[i],
+ aes128_gcm_reference_ciphertext[i] +
+ aes128_gcm_reference_length[i],
+ AES128_GCM_CHECK_LEN);
+ }
+}
+
+/* This test verifies the correctness of decode (ciphertext -> plaintext)
+ * operation for AES128_GCM algorithm. IV for the operation is the operation IV
+ * In addition the test verifies if the implementation can use the
+ * packet buffer as completion event buffer.
+ * */
+void crypto_test_dec_alg_aes128_gcm_ovr_iv(void)
+{
+ odp_crypto_key_t cipher_key = { .data = NULL, .length = 0 },
+ auth_key = { .data = NULL, .length = 0 };
+ odp_crypto_iv_t iv = { .data = NULL, .length = AES128_GCM_IV_LEN };
+ unsigned int test_vec_num = (sizeof(aes128_gcm_reference_length) /
+ sizeof(aes128_gcm_reference_length[0]));
+ unsigned int i;
+
+ for (i = 0; i < test_vec_num; i++) {
+ cipher_key.data = aes128_gcm_reference_key[i];
+ cipher_key.length = sizeof(aes128_gcm_reference_key[i]);
+
+ alg_test(ODP_CRYPTO_OP_DECODE,
+ ODP_CIPHER_ALG_AES128_GCM,
+ iv,
+ aes128_gcm_reference_iv[i],
+ cipher_key,
+ ODP_AUTH_ALG_AES128_GCM,
+ auth_key,
+ &aes128_gcm_cipher_range[i],
+ &aes128_gcm_auth_range[i],
+ aes128_gcm_reference_ciphertext[i],
+ aes128_gcm_reference_length[i] + AES128_GCM_CHECK_LEN,
+ aes128_gcm_reference_plaintext[i],
+ aes128_gcm_reference_length[i],
+ aes128_gcm_reference_ciphertext[i] +
+ aes128_gcm_reference_length[i],
+ AES128_GCM_CHECK_LEN);
+ }
+}
+
+/* This test verifies the correctness of encode (plaintext -> ciphertext)
+ * operation for AES128_CBC algorithm. IV for the operation is the session IV.
+ * In addition the test verifies if the implementation can use the
+ * packet buffer as completion event buffer.*/
+void crypto_test_enc_alg_aes128_cbc(void)
+{
+ odp_crypto_key_t cipher_key = { .data = NULL, .length = 0 },
+ auth_key = { .data = NULL, .length = 0 };
+ odp_crypto_iv_t iv;
+ unsigned int test_vec_num = (sizeof(aes128_cbc_reference_length) /
+ sizeof(aes128_cbc_reference_length[0]));
+ unsigned int i;
+
+ for (i = 0; i < test_vec_num; i++) {
+ cipher_key.data = aes128_cbc_reference_key[i];
+ cipher_key.length = sizeof(aes128_cbc_reference_key[i]);
+ iv.data = aes128_cbc_reference_iv[i];
+ iv.length = sizeof(aes128_cbc_reference_iv[i]);
+
+ alg_test(ODP_CRYPTO_OP_ENCODE,
+ ODP_CIPHER_ALG_AES128_CBC,
+ iv,
+ NULL,
+ cipher_key,
+ ODP_AUTH_ALG_NULL,
+ auth_key,
+ NULL, NULL,
+ aes128_cbc_reference_plaintext[i],
+ aes128_cbc_reference_length[i],
+ aes128_cbc_reference_ciphertext[i],
+ aes128_cbc_reference_length[i], NULL, 0);
+ }
+}
+
+/* This test verifies the correctness of encode (plaintext -> ciphertext)
+ * operation for AES128_CBC algorithm. IV for the operation is the operation IV.
+ * */
+void crypto_test_enc_alg_aes128_cbc_ovr_iv(void)
+{
+ odp_crypto_key_t cipher_key = { .data = NULL, .length = 0 },
+ auth_key = { .data = NULL, .length = 0 };
+ odp_crypto_iv_t iv = { .data = NULL, .length = AES128_CBC_IV_LEN };
+ unsigned int test_vec_num = (sizeof(aes128_cbc_reference_length) /
+ sizeof(aes128_cbc_reference_length[0]));
+ unsigned int i;
+
+ for (i = 0; i < test_vec_num; i++) {
+ cipher_key.data = aes128_cbc_reference_key[i];
+ cipher_key.length = sizeof(aes128_cbc_reference_key[i]);
+
+ alg_test(ODP_CRYPTO_OP_ENCODE,
+ ODP_CIPHER_ALG_AES128_CBC,
+ iv,
+ aes128_cbc_reference_iv[i],
+ cipher_key,
+ ODP_AUTH_ALG_NULL,
+ auth_key,
+ NULL, NULL,
+ aes128_cbc_reference_plaintext[i],
+ aes128_cbc_reference_length[i],
+ aes128_cbc_reference_ciphertext[i],
+ aes128_cbc_reference_length[i], NULL, 0);
+ }
+}
+
+/* This test verifies the correctness of decode (ciphertext -> plaintext)
+ * operation for AES128_CBC algorithm. IV for the operation is the session IV
+ * In addition the test verifies if the implementation can use the
+ * packet buffer as completion event buffer.
+ * */
+void crypto_test_dec_alg_aes128_cbc(void)
+{
+ odp_crypto_key_t cipher_key = { .data = NULL, .length = 0 },
+ auth_key = { .data = NULL, .length = 0 };
+ odp_crypto_iv_t iv = { .data = NULL, .length = 0 };
+ unsigned int test_vec_num = (sizeof(aes128_cbc_reference_length) /
+ sizeof(aes128_cbc_reference_length[0]));
+ unsigned int i;
+
+ for (i = 0; i < test_vec_num; i++) {
+ cipher_key.data = aes128_cbc_reference_key[i];
+ cipher_key.length = sizeof(aes128_cbc_reference_key[i]);
+ iv.data = aes128_cbc_reference_iv[i];
+ iv.length = sizeof(aes128_cbc_reference_iv[i]);
+
+ alg_test(ODP_CRYPTO_OP_DECODE,
+ ODP_CIPHER_ALG_AES128_CBC,
+ iv,
+ NULL,
+ cipher_key,
+ ODP_AUTH_ALG_NULL,
+ auth_key,
+ NULL, NULL,
+ aes128_cbc_reference_ciphertext[i],
+ aes128_cbc_reference_length[i],
+ aes128_cbc_reference_plaintext[i],
+ aes128_cbc_reference_length[i], NULL, 0);
+ }
+}
+
+/* This test verifies the correctness of decode (ciphertext -> plaintext)
+ * operation for AES128_CBC algorithm. IV for the operation is the operation IV
+ * In addition the test verifies if the implementation can use the
+ * packet buffer as completion event buffer.
+ * */
+void crypto_test_dec_alg_aes128_cbc_ovr_iv(void)
+{
+ odp_crypto_key_t cipher_key = { .data = NULL, .length = 0 },
+ auth_key = { .data = NULL, .length = 0 };
+ odp_crypto_iv_t iv = { .data = NULL, .length = AES128_CBC_IV_LEN };
+ unsigned int test_vec_num = (sizeof(aes128_cbc_reference_length) /
+ sizeof(aes128_cbc_reference_length[0]));
+ unsigned int i;
+
+ for (i = 0; i < test_vec_num; i++) {
+ cipher_key.data = aes128_cbc_reference_key[i];
+ cipher_key.length = sizeof(aes128_cbc_reference_key[i]);
+
+ alg_test(ODP_CRYPTO_OP_DECODE,
+ ODP_CIPHER_ALG_AES128_CBC,
+ iv,
+ aes128_cbc_reference_iv[i],
+ cipher_key,
+ ODP_AUTH_ALG_NULL,
+ auth_key,
+ NULL, NULL,
+ aes128_cbc_reference_ciphertext[i],
+ aes128_cbc_reference_length[i],
+ aes128_cbc_reference_plaintext[i],
+ aes128_cbc_reference_length[i], NULL, 0);
+ }
+}
+
+/* This test verifies the correctness of HMAC_MD5 digest operation.
+ * The output check length is truncated to 12 bytes (96 bits) as
+ * returned by the crypto operation API call.
+ * Note that hash digest is a one-way operation.
+ * In addition the test verifies if the implementation can use the
+ * packet buffer as completion event buffer.
+ * */
+void crypto_test_alg_hmac_md5(void)
+{
+ odp_crypto_key_t cipher_key = { .data = NULL, .length = 0 },
+ auth_key = { .data = NULL, .length = 0 };
+ odp_crypto_iv_t iv = { .data = NULL, .length = 0 };
+
+ unsigned int test_vec_num = (sizeof(hmac_md5_reference_length) /
+ sizeof(hmac_md5_reference_length[0]));
+ unsigned int i;
+
+ for (i = 0; i < test_vec_num; i++) {
+ auth_key.data = hmac_md5_reference_key[i];
+ auth_key.length = sizeof(hmac_md5_reference_key[i]);
+
+ alg_test(ODP_CRYPTO_OP_ENCODE,
+ ODP_CIPHER_ALG_NULL,
+ iv,
+ iv.data,
+ cipher_key,
+ ODP_AUTH_ALG_MD5_96,
+ auth_key,
+ NULL, NULL,
+ hmac_md5_reference_plaintext[i],
+ hmac_md5_reference_length[i],
+ NULL, 0,
+ hmac_md5_reference_digest[i],
+ HMAC_MD5_96_CHECK_LEN);
+ }
+}
+
+/* This test verifies the correctness of HMAC_SHA256 digest operation.
+ * The output check length is truncated to 16 bytes (128 bits) as
+ * returned by the crypto operation API call.
+ * Note that hash digest is a one-way operation.
+ * In addition the test verifies if the implementation can use the
+ * packet buffer as completion event buffer.
+ * */
+void crypto_test_alg_hmac_sha256(void)
+{
+ odp_crypto_key_t cipher_key = { .data = NULL, .length = 0 },
+ auth_key = { .data = NULL, .length = 0 };
+ odp_crypto_iv_t iv = { .data = NULL, .length = 0 };
+
+ unsigned int test_vec_num = (sizeof(hmac_sha256_reference_length) /
+ sizeof(hmac_sha256_reference_length[0]));
+
+ unsigned int i;
+
+ for (i = 0; i < test_vec_num; i++) {
+ auth_key.data = hmac_sha256_reference_key[i];
+ auth_key.length = sizeof(hmac_sha256_reference_key[i]);
+
+ alg_test(ODP_CRYPTO_OP_ENCODE,
+ ODP_CIPHER_ALG_NULL,
+ iv,
+ iv.data,
+ cipher_key,
+ ODP_AUTH_ALG_SHA256_128,
+ auth_key,
+ NULL, NULL,
+ hmac_sha256_reference_plaintext[i],
+ hmac_sha256_reference_length[i],
+ NULL, 0,
+ hmac_sha256_reference_digest[i],
+ HMAC_SHA256_128_CHECK_LEN);
+ }
+}
+
+int crypto_suite_sync_init(void)
+{
+ suite_context.pool = odp_pool_lookup("packet_pool");
+ if (suite_context.pool == ODP_POOL_INVALID)
+ return -1;
+
+ suite_context.queue = ODP_QUEUE_INVALID;
+ suite_context.pref_mode = ODP_CRYPTO_SYNC;
+ return 0;
+}
+
+int crypto_suite_async_init(void)
+{
+ suite_context.pool = odp_pool_lookup("packet_pool");
+ if (suite_context.pool == ODP_POOL_INVALID)
+ return -1;
+ suite_context.queue = odp_queue_lookup("crypto-out");
+ if (suite_context.queue == ODP_QUEUE_INVALID)
+ return -1;
+
+ suite_context.pref_mode = ODP_CRYPTO_ASYNC;
+ return 0;
+}
+
+odp_testinfo_t crypto_suite[] = {
+ ODP_TEST_INFO(crypto_test_enc_alg_3des_cbc),
+ ODP_TEST_INFO(crypto_test_dec_alg_3des_cbc),
+ ODP_TEST_INFO(crypto_test_enc_alg_3des_cbc_ovr_iv),
+ ODP_TEST_INFO(crypto_test_dec_alg_3des_cbc_ovr_iv),
+ ODP_TEST_INFO(crypto_test_enc_alg_aes128_cbc),
+ ODP_TEST_INFO(crypto_test_dec_alg_aes128_cbc),
+ ODP_TEST_INFO(crypto_test_enc_alg_aes128_cbc_ovr_iv),
+ ODP_TEST_INFO(crypto_test_dec_alg_aes128_cbc_ovr_iv),
+ ODP_TEST_INFO(crypto_test_enc_alg_aes128_gcm),
+ ODP_TEST_INFO(crypto_test_enc_alg_aes128_gcm_ovr_iv),
+ ODP_TEST_INFO(crypto_test_dec_alg_aes128_gcm),
+ ODP_TEST_INFO(crypto_test_dec_alg_aes128_gcm_ovr_iv),
+ ODP_TEST_INFO(crypto_test_alg_hmac_md5),
+ ODP_TEST_INFO(crypto_test_alg_hmac_sha256),
+ ODP_TEST_INFO_NULL,
+};
diff --git a/test/common_plat/validation/api/crypto/odp_crypto_test_inp.h b/test/common_plat/validation/api/crypto/odp_crypto_test_inp.h
new file mode 100644
index 000000000..8bda34472
--- /dev/null
+++ b/test/common_plat/validation/api/crypto/odp_crypto_test_inp.h
@@ -0,0 +1,21 @@
+/* Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef ODP_CRYPTO_TEST_ASYNC_INP_
+#define ODP_CRYPTO_TEST_ASYNC_INP_
+
+#include <odp_cunit_common.h>
+
+/* Suite names */
+#define ODP_CRYPTO_ASYNC_INP "odp_crypto_async_inp"
+#define ODP_CRYPTO_SYNC_INP "odp_crypto_sync_inp"
+
+/* Suite test array */
+extern odp_testinfo_t crypto_suite[];
+
+int crypto_suite_sync_init(void);
+int crypto_suite_async_init(void);
+
+#endif
diff --git a/test/common_plat/validation/api/crypto/test_vectors.h b/test/common_plat/validation/api/crypto/test_vectors.h
new file mode 100644
index 000000000..da4610f33
--- /dev/null
+++ b/test/common_plat/validation/api/crypto/test_vectors.h
@@ -0,0 +1,353 @@
+/* Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _ODP_TEST_CRYPTO_VECTORS_H_
+#define _ODP_TEST_CRYPTO_VECTORS_H_
+
+#include "test_vectors_len.h"
+/* TDES-CBC reference vectors, according to
+ * "http://csrc.nist.gov/groups/STM/cavp/documents/des/DESMMT.pdf"
+ */
+static uint8_t tdes_cbc_reference_key[][TDES_CBC_KEY_LEN] = {
+ {0x62, 0x7f, 0x46, 0x0e, 0x08, 0x10, 0x4a, 0x10, 0x43, 0xcd, 0x26, 0x5d,
+ 0x58, 0x40, 0xea, 0xf1, 0x31, 0x3e, 0xdf, 0x97, 0xdf, 0x2a, 0x8a, 0x8c,
+ },
+
+ {0x37, 0xae, 0x5e, 0xbf, 0x46, 0xdf, 0xf2, 0xdc, 0x07, 0x54, 0xb9, 0x4f,
+ 0x31, 0xcb, 0xb3, 0x85, 0x5e, 0x7f, 0xd3, 0x6d, 0xc8, 0x70, 0xbf, 0xae}
+};
+
+static uint8_t tdes_cbc_reference_iv[][TDES_CBC_IV_LEN] = {
+ {0x8e, 0x29, 0xf7, 0x5e, 0xa7, 0x7e, 0x54, 0x75},
+
+ {0x3d, 0x1d, 0xe3, 0xcc, 0x13, 0x2e, 0x3b, 0x65}
+};
+
+/** length in bytes */
+static uint32_t tdes_cbc_reference_length[] = { 8, 16 };
+
+static uint8_t
+tdes_cbc_reference_plaintext[][TDES_CBC_MAX_DATA_LEN] = {
+ {0x32, 0x6a, 0x49, 0x4c, 0xd3, 0x3f, 0xe7, 0x56},
+
+ {0x84, 0x40, 0x1f, 0x78, 0xfe, 0x6c, 0x10, 0x87, 0x6d, 0x8e, 0xa2, 0x30,
+ 0x94, 0xea, 0x53, 0x09}
+};
+
+static uint8_t
+tdes_cbc_reference_ciphertext[][TDES_CBC_MAX_DATA_LEN] = {
+ {0xb2, 0x2b, 0x8d, 0x66, 0xde, 0x97, 0x06, 0x92},
+
+ {0x7b, 0x1f, 0x7c, 0x7e, 0x3b, 0x1c, 0x94, 0x8e, 0xbd, 0x04, 0xa7, 0x5f,
+ 0xfb, 0xa7, 0xd2, 0xf5}
+};
+
+static uint8_t aes128_cbc_reference_key[][AES128_CBC_KEY_LEN] = {
+ {0x06, 0xa9, 0x21, 0x40, 0x36, 0xb8, 0xa1, 0x5b,
+ 0x51, 0x2e, 0x03, 0xd5, 0x34, 0x12, 0x00, 0x06 },
+ {0xc2, 0x86, 0x69, 0x6d, 0x88, 0x7c, 0x9a, 0xa0,
+ 0x61, 0x1b, 0xbb, 0x3e, 0x20, 0x25, 0xa4, 0x5a },
+ {0x6c, 0x3e, 0xa0, 0x47, 0x76, 0x30, 0xce, 0x21,
+ 0xa2, 0xce, 0x33, 0x4a, 0xa7, 0x46, 0xc2, 0xcd },
+ {0x56, 0xe4, 0x7a, 0x38, 0xc5, 0x59, 0x89, 0x74,
+ 0xbc, 0x46, 0x90, 0x3d, 0xba, 0x29, 0x03, 0x49 }
+};
+
+static uint8_t aes128_cbc_reference_iv[][AES128_CBC_IV_LEN] = {
+ { 0x3d, 0xaf, 0xba, 0x42, 0x9d, 0x9e, 0xb4, 0x30,
+ 0xb4, 0x22, 0xda, 0x80, 0x2c, 0x9f, 0xac, 0x41 },
+ { 0x56, 0x2e, 0x17, 0x99, 0x6d, 0x09, 0x3d, 0x28,
+ 0xdd, 0xb3, 0xba, 0x69, 0x5a, 0x2e, 0x6f, 0x58 },
+ { 0xc7, 0x82, 0xdc, 0x4c, 0x09, 0x8c, 0x66, 0xcb,
+ 0xd9, 0xcd, 0x27, 0xd8, 0x25, 0x68, 0x2c, 0x81 },
+ { 0x8c, 0xe8, 0x2e, 0xef, 0xbe, 0xa0, 0xda, 0x3c,
+ 0x44, 0x69, 0x9e, 0xd7, 0xdb, 0x51, 0xb7, 0xd9 }
+};
+
+/** length in bytes */
+static uint32_t aes128_cbc_reference_length[] = { 16, 32, 48, 64 };
+
+static uint8_t
+aes128_cbc_reference_plaintext[][AES128_CBC_MAX_DATA_LEN] = {
+ "Single block msg",
+ { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f },
+ "This is a 48-byte message (exactly 3 AES blocks)",
+ { 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
+ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
+ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
+ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
+ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf }
+};
+
+static uint8_t
+aes128_cbc_reference_ciphertext[][AES128_CBC_MAX_DATA_LEN] = {
+ { 0xe3, 0x53, 0x77, 0x9c, 0x10, 0x79, 0xae, 0xb8,
+ 0x27, 0x08, 0x94, 0x2d, 0xbe, 0x77, 0x18, 0x1a },
+ { 0xd2, 0x96, 0xcd, 0x94, 0xc2, 0xcc, 0xcf, 0x8a,
+ 0x3a, 0x86, 0x30, 0x28, 0xb5, 0xe1, 0xdc, 0x0a,
+ 0x75, 0x86, 0x60, 0x2d, 0x25, 0x3c, 0xff, 0xf9,
+ 0x1b, 0x82, 0x66, 0xbe, 0xa6, 0xd6, 0x1a, 0xb1 },
+ { 0xd0, 0xa0, 0x2b, 0x38, 0x36, 0x45, 0x17, 0x53,
+ 0xd4, 0x93, 0x66, 0x5d, 0x33, 0xf0, 0xe8, 0x86,
+ 0x2d, 0xea, 0x54, 0xcd, 0xb2, 0x93, 0xab, 0xc7,
+ 0x50, 0x69, 0x39, 0x27, 0x67, 0x72, 0xf8, 0xd5,
+ 0x02, 0x1c, 0x19, 0x21, 0x6b, 0xad, 0x52, 0x5c,
+ 0x85, 0x79, 0x69, 0x5d, 0x83, 0xba, 0x26, 0x84 },
+ { 0xc3, 0x0e, 0x32, 0xff, 0xed, 0xc0, 0x77, 0x4e,
+ 0x6a, 0xff, 0x6a, 0xf0, 0x86, 0x9f, 0x71, 0xaa,
+ 0x0f, 0x3a, 0xf0, 0x7a, 0x9a, 0x31, 0xa9, 0xc6,
+ 0x84, 0xdb, 0x20, 0x7e, 0xb0, 0xef, 0x8e, 0x4e,
+ 0x35, 0x90, 0x7a, 0xa6, 0x32, 0xc3, 0xff, 0xdf,
+ 0x86, 0x8b, 0xb7, 0xb2, 0x9d, 0x3d, 0x46, 0xad,
+ 0x83, 0xce, 0x9f, 0x9a, 0x10, 0x2e, 0xe9, 0x9d,
+ 0x49, 0xa5, 0x3e, 0x87, 0xf4, 0xc3, 0xda, 0x55 }
+};
+
+/* AES-GCM test vectors extracted from
+ * https://tools.ietf.org/html/draft-mcgrew-gcm-test-01#section-2
+ */
+static uint8_t aes128_gcm_reference_key[][AES128_GCM_KEY_LEN] = {
+ { 0x4c, 0x80, 0xcd, 0xef, 0xbb, 0x5d, 0x10, 0xda,
+ 0x90, 0x6a, 0xc7, 0x3c, 0x36, 0x13, 0xa6, 0x34 },
+ { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+ 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08 },
+ { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { 0x3d, 0xe0, 0x98, 0x74, 0xb3, 0x88, 0xe6, 0x49,
+ 0x19, 0x88, 0xd0, 0xc3, 0x60, 0x7e, 0xae, 0x1f }
+};
+
+static uint8_t aes128_gcm_reference_iv[][AES128_GCM_IV_LEN] = {
+ { 0x2e, 0x44, 0x3b, 0x68, 0x49, 0x56, 0xed, 0x7e,
+ 0x3b, 0x24, 0x4c, 0xfe },
+ { 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+ 0xde, 0xca, 0xf8, 0x88 },
+ { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00 },
+ { 0x57, 0x69, 0x0e, 0x43, 0x4e, 0x28, 0x00, 0x00,
+ 0xa2, 0xfc, 0xa1, 0xa3 }
+};
+
+static uint32_t aes128_gcm_reference_length[] = { 84, 72, 72, 40};
+
+static odp_crypto_data_range_t aes128_gcm_cipher_range[] = {
+ { .offset = 12, .length = 72 },
+ { .offset = 8, .length = 64 },
+ { .offset = 8, .length = 64 },
+ { .offset = 12, .length = 28 },
+};
+
+static odp_crypto_data_range_t aes128_gcm_auth_range[] = {
+ { .offset = 0, .length = 84 },
+ { .offset = 0, .length = 72 },
+ { .offset = 0, .length = 72 },
+ { .offset = 0, .length = 40 },
+};
+
+static uint8_t
+aes128_gcm_reference_plaintext[][AES128_GCM_MAX_DATA_LEN] = {
+ { /* Aad */
+ 0x00, 0x00, 0x43, 0x21, 0x87, 0x65, 0x43, 0x21,
+ 0x00, 0x00, 0x00, 0x00,
+ /* Plain */
+ 0x45, 0x00, 0x00, 0x48, 0x69, 0x9a, 0x00, 0x00,
+ 0x80, 0x11, 0x4d, 0xb7, 0xc0, 0xa8, 0x01, 0x02,
+ 0xc0, 0xa8, 0x01, 0x01, 0x0a, 0x9b, 0xf1, 0x56,
+ 0x38, 0xd3, 0x01, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x04, 0x5f, 0x73, 0x69,
+ 0x70, 0x04, 0x5f, 0x75, 0x64, 0x70, 0x03, 0x73,
+ 0x69, 0x70, 0x09, 0x63, 0x79, 0x62, 0x65, 0x72,
+ 0x63, 0x69, 0x74, 0x79, 0x02, 0x64, 0x6b, 0x00,
+ 0x00, 0x21, 0x00, 0x01, 0x01, 0x02, 0x02, 0x01 },
+
+ { /* Aad */
+ 0x00, 0x00, 0xa5, 0xf8, 0x00, 0x00, 0x00, 0x0a,
+ /* Plain */
+ 0x45, 0x00, 0x00, 0x3e, 0x69, 0x8f, 0x00, 0x00,
+ 0x80, 0x11, 0x4d, 0xcc, 0xc0, 0xa8, 0x01, 0x02,
+ 0xc0, 0xa8, 0x01, 0x01, 0x0a, 0x98, 0x00, 0x35,
+ 0x00, 0x2a, 0x23, 0x43, 0xb2, 0xd0, 0x01, 0x00,
+ 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0x73, 0x69, 0x70, 0x09, 0x63, 0x79, 0x62,
+ 0x65, 0x72, 0x63, 0x69, 0x74, 0x79, 0x02, 0x64,
+ 0x6b, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01 },
+
+ { /* Aad */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ /* Plain */
+ 0x45, 0x00, 0x00, 0x3c, 0x99, 0xc5, 0x00, 0x00,
+ 0x80, 0x01, 0xcb, 0x7a, 0x40, 0x67, 0x93, 0x18,
+ 0x01, 0x01, 0x01, 0x01, 0x08, 0x00, 0x07, 0x5c,
+ 0x02, 0x00, 0x44, 0x00, 0x61, 0x62, 0x63, 0x64,
+ 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c,
+ 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74,
+ 0x75, 0x76, 0x77, 0x61, 0x62, 0x63, 0x64, 0x65,
+ 0x66, 0x67, 0x68, 0x69, 0x01, 0x02, 0x02, 0x01 },
+
+ { /* Aad */
+ 0x42, 0xf6, 0x7e, 0x3f, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x10, 0x10,
+ /* Plain */
+ 0x45, 0x00, 0x00, 0x1c, 0x42, 0xa2, 0x00, 0x00,
+ 0x80, 0x01, 0x44, 0x1f, 0x40, 0x67, 0x93, 0xb6,
+ 0xe0, 0x00, 0x00, 0x02, 0x0a, 0x00, 0xf5, 0xff,
+ 0x01, 0x02, 0x02, 0x01 }
+};
+
+static uint8_t
+aes128_gcm_reference_ciphertext[][AES128_GCM_MAX_DATA_LEN] = {
+ { /* Aad */
+ 0x00, 0x00, 0x43, 0x21, 0x87, 0x65, 0x43, 0x21,
+ 0x00, 0x00, 0x00, 0x00,
+ /* Plain */
+ 0xfe, 0xcf, 0x53, 0x7e, 0x72, 0x9d, 0x5b, 0x07,
+ 0xdc, 0x30, 0xdf, 0x52, 0x8d, 0xd2, 0x2b, 0x76,
+ 0x8d, 0x1b, 0x98, 0x73, 0x66, 0x96, 0xa6, 0xfd,
+ 0x34, 0x85, 0x09, 0xfa, 0x13, 0xce, 0xac, 0x34,
+ 0xcf, 0xa2, 0x43, 0x6f, 0x14, 0xa3, 0xf3, 0xcf,
+ 0x65, 0x92, 0x5b, 0xf1, 0xf4, 0xa1, 0x3c, 0x5d,
+ 0x15, 0xb2, 0x1e, 0x18, 0x84, 0xf5, 0xff, 0x62,
+ 0x47, 0xae, 0xab, 0xb7, 0x86, 0xb9, 0x3b, 0xce,
+ 0x61, 0xbc, 0x17, 0xd7, 0x68, 0xfd, 0x97, 0x32,
+ /* Digest */
+ 0x45, 0x90, 0x18, 0x14, 0x8f, 0x6c, 0xbe, 0x72,
+ 0x2f, 0xd0, 0x47, 0x96, 0x56, 0x2d, 0xfd, 0xb4 },
+
+ { /* Aad */
+ 0x00, 0x00, 0xa5, 0xf8, 0x00, 0x00, 0x00, 0x0a,
+ /* Plain */
+ 0xde, 0xb2, 0x2c, 0xd9, 0xb0, 0x7c, 0x72, 0xc1,
+ 0x6e, 0x3a, 0x65, 0xbe, 0xeb, 0x8d, 0xf3, 0x04,
+ 0xa5, 0xa5, 0x89, 0x7d, 0x33, 0xae, 0x53, 0x0f,
+ 0x1b, 0xa7, 0x6d, 0x5d, 0x11, 0x4d, 0x2a, 0x5c,
+ 0x3d, 0xe8, 0x18, 0x27, 0xc1, 0x0e, 0x9a, 0x4f,
+ 0x51, 0x33, 0x0d, 0x0e, 0xec, 0x41, 0x66, 0x42,
+ 0xcf, 0xbb, 0x85, 0xa5, 0xb4, 0x7e, 0x48, 0xa4,
+ 0xec, 0x3b, 0x9b, 0xa9, 0x5d, 0x91, 0x8b, 0xd1,
+ /* Digest */
+ 0x83, 0xb7, 0x0d, 0x3a, 0xa8, 0xbc, 0x6e, 0xe4,
+ 0xc3, 0x09, 0xe9, 0xd8, 0x5a, 0x41, 0xad, 0x4a },
+ { /* Aad */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ /* Plain */
+ 0x46, 0x88, 0xda, 0xf2, 0xf9, 0x73, 0xa3, 0x92,
+ 0x73, 0x29, 0x09, 0xc3, 0x31, 0xd5, 0x6d, 0x60,
+ 0xf6, 0x94, 0xab, 0xaa, 0x41, 0x4b, 0x5e, 0x7f,
+ 0xf5, 0xfd, 0xcd, 0xff, 0xf5, 0xe9, 0xa2, 0x84,
+ 0x45, 0x64, 0x76, 0x49, 0x27, 0x19, 0xff, 0xb6,
+ 0x4d, 0xe7, 0xd9, 0xdc, 0xa1, 0xe1, 0xd8, 0x94,
+ 0xbc, 0x3b, 0xd5, 0x78, 0x73, 0xed, 0x4d, 0x18,
+ 0x1d, 0x19, 0xd4, 0xd5, 0xc8, 0xc1, 0x8a, 0xf3,
+ /* Digest */
+ 0xf8, 0x21, 0xd4, 0x96, 0xee, 0xb0, 0x96, 0xe9,
+ 0x8a, 0xd2, 0xb6, 0x9e, 0x47, 0x99, 0xc7, 0x1d },
+
+ { /* Aad */
+ 0x42, 0xf6, 0x7e, 0x3f, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x10, 0x10,
+ /* Plain */
+ 0xfb, 0xa2, 0xca, 0x84, 0x5e, 0x5d, 0xf9, 0xf0,
+ 0xf2, 0x2c, 0x3e, 0x6e, 0x86, 0xdd, 0x83, 0x1e,
+ 0x1f, 0xc6, 0x57, 0x92, 0xcd, 0x1a, 0xf9, 0x13,
+ 0x0e, 0x13, 0x79, 0xed,
+ /* Digest */
+ 0x36, 0x9f, 0x07, 0x1f, 0x35, 0xe0, 0x34, 0xbe,
+ 0x95, 0xf1, 0x12, 0xe4, 0xe7, 0xd0, 0x5d, 0x35 }
+};
+
+static uint8_t hmac_md5_reference_key[][HMAC_MD5_KEY_LEN] = {
+ { 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+ 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b },
+
+ /* "Jefe" */
+ { 0x4a, 0x65, 0x66, 0x65 },
+
+ { 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa }
+};
+
+static uint32_t hmac_md5_reference_length[] = { 8, 28, 50 };
+
+static uint8_t
+hmac_md5_reference_plaintext[][HMAC_MD5_MAX_DATA_LEN] = {
+ /* "Hi There" */
+ { 0x48, 0x69, 0x20, 0x54, 0x68, 0x65, 0x72, 0x65},
+
+	/* "what do ya want for nothing?" */
+ { 0x77, 0x68, 0x61, 0x74, 0x20, 0x64, 0x6f, 0x20,
+ 0x79, 0x61, 0x20, 0x77, 0x61, 0x6e, 0x74, 0x20,
+ 0x66, 0x6f, 0x72, 0x20, 0x6e, 0x6f, 0x74, 0x68,
+ 0x69, 0x6e, 0x67, 0x3f },
+
+ { 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd }
+};
+
+static uint8_t hmac_md5_reference_digest[][HMAC_MD5_DIGEST_LEN] = {
+ { 0x92, 0x94, 0x72, 0x7a, 0x36, 0x38, 0xbb, 0x1c,
+ 0x13, 0xf4, 0x8e, 0xf8, 0x15, 0x8b, 0xfc, 0x9d },
+
+ { 0x75, 0x0c, 0x78, 0x3e, 0x6a, 0xb0, 0xb5, 0x03,
+ 0xea, 0xa8, 0x6e, 0x31, 0x0a, 0x5d, 0xb7, 0x38 },
+
+ { 0x56, 0xbe, 0x34, 0x52, 0x1d, 0x14, 0x4c, 0x88,
+ 0xdb, 0xb8, 0xc7, 0x33, 0xf0, 0xe8, 0xb3, 0xf6 }
+};
+
+static uint8_t hmac_sha256_reference_key[][HMAC_SHA256_KEY_LEN] = {
+ { 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+ 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+ 0x0b, 0x0b, 0x0b, 0x0b },
+
+ /* "Jefe" */
+ { 0x4a, 0x65, 0x66, 0x65 },
+
+ { 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa }
+};
+
+static uint32_t hmac_sha256_reference_length[] = { 8, 28, 50 };
+
+static uint8_t
+hmac_sha256_reference_plaintext[][HMAC_SHA256_MAX_DATA_LEN] = {
+ /* "Hi There" */
+ { 0x48, 0x69, 0x20, 0x54, 0x68, 0x65, 0x72, 0x65},
+
+	/* "what do ya want for nothing?" */
+ { 0x77, 0x68, 0x61, 0x74, 0x20, 0x64, 0x6f, 0x20,
+ 0x79, 0x61, 0x20, 0x77, 0x61, 0x6e, 0x74, 0x20,
+ 0x66, 0x6f, 0x72, 0x20, 0x6e, 0x6f, 0x74, 0x68,
+ 0x69, 0x6e, 0x67, 0x3f },
+
+ { 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd }
+};
+
+static uint8_t hmac_sha256_reference_digest[][HMAC_SHA256_DIGEST_LEN] = {
+ { 0xb0, 0x34, 0x4c, 0x61, 0xd8, 0xdb, 0x38, 0x53,
+ 0x5c, 0xa8, 0xaf, 0xce, 0xaf, 0x0b, 0xf1, 0x2b },
+
+ { 0x5b, 0xdc, 0xc1, 0x46, 0xbf, 0x60, 0x75, 0x4e,
+ 0x6a, 0x04, 0x24, 0x26, 0x08, 0x95, 0x75, 0xc7 },
+
+ { 0x77, 0x3e, 0xa9, 0x1e, 0x36, 0x80, 0x0e, 0x46,
+ 0x85, 0x4d, 0xb8, 0xeb, 0xd0, 0x91, 0x81, 0xa7 }
+};
+
+#endif
diff --git a/test/common_plat/validation/api/crypto/test_vectors_len.h b/test/common_plat/validation/api/crypto/test_vectors_len.h
new file mode 100644
index 000000000..4fbb5cd70
--- /dev/null
+++ b/test/common_plat/validation/api/crypto/test_vectors_len.h
@@ -0,0 +1,38 @@
+/* Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef TEST_VECTORS_LEN_
+#define TEST_VECTORS_LEN_
+
+/* TDES-CBC */
+#define TDES_CBC_KEY_LEN 24
+#define TDES_CBC_IV_LEN 8
+#define TDES_CBC_MAX_DATA_LEN 16
+
+/* AES128-CBC */
+#define AES128_CBC_KEY_LEN 16
+#define AES128_CBC_IV_LEN 16
+#define AES128_CBC_MAX_DATA_LEN 64
+
+/* AES128-GCM */
+#define AES128_GCM_KEY_LEN 16
+#define AES128_GCM_IV_LEN 12
+#define AES128_GCM_MAX_DATA_LEN 106
+#define AES128_GCM_DIGEST_LEN 16
+#define AES128_GCM_CHECK_LEN 16
+
+/* HMAC-MD5 */
+#define HMAC_MD5_KEY_LEN 16
+#define HMAC_MD5_MAX_DATA_LEN 128
+#define HMAC_MD5_DIGEST_LEN 16
+#define HMAC_MD5_96_CHECK_LEN 12
+
+/* HMAC-SHA256 */
+#define HMAC_SHA256_KEY_LEN 32
+#define HMAC_SHA256_MAX_DATA_LEN 128
+#define HMAC_SHA256_DIGEST_LEN 32
+#define HMAC_SHA256_128_CHECK_LEN 16
+
+#endif
diff --git a/test/common_plat/validation/api/errno/.gitignore b/test/common_plat/validation/api/errno/.gitignore
new file mode 100644
index 000000000..12256e38c
--- /dev/null
+++ b/test/common_plat/validation/api/errno/.gitignore
@@ -0,0 +1 @@
+errno_main
diff --git a/test/common_plat/validation/api/errno/Makefile.am b/test/common_plat/validation/api/errno/Makefile.am
new file mode 100644
index 000000000..a24275d6e
--- /dev/null
+++ b/test/common_plat/validation/api/errno/Makefile.am
@@ -0,0 +1,10 @@
+include ../Makefile.inc
+
+noinst_LTLIBRARIES = libtesterrno.la
+libtesterrno_la_SOURCES = errno.c
+
+test_PROGRAMS = errno_main$(EXEEXT)
+dist_errno_main_SOURCES = errno_main.c
+errno_main_LDADD = libtesterrno.la $(LIBCUNIT_COMMON) $(LIBODP)
+
+EXTRA_DIST = errno.h
diff --git a/test/common_plat/validation/api/errno/errno.c b/test/common_plat/validation/api/errno/errno.c
new file mode 100644
index 000000000..e3b6ced54
--- /dev/null
+++ b/test/common_plat/validation/api/errno/errno.c
@@ -0,0 +1,46 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+#include "odp_cunit_common.h"
+#include "errno.h"
+
+void errno_test_odp_errno_sunny_day(void)
+{
+ int my_errno;
+
+ odp_errno_zero();
+ my_errno = odp_errno();
+ CU_ASSERT_TRUE(my_errno == 0);
+ odp_errno_print("odp_errno");
+ CU_ASSERT_PTR_NOT_NULL(odp_errno_str(my_errno));
+}
+
+odp_testinfo_t errno_suite[] = {
+ ODP_TEST_INFO(errno_test_odp_errno_sunny_day),
+ ODP_TEST_INFO_NULL,
+};
+
+odp_suiteinfo_t errno_suites[] = {
+ {"Errno", NULL, NULL, errno_suite},
+ ODP_SUITE_INFO_NULL,
+};
+
+int errno_main(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(argc, argv))
+ return -1;
+
+ ret = odp_cunit_register(errno_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/common_plat/validation/api/errno/errno.h b/test/common_plat/validation/api/errno/errno.h
new file mode 100644
index 000000000..720385196
--- /dev/null
+++ b/test/common_plat/validation/api/errno/errno.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _ODP_TEST_ERRNO_H_
+#define _ODP_TEST_ERRNO_H_
+
+#include <odp_cunit_common.h>
+
+/* test functions: */
+void errno_test_odp_errno_sunny_day(void);
+
+/* test arrays: */
+extern odp_testinfo_t errno_suite[];
+
+/* test registry: */
+extern odp_suiteinfo_t errno_suites[];
+
+/* main test program: */
+int errno_main(int argc, char *argv[]);
+
+#endif
diff --git a/test/common_plat/validation/api/errno/errno_main.c b/test/common_plat/validation/api/errno/errno_main.c
new file mode 100644
index 000000000..0138279ef
--- /dev/null
+++ b/test/common_plat/validation/api/errno/errno_main.c
@@ -0,0 +1,12 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "errno.h"
+
+int main(int argc, char *argv[])
+{
+ return errno_main(argc, argv);
+}
diff --git a/test/common_plat/validation/api/hash/.gitignore b/test/common_plat/validation/api/hash/.gitignore
new file mode 100644
index 000000000..6d0bc9314
--- /dev/null
+++ b/test/common_plat/validation/api/hash/.gitignore
@@ -0,0 +1 @@
+hash_main
diff --git a/test/common_plat/validation/api/hash/Makefile.am b/test/common_plat/validation/api/hash/Makefile.am
new file mode 100644
index 000000000..b899b8bd3
--- /dev/null
+++ b/test/common_plat/validation/api/hash/Makefile.am
@@ -0,0 +1,10 @@
+include ../Makefile.inc
+
+noinst_LTLIBRARIES = libtesthash.la
+libtesthash_la_SOURCES = hash.c
+
+test_PROGRAMS = hash_main$(EXEEXT)
+dist_hash_main_SOURCES = hash_main.c
+hash_main_LDADD = libtesthash.la $(LIBCUNIT_COMMON) $(LIBODP)
+
+EXTRA_DIST = hash.h
diff --git a/test/common_plat/validation/api/hash/hash.c b/test/common_plat/validation/api/hash/hash.c
new file mode 100644
index 000000000..b353fcecd
--- /dev/null
+++ b/test/common_plat/validation/api/hash/hash.c
@@ -0,0 +1,54 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+#include <odp_cunit_common.h>
+#include "hash.h"
+
+void hash_test_crc32c(void)
+{
+ uint32_t test_value = 0x12345678;
+ uint32_t ret = odp_hash_crc32c(&test_value, 4, 0);
+
+ CU_ASSERT(ret == 0xfa745634);
+
+ test_value = 0x87654321;
+ ret = odp_hash_crc32c(&test_value, 4, 0);
+
+ CU_ASSERT(ret == 0xaca37da7);
+
+ uint32_t test_values[] = {0x12345678, 0x87654321};
+
+ ret = odp_hash_crc32c(test_values, 8, 0);
+
+ CU_ASSERT(ret == 0xe6e910b0);
+}
+
+odp_testinfo_t hash_suite[] = {
+ ODP_TEST_INFO(hash_test_crc32c),
+ ODP_TEST_INFO_NULL,
+};
+
+odp_suiteinfo_t hash_suites[] = {
+ {"Hash", NULL, NULL, hash_suite},
+ ODP_SUITE_INFO_NULL
+};
+
+int hash_main(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(argc, argv))
+ return -1;
+
+ ret = odp_cunit_register(hash_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/common_plat/validation/api/hash/hash.h b/test/common_plat/validation/api/hash/hash.h
new file mode 100644
index 000000000..936571e6a
--- /dev/null
+++ b/test/common_plat/validation/api/hash/hash.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _ODP_TEST_HASH_H_
+#define _ODP_TEST_HASH_H_
+
+#include <odp_cunit_common.h>
+
+/* test functions: */
+void hash_test_crc32c(void);
+
+/* test arrays: */
+extern odp_testinfo_t hash_suite[];
+
+/* test registry: */
+extern odp_suiteinfo_t hash_suites[];
+
+/* main test program: */
+int hash_main(int argc, char *argv[]);
+
+#endif
diff --git a/test/common_plat/validation/api/hash/hash_main.c b/test/common_plat/validation/api/hash/hash_main.c
new file mode 100644
index 000000000..f9818b7bb
--- /dev/null
+++ b/test/common_plat/validation/api/hash/hash_main.c
@@ -0,0 +1,12 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "hash.h"
+
+int main(int argc, char *argv[])
+{
+ return hash_main(argc, argv);
+}
diff --git a/test/common_plat/validation/api/init/.gitignore b/test/common_plat/validation/api/init/.gitignore
new file mode 100644
index 000000000..f433708b0
--- /dev/null
+++ b/test/common_plat/validation/api/init/.gitignore
@@ -0,0 +1,3 @@
+init_main_abort
+init_main_log
+init_main_ok
diff --git a/test/common_plat/validation/api/init/Makefile.am b/test/common_plat/validation/api/init/Makefile.am
new file mode 100644
index 000000000..0793e6423
--- /dev/null
+++ b/test/common_plat/validation/api/init/Makefile.am
@@ -0,0 +1,16 @@
+include ../Makefile.inc
+noinst_LTLIBRARIES = libtestinit.la
+libtestinit_la_SOURCES = init.c
+
+# most platforms are expected not to support multiple ODP inits
+# following each other: therefore 3 separate binaries are
+# created, each containing its ODP init test.
+test_PROGRAMS = init_main_abort$(EXEEXT) init_main_log$(EXEEXT) init_main_ok$(EXEEXT)
+dist_init_main_abort_SOURCES = init_main_abort.c
+dist_init_main_log_SOURCES = init_main_log.c
+dist_init_main_ok_SOURCES = init_main_ok.c
+init_main_abort_LDADD = libtestinit.la $(LIBCUNIT_COMMON) $(LIBODP)
+init_main_log_LDADD = libtestinit.la $(LIBCUNIT_COMMON) $(LIBODP)
+init_main_ok_LDADD = libtestinit.la $(LIBCUNIT_COMMON) $(LIBODP)
+
+EXTRA_DIST = init.h
diff --git a/test/common_plat/validation/api/init/init.c b/test/common_plat/validation/api/init/init.c
new file mode 100644
index 000000000..61055fad5
--- /dev/null
+++ b/test/common_plat/validation/api/init/init.c
@@ -0,0 +1,188 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdarg.h>
+#include <stdlib.h>
+#include <odp_api.h>
+#include <CUnit/Basic.h>
+#include "init.h"
+
+/* flag set when the replacement logging function is used */
+int replacement_logging_used;
+
+/* replacement abort function: */
+static void odp_init_abort(void) ODP_NORETURN;
+
+/* replacement log function: */
+ODP_PRINTF_FORMAT(2, 3)
+static int odp_init_log(odp_log_level_t level, const char *fmt, ...);
+
+/* test ODP global init, with alternate abort function */
+void init_test_odp_init_global_replace_abort(void)
+{
+ int status;
+ struct odp_init_t init_data;
+ odp_instance_t instance;
+
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.abort_fn = &odp_init_abort;
+
+ status = odp_init_global(&instance, &init_data, NULL);
+ CU_ASSERT_FATAL(status == 0);
+
+ status = odp_term_global(instance);
+ CU_ASSERT(status == 0);
+}
+
+odp_testinfo_t init_suite_abort[] = {
+ ODP_TEST_INFO(init_test_odp_init_global_replace_abort),
+ ODP_TEST_INFO_NULL,
+};
+
+odp_suiteinfo_t init_suites_abort[] = {
+ {"Init", NULL, NULL, init_suite_abort},
+ ODP_SUITE_INFO_NULL,
+};
+
+static void odp_init_abort(void)
+{
+ abort();
+}
+
+int init_main_abort(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(argc, argv))
+ return -1;
+
+ /* prevent default ODP init: */
+ odp_cunit_register_global_init(NULL);
+ odp_cunit_register_global_term(NULL);
+
+ /* run the tests: */
+ ret = odp_cunit_register(init_suites_abort);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
+
+/* test ODP global init, with alternate log function */
+void init_test_odp_init_global_replace_log(void)
+{
+ int status;
+ struct odp_init_t init_data;
+ odp_instance_t instance;
+
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.log_fn = &odp_init_log;
+
+ replacement_logging_used = 0;
+
+ status = odp_init_global(&instance, &init_data, NULL);
+ CU_ASSERT_FATAL(status == 0);
+
+ CU_ASSERT_TRUE(replacement_logging_used || ODP_DEBUG_PRINT == 0);
+
+ status = odp_term_global(instance);
+ CU_ASSERT(status == 0);
+}
+
+odp_testinfo_t init_suite_log[] = {
+ ODP_TEST_INFO(init_test_odp_init_global_replace_log),
+ ODP_TEST_INFO_NULL,
+};
+
+odp_suiteinfo_t init_suites_log[] = {
+ {"Init", NULL, NULL, init_suite_log},
+ ODP_SUITE_INFO_NULL,
+};
+
+static int odp_init_log(odp_log_level_t level __attribute__((unused)),
+ const char *fmt, ...)
+{
+ va_list args;
+ int r;
+
+ /* just set a flag to be sure the replacement fn was used */
+ replacement_logging_used = 1;
+
+ va_start(args, fmt);
+ r = vfprintf(stderr, fmt, args);
+ va_end(args);
+
+ return r;
+}
+
+int init_main_log(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(argc, argv))
+ return -1;
+
+ /* prevent default ODP init: */
+ odp_cunit_register_global_init(NULL);
+ odp_cunit_register_global_term(NULL);
+
+ /* register the tests: */
+ ret = odp_cunit_register(init_suites_log);
+
+ /* run the tests: */
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
+
+/* test normal ODP global init */
+void init_test_odp_init_global(void)
+{
+ int status;
+ odp_instance_t instance;
+
+ status = odp_init_global(&instance, NULL, NULL);
+ CU_ASSERT_FATAL(status == 0);
+
+ status = odp_term_global(instance);
+ CU_ASSERT(status == 0);
+}
+
+odp_testinfo_t init_suite_ok[] = {
+ ODP_TEST_INFO(init_test_odp_init_global),
+ ODP_TEST_INFO_NULL,
+};
+
+odp_suiteinfo_t init_suites_ok[] = {
+ {"Init", NULL, NULL, init_suite_ok},
+ ODP_SUITE_INFO_NULL,
+};
+
+int init_main_ok(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(argc, argv))
+ return -1;
+
+ /* prevent default ODP init: */
+ odp_cunit_register_global_init(NULL);
+ odp_cunit_register_global_term(NULL);
+
+ /* register the tests: */
+ ret = odp_cunit_register(init_suites_ok);
+
+ /* run the tests: */
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/common_plat/validation/api/init/init.h b/test/common_plat/validation/api/init/init.h
new file mode 100644
index 000000000..cad9cf988
--- /dev/null
+++ b/test/common_plat/validation/api/init/init.h
@@ -0,0 +1,32 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _ODP_TEST_INIT_H_
+#define _ODP_TEST_INIT_H_
+
+#include <odp_cunit_common.h>
+
+/* test functions: */
+void init_test_odp_init_global_replace_abort(void);
+void init_test_odp_init_global_replace_log(void);
+void init_test_odp_init_global(void);
+
+/* test arrays: */
+extern odp_testinfo_t init_suite_abort[];
+extern odp_testinfo_t init_suite_log[];
+extern odp_testinfo_t init_suite_ok[];
+
+/* test registry: */
+extern odp_suiteinfo_t init_suites_abort[];
+extern odp_suiteinfo_t init_suites_log[];
+extern odp_suiteinfo_t init_suites_ok[];
+
+/* main test program: */
+int init_main_abort(int argc, char *argv[]);
+int init_main_log(int argc, char *argv[]);
+int init_main_ok(int argc, char *argv[]);
+
+#endif
diff --git a/test/common_plat/validation/api/init/init_main_abort.c b/test/common_plat/validation/api/init/init_main_abort.c
new file mode 100644
index 000000000..2e0faafb8
--- /dev/null
+++ b/test/common_plat/validation/api/init/init_main_abort.c
@@ -0,0 +1,11 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include "init.h"
+
+int main(int argc, char *argv[])
+{
+ return init_main_abort(argc, argv);
+}
diff --git a/test/common_plat/validation/api/init/init_main_log.c b/test/common_plat/validation/api/init/init_main_log.c
new file mode 100644
index 000000000..41dd00d72
--- /dev/null
+++ b/test/common_plat/validation/api/init/init_main_log.c
@@ -0,0 +1,11 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include "init.h"
+
+int main(int argc, char *argv[])
+{
+ return init_main_log(argc, argv);
+}
diff --git a/test/common_plat/validation/api/init/init_main_ok.c b/test/common_plat/validation/api/init/init_main_ok.c
new file mode 100644
index 000000000..6053ec188
--- /dev/null
+++ b/test/common_plat/validation/api/init/init_main_ok.c
@@ -0,0 +1,11 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include "init.h"
+
+int main(int argc, char *argv[])
+{
+ return init_main_ok(argc, argv);
+}
diff --git a/test/common_plat/validation/api/lock/.gitignore b/test/common_plat/validation/api/lock/.gitignore
new file mode 100644
index 000000000..ff16646f4
--- /dev/null
+++ b/test/common_plat/validation/api/lock/.gitignore
@@ -0,0 +1 @@
+lock_main
diff --git a/test/common_plat/validation/api/lock/Makefile.am b/test/common_plat/validation/api/lock/Makefile.am
new file mode 100644
index 000000000..29993df44
--- /dev/null
+++ b/test/common_plat/validation/api/lock/Makefile.am
@@ -0,0 +1,10 @@
+include ../Makefile.inc
+
+noinst_LTLIBRARIES = libtestlock.la
+libtestlock_la_SOURCES = lock.c
+
+test_PROGRAMS = lock_main$(EXEEXT)
+dist_lock_main_SOURCES = lock_main.c
+lock_main_LDADD = libtestlock.la $(LIBCUNIT_COMMON) $(LIBODP)
+
+EXTRA_DIST = lock.h
diff --git a/test/common_plat/validation/api/lock/lock.c b/test/common_plat/validation/api/lock/lock.c
new file mode 100644
index 000000000..a668a3157
--- /dev/null
+++ b/test/common_plat/validation/api/lock/lock.c
@@ -0,0 +1,1224 @@
+/* Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <malloc.h>
+#include <odp_api.h>
+#include <CUnit/Basic.h>
+#include <odp_cunit_common.h>
+#include <unistd.h>
+#include "lock.h"
+
+#define VERBOSE 0
+
+#define MIN_ITERATIONS 1000
+#define MAX_ITERATIONS 30000
+#define ITER_MPLY_FACTOR 3
+
+#define SLOW_BARRIER_DELAY 400
+#define BASE_DELAY 6
+#define MIN_DELAY 1
+
+#define NUM_RESYNC_BARRIERS 100
+
+#define GLOBAL_SHM_NAME "GlobalLockTest"
+
+#define UNUSED __attribute__((__unused__))
+
+typedef __volatile uint32_t volatile_u32_t;
+typedef __volatile uint64_t volatile_u64_t;
+
+typedef struct {
+ odp_atomic_u32_t wait_cnt;
+} custom_barrier_t;
+
+typedef struct {
+ /* Global variables */
+ uint32_t g_num_threads;
+ uint32_t g_iterations;
+ uint32_t g_verbose;
+ uint32_t g_max_num_cores;
+
+ volatile_u32_t slow_thread_num;
+ volatile_u32_t barrier_cnt1;
+ volatile_u32_t barrier_cnt2;
+ odp_barrier_t global_barrier;
+
+ /* Used to periodically resync within the lock functional tests */
+ odp_barrier_t barrier_array[NUM_RESYNC_BARRIERS];
+
+ /* Locks */
+ odp_spinlock_t global_spinlock;
+ odp_spinlock_recursive_t global_recursive_spinlock;
+ odp_ticketlock_t global_ticketlock;
+ odp_rwlock_t global_rwlock;
+ odp_rwlock_recursive_t global_recursive_rwlock;
+
+ volatile_u32_t global_lock_owner;
+} global_shared_mem_t;
+
+/* Per-thread memory */
+typedef struct {
+ global_shared_mem_t *global_mem;
+
+ int thread_id;
+ int thread_core;
+
+ odp_spinlock_t per_thread_spinlock;
+ odp_spinlock_recursive_t per_thread_recursive_spinlock;
+ odp_ticketlock_t per_thread_ticketlock;
+ odp_rwlock_t per_thread_rwlock;
+ odp_rwlock_recursive_t per_thread_recursive_rwlock;
+
+ volatile_u64_t delay_counter;
+} per_thread_mem_t;
+
+static odp_shm_t global_shm;
+static global_shared_mem_t *global_mem;
+
+/*
+* Delay a consistent amount of time. Ideally the amount of CPU time taken
+* is linearly proportional to "iterations". The goal is to try to do some
+* work that the compiler optimizer won't optimize away, and also to
+* minimize loads and stores (at least to different memory addresses)
+* so as to not affect or be affected by caching issues. This does NOT have to
+* correlate to a specific number of cpu cycles or be consistent across
+* CPU architectures.
+*/
+static void thread_delay(per_thread_mem_t *per_thread_mem, uint32_t iterations)
+{
+ volatile_u64_t *counter_ptr;
+ uint32_t cnt;
+
+ counter_ptr = &per_thread_mem->delay_counter;
+
+ for (cnt = 1; cnt <= iterations; cnt++)
+ (*counter_ptr)++;
+}
+
+/* Initialise per-thread memory */
+static per_thread_mem_t *thread_init(void)
+{
+ global_shared_mem_t *global_mem;
+ per_thread_mem_t *per_thread_mem;
+ odp_shm_t global_shm;
+ uint32_t per_thread_mem_len;
+
+ per_thread_mem_len = sizeof(per_thread_mem_t);
+ per_thread_mem = malloc(per_thread_mem_len);
+ memset(per_thread_mem, 0, per_thread_mem_len);
+
+ per_thread_mem->delay_counter = 1;
+
+ per_thread_mem->thread_id = odp_thread_id();
+ per_thread_mem->thread_core = odp_cpu_id();
+
+ global_shm = odp_shm_lookup(GLOBAL_SHM_NAME);
+ global_mem = odp_shm_addr(global_shm);
+ CU_ASSERT_PTR_NOT_NULL(global_mem);
+
+ per_thread_mem->global_mem = global_mem;
+
+ return per_thread_mem;
+}
+
+static void thread_finalize(per_thread_mem_t *per_thread_mem)
+{
+ free(per_thread_mem);
+}
+
+static void spinlock_api_test(odp_spinlock_t *spinlock)
+{
+ odp_spinlock_init(spinlock);
+ CU_ASSERT(odp_spinlock_is_locked(spinlock) == 0);
+
+ odp_spinlock_lock(spinlock);
+ CU_ASSERT(odp_spinlock_is_locked(spinlock) == 1);
+
+ odp_spinlock_unlock(spinlock);
+ CU_ASSERT(odp_spinlock_is_locked(spinlock) == 0);
+
+ CU_ASSERT(odp_spinlock_trylock(spinlock) == 1);
+
+ CU_ASSERT(odp_spinlock_is_locked(spinlock) == 1);
+
+ odp_spinlock_unlock(spinlock);
+ CU_ASSERT(odp_spinlock_is_locked(spinlock) == 0);
+}
+
+static int spinlock_api_tests(void *arg UNUSED)
+{
+ global_shared_mem_t *global_mem;
+ per_thread_mem_t *per_thread_mem;
+ odp_spinlock_t local_spin_lock;
+
+ per_thread_mem = thread_init();
+ global_mem = per_thread_mem->global_mem;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ spinlock_api_test(&local_spin_lock);
+ spinlock_api_test(&per_thread_mem->per_thread_spinlock);
+
+ thread_finalize(per_thread_mem);
+
+ return CU_get_number_of_failures();
+}
+
+static void spinlock_recursive_api_test(odp_spinlock_recursive_t *spinlock)
+{
+ odp_spinlock_recursive_init(spinlock);
+ CU_ASSERT(odp_spinlock_recursive_is_locked(spinlock) == 0);
+
+ odp_spinlock_recursive_lock(spinlock);
+ CU_ASSERT(odp_spinlock_recursive_is_locked(spinlock) == 1);
+
+ odp_spinlock_recursive_lock(spinlock);
+ CU_ASSERT(odp_spinlock_recursive_is_locked(spinlock) == 1);
+
+ odp_spinlock_recursive_unlock(spinlock);
+ CU_ASSERT(odp_spinlock_recursive_is_locked(spinlock) == 1);
+
+ odp_spinlock_recursive_unlock(spinlock);
+ CU_ASSERT(odp_spinlock_recursive_is_locked(spinlock) == 0);
+
+ CU_ASSERT(odp_spinlock_recursive_trylock(spinlock) == 1);
+ CU_ASSERT(odp_spinlock_recursive_is_locked(spinlock) == 1);
+
+ CU_ASSERT(odp_spinlock_recursive_trylock(spinlock) == 1);
+ CU_ASSERT(odp_spinlock_recursive_is_locked(spinlock) == 1);
+
+ odp_spinlock_recursive_unlock(spinlock);
+ CU_ASSERT(odp_spinlock_recursive_is_locked(spinlock) == 1);
+
+ odp_spinlock_recursive_unlock(spinlock);
+ CU_ASSERT(odp_spinlock_recursive_is_locked(spinlock) == 0);
+}
+
+static int spinlock_recursive_api_tests(void *arg UNUSED)
+{
+ global_shared_mem_t *global_mem;
+ per_thread_mem_t *per_thread_mem;
+ odp_spinlock_recursive_t local_recursive_spin_lock;
+
+ per_thread_mem = thread_init();
+ global_mem = per_thread_mem->global_mem;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ spinlock_recursive_api_test(&local_recursive_spin_lock);
+ spinlock_recursive_api_test(
+ &per_thread_mem->per_thread_recursive_spinlock);
+
+ thread_finalize(per_thread_mem);
+
+ return CU_get_number_of_failures();
+}
+
+static void ticketlock_api_test(odp_ticketlock_t *ticketlock)
+{
+ odp_ticketlock_init(ticketlock);
+ CU_ASSERT(odp_ticketlock_is_locked(ticketlock) == 0);
+
+ odp_ticketlock_lock(ticketlock);
+ CU_ASSERT(odp_ticketlock_is_locked(ticketlock) == 1);
+
+ odp_ticketlock_unlock(ticketlock);
+ CU_ASSERT(odp_ticketlock_is_locked(ticketlock) == 0);
+
+ CU_ASSERT(odp_ticketlock_trylock(ticketlock) == 1);
+ CU_ASSERT(odp_ticketlock_trylock(ticketlock) == 0);
+ CU_ASSERT(odp_ticketlock_is_locked(ticketlock) == 1);
+
+ odp_ticketlock_unlock(ticketlock);
+ CU_ASSERT(odp_ticketlock_is_locked(ticketlock) == 0);
+}
+
+static int ticketlock_api_tests(void *arg UNUSED)
+{
+ global_shared_mem_t *global_mem;
+ per_thread_mem_t *per_thread_mem;
+ odp_ticketlock_t local_ticket_lock;
+
+ per_thread_mem = thread_init();
+ global_mem = per_thread_mem->global_mem;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ ticketlock_api_test(&local_ticket_lock);
+ ticketlock_api_test(&per_thread_mem->per_thread_ticketlock);
+
+ thread_finalize(per_thread_mem);
+
+ return CU_get_number_of_failures();
+}
+
+static void rwlock_api_test(odp_rwlock_t *rw_lock)
+{
+ int rc;
+
+ odp_rwlock_init(rw_lock);
+ /* CU_ASSERT(odp_rwlock_is_locked(rw_lock) == 0); */
+
+ odp_rwlock_read_lock(rw_lock);
+
+ rc = odp_rwlock_read_trylock(rw_lock);
+ CU_ASSERT(rc == 0);
+ rc = odp_rwlock_write_trylock(rw_lock);
+ CU_ASSERT(rc == 0);
+
+ odp_rwlock_read_unlock(rw_lock);
+
+ rc = odp_rwlock_read_trylock(rw_lock);
+ if (rc == 1)
+ odp_rwlock_read_unlock(rw_lock);
+
+ odp_rwlock_write_lock(rw_lock);
+ /* CU_ASSERT(odp_rwlock_is_locked(rw_lock) == 1); */
+
+ odp_rwlock_write_unlock(rw_lock);
+ /* CU_ASSERT(odp_rwlock_is_locked(rw_lock) == 0); */
+
+ rc = odp_rwlock_write_trylock(rw_lock);
+ if (rc == 1)
+ odp_rwlock_write_unlock(rw_lock);
+}
+
+static int rwlock_api_tests(void *arg UNUSED)
+{
+ global_shared_mem_t *global_mem;
+ per_thread_mem_t *per_thread_mem;
+ odp_rwlock_t local_rwlock;
+
+ per_thread_mem = thread_init();
+ global_mem = per_thread_mem->global_mem;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ rwlock_api_test(&local_rwlock);
+ rwlock_api_test(&per_thread_mem->per_thread_rwlock);
+
+ thread_finalize(per_thread_mem);
+
+ return CU_get_number_of_failures();
+}
+
+static void rwlock_recursive_api_test(odp_rwlock_recursive_t *rw_lock)
+{
+ int rc;
+
+ odp_rwlock_recursive_init(rw_lock);
+ /* CU_ASSERT(odp_rwlock_is_locked(rw_lock) == 0); */
+
+ odp_rwlock_recursive_read_lock(rw_lock);
+ odp_rwlock_recursive_read_lock(rw_lock);
+ rc = odp_rwlock_recursive_read_trylock(rw_lock);
+ CU_ASSERT(rc == 1);
+ rc = odp_rwlock_recursive_write_trylock(rw_lock);
+ CU_ASSERT(rc == 0);
+
+ odp_rwlock_recursive_read_unlock(rw_lock);
+ odp_rwlock_recursive_read_unlock(rw_lock);
+ odp_rwlock_recursive_read_unlock(rw_lock);
+
+ odp_rwlock_recursive_write_lock(rw_lock);
+ odp_rwlock_recursive_write_lock(rw_lock);
+ /* CU_ASSERT(odp_rwlock_is_locked(rw_lock) == 1); */
+ rc = odp_rwlock_recursive_read_trylock(rw_lock);
+ CU_ASSERT(rc == 0);
+ rc = odp_rwlock_recursive_write_trylock(rw_lock);
+ CU_ASSERT(rc == 1);
+
+ odp_rwlock_recursive_write_unlock(rw_lock);
+ odp_rwlock_recursive_write_unlock(rw_lock);
+ odp_rwlock_recursive_write_unlock(rw_lock);
+ /* CU_ASSERT(odp_rwlock_is_locked(rw_lock) == 0); */
+}
+
+static int rwlock_recursive_api_tests(void *arg UNUSED)
+{
+ global_shared_mem_t *global_mem;
+ per_thread_mem_t *per_thread_mem;
+ odp_rwlock_recursive_t local_recursive_rwlock;
+
+ per_thread_mem = thread_init();
+ global_mem = per_thread_mem->global_mem;
+
+ odp_barrier_wait(&global_mem->global_barrier);
+
+ rwlock_recursive_api_test(&local_recursive_rwlock);
+ rwlock_recursive_api_test(&per_thread_mem->per_thread_recursive_rwlock);
+
+ thread_finalize(per_thread_mem);
+
+ return CU_get_number_of_failures();
+}
+
+/*
+ * Tests that we do have contention between threads when running.
+ * Also adjust the number of iterations to be done (by other tests)
+ * so we have a fair chance to see that the tested synchronizer
+ * does avoid the race condition.
+ */
+static int no_lock_functional_test(void *arg UNUSED)
+{
+	global_shared_mem_t *global_mem;
+	per_thread_mem_t *per_thread_mem;
+	uint32_t thread_num, resync_cnt, rs_idx, iterations, cnt;
+	uint32_t sync_failures, current_errs, lock_owner_delay;
+
+	thread_num = odp_cpu_id() + 1;
+	per_thread_mem = thread_init();
+	global_mem = per_thread_mem->global_mem;
+	iterations = 0;
+
+	/* Wait here until all of the threads have also reached this point */
+	odp_barrier_wait(&global_mem->global_barrier);
+
+	sync_failures = 0;
+	current_errs = 0;
+	rs_idx = 0;
+	resync_cnt = MAX_ITERATIONS / NUM_RESYNC_BARRIERS;
+	lock_owner_delay = BASE_DELAY;
+
+	/*
+	 * Tuning the iteration number:
+	 * Here, we search for an iteration number that guarantees to show
+	 * race conditions between the odp threads.
+	 * Iterations is set to ITER_MPLY_FACTOR * cnt where cnt is when
+	 * the threads start to see "errors" (i.e. effect of other threads
+	 * running concurrently without any synchronisation mechanism).
+	 * In other words, "iterations" is set to ITER_MPLY_FACTOR times the
+	 * minimum loop count necessary to see a need for synchronisation
+	 * mechanism.
+	 * If, later, these "errors" disappear when running other tests up to
+	 * "iterations" with synchro, the effect of the tested synchro mechanism
+	 * is likely proven.
+	 * If we reach "MAX_ITERATIONS", and "iteration" remains zero,
+	 * it means that we cannot see any race condition between the different
+	 * running threads (e.g. the OS is not preemptive) and all other tests
+	 * being passed won't tell much about the functionality of the
+	 * tested synchro mechanism.
+	 */
+	for (cnt = 1; cnt <= MAX_ITERATIONS; cnt++) {
+		/* Claim ownership with no lock at all, then check whether
+		 * another thread overwrote it while we were delaying
+		 */
+		global_mem->global_lock_owner = thread_num;
+		odp_mb_full();
+		thread_delay(per_thread_mem, lock_owner_delay);
+
+		if (global_mem->global_lock_owner != thread_num) {
+			current_errs++;
+			sync_failures++;
+			if (!iterations)
+				iterations = cnt;
+		}
+
+		global_mem->global_lock_owner = 0;
+		odp_mb_full();
+		thread_delay(per_thread_mem, MIN_DELAY);
+
+		if (global_mem->global_lock_owner == thread_num) {
+			current_errs++;
+			sync_failures++;
+			if (!iterations)
+				iterations = cnt;
+		}
+
+		/* No race seen yet: hold the "ownership" longer next time to
+		 * increase the chance of a collision
+		 */
+		if (current_errs == 0)
+			lock_owner_delay++;
+
+		/* Wait a small amount of time and rerun the test */
+		thread_delay(per_thread_mem, BASE_DELAY);
+
+		/* Try to resync all of the threads to increase contention */
+		if ((rs_idx < NUM_RESYNC_BARRIERS) &&
+		    ((cnt % resync_cnt) == (resync_cnt - 1)))
+			odp_barrier_wait(&global_mem->barrier_array[rs_idx++]);
+	}
+
+	if (global_mem->g_verbose)
+		printf("\nThread %" PRIu32 " (id=%d core=%d) had %" PRIu32
+		       " sync_failures in %" PRIu32 " iterations\n",
+		       thread_num,
+		       per_thread_mem->thread_id,
+		       per_thread_mem->thread_core,
+		       sync_failures, iterations);
+
+	/* Note that the following CU_ASSERT MAY appear incorrect, but for the
+	 * no_lock test it should see sync_failures or else there is something
+	 * wrong with the test methodology or the ODP thread implementation.
+	 * So this test PASSES only if it sees sync_failures or a single
+	 * worker was used.
+	 */
+	CU_ASSERT(sync_failures != 0 || global_mem->g_num_threads == 1);
+
+	/*
+	 * set the iteration for the future tests to be far above the
+	 * contention level
+	 */
+	iterations *= ITER_MPLY_FACTOR;
+
+	if (iterations > MAX_ITERATIONS)
+		iterations = MAX_ITERATIONS;
+	if (iterations < MIN_ITERATIONS)
+		iterations = MIN_ITERATIONS;
+
+	/*
+	 * Note that the following statement has race conditions:
+	 * global_mem->g_iterations should really be an atomic and a TAS
+	 * function be used. But this would mean that we would be testing
+	 * synchronisers assuming synchronisers work...
+	 * If we do not use atomic TAS, we may not get the grand max for
+	 * all threads, but we are guaranteed to have passed the error
+	 * threshold, for at least some threads, which is good enough
+	 */
+	if (iterations > global_mem->g_iterations)
+		global_mem->g_iterations = iterations;
+
+	odp_mb_full();
+
+	thread_finalize(per_thread_mem);
+
+	return CU_get_number_of_failures();
+}
+
+/* Verify spinlock mutual exclusion: each thread takes the global
+ * spinlock, marks itself as owner, delays, and checks that no other
+ * thread changed ownership while the lock was held.
+ */
+static int spinlock_functional_test(void *arg UNUSED)
+{
+	global_shared_mem_t *global_mem;
+	per_thread_mem_t *per_thread_mem;
+	uint32_t thread_num, resync_cnt, rs_idx, iterations, cnt;
+	uint32_t sync_failures, is_locked_errs, current_errs;
+	uint32_t lock_owner_delay;
+
+	thread_num = odp_cpu_id() + 1;
+	per_thread_mem = thread_init();
+	global_mem = per_thread_mem->global_mem;
+	/* Iteration count was tuned by the no_lock test that ran first */
+	iterations = global_mem->g_iterations;
+
+	/* Wait here until all of the threads have also reached this point */
+	odp_barrier_wait(&global_mem->global_barrier);
+
+	sync_failures = 0;
+	is_locked_errs = 0;
+	current_errs = 0;
+	rs_idx = 0;
+	resync_cnt = iterations / NUM_RESYNC_BARRIERS;
+	lock_owner_delay = BASE_DELAY;
+
+	for (cnt = 1; cnt <= iterations; cnt++) {
+		/* Acquire the shared global lock */
+		odp_spinlock_lock(&global_mem->global_spinlock);
+
+		/* Make sure we have the lock AND didn't previously own it */
+		if (odp_spinlock_is_locked(&global_mem->global_spinlock) != 1)
+			is_locked_errs++;
+
+		if (global_mem->global_lock_owner != 0) {
+			current_errs++;
+			sync_failures++;
+		}
+
+		/* Now set the global_lock_owner to be us, wait a while, and
+		 * then we see if anyone else has snuck in and changed the
+		 * global_lock_owner to be themselves
+		 */
+		global_mem->global_lock_owner = thread_num;
+		odp_mb_full();
+		thread_delay(per_thread_mem, lock_owner_delay);
+		if (global_mem->global_lock_owner != thread_num) {
+			current_errs++;
+			sync_failures++;
+		}
+
+		/* Release shared lock, and make sure we no longer have it */
+		global_mem->global_lock_owner = 0;
+		odp_mb_full();
+		odp_spinlock_unlock(&global_mem->global_spinlock);
+		if (global_mem->global_lock_owner == thread_num) {
+			current_errs++;
+			sync_failures++;
+		}
+
+		if (current_errs == 0)
+			lock_owner_delay++;
+
+		/* Wait a small amount of time and rerun the test */
+		thread_delay(per_thread_mem, BASE_DELAY);
+
+		/* Try to resync all of the threads to increase contention */
+		if ((rs_idx < NUM_RESYNC_BARRIERS) &&
+		    ((cnt % resync_cnt) == (resync_cnt - 1)))
+			odp_barrier_wait(&global_mem->barrier_array[rs_idx++]);
+	}
+
+	if ((global_mem->g_verbose) &&
+	    ((sync_failures != 0) || (is_locked_errs != 0)))
+		printf("\nThread %" PRIu32 " (id=%d core=%d) had %" PRIu32
+		       " sync_failures and %" PRIu32
+		       " is_locked_errs in %" PRIu32
+		       " iterations\n", thread_num,
+		       per_thread_mem->thread_id, per_thread_mem->thread_core,
+		       sync_failures, is_locked_errs, iterations);
+
+	CU_ASSERT(sync_failures == 0);
+	CU_ASSERT(is_locked_errs == 0);
+
+	thread_finalize(per_thread_mem);
+
+	return CU_get_number_of_failures();
+}
+
+/* Verify recursive spinlock mutual exclusion and re-entrancy: as the
+ * plain spinlock test, plus a nested lock/unlock while holding the lock
+ * to confirm ownership is retained through the recursion.
+ */
+static int spinlock_recursive_functional_test(void *arg UNUSED)
+{
+	global_shared_mem_t *global_mem;
+	per_thread_mem_t *per_thread_mem;
+	uint32_t thread_num, resync_cnt, rs_idx, iterations, cnt;
+	uint32_t sync_failures, recursive_errs, is_locked_errs, current_errs;
+	uint32_t lock_owner_delay;
+
+	thread_num = odp_cpu_id() + 1;
+	per_thread_mem = thread_init();
+	global_mem = per_thread_mem->global_mem;
+	/* Iteration count was tuned by the no_lock test that ran first */
+	iterations = global_mem->g_iterations;
+
+	/* Wait here until all of the threads have also reached this point */
+	odp_barrier_wait(&global_mem->global_barrier);
+
+	sync_failures = 0;
+	recursive_errs = 0;
+	is_locked_errs = 0;
+	current_errs = 0;
+	rs_idx = 0;
+	resync_cnt = iterations / NUM_RESYNC_BARRIERS;
+	lock_owner_delay = BASE_DELAY;
+
+	for (cnt = 1; cnt <= iterations; cnt++) {
+		/* Acquire the shared global lock */
+		odp_spinlock_recursive_lock(
+			&global_mem->global_recursive_spinlock);
+
+		/* Make sure we have the lock AND didn't previously own it */
+		if (odp_spinlock_recursive_is_locked(
+			    &global_mem->global_recursive_spinlock) != 1)
+			is_locked_errs++;
+
+		if (global_mem->global_lock_owner != 0) {
+			current_errs++;
+			sync_failures++;
+		}
+
+		/* Now set the global_lock_owner to be us, wait a while, and
+		 * then we see if anyone else has snuck in and changed the
+		 * global_lock_owner to be themselves
+		 */
+		global_mem->global_lock_owner = thread_num;
+		odp_mb_full();
+		thread_delay(per_thread_mem, lock_owner_delay);
+		if (global_mem->global_lock_owner != thread_num) {
+			current_errs++;
+			sync_failures++;
+		}
+
+		/* Verify that we can acquire the lock recursively */
+		odp_spinlock_recursive_lock(
+			&global_mem->global_recursive_spinlock);
+		if (global_mem->global_lock_owner != thread_num) {
+			current_errs++;
+			recursive_errs++;
+		}
+
+		/* Release the lock and verify that we still have it*/
+		odp_spinlock_recursive_unlock(
+			&global_mem->global_recursive_spinlock);
+		thread_delay(per_thread_mem, lock_owner_delay);
+		if (global_mem->global_lock_owner != thread_num) {
+			current_errs++;
+			recursive_errs++;
+		}
+
+		/* Release shared lock, and make sure we no longer have it */
+		global_mem->global_lock_owner = 0;
+		odp_mb_full();
+		odp_spinlock_recursive_unlock(
+			&global_mem->global_recursive_spinlock);
+		if (global_mem->global_lock_owner == thread_num) {
+			current_errs++;
+			sync_failures++;
+		}
+
+		if (current_errs == 0)
+			lock_owner_delay++;
+
+		/* Wait a small amount of time and rerun the test */
+		thread_delay(per_thread_mem, BASE_DELAY);
+
+		/* Try to resync all of the threads to increase contention */
+		if ((rs_idx < NUM_RESYNC_BARRIERS) &&
+		    ((cnt % resync_cnt) == (resync_cnt - 1)))
+			odp_barrier_wait(&global_mem->barrier_array[rs_idx++]);
+	}
+
+	if ((global_mem->g_verbose) &&
+	    (sync_failures != 0 || recursive_errs != 0 || is_locked_errs != 0))
+		printf("\nThread %" PRIu32 " (id=%d core=%d) had %" PRIu32
+		       " sync_failures and %" PRIu32
+		       " recursive_errs and %" PRIu32
+		       " is_locked_errs in %" PRIu32
+		       " iterations\n", thread_num,
+		       per_thread_mem->thread_id, per_thread_mem->thread_core,
+		       sync_failures, recursive_errs, is_locked_errs,
+		       iterations);
+
+	CU_ASSERT(sync_failures == 0);
+	CU_ASSERT(recursive_errs == 0);
+	CU_ASSERT(is_locked_errs == 0);
+
+	thread_finalize(per_thread_mem);
+
+	return CU_get_number_of_failures();
+}
+
+/* Verify ticketlock mutual exclusion: each thread takes the global
+ * ticketlock, marks itself as owner, delays, and checks that no other
+ * thread changed ownership while the lock was held.
+ */
+static int ticketlock_functional_test(void *arg UNUSED)
+{
+	global_shared_mem_t *global_mem;
+	per_thread_mem_t *per_thread_mem;
+	uint32_t thread_num, resync_cnt, rs_idx, iterations, cnt;
+	uint32_t sync_failures, is_locked_errs, current_errs;
+	uint32_t lock_owner_delay;
+
+	thread_num = odp_cpu_id() + 1;
+	per_thread_mem = thread_init();
+	global_mem = per_thread_mem->global_mem;
+	/* Iteration count was tuned by the no_lock test that ran first */
+	iterations = global_mem->g_iterations;
+
+	/* Wait here until all of the threads have also reached this point */
+	odp_barrier_wait(&global_mem->global_barrier);
+
+	sync_failures = 0;
+	is_locked_errs = 0;
+	current_errs = 0;
+	rs_idx = 0;
+	resync_cnt = iterations / NUM_RESYNC_BARRIERS;
+	lock_owner_delay = BASE_DELAY;
+
+	for (cnt = 1; cnt <= iterations; cnt++) {
+		/* Acquire the shared global lock */
+		odp_ticketlock_lock(&global_mem->global_ticketlock);
+
+		/* Make sure we have the lock AND didn't previously own it */
+		if (odp_ticketlock_is_locked(&global_mem->global_ticketlock)
+				!= 1)
+			is_locked_errs++;
+
+		if (global_mem->global_lock_owner != 0) {
+			current_errs++;
+			sync_failures++;
+		}
+
+		/* Now set the global_lock_owner to be us, wait a while, and
+		 * then we see if anyone else has snuck in and changed the
+		 * global_lock_owner to be themselves
+		 */
+		global_mem->global_lock_owner = thread_num;
+		odp_mb_full();
+		thread_delay(per_thread_mem, lock_owner_delay);
+		if (global_mem->global_lock_owner != thread_num) {
+			current_errs++;
+			sync_failures++;
+		}
+
+		/* Release shared lock, and make sure we no longer have it */
+		global_mem->global_lock_owner = 0;
+		odp_mb_full();
+		odp_ticketlock_unlock(&global_mem->global_ticketlock);
+		if (global_mem->global_lock_owner == thread_num) {
+			current_errs++;
+			sync_failures++;
+		}
+
+		if (current_errs == 0)
+			lock_owner_delay++;
+
+		/* Wait a small amount of time and then rerun the test */
+		thread_delay(per_thread_mem, BASE_DELAY);
+
+		/* Try to resync all of the threads to increase contention */
+		if ((rs_idx < NUM_RESYNC_BARRIERS) &&
+		    ((cnt % resync_cnt) == (resync_cnt - 1)))
+			odp_barrier_wait(&global_mem->barrier_array[rs_idx++]);
+	}
+
+	if ((global_mem->g_verbose) &&
+	    ((sync_failures != 0) || (is_locked_errs != 0)))
+		printf("\nThread %" PRIu32 " (id=%d core=%d) had %" PRIu32
+		       " sync_failures and %" PRIu32
+		       " is_locked_errs in %" PRIu32 " iterations\n",
+		       thread_num,
+		       per_thread_mem->thread_id, per_thread_mem->thread_core,
+		       sync_failures, is_locked_errs, iterations);
+
+	CU_ASSERT(sync_failures == 0);
+	CU_ASSERT(is_locked_errs == 0);
+
+	thread_finalize(per_thread_mem);
+
+	return CU_get_number_of_failures();
+}
+
+/* Verify rwlock semantics: a read lock must observe no writer owner,
+ * and a write lock must give exclusive ownership for its whole
+ * critical section.
+ */
+static int rwlock_functional_test(void *arg UNUSED)
+{
+	global_shared_mem_t *global_mem;
+	per_thread_mem_t *per_thread_mem;
+	uint32_t thread_num, resync_cnt, rs_idx, iterations, cnt;
+	uint32_t sync_failures, current_errs, lock_owner_delay;
+
+	thread_num = odp_cpu_id() + 1;
+	per_thread_mem = thread_init();
+	global_mem = per_thread_mem->global_mem;
+	/* Iteration count was tuned by the no_lock test that ran first */
+	iterations = global_mem->g_iterations;
+
+	/* Wait here until all of the threads have also reached this point */
+	odp_barrier_wait(&global_mem->global_barrier);
+
+	sync_failures = 0;
+	current_errs = 0;
+	rs_idx = 0;
+	resync_cnt = iterations / NUM_RESYNC_BARRIERS;
+	lock_owner_delay = BASE_DELAY;
+
+	for (cnt = 1; cnt <= iterations; cnt++) {
+		/* Verify that we can obtain a read lock */
+		odp_rwlock_read_lock(&global_mem->global_rwlock);
+
+		/* Verify lock is unowned (no writer holds it) */
+		thread_delay(per_thread_mem, lock_owner_delay);
+		if (global_mem->global_lock_owner != 0) {
+			current_errs++;
+			sync_failures++;
+		}
+
+		/* Release the read lock */
+		odp_rwlock_read_unlock(&global_mem->global_rwlock);
+
+		/* Acquire the shared global lock */
+		odp_rwlock_write_lock(&global_mem->global_rwlock);
+
+		/* Make sure we have lock now AND didn't previously own it */
+		if (global_mem->global_lock_owner != 0) {
+			current_errs++;
+			sync_failures++;
+		}
+
+		/* Now set the global_lock_owner to be us, wait a while, and
+		 * then we see if anyone else has snuck in and changed the
+		 * global_lock_owner to be themselves
+		 */
+		global_mem->global_lock_owner = thread_num;
+		odp_mb_full();
+		thread_delay(per_thread_mem, lock_owner_delay);
+		if (global_mem->global_lock_owner != thread_num) {
+			current_errs++;
+			sync_failures++;
+		}
+
+		/* Release shared lock, and make sure we no longer have it */
+		global_mem->global_lock_owner = 0;
+		odp_mb_full();
+		odp_rwlock_write_unlock(&global_mem->global_rwlock);
+		if (global_mem->global_lock_owner == thread_num) {
+			current_errs++;
+			sync_failures++;
+		}
+
+		if (current_errs == 0)
+			lock_owner_delay++;
+
+		/* Wait a small amount of time and then rerun the test */
+		thread_delay(per_thread_mem, BASE_DELAY);
+
+		/* Try to resync all of the threads to increase contention */
+		if ((rs_idx < NUM_RESYNC_BARRIERS) &&
+		    ((cnt % resync_cnt) == (resync_cnt - 1)))
+			odp_barrier_wait(&global_mem->barrier_array[rs_idx++]);
+	}
+
+	if ((global_mem->g_verbose) && (sync_failures != 0))
+		printf("\nThread %" PRIu32 " (id=%d core=%d) had %" PRIu32
+		       " sync_failures in %" PRIu32 " iterations\n", thread_num,
+		       per_thread_mem->thread_id,
+		       per_thread_mem->thread_core,
+		       sync_failures, iterations);
+
+	CU_ASSERT(sync_failures == 0);
+
+	thread_finalize(per_thread_mem);
+
+	return CU_get_number_of_failures();
+}
+
+/* Verify recursive rwlock semantics: nested read locks must observe no
+ * writer owner, and nested write locks must keep exclusive ownership
+ * until the outermost unlock.
+ */
+static int rwlock_recursive_functional_test(void *arg UNUSED)
+{
+	global_shared_mem_t *global_mem;
+	per_thread_mem_t *per_thread_mem;
+	uint32_t thread_num, resync_cnt, rs_idx, iterations, cnt;
+	uint32_t sync_failures, recursive_errs, current_errs, lock_owner_delay;
+
+	thread_num = odp_cpu_id() + 1;
+	per_thread_mem = thread_init();
+	global_mem = per_thread_mem->global_mem;
+	/* Iteration count was tuned by the no_lock test that ran first */
+	iterations = global_mem->g_iterations;
+
+	/* Wait here until all of the threads have also reached this point */
+	odp_barrier_wait(&global_mem->global_barrier);
+
+	sync_failures = 0;
+	recursive_errs = 0;
+	current_errs = 0;
+	rs_idx = 0;
+	resync_cnt = iterations / NUM_RESYNC_BARRIERS;
+	lock_owner_delay = BASE_DELAY;
+
+	for (cnt = 1; cnt <= iterations; cnt++) {
+		/* Verify that we can obtain a read lock */
+		odp_rwlock_recursive_read_lock(
+			&global_mem->global_recursive_rwlock);
+
+		/* Verify lock is unowned (no writer holds it) */
+		thread_delay(per_thread_mem, lock_owner_delay);
+		if (global_mem->global_lock_owner != 0) {
+			current_errs++;
+			sync_failures++;
+		}
+
+		/* Verify we can get read lock recursively */
+		odp_rwlock_recursive_read_lock(
+			&global_mem->global_recursive_rwlock);
+
+		/* Verify lock is unowned (no writer holds it) */
+		thread_delay(per_thread_mem, lock_owner_delay);
+		if (global_mem->global_lock_owner != 0) {
+			current_errs++;
+			sync_failures++;
+		}
+
+		/* Release the read lock */
+		odp_rwlock_recursive_read_unlock(
+			&global_mem->global_recursive_rwlock);
+		odp_rwlock_recursive_read_unlock(
+			&global_mem->global_recursive_rwlock);
+
+		/* Acquire the shared global lock */
+		odp_rwlock_recursive_write_lock(
+			&global_mem->global_recursive_rwlock);
+
+		/* Make sure we have lock now AND didn't previously own it */
+		if (global_mem->global_lock_owner != 0) {
+			current_errs++;
+			sync_failures++;
+		}
+
+		/* Now set the global_lock_owner to be us, wait a while, and
+		 * then we see if anyone else has snuck in and changed the
+		 * global_lock_owner to be themselves
+		 */
+		global_mem->global_lock_owner = thread_num;
+		odp_mb_full();
+		thread_delay(per_thread_mem, lock_owner_delay);
+		if (global_mem->global_lock_owner != thread_num) {
+			current_errs++;
+			sync_failures++;
+		}
+
+		/* Acquire it again and verify we still own it */
+		odp_rwlock_recursive_write_lock(
+			&global_mem->global_recursive_rwlock);
+		thread_delay(per_thread_mem, lock_owner_delay);
+		if (global_mem->global_lock_owner != thread_num) {
+			current_errs++;
+			recursive_errs++;
+		}
+
+		/* Release the recursive lock and make sure we still own it */
+		odp_rwlock_recursive_write_unlock(
+			&global_mem->global_recursive_rwlock);
+		thread_delay(per_thread_mem, lock_owner_delay);
+		if (global_mem->global_lock_owner != thread_num) {
+			current_errs++;
+			recursive_errs++;
+		}
+
+		/* Release shared lock, and make sure we no longer have it */
+		global_mem->global_lock_owner = 0;
+		odp_mb_full();
+		odp_rwlock_recursive_write_unlock(
+			&global_mem->global_recursive_rwlock);
+		if (global_mem->global_lock_owner == thread_num) {
+			current_errs++;
+			sync_failures++;
+		}
+
+		if (current_errs == 0)
+			lock_owner_delay++;
+
+		/* Wait a small amount of time and then rerun the test */
+		thread_delay(per_thread_mem, BASE_DELAY);
+
+		/* Try to resync all of the threads to increase contention */
+		if ((rs_idx < NUM_RESYNC_BARRIERS) &&
+		    ((cnt % resync_cnt) == (resync_cnt - 1)))
+			odp_barrier_wait(&global_mem->barrier_array[rs_idx++]);
+	}
+
+	if ((global_mem->g_verbose) && (sync_failures != 0))
+		printf("\nThread %" PRIu32 " (id=%d core=%d) had %" PRIu32
+		       " sync_failures and %" PRIu32
+		       " recursive_errs in %" PRIu32
+		       " iterations\n", thread_num,
+		       per_thread_mem->thread_id,
+		       per_thread_mem->thread_core,
+		       sync_failures, recursive_errs, iterations);
+
+	CU_ASSERT(sync_failures == 0);
+	CU_ASSERT(recursive_errs == 0);
+
+	thread_finalize(per_thread_mem);
+
+	return CU_get_number_of_failures();
+}
+
+/* Thread-unsafe tests */
+void lock_test_no_lock_functional(void)
+{
+ pthrd_arg arg;
+
+ arg.numthrds = global_mem->g_num_threads;
+ odp_cunit_thread_create(no_lock_functional_test, &arg);
+ odp_cunit_thread_exit(&arg);
+}
+
+/* This suite must run first: it tunes g_iterations for all lock tests */
+odp_testinfo_t lock_suite_no_locking[] = {
+	ODP_TEST_INFO(lock_test_no_lock_functional), /* must be first */
+	ODP_TEST_INFO_NULL
+};
+
+/* Spin lock tests */
+void lock_test_spinlock_api(void)
+{
+ pthrd_arg arg;
+
+ arg.numthrds = global_mem->g_num_threads;
+ odp_cunit_thread_create(spinlock_api_tests, &arg);
+ odp_cunit_thread_exit(&arg);
+}
+
+/* Initialize the shared spinlock, then run the contention test */
+void lock_test_spinlock_functional(void)
+{
+	pthrd_arg thr_arg;
+
+	thr_arg.numthrds = global_mem->g_num_threads;
+	odp_spinlock_init(&global_mem->global_spinlock);
+	odp_cunit_thread_create(spinlock_functional_test, &thr_arg);
+	odp_cunit_thread_exit(&thr_arg);
+}
+
+/* Exercise the recursive spinlock API on all worker threads */
+void lock_test_spinlock_recursive_api(void)
+{
+	pthrd_arg thr_arg;
+
+	thr_arg.numthrds = global_mem->g_num_threads;
+	odp_cunit_thread_create(spinlock_recursive_api_tests, &thr_arg);
+	odp_cunit_thread_exit(&thr_arg);
+}
+
+/* Initialize the shared recursive spinlock, then run the contention test */
+void lock_test_spinlock_recursive_functional(void)
+{
+	pthrd_arg thr_arg;
+
+	thr_arg.numthrds = global_mem->g_num_threads;
+	odp_spinlock_recursive_init(&global_mem->global_recursive_spinlock);
+	odp_cunit_thread_create(spinlock_recursive_functional_test, &thr_arg);
+	odp_cunit_thread_exit(&thr_arg);
+}
+
+/* Plain spinlock suite: API checks then functional contention test */
+odp_testinfo_t lock_suite_spinlock[] = {
+	ODP_TEST_INFO(lock_test_spinlock_api),
+	ODP_TEST_INFO(lock_test_spinlock_functional),
+	ODP_TEST_INFO_NULL
+};
+
+/* Recursive spinlock suite: API checks then functional contention test */
+odp_testinfo_t lock_suite_spinlock_recursive[] = {
+	ODP_TEST_INFO(lock_test_spinlock_recursive_api),
+	ODP_TEST_INFO(lock_test_spinlock_recursive_functional),
+	ODP_TEST_INFO_NULL
+};
+
+/* Ticket lock tests */
+void lock_test_ticketlock_api(void)
+{
+ pthrd_arg arg;
+
+ arg.numthrds = global_mem->g_num_threads;
+ odp_cunit_thread_create(ticketlock_api_tests, &arg);
+ odp_cunit_thread_exit(&arg);
+}
+
+/* Initialize the shared ticketlock, then run the contention test */
+void lock_test_ticketlock_functional(void)
+{
+	pthrd_arg thr_arg;
+
+	thr_arg.numthrds = global_mem->g_num_threads;
+	odp_ticketlock_init(&global_mem->global_ticketlock);
+
+	odp_cunit_thread_create(ticketlock_functional_test, &thr_arg);
+	odp_cunit_thread_exit(&thr_arg);
+}
+
+/* Ticketlock suite: API checks then functional contention test */
+odp_testinfo_t lock_suite_ticketlock[] = {
+	ODP_TEST_INFO(lock_test_ticketlock_api),
+	ODP_TEST_INFO(lock_test_ticketlock_functional),
+	ODP_TEST_INFO_NULL
+};
+
+/* RW lock tests */
+void lock_test_rwlock_api(void)
+{
+ pthrd_arg arg;
+
+ arg.numthrds = global_mem->g_num_threads;
+ odp_cunit_thread_create(rwlock_api_tests, &arg);
+ odp_cunit_thread_exit(&arg);
+}
+
+/* Initialize the shared rwlock, then run the contention test */
+void lock_test_rwlock_functional(void)
+{
+	pthrd_arg thr_arg;
+
+	thr_arg.numthrds = global_mem->g_num_threads;
+	odp_rwlock_init(&global_mem->global_rwlock);
+	odp_cunit_thread_create(rwlock_functional_test, &thr_arg);
+	odp_cunit_thread_exit(&thr_arg);
+}
+
+/* RW lock suite: API checks then functional contention test */
+odp_testinfo_t lock_suite_rwlock[] = {
+	ODP_TEST_INFO(lock_test_rwlock_api),
+	ODP_TEST_INFO(lock_test_rwlock_functional),
+	ODP_TEST_INFO_NULL
+};
+
+/* Exercise the recursive rwlock API on all worker threads */
+void lock_test_rwlock_recursive_api(void)
+{
+	pthrd_arg thr_arg;
+
+	thr_arg.numthrds = global_mem->g_num_threads;
+	odp_cunit_thread_create(rwlock_recursive_api_tests, &thr_arg);
+	odp_cunit_thread_exit(&thr_arg);
+}
+
+/* Initialize the shared recursive rwlock, then run the contention test */
+void lock_test_rwlock_recursive_functional(void)
+{
+	pthrd_arg thr_arg;
+
+	thr_arg.numthrds = global_mem->g_num_threads;
+	odp_rwlock_recursive_init(&global_mem->global_recursive_rwlock);
+	odp_cunit_thread_create(rwlock_recursive_functional_test, &thr_arg);
+	odp_cunit_thread_exit(&thr_arg);
+}
+
+/* Recursive RW lock suite: API checks then functional contention test */
+odp_testinfo_t lock_suite_rwlock_recursive[] = {
+	ODP_TEST_INFO(lock_test_rwlock_recursive_api),
+	ODP_TEST_INFO(lock_test_rwlock_recursive_functional),
+	ODP_TEST_INFO_NULL
+};
+
+/* Suite init: (re)initialize the global start barrier and the resync
+ * barrier array for the configured number of worker threads.
+ */
+int lock_suite_init(void)
+{
+	uint32_t i;
+	uint32_t nthreads = global_mem->g_num_threads;
+
+	odp_barrier_init(&global_mem->global_barrier, nthreads);
+	for (i = 0; i < NUM_RESYNC_BARRIERS; i++)
+		odp_barrier_init(&global_mem->barrier_array[i], nthreads);
+
+	return 0;
+}
+
+/* Executable init: bring up ODP, reserve the shared test state and size
+ * the worker thread count to the available CPUs.
+ * Returns 0 on success, -1 on failure.
+ * NOTE(review): failure paths do not call odp_term_local/odp_term_global
+ * or free the shm; the process exits on error anyway — confirm this
+ * matches the other validation modules' convention.
+ */
+int lock_init(odp_instance_t *inst)
+{
+	uint32_t workers_count, max_threads;
+	int ret = 0;
+	odp_cpumask_t mask;
+
+	if (0 != odp_init_global(inst, NULL, NULL)) {
+		fprintf(stderr, "error: odp_init_global() failed.\n");
+		return -1;
+	}
+	if (0 != odp_init_local(*inst, ODP_THREAD_CONTROL)) {
+		fprintf(stderr, "error: odp_init_local() failed.\n");
+		return -1;
+	}
+
+	global_shm = odp_shm_reserve(GLOBAL_SHM_NAME,
+				     sizeof(global_shared_mem_t), 64,
+				     ODP_SHM_SW_ONLY);
+	if (ODP_SHM_INVALID == global_shm) {
+		fprintf(stderr, "Unable reserve memory for global_shm\n");
+		return -1;
+	}
+
+	global_mem = odp_shm_addr(global_shm);
+	memset(global_mem, 0, sizeof(global_shared_mem_t));
+
+	global_mem->g_num_threads = MAX_WORKERS;
+	global_mem->g_iterations = 0; /* tuned by first test */
+	global_mem->g_verbose = VERBOSE;
+
+	workers_count = odp_cpumask_default_worker(&mask, 0);
+
+	/* Cap the worker count to what the platform actually provides */
+	max_threads = (workers_count >= MAX_WORKERS) ?
+			MAX_WORKERS : workers_count;
+
+	if (max_threads < global_mem->g_num_threads) {
+		printf("Requested num of threads is too large\n");
+		printf("reducing from %" PRIu32 " to %" PRIu32 "\n",
+		       global_mem->g_num_threads,
+		       max_threads);
+		global_mem->g_num_threads = max_threads;
+	}
+
+	printf("Num of threads used = %" PRIu32 "\n",
+	       global_mem->g_num_threads);
+
+	return ret;
+}
+
+/* Registry of all lock suites; each reuses lock_suite_init to reset the
+ * barriers. The no-locking suite must stay first because it tunes the
+ * iteration count used by every other suite.
+ */
+odp_suiteinfo_t lock_suites[] = {
+	{"nolocking", lock_suite_init, NULL,
+		lock_suite_no_locking}, /* must be first */
+	{"spinlock", lock_suite_init, NULL,
+		lock_suite_spinlock},
+	{"spinlock_recursive", lock_suite_init, NULL,
+		lock_suite_spinlock_recursive},
+	{"ticketlock", lock_suite_init, NULL,
+		lock_suite_ticketlock},
+	{"rwlock", lock_suite_init, NULL,
+		lock_suite_rwlock},
+	{"rwlock_recursive", lock_suite_init, NULL,
+		lock_suite_rwlock_recursive},
+	ODP_SUITE_INFO_NULL
+};
+
+/* Entry point: parse common CUnit options, register the global init
+ * hook and the lock test suites, then run them.
+ */
+int lock_main(int argc, char *argv[])
+{
+	int status;
+
+	/* parse common options: */
+	if (odp_cunit_parse_options(argc, argv))
+		return -1;
+
+	odp_cunit_register_global_init(lock_init);
+
+	status = odp_cunit_register(lock_suites);
+	if (status == 0)
+		status = odp_cunit_run();
+
+	return status;
+}
diff --git a/test/common_plat/validation/api/lock/lock.h b/test/common_plat/validation/api/lock/lock.h
new file mode 100644
index 000000000..5adc63352
--- /dev/null
+++ b/test/common_plat/validation/api/lock/lock.h
@@ -0,0 +1,45 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _ODP_TEST_LOCK_H_
+#define _ODP_TEST_LOCK_H_
+
+#include <odp_cunit_common.h>
+
+/* test functions: */
+void lock_test_no_lock_functional(void);
+void lock_test_spinlock_api(void);
+void lock_test_spinlock_functional(void);
+void lock_test_spinlock_recursive_api(void);
+void lock_test_spinlock_recursive_functional(void);
+void lock_test_ticketlock_api(void);
+void lock_test_ticketlock_functional(void);
+void lock_test_rwlock_api(void);
+void lock_test_rwlock_functional(void);
+void lock_test_rwlock_recursive_api(void);
+void lock_test_rwlock_recursive_functional(void);
+
+/* test arrays: */
+extern odp_testinfo_t lock_suite_no_locking[];
+extern odp_testinfo_t lock_suite_spinlock[];
+extern odp_testinfo_t lock_suite_spinlock_recursive[];
+extern odp_testinfo_t lock_suite_ticketlock[];
+extern odp_testinfo_t lock_suite_rwlock[];
+extern odp_testinfo_t lock_suite_rwlock_recursive[];
+
+/* test array init/term functions: */
+int lock_suite_init(void);
+
+/* test registry: */
+extern odp_suiteinfo_t lock_suites[];
+
+/* executable init/term functions: */
+int lock_init(odp_instance_t *inst);
+
+/* main test program: */
+int lock_main(int argc, char *argv[]);
+
+#endif /* _ODP_TEST_LOCK_H_ */
diff --git a/test/common_plat/validation/api/lock/lock_main.c b/test/common_plat/validation/api/lock/lock_main.c
new file mode 100644
index 000000000..5a30f02b4
--- /dev/null
+++ b/test/common_plat/validation/api/lock/lock_main.c
@@ -0,0 +1,12 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "lock.h"
+
+int main(int argc, char *argv[])
+{
+	/* Delegate to the lock validation test runner */
+	return lock_main(argc, argv);
+}
diff --git a/test/common_plat/validation/api/packet/.gitignore b/test/common_plat/validation/api/packet/.gitignore
new file mode 100644
index 000000000..c05530d2d
--- /dev/null
+++ b/test/common_plat/validation/api/packet/.gitignore
@@ -0,0 +1 @@
+packet_main
diff --git a/test/common_plat/validation/api/packet/Makefile.am b/test/common_plat/validation/api/packet/Makefile.am
new file mode 100644
index 000000000..d8ebc1a23
--- /dev/null
+++ b/test/common_plat/validation/api/packet/Makefile.am
@@ -0,0 +1,10 @@
+include ../Makefile.inc
+
+noinst_LTLIBRARIES = libtestpacket.la
+libtestpacket_la_SOURCES = packet.c
+
+test_PROGRAMS = packet_main$(EXEEXT)
+dist_packet_main_SOURCES = packet_main.c
+packet_main_LDADD = libtestpacket.la $(LIBCUNIT_COMMON) $(LIBODP)
+
+EXTRA_DIST = packet.h
diff --git a/test/common_plat/validation/api/packet/packet.c b/test/common_plat/validation/api/packet/packet.c
new file mode 100644
index 000000000..a4426e22f
--- /dev/null
+++ b/test/common_plat/validation/api/packet/packet.c
@@ -0,0 +1,1369 @@
+/* Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdlib.h>
+
+#include <odp_api.h>
+#include <odp_cunit_common.h>
+#include "packet.h"
+
+#define PACKET_BUF_LEN ODP_CONFIG_PACKET_SEG_LEN_MIN
+/* Reserve some tailroom for tests */
+#define PACKET_TAILROOM_RESERVE 4
+
+/* Pools created in packet_suite_init(): default user area, no user
+ * area, and double-sized user area */
+static odp_pool_t packet_pool, packet_pool_no_uarea, packet_pool_double_uarea;
+/* Length of test_packet; sized to fit one segment minus the reserve */
+static uint32_t packet_len;
+
+/* Length of segmented_test_packet (largest allocation that succeeded) */
+static uint32_t segmented_packet_len;
+/* Set from odp_packet_is_segmented() on the large packet at init */
+static odp_bool_t segmentation_supported = true;
+
+odp_packet_t test_packet, segmented_test_packet;
+
+/* Reference pattern copied into each test packet's user area */
+static struct udata_struct {
+	uint64_t u64;
+	uint32_t u32;
+	char str[10];
+} test_packet_udata = {
+	123456,
+	789912,
+	"abcdefg",
+};
+
+/* Suite init: create the three packet pools, allocate the shared test
+ * packets, fill them with a repeating byte pattern and initialize their
+ * user areas.
+ * Returns 0 on success, -1 on failure.
+ * Bug fixes vs. original: test_packet allocation is validated before it
+ * is written to; the shrink loop cannot underflow segmented_packet_len;
+ * pools and packets are released on every error path (the original
+ * leaked them on the later failure returns). */
+int packet_suite_init(void)
+{
+	odp_pool_param_t params;
+	odp_pool_capability_t capa;
+	struct udata_struct *udat;
+	uint32_t udat_size;
+	uint8_t data = 0;
+	uint32_t i;
+
+	if (odp_pool_capability(&capa) < 0)
+		return -1;
+
+	/* Leave some room in the segment for tailroom tests */
+	packet_len = capa.pkt.min_seg_len - PACKET_TAILROOM_RESERVE;
+
+	if (capa.pkt.max_len) {
+		segmented_packet_len = capa.pkt.max_len;
+	} else {
+		segmented_packet_len = capa.pkt.min_seg_len *
+				       capa.pkt.max_segs_per_pkt;
+	}
+
+	odp_pool_param_init(&params);
+
+	params.type = ODP_POOL_PACKET;
+	params.pkt.seg_len = capa.pkt.min_seg_len;
+	params.pkt.len = capa.pkt.min_seg_len;
+	params.pkt.num = 100;
+	params.pkt.uarea_size = sizeof(struct udata_struct);
+
+	packet_pool = odp_pool_create("packet_pool", &params);
+	if (packet_pool == ODP_POOL_INVALID)
+		return -1;
+
+	params.pkt.uarea_size = 0;
+	packet_pool_no_uarea = odp_pool_create("packet_pool_no_uarea",
+					       &params);
+	if (packet_pool_no_uarea == ODP_POOL_INVALID) {
+		odp_pool_destroy(packet_pool);
+		return -1;
+	}
+
+	params.pkt.uarea_size = 2 * sizeof(struct udata_struct);
+	packet_pool_double_uarea = odp_pool_create("packet_pool_double_uarea",
+						   &params);
+
+	if (packet_pool_double_uarea == ODP_POOL_INVALID) {
+		odp_pool_destroy(packet_pool_no_uarea);
+		odp_pool_destroy(packet_pool);
+		return -1;
+	}
+
+	test_packet = odp_packet_alloc(packet_pool, packet_len);
+	/* Bug fix: validate the allocation before writing into it */
+	if (test_packet == ODP_PACKET_INVALID)
+		goto err_pools;
+
+	for (i = 0; i < packet_len; i++) {
+		odp_packet_copy_from_mem(test_packet, i, 1, &data);
+		data++;
+	}
+
+	/* Try to allocate the largest possible packet to see
+	 * if segmentation is supported */
+	do {
+		segmented_test_packet = odp_packet_alloc(packet_pool,
+							 segmented_packet_len);
+		if (segmented_test_packet == ODP_PACKET_INVALID) {
+			/* Bug fix: give up before the length underflows */
+			if (segmented_packet_len <= capa.pkt.min_seg_len)
+				goto err_pkt;
+			segmented_packet_len -= capa.pkt.min_seg_len;
+		}
+	} while (segmented_test_packet == ODP_PACKET_INVALID);
+
+	if (odp_packet_is_valid(test_packet) == 0 ||
+	    odp_packet_is_valid(segmented_test_packet) == 0)
+		goto err_seg_pkt;
+
+	segmentation_supported = odp_packet_is_segmented(segmented_test_packet);
+
+	data = 0;
+	for (i = 0; i < segmented_packet_len; i++) {
+		odp_packet_copy_from_mem(segmented_test_packet, i, 1, &data);
+		data++;
+	}
+
+	udat = odp_packet_user_area(test_packet);
+	udat_size = odp_packet_user_area_size(test_packet);
+	if (!udat || udat_size != sizeof(struct udata_struct))
+		goto err_seg_pkt;
+	odp_pool_print(packet_pool);
+	memcpy(udat, &test_packet_udata, sizeof(struct udata_struct));
+
+	udat = odp_packet_user_area(segmented_test_packet);
+	udat_size = odp_packet_user_area_size(segmented_test_packet);
+	if (udat == NULL || udat_size != sizeof(struct udata_struct))
+		goto err_seg_pkt;
+	memcpy(udat, &test_packet_udata, sizeof(struct udata_struct));
+
+	return 0;
+
+err_seg_pkt:
+	odp_packet_free(segmented_test_packet);
+err_pkt:
+	odp_packet_free(test_packet);
+err_pools:
+	odp_pool_destroy(packet_pool_double_uarea);
+	odp_pool_destroy(packet_pool_no_uarea);
+	odp_pool_destroy(packet_pool);
+	return -1;
+}
+
+/* Suite term: free the shared test packets and destroy all pools.
+ * Returns 0 on success, -1 if any pool destroy failed.
+ * Bug fix: the original short-circuited on the first failing
+ * odp_pool_destroy() and leaked the remaining pools; now every pool is
+ * destroyed and failure is still reported. */
+int packet_suite_term(void)
+{
+	int ret = 0;
+
+	odp_packet_free(test_packet);
+	odp_packet_free(segmented_test_packet);
+
+	if (odp_pool_destroy(packet_pool_double_uarea) != 0)
+		ret = -1;
+	if (odp_pool_destroy(packet_pool_no_uarea) != 0)
+		ret = -1;
+	if (odp_pool_destroy(packet_pool) != 0)
+		ret = -1;
+
+	return ret;
+}
+
+/* Allocate and free a packet from a single-packet pool and verify the
+ * buffer is returned to the pool on free.
+ * Bug fix: the odp_pool_create() result was used without checking for
+ * ODP_POOL_INVALID. */
+void packet_test_alloc_free(void)
+{
+	odp_pool_t pool;
+	odp_packet_t packet;
+	odp_pool_param_t params;
+	odp_pool_capability_t capa;
+
+	CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
+
+	odp_pool_param_init(&params);
+
+	params.type = ODP_POOL_PACKET;
+	params.pkt.seg_len = capa.pkt.min_seg_len;
+	params.pkt.len = capa.pkt.min_seg_len;
+	params.pkt.num = 1;
+
+	pool = odp_pool_create("packet_pool_alloc", &params);
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+	/* Allocate the only buffer from the pool */
+	packet = odp_packet_alloc(pool, packet_len);
+	CU_ASSERT_FATAL(packet != ODP_PACKET_INVALID);
+	CU_ASSERT(odp_packet_len(packet) == packet_len);
+	CU_ASSERT(odp_event_type(odp_packet_to_event(packet)) ==
+		  ODP_EVENT_PACKET);
+	CU_ASSERT(odp_packet_to_u64(packet) !=
+		  odp_packet_to_u64(ODP_PACKET_INVALID));
+
+	/* Pool should have only one packet */
+	CU_ASSERT_FATAL(odp_packet_alloc(pool, packet_len)
+			== ODP_PACKET_INVALID);
+
+	odp_packet_free(packet);
+
+	/* Check that the buffer was returned back to the pool */
+	packet = odp_packet_alloc(pool, packet_len);
+	CU_ASSERT_FATAL(packet != ODP_PACKET_INVALID);
+	CU_ASSERT(odp_packet_len(packet) == packet_len);
+
+	odp_packet_free(packet);
+	CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Wrapper to call odp_packet_alloc_multi multiple times until
+ * either no more buffers are returned, or num buffers were allocated.
+ * Returns the total number of packets placed into pkt[]. */
+static int packet_alloc_multi(odp_pool_t pool, uint32_t pkt_len,
+			      odp_packet_t pkt[], int num)
+{
+	int ret, total = 0;
+
+	do {
+		/* Each call may legally return fewer than requested */
+		ret = odp_packet_alloc_multi(pool, pkt_len, pkt + total,
+					     num - total);
+		CU_ASSERT(ret >= 0);
+		CU_ASSERT(ret <= num - total);
+		total += ret;
+	} while (total < num && ret);
+
+	return total;
+}
+
+/* Exhaust two pools with multi-alloc, free everything with a single
+ * multi-free across both pools, then verify both pools refill. */
+void packet_test_alloc_free_multi(void)
+{
+	const int num_pkt = 2;
+	odp_pool_t pool[2];
+	int i, ret;
+	odp_packet_t packet[2 * num_pkt + 1];
+	odp_packet_t inval_pkt[num_pkt];
+	odp_pool_param_t params;
+	odp_pool_capability_t capa;
+
+	CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
+
+	odp_pool_param_init(&params);
+
+	params.type = ODP_POOL_PACKET;
+	params.pkt.seg_len = capa.pkt.min_seg_len;
+	params.pkt.len = capa.pkt.min_seg_len;
+	params.pkt.num = num_pkt;
+
+	pool[0] = odp_pool_create("packet_pool_alloc_multi_0", &params);
+	pool[1] = odp_pool_create("packet_pool_alloc_multi_1", &params);
+	CU_ASSERT_FATAL(pool[0] != ODP_POOL_INVALID);
+	CU_ASSERT_FATAL(pool[1] != ODP_POOL_INVALID);
+
+	/* Allocate all the packets from the pools */
+
+	/* Ask for one more than the pool holds; exactly num_pkt expected */
+	ret = packet_alloc_multi(pool[0], packet_len, &packet[0], num_pkt + 1);
+	CU_ASSERT_FATAL(ret == num_pkt);
+	ret = packet_alloc_multi(pool[1], packet_len,
+				 &packet[num_pkt], num_pkt + 1);
+	CU_ASSERT_FATAL(ret == num_pkt);
+
+	for (i = 0; i < 2 * num_pkt; ++i) {
+		CU_ASSERT(odp_packet_len(packet[i]) == packet_len);
+		CU_ASSERT(odp_event_type(odp_packet_to_event(packet[i])) ==
+			  ODP_EVENT_PACKET);
+		CU_ASSERT(odp_packet_to_u64(packet[i]) !=
+			  odp_packet_to_u64(ODP_PACKET_INVALID));
+	}
+
+	/* Pools should have no more packets */
+	ret = odp_packet_alloc_multi(pool[0], packet_len, inval_pkt, num_pkt);
+	CU_ASSERT(ret == 0);
+	ret = odp_packet_alloc_multi(pool[1], packet_len, inval_pkt, num_pkt);
+	CU_ASSERT(ret == 0);
+
+	/* Free all packets from all pools at once */
+	odp_packet_free_multi(packet, 2 * num_pkt);
+
+	/* Check that all the packets were returned back to their pools */
+	ret = packet_alloc_multi(pool[0], packet_len, &packet[0], num_pkt);
+	CU_ASSERT(ret);
+	ret = packet_alloc_multi(pool[1], packet_len,
+				 &packet[num_pkt], num_pkt);
+	CU_ASSERT(ret);
+
+	for (i = 0; i < 2 * num_pkt; ++i) {
+		CU_ASSERT_FATAL(packet[i] != ODP_PACKET_INVALID);
+		CU_ASSERT(odp_packet_len(packet[i]) == packet_len);
+	}
+	odp_packet_free_multi(packet, 2 * num_pkt);
+	CU_ASSERT(odp_pool_destroy(pool[0]) == 0);
+	CU_ASSERT(odp_pool_destroy(pool[1]) == 0);
+}
+
+/* Allocate the largest packet the pool supports, check its length, and
+ * require a segmented packet when segmentation is supported. */
+void packet_test_alloc_segmented(void)
+{
+	odp_pool_capability_t pool_capa;
+	odp_packet_t big_pkt;
+	uint32_t alloc_len;
+
+	CU_ASSERT_FATAL(odp_pool_capability(&pool_capa) == 0);
+
+	/* max_len == 0 means no stated limit; derive one from segments */
+	alloc_len = pool_capa.pkt.max_len ?
+		pool_capa.pkt.max_len :
+		pool_capa.pkt.min_seg_len * pool_capa.pkt.max_segs_per_pkt;
+
+	big_pkt = odp_packet_alloc(packet_pool, alloc_len);
+	CU_ASSERT_FATAL(big_pkt != ODP_PACKET_INVALID);
+	CU_ASSERT(odp_packet_len(big_pkt) == alloc_len);
+	if (segmentation_supported)
+		CU_ASSERT(odp_packet_is_segmented(big_pkt) == 1);
+	odp_packet_free(big_pkt);
+}
+
+/* Convert a packet to an event and back, verifying the event type and
+ * that the round-trip yields a valid packet handle. */
+void packet_test_event_conversion(void)
+{
+	odp_packet_t src = test_packet;
+	odp_packet_t roundtrip;
+	odp_event_t event;
+
+	event = odp_packet_to_event(src);
+	CU_ASSERT_FATAL(event != ODP_EVENT_INVALID);
+	CU_ASSERT(odp_event_type(event) == ODP_EVENT_PACKET);
+
+	roundtrip = odp_packet_from_event(event);
+	CU_ASSERT_FATAL(roundtrip != ODP_PACKET_INVALID);
+	/** @todo: Need an API to compare packets */
+}
+
+/* Exercise basic per-packet metadata: head/data pointers, pool and
+ * input handles, flow hash set/clear, timestamp set/clear. */
+void packet_test_basic_metadata(void)
+{
+	odp_packet_t pkt = test_packet;
+	odp_time_t ts;
+
+	CU_ASSERT_PTR_NOT_NULL(odp_packet_head(pkt));
+	CU_ASSERT_PTR_NOT_NULL(odp_packet_data(pkt));
+
+	CU_ASSERT(odp_packet_pool(pkt) != ODP_POOL_INVALID);
+	/* Packet was allocated by application so shouldn't have valid pktio. */
+	CU_ASSERT(odp_packet_input(pkt) == ODP_PKTIO_INVALID);
+	CU_ASSERT(odp_packet_input_index(pkt) < 0);
+
+	/* Flow hash must round-trip and be clearable */
+	odp_packet_flow_hash_set(pkt, UINT32_MAX);
+	CU_ASSERT(odp_packet_has_flow_hash(pkt));
+	CU_ASSERT(odp_packet_flow_hash(pkt) == UINT32_MAX);
+	odp_packet_has_flow_hash_clr(pkt);
+	CU_ASSERT(!odp_packet_has_flow_hash(pkt));
+
+	/* Timestamp must round-trip (odp_time_cmp == 0 means equal) */
+	ts = odp_time_global();
+	odp_packet_ts_set(pkt, ts);
+	CU_ASSERT_FATAL(odp_packet_has_ts(pkt));
+	CU_ASSERT(!odp_time_cmp(ts, odp_packet_ts(pkt)));
+	odp_packet_has_ts_clr(pkt);
+	CU_ASSERT(!odp_packet_has_ts(pkt));
+}
+
+/* Verify packet length, headroom and tailroom against the pool
+ * capability minimums, and that the buffer covers all three. */
+void packet_test_length(void)
+{
+	odp_pool_capability_t capa;
+	odp_packet_t pkt = test_packet;
+	uint32_t total_buf, head, tail;
+
+	CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
+
+	head = odp_packet_headroom(pkt);
+	tail = odp_packet_tailroom(pkt);
+	total_buf = odp_packet_buf_len(pkt);
+
+	CU_ASSERT(odp_packet_len(pkt) == packet_len);
+	CU_ASSERT(head >= capa.pkt.min_headroom);
+	CU_ASSERT(tail >= capa.pkt.min_tailroom);
+
+	/* The whole buffer must hold the data plus both rooms */
+	CU_ASSERT(total_buf >= packet_len + head + tail);
+}
+
+/* Smoke test: prefetching the whole packet data must not crash. */
+void packet_test_prefetch(void)
+{
+	uint32_t whole_len = odp_packet_len(test_packet);
+
+	odp_packet_prefetch(test_packet, 0, whole_len);
+	CU_PASS();
+}
+
+/* Smoke test: a valid packet must print without crashing. */
+void packet_test_debug(void)
+{
+	odp_packet_t pkt = test_packet;
+
+	CU_ASSERT(odp_packet_is_valid(pkt) == 1);
+	odp_packet_print(pkt);
+}
+
+/* Exercise the user context pointer (set/get/restore) and check the
+ * user area contents written at suite init; ends with a reset. */
+void packet_test_context(void)
+{
+	odp_packet_t pkt = test_packet;
+	char ptr_test_value = 2;
+	void *prev_ptr;
+	struct udata_struct *udat;
+
+	/* User pointer must round-trip; restore the original afterwards */
+	prev_ptr = odp_packet_user_ptr(pkt);
+	odp_packet_user_ptr_set(pkt, &ptr_test_value);
+	CU_ASSERT(odp_packet_user_ptr(pkt) == &ptr_test_value);
+	odp_packet_user_ptr_set(pkt, prev_ptr);
+
+	udat = odp_packet_user_area(pkt);
+	CU_ASSERT_PTR_NOT_NULL(udat);
+	CU_ASSERT(odp_packet_user_area_size(pkt) ==
+		  sizeof(struct udata_struct));
+	CU_ASSERT(memcmp(udat, &test_packet_udata, sizeof(struct udata_struct))
+		  == 0);
+
+	/* NOTE(review): reset presumably clears metadata set above;
+	 * shared test_packet is reused by later tests */
+	odp_packet_reset(pkt, packet_len);
+}
+
+/* Set L2/L3/L4 offsets first to the same value (pointers must match),
+ * then to distinct values (pointers must differ). */
+void packet_test_layer_offsets(void)
+{
+	odp_packet_t pkt = test_packet;
+	uint8_t *l2_addr, *l3_addr, *l4_addr;
+	uint32_t seg_len;
+	const uint32_t l2_off = 2;
+	const uint32_t l3_off = l2_off + 14;
+	const uint32_t l4_off = l3_off + 14;
+	int ret;
+
+	/* Set offsets to the same value */
+	ret = odp_packet_l2_offset_set(pkt, l2_off);
+	CU_ASSERT(ret == 0);
+	ret = odp_packet_l3_offset_set(pkt, l2_off);
+	CU_ASSERT(ret == 0);
+	ret = odp_packet_l4_offset_set(pkt, l2_off);
+	CU_ASSERT(ret == 0);
+
+	/* Addresses should be the same */
+	l2_addr = odp_packet_l2_ptr(pkt, &seg_len);
+	CU_ASSERT(seg_len != 0);
+	l3_addr = odp_packet_l3_ptr(pkt, &seg_len);
+	CU_ASSERT(seg_len != 0);
+	l4_addr = odp_packet_l4_ptr(pkt, &seg_len);
+	CU_ASSERT(seg_len != 0);
+	CU_ASSERT_PTR_NOT_NULL(l2_addr);
+	CU_ASSERT(l2_addr == l3_addr);
+	CU_ASSERT(l2_addr == l4_addr);
+
+	/* Set offsets to the different values */
+	odp_packet_l2_offset_set(pkt, l2_off);
+	CU_ASSERT(odp_packet_l2_offset(pkt) == l2_off);
+	odp_packet_l3_offset_set(pkt, l3_off);
+	CU_ASSERT(odp_packet_l3_offset(pkt) == l3_off);
+	odp_packet_l4_offset_set(pkt, l4_off);
+	CU_ASSERT(odp_packet_l4_offset(pkt) == l4_off);
+
+	/* Addresses should not be the same */
+	l2_addr = odp_packet_l2_ptr(pkt, NULL);
+	CU_ASSERT_PTR_NOT_NULL(l2_addr);
+	l3_addr = odp_packet_l3_ptr(pkt, NULL);
+	CU_ASSERT_PTR_NOT_NULL(l3_addr);
+	l4_addr = odp_packet_l4_ptr(pkt, NULL);
+	CU_ASSERT_PTR_NOT_NULL(l4_addr);
+
+	CU_ASSERT(l2_addr != l3_addr);
+	CU_ASSERT(l2_addr != l4_addr);
+	CU_ASSERT(l3_addr != l4_addr);
+}
+
+/* Shift the packet head by 'shift' bytes (positive = grow towards the
+ * head, negative = shrink) and verify the resulting headroom, segment
+ * length, packet length and pointers. Uses push/pull when the shift
+ * fits in the current headroom/segment, extend/trunc otherwise (which
+ * may reallocate, hence *pkt is passed by reference). */
+static void _verify_headroom_shift(odp_packet_t *pkt,
+				   int shift)
+{
+	uint32_t room = odp_packet_headroom(*pkt);
+	uint32_t seg_data_len = odp_packet_seg_len(*pkt);
+	uint32_t pkt_data_len = odp_packet_len(*pkt);
+	void *data;
+	char *data_orig = odp_packet_data(*pkt);
+	char *head_orig = odp_packet_head(*pkt);
+	uint32_t seg_len;
+	int extended, rc;
+
+	if (shift >= 0) {
+		if ((uint32_t)abs(shift) <= room) {
+			data = odp_packet_push_head(*pkt, shift);
+			extended = 0;
+		} else {
+			rc = odp_packet_extend_head(pkt, shift,
+						    &data, &seg_len);
+			extended = 1;
+		}
+	} else {
+		if ((uint32_t)abs(shift) <= seg_data_len) {
+			data = odp_packet_pull_head(*pkt, -shift);
+			extended = 0;
+		} else {
+			rc = odp_packet_trunc_head(pkt, -shift,
+						   &data, &seg_len);
+			extended = 1;
+		}
+	}
+
+	CU_ASSERT_PTR_NOT_NULL(data);
+	/* NOTE: rc is only assigned on the extend/trunc paths and is
+	 * only read when extended == 1, so it is never used
+	 * uninitialized despite what a compiler warning may suggest */
+	if (extended) {
+		CU_ASSERT(rc >= 0);
+		if (shift >= 0) {
+			/* New first segment holds only the overflow part */
+			CU_ASSERT(odp_packet_seg_len(*pkt) == shift - room);
+		} else {
+			CU_ASSERT(odp_packet_headroom(*pkt) >=
+				  (uint32_t)abs(shift) - seg_data_len);
+		}
+		CU_ASSERT(odp_packet_head(*pkt) != head_orig);
+	} else {
+		CU_ASSERT(odp_packet_headroom(*pkt) == room - shift);
+		CU_ASSERT(odp_packet_seg_len(*pkt) == seg_data_len + shift);
+		CU_ASSERT(data == data_orig - shift);
+		CU_ASSERT(odp_packet_head(*pkt) == head_orig);
+	}
+
+	CU_ASSERT(odp_packet_len(*pkt) == pkt_data_len + shift);
+	CU_ASSERT(odp_packet_data(*pkt) == data);
+}
+
+/* Exercise headroom push/pull (and extend/trunc when segmentation is
+ * supported) on a private copy of the test packet. */
+void packet_test_headroom(void)
+{
+	odp_packet_t pkt = odp_packet_copy(test_packet,
+					   odp_packet_pool(test_packet));
+	uint32_t room;
+	uint32_t seg_data_len;
+	uint32_t push_val, pull_val;
+	odp_pool_capability_t capa;
+
+	CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
+
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	room = odp_packet_headroom(pkt);
+
+	CU_ASSERT(room >= capa.pkt.min_headroom);
+
+	seg_data_len = odp_packet_seg_len(pkt);
+	CU_ASSERT(seg_data_len >= 1);
+	/** @todo: should be len - 1 */
+	pull_val = seg_data_len / 2;
+	push_val = room;
+
+	/* Shrink, grow back past the original, undo, no-op */
+	_verify_headroom_shift(&pkt, -pull_val);
+	_verify_headroom_shift(&pkt, push_val + pull_val);
+	_verify_headroom_shift(&pkt, -push_val);
+	_verify_headroom_shift(&pkt, 0);
+
+	if (segmentation_supported) {
+		/* Push beyond the headroom to force an extend */
+		push_val = room * 2;
+		_verify_headroom_shift(&pkt, push_val);
+		_verify_headroom_shift(&pkt, 0);
+		_verify_headroom_shift(&pkt, -push_val);
+	}
+
+	odp_packet_free(pkt);
+}
+
+/* Shift the packet tail by 'shift' bytes (positive = grow, negative =
+ * shrink) and verify tailroom, last-segment length, packet length and
+ * tail pointers. Growing must not disturb the L2/L3/L4 offsets. Uses
+ * push/pull when the shift fits, extend/trunc otherwise (which may
+ * reallocate, hence *pkt is passed by reference). */
+static void _verify_tailroom_shift(odp_packet_t *pkt,
+				   int shift)
+{
+	odp_packet_seg_t seg;
+	uint32_t room;
+	uint32_t seg_data_len, pkt_data_len, seg_len;
+	void *tail;
+	char *tail_orig;
+	int extended, rc;
+
+	room = odp_packet_tailroom(*pkt);
+	pkt_data_len = odp_packet_len(*pkt);
+	tail_orig = odp_packet_tail(*pkt);
+
+	seg = odp_packet_last_seg(*pkt);
+	CU_ASSERT(seg != ODP_PACKET_SEG_INVALID);
+	seg_data_len = odp_packet_seg_data_len(*pkt, seg);
+
+	if (shift >= 0) {
+		uint32_t l2_off, l3_off, l4_off;
+
+		/* Snapshot layer offsets; a tail grow must not move them */
+		l2_off = odp_packet_l2_offset(*pkt);
+		l3_off = odp_packet_l3_offset(*pkt);
+		l4_off = odp_packet_l4_offset(*pkt);
+
+		if ((uint32_t)abs(shift) <= room) {
+			tail = odp_packet_push_tail(*pkt, shift);
+			extended = 0;
+		} else {
+			rc = odp_packet_extend_tail(pkt, shift,
+						    &tail, &seg_len);
+			extended = 1;
+		}
+
+		CU_ASSERT(l2_off == odp_packet_l2_offset(*pkt));
+		CU_ASSERT(l3_off == odp_packet_l3_offset(*pkt));
+		CU_ASSERT(l4_off == odp_packet_l4_offset(*pkt));
+	} else {
+		if ((uint32_t)abs(shift) <= seg_data_len) {
+			tail = odp_packet_pull_tail(*pkt, -shift);
+			extended = 0;
+		} else {
+			rc = odp_packet_trunc_tail(pkt, -shift,
+						   &tail, &seg_len);
+			extended = 1;
+		}
+	}
+
+	CU_ASSERT_PTR_NOT_NULL(tail);
+	/* NOTE: rc is only assigned on the extend/trunc paths and only
+	 * read when extended == 1 */
+	if (extended) {
+		CU_ASSERT(rc >= 0);
+		CU_ASSERT(odp_packet_last_seg(*pkt) != seg);
+		seg = odp_packet_last_seg(*pkt);
+		if (shift > 0) {
+			/* New last segment holds only the overflow part */
+			CU_ASSERT(odp_packet_seg_data_len(*pkt, seg) ==
+				  shift - room);
+		} else {
+			CU_ASSERT(odp_packet_tailroom(*pkt) >=
+				  (uint32_t)abs(shift) - seg_data_len);
+			CU_ASSERT(seg_len == odp_packet_tailroom(*pkt));
+		}
+	} else {
+		CU_ASSERT(odp_packet_seg_data_len(*pkt, seg) ==
+			  seg_data_len + shift);
+		CU_ASSERT(odp_packet_tailroom(*pkt) == room - shift);
+		/* odp_packet_tail() is undefined with zero tailroom;
+		 * skip the pointer checks in that case */
+		if (room == 0 || (room - shift) == 0)
+			return;
+		if (shift >= 0) {
+			CU_ASSERT(odp_packet_tail(*pkt) ==
+				  tail_orig + shift);
+		} else {
+			CU_ASSERT(tail == tail_orig + shift);
+		}
+	}
+
+	CU_ASSERT(odp_packet_len(*pkt) == pkt_data_len + shift);
+	if (shift >= 0) {
+		CU_ASSERT(tail == tail_orig);
+	} else {
+		CU_ASSERT(odp_packet_tail(*pkt) == tail);
+	}
+}
+
+/* Exercise tailroom push/pull (and extend/trunc when segmentation is
+ * supported) on a private copy of the test packet. */
+void packet_test_tailroom(void)
+{
+	odp_packet_t pkt = odp_packet_copy(test_packet,
+					   odp_packet_pool(test_packet));
+	odp_packet_seg_t segment;
+	uint32_t room;
+	uint32_t seg_data_len;
+	uint32_t push_val, pull_val;
+	odp_pool_capability_t capa;
+
+	CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
+
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+	segment = odp_packet_last_seg(pkt);
+	CU_ASSERT(segment != ODP_PACKET_SEG_INVALID);
+	room = odp_packet_tailroom(pkt);
+	CU_ASSERT(room >= capa.pkt.min_tailroom);
+
+	seg_data_len = odp_packet_seg_data_len(pkt, segment);
+	CU_ASSERT(seg_data_len >= 1);
+	/** @todo: should be len - 1 */
+	pull_val = seg_data_len / 2;
+	/* Leave one byte in a tailroom for odp_packet_tail() to succeed */
+	push_val = (room > 0) ? room - 1 : room;
+
+	/* Shrink, grow back past the original, undo, no-op */
+	_verify_tailroom_shift(&pkt, -pull_val);
+	_verify_tailroom_shift(&pkt, push_val + pull_val);
+	_verify_tailroom_shift(&pkt, -push_val);
+	_verify_tailroom_shift(&pkt, 0);
+
+	if (segmentation_supported) {
+		_verify_tailroom_shift(&pkt, pull_val);
+		_verify_tailroom_shift(&pkt, 0);
+		_verify_tailroom_shift(&pkt, -pull_val);
+	}
+
+	odp_packet_free(pkt);
+}
+
+/* Walk every segment of a packet, checking per-segment data pointer and
+ * length sanity, that the iterator visits exactly num_segs segments,
+ * and that the per-segment lengths sum to the packet length. */
+static void _packet_walk_segments(odp_packet_t pkt)
+{
+	int num_segs = odp_packet_num_segs(pkt);
+	int seg_index = 0;
+	uint32_t data_len = 0;
+	odp_packet_seg_t seg = odp_packet_first_seg(pkt);
+
+	while (seg_index < num_segs && seg != ODP_PACKET_SEG_INVALID) {
+		uint32_t seg_data_len;
+		void *seg_data;
+
+		seg_data_len = odp_packet_seg_data_len(pkt, seg);
+		seg_data = odp_packet_seg_data(pkt, seg);
+
+		CU_ASSERT(seg_data_len > 0);
+		CU_ASSERT_PTR_NOT_NULL(seg_data);
+		CU_ASSERT(odp_packet_seg_to_u64(seg) !=
+			  odp_packet_seg_to_u64(ODP_PACKET_SEG_INVALID));
+
+		data_len += seg_data_len;
+
+		/** @todo: touch memory in a segment */
+		seg_index++;
+		seg = odp_packet_next_seg(pkt, seg);
+	}
+
+	CU_ASSERT(seg_index == num_segs);
+	CU_ASSERT(data_len <= odp_packet_buf_len(pkt));
+	CU_ASSERT(data_len == odp_packet_len(pkt));
+
+	/* After visiting every segment the iterator must be exhausted */
+	if (seg_index == num_segs)
+		CU_ASSERT(seg == ODP_PACKET_SEG_INVALID);
+}
+
+/* Segment iteration test for both the single-segment test packet and
+ * the (possibly) segmented one.
+ * Refactor: the original duplicated the identical traversal loop for
+ * each packet; it is now factored into _packet_walk_segments(). */
+void packet_test_segments(void)
+{
+	int num_segs;
+	odp_packet_t pkt = test_packet;
+	odp_packet_t seg_pkt = segmented_test_packet;
+
+	CU_ASSERT(odp_packet_is_valid(pkt) == 1);
+
+	num_segs = odp_packet_num_segs(pkt);
+	CU_ASSERT(num_segs != 0);
+
+	if (odp_packet_is_segmented(pkt)) {
+		CU_ASSERT(num_segs > 1);
+	} else {
+		CU_ASSERT(num_segs == 1);
+	}
+
+	/* test_packet was sized to fit one segment; seg_pkt must be
+	 * segmented whenever the implementation supports segmentation */
+	CU_ASSERT(odp_packet_is_segmented(pkt) == 0);
+	if (segmentation_supported)
+		CU_ASSERT(odp_packet_is_segmented(seg_pkt) == 1);
+
+	_packet_walk_segments(pkt);
+	_packet_walk_segments(seg_pkt);
+}
+
+/* The last segment must exist and must have no successor. */
+void packet_test_segment_last(void)
+{
+	odp_packet_seg_t last;
+	odp_packet_t pkt = test_packet;
+
+	last = odp_packet_last_seg(pkt);
+	CU_ASSERT_FATAL(last != ODP_PACKET_SEG_INVALID);
+
+	last = odp_packet_next_seg(pkt, last);
+	CU_ASSERT(last == ODP_PACKET_SEG_INVALID);
+}
+
+/* Toggle the odp_packet_has_<flag> input flag off and on, asserting
+ * the getter reflects each state. */
+#define TEST_INFLAG(packet, flag) \
+do { \
+	odp_packet_has_##flag##_set(packet, 0); \
+	CU_ASSERT(odp_packet_has_##flag(packet) == 0); \
+	odp_packet_has_##flag##_set(packet, 1); \
+	CU_ASSERT(odp_packet_has_##flag(packet) != 0); \
+} while (0)
+
+/* Set/clear every protocol input flag and verify the getters. */
+void packet_test_in_flags(void)
+{
+	odp_packet_t pkt = test_packet;
+
+	TEST_INFLAG(pkt, l2);
+	TEST_INFLAG(pkt, l3);
+	TEST_INFLAG(pkt, l4);
+	TEST_INFLAG(pkt, eth);
+	TEST_INFLAG(pkt, eth_bcast);
+	TEST_INFLAG(pkt, eth_mcast);
+	TEST_INFLAG(pkt, jumbo);
+	TEST_INFLAG(pkt, vlan);
+	TEST_INFLAG(pkt, vlan_qinq);
+	TEST_INFLAG(pkt, arp);
+	TEST_INFLAG(pkt, ipv4);
+	TEST_INFLAG(pkt, ipv6);
+	TEST_INFLAG(pkt, ip_bcast);
+	TEST_INFLAG(pkt, ip_mcast);
+	TEST_INFLAG(pkt, ipfrag);
+	TEST_INFLAG(pkt, ipopt);
+	TEST_INFLAG(pkt, ipsec);
+	TEST_INFLAG(pkt, udp);
+	TEST_INFLAG(pkt, tcp);
+	TEST_INFLAG(pkt, sctp);
+	TEST_INFLAG(pkt, icmp);
+}
+
+/* Error flags cannot be predicted on an application-allocated packet;
+ * only check each accessor returns a boolean. */
+void packet_test_error_flags(void)
+{
+	odp_packet_t pkt = test_packet;
+	int err;
+
+	/**
+	 * The packet have not been classified so it doesn't have error flags
+	 * properly set. Just check that functions return one of allowed values.
+	 * @todo: try with known good and bad packets.
+	 */
+	err = odp_packet_has_error(pkt);
+	CU_ASSERT(err == 0 || err == 1);
+
+	err = odp_packet_has_l2_error(pkt);
+	CU_ASSERT(err == 0 || err == 1);
+
+	err = odp_packet_has_l3_error(pkt);
+	CU_ASSERT(err == 0 || err == 1);
+
+	err = odp_packet_has_l4_error(pkt);
+	CU_ASSERT(err == 0 || err == 1);
+}
+
+/* Snapshot of per-packet metadata (layer offsets plus user context).
+ * NOTE(review): not referenced anywhere in this visible chunk —
+ * confirm it is used elsewhere or remove. */
+struct packet_metadata {
+	uint32_t l2_off;
+	uint32_t l3_off;
+	uint32_t l4_off;
+	void *usr_ptr;
+	uint64_t usr_u64;
+};
+
+/* Insert data into the middle of a packet, then remove it again,
+ * checking the length changes and that user pointer / user area
+ * contents survive both reallocating operations. */
+void packet_test_add_rem_data(void)
+{
+	odp_packet_t pkt, new_pkt;
+	uint32_t pkt_len, offset, add_len;
+	void *usr_ptr;
+	struct udata_struct *udat, *new_udat;
+	int ret;
+	odp_pool_capability_t capa;
+	uint32_t min_seg_len;
+
+	CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
+
+	min_seg_len = capa.pkt.min_seg_len;
+
+	pkt = odp_packet_alloc(packet_pool, packet_len);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+	pkt_len = odp_packet_len(pkt);
+	usr_ptr = odp_packet_user_ptr(pkt);
+	udat = odp_packet_user_area(pkt);
+	CU_ASSERT(odp_packet_user_area_size(pkt) ==
+		  sizeof(struct udata_struct));
+	memcpy(udat, &test_packet_udata, sizeof(struct udata_struct));
+
+	offset = pkt_len / 2;
+
+	if (segmentation_supported) {
+		/* Insert one more packet length in the middle of a packet */
+		add_len = min_seg_len;
+	} else {
+		/* Add diff between largest and smaller packets
+		 * which is at least tailroom */
+		add_len = segmented_packet_len - packet_len;
+	}
+
+	new_pkt = pkt;
+	/* add/rem may reallocate; on failure pkt is still the original */
+	ret = odp_packet_add_data(&new_pkt, offset, add_len);
+	CU_ASSERT(ret >= 0);
+	if (ret < 0)
+		goto free_packet;
+	CU_ASSERT(odp_packet_len(new_pkt) == pkt_len + add_len);
+	/* Verify that user metadata is preserved */
+	CU_ASSERT(odp_packet_user_ptr(new_pkt) == usr_ptr);
+
+	/* Verify that user metadata has been preserved */
+	new_udat = odp_packet_user_area(new_pkt);
+	CU_ASSERT_PTR_NOT_NULL(new_udat);
+	CU_ASSERT(odp_packet_user_area_size(new_pkt) ==
+		  sizeof(struct udata_struct));
+	CU_ASSERT(memcmp(new_udat, &test_packet_udata,
+			 sizeof(struct udata_struct)) == 0);
+
+	pkt = new_pkt;
+
+	pkt_len = odp_packet_len(pkt);
+	usr_ptr = odp_packet_user_ptr(pkt);
+
+	ret = odp_packet_rem_data(&new_pkt, offset, add_len);
+	CU_ASSERT(ret >= 0);
+	if (ret < 0)
+		goto free_packet;
+	CU_ASSERT(odp_packet_len(new_pkt) == pkt_len - add_len);
+	CU_ASSERT(odp_packet_user_ptr(new_pkt) == usr_ptr);
+
+	/* Verify that user metadata has been preserved */
+	new_udat = odp_packet_user_area(new_pkt);
+	CU_ASSERT_PTR_NOT_NULL(new_udat);
+	CU_ASSERT(odp_packet_user_area_size(new_pkt) ==
+		  sizeof(struct udata_struct));
+	CU_ASSERT(memcmp(new_udat, &test_packet_udata,
+			 sizeof(struct udata_struct)) == 0);
+
+	pkt = new_pkt;
+
+free_packet:
+	odp_packet_free(pkt);
+}
+
+/* Assert two packets agree on a has_<flag> boolean */
+#define COMPARE_HAS_INFLAG(p1, p2, flag) \
+	CU_ASSERT(odp_packet_has_##flag(p1) == odp_packet_has_##flag(p2))
+
+/* Assert two packets agree on an odp_packet_<field> accessor value */
+#define COMPARE_INFLAG(p1, p2, flag) \
+	CU_ASSERT(odp_packet_##flag(p1) == odp_packet_##flag(p2))
+
+/* Assert that every input flag and QoS field matches between the two
+ * packets (used to verify that copy operations preserve metadata). */
+static void _packet_compare_inflags(odp_packet_t pkt1, odp_packet_t pkt2)
+{
+	COMPARE_HAS_INFLAG(pkt1, pkt2, l2);
+	COMPARE_HAS_INFLAG(pkt1, pkt2, l3);
+	COMPARE_HAS_INFLAG(pkt1, pkt2, l4);
+	COMPARE_HAS_INFLAG(pkt1, pkt2, eth);
+	COMPARE_HAS_INFLAG(pkt1, pkt2, eth_bcast);
+	COMPARE_HAS_INFLAG(pkt1, pkt2, eth_mcast);
+	COMPARE_HAS_INFLAG(pkt1, pkt2, jumbo);
+	COMPARE_HAS_INFLAG(pkt1, pkt2, vlan);
+	COMPARE_HAS_INFLAG(pkt1, pkt2, vlan_qinq);
+	COMPARE_HAS_INFLAG(pkt1, pkt2, arp);
+	COMPARE_HAS_INFLAG(pkt1, pkt2, ipv4);
+	COMPARE_HAS_INFLAG(pkt1, pkt2, ipv6);
+	COMPARE_HAS_INFLAG(pkt1, pkt2, ip_bcast);
+	COMPARE_HAS_INFLAG(pkt1, pkt2, ip_mcast);
+	COMPARE_HAS_INFLAG(pkt1, pkt2, ipfrag);
+	COMPARE_HAS_INFLAG(pkt1, pkt2, ipopt);
+	COMPARE_HAS_INFLAG(pkt1, pkt2, ipsec);
+	COMPARE_HAS_INFLAG(pkt1, pkt2, udp);
+	COMPARE_HAS_INFLAG(pkt1, pkt2, tcp);
+	COMPARE_HAS_INFLAG(pkt1, pkt2, sctp);
+	COMPARE_HAS_INFLAG(pkt1, pkt2, icmp);
+	COMPARE_HAS_INFLAG(pkt1, pkt2, flow_hash);
+	COMPARE_HAS_INFLAG(pkt1, pkt2, ts);
+
+	COMPARE_INFLAG(pkt1, pkt2, color);
+	COMPARE_INFLAG(pkt1, pkt2, drop_eligible);
+	COMPARE_INFLAG(pkt1, pkt2, shaper_len_adjust);
+}
+
+/* Assert the two packets have equal length and byte-identical data,
+ * comparing segment by segment via odp_packet_offset(). */
+static void _packet_compare_data(odp_packet_t pkt1, odp_packet_t pkt2)
+{
+	uint32_t len = odp_packet_len(pkt1);
+	uint32_t offset = 0;
+	uint32_t seglen1, seglen2, cmplen;
+
+	CU_ASSERT_FATAL(len == odp_packet_len(pkt2));
+
+	while (len > 0) {
+		void *pkt1map = odp_packet_offset(pkt1, offset, &seglen1, NULL);
+		void *pkt2map = odp_packet_offset(pkt2, offset, &seglen2, NULL);
+
+		CU_ASSERT_PTR_NOT_NULL_FATAL(pkt1map);
+		CU_ASSERT_PTR_NOT_NULL_FATAL(pkt2map);
+		/* Compare only the overlap of the two current segments */
+		cmplen = seglen1 < seglen2 ? seglen1 : seglen2;
+		CU_ASSERT(!memcmp(pkt1map, pkt2map, cmplen));
+
+		offset += cmplen;
+		len -= cmplen;
+	}
+}
+
+/* Assert the overlapping portion of the two packets' user areas is
+ * byte-identical (no-op when either user area is empty). */
+static void _packet_compare_udata(odp_packet_t pkt1, odp_packet_t pkt2)
+{
+	void *area1 = odp_packet_user_area(pkt1);
+	void *area2 = odp_packet_user_area(pkt2);
+	uint32_t size1 = odp_packet_user_area_size(pkt1);
+	uint32_t size2 = odp_packet_user_area_size(pkt2);
+	uint32_t common = (size1 <= size2) ? size1 : size2;
+
+	if (common)
+		CU_ASSERT(!memcmp(area1, area2, common));
+}
+
+/* Assert that len bytes starting at off1 in pkt1 equal len bytes
+ * starting at off2 in pkt2, comparing across segment boundaries.
+ * Silently does nothing when either range exceeds its packet — callers
+ * pass ranges they have already validated. */
+static void _packet_compare_offset(odp_packet_t pkt1, uint32_t off1,
+				   odp_packet_t pkt2, uint32_t off2,
+				   uint32_t len)
+{
+	uint32_t seglen1, seglen2, cmplen;
+
+	if (off1 + len > odp_packet_len(pkt1) ||
+	    off2 + len > odp_packet_len(pkt2))
+		return;
+
+	while (len > 0) {
+		void *pkt1map = odp_packet_offset(pkt1, off1, &seglen1, NULL);
+		void *pkt2map = odp_packet_offset(pkt2, off2, &seglen2, NULL);
+
+		CU_ASSERT_PTR_NOT_NULL_FATAL(pkt1map);
+		CU_ASSERT_PTR_NOT_NULL_FATAL(pkt2map);
+		/* Clamp to the shorter of the two mapped segments */
+		cmplen = seglen1 < seglen2 ? seglen1 : seglen2;
+		if (len < cmplen)
+			cmplen = len;
+		CU_ASSERT(!memcmp(pkt1map, pkt2map, cmplen));
+
+		off1 += cmplen;
+		off2 += cmplen;
+		len -= cmplen;
+	}
+}
+
+/* Test odp_packet_copy / copy_part / copy_data / move_data, including
+ * user-area size interaction with smaller/equal/larger uarea pools. */
+void packet_test_copy(void)
+{
+	odp_packet_t pkt;
+	odp_packet_t pkt_copy, pkt_part;
+	odp_pool_t pool;
+	uint32_t i, plen, seg_len, src_offset, dst_offset;
+	void *pkt_data;
+
+	/* Copy into a pool whose user area is too small must fail */
+	pkt = odp_packet_copy(test_packet, packet_pool_no_uarea);
+	CU_ASSERT(pkt == ODP_PACKET_INVALID);
+	if (pkt != ODP_PACKET_INVALID)
+		odp_packet_free(pkt);
+
+	pkt = odp_packet_copy(test_packet, odp_packet_pool(test_packet));
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	_packet_compare_data(pkt, test_packet);
+	pool = odp_packet_pool(pkt);
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+	pkt_copy = odp_packet_copy(pkt, pool);
+	CU_ASSERT_FATAL(pkt_copy != ODP_PACKET_INVALID);
+
+	CU_ASSERT(pkt != pkt_copy);
+	CU_ASSERT(odp_packet_data(pkt) != odp_packet_data(pkt_copy));
+	CU_ASSERT(odp_packet_len(pkt) == odp_packet_len(pkt_copy));
+
+	_packet_compare_inflags(pkt, pkt_copy);
+	_packet_compare_data(pkt, pkt_copy);
+	CU_ASSERT(odp_packet_user_area_size(pkt) ==
+		  odp_packet_user_area_size(test_packet));
+	_packet_compare_udata(pkt, pkt_copy);
+	odp_packet_free(pkt_copy);
+	odp_packet_free(pkt);
+
+	/* Copy into a pool with a larger user area must succeed */
+	pkt = odp_packet_copy(test_packet, packet_pool_double_uarea);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	_packet_compare_data(pkt, test_packet);
+	pool = odp_packet_pool(pkt);
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+	pkt_copy = odp_packet_copy(pkt, pool);
+	CU_ASSERT_FATAL(pkt_copy != ODP_PACKET_INVALID);
+
+	CU_ASSERT(pkt != pkt_copy);
+	CU_ASSERT(odp_packet_data(pkt) != odp_packet_data(pkt_copy));
+	CU_ASSERT(odp_packet_len(pkt) == odp_packet_len(pkt_copy));
+
+	_packet_compare_inflags(pkt, pkt_copy);
+	_packet_compare_data(pkt, pkt_copy);
+	CU_ASSERT(odp_packet_user_area_size(pkt) ==
+		  2 * odp_packet_user_area_size(test_packet));
+	_packet_compare_udata(pkt, pkt_copy);
+	_packet_compare_udata(pkt, test_packet);
+	odp_packet_free(pkt_copy);
+
+	/* Now test copy_part */
+	/* Out-of-range requests must fail */
+	pkt_part = odp_packet_copy_part(pkt, 0, odp_packet_len(pkt) + 1, pool);
+	CU_ASSERT(pkt_part == ODP_PACKET_INVALID);
+	pkt_part = odp_packet_copy_part(pkt, odp_packet_len(pkt), 1, pool);
+	CU_ASSERT(pkt_part == ODP_PACKET_INVALID);
+
+	pkt_part = odp_packet_copy_part(pkt, 0, odp_packet_len(pkt), pool);
+	CU_ASSERT_FATAL(pkt_part != ODP_PACKET_INVALID);
+	CU_ASSERT(pkt != pkt_part);
+	CU_ASSERT(odp_packet_data(pkt) != odp_packet_data(pkt_part));
+	CU_ASSERT(odp_packet_len(pkt) == odp_packet_len(pkt_part));
+
+	_packet_compare_data(pkt, pkt_part);
+	odp_packet_free(pkt_part);
+
+	/* Partial copies at several offsets */
+	plen = odp_packet_len(pkt);
+	for (i = 0; i < plen / 2; i += 5) {
+		pkt_part = odp_packet_copy_part(pkt, i, plen / 4, pool);
+		CU_ASSERT_FATAL(pkt_part != ODP_PACKET_INVALID);
+		CU_ASSERT(odp_packet_len(pkt_part) == plen / 4);
+		_packet_compare_offset(pkt_part, 0, pkt, i, plen / 4);
+		odp_packet_free(pkt_part);
+	}
+
+	/* Test copy and move apis */
+	CU_ASSERT(odp_packet_copy_data(pkt, 0, plen - plen / 8, plen / 8) == 0);
+	_packet_compare_offset(pkt, 0, pkt, plen - plen / 8, plen / 8);
+	_packet_compare_offset(pkt, 0, test_packet, plen - plen / 8, plen / 8);
+
+	/* Test segment crossing if we support segments */
+	pkt_data = odp_packet_offset(pkt, 0, &seg_len, NULL);
+	CU_ASSERT(pkt_data != NULL);
+
+	/* Pick src/dst straddling the first segment boundary when the
+	 * packet is segmented, otherwise within the single segment */
+	if (seg_len < plen) {
+		src_offset = seg_len - 15;
+		dst_offset = seg_len - 5;
+	} else {
+		src_offset = seg_len - 40;
+		dst_offset = seg_len - 25;
+	}
+
+	pkt_part = odp_packet_copy_part(pkt, src_offset, 20, pool);
+	CU_ASSERT(odp_packet_move_data(pkt, dst_offset, src_offset, 20) == 0);
+	_packet_compare_offset(pkt, dst_offset, pkt_part, 0, 20);
+
+	odp_packet_free(pkt_part);
+	odp_packet_free(pkt);
+}
+
+/* Test copy to/from plain memory and packet-to-packet copies, including
+ * out-of-range failures and a segmented-source copy. */
+void packet_test_copydata(void)
+{
+	odp_packet_t pkt = test_packet;
+	uint32_t pkt_len = odp_packet_len(pkt);
+	uint8_t *data_buf;
+	uint32_t i;
+	int correct_memory;
+
+	CU_ASSERT_FATAL(pkt_len > 0);
+
+	data_buf = malloc(pkt_len);
+	CU_ASSERT_PTR_NOT_NULL_FATAL(data_buf);
+
+	for (i = 0; i < pkt_len; i++)
+		data_buf[i] = (uint8_t)i;
+
+	/* Write a known pattern, zero the buffer, read it back */
+	CU_ASSERT(!odp_packet_copy_from_mem(pkt, 0, pkt_len, data_buf));
+	memset(data_buf, 0, pkt_len);
+	CU_ASSERT(!odp_packet_copy_to_mem(pkt, 0, pkt_len, data_buf));
+
+	correct_memory = 1;
+	for (i = 0; i < pkt_len; i++)
+		if (data_buf[i] != (uint8_t)i) {
+			correct_memory = 0;
+			break;
+		}
+	CU_ASSERT(correct_memory);
+
+	free(data_buf);
+
+	pkt = odp_packet_alloc(odp_packet_pool(test_packet), pkt_len / 2);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+	/* Destination too small / offset out of range must fail */
+	CU_ASSERT(odp_packet_copy_from_pkt(pkt, 0, test_packet, 0,
+					   pkt_len) < 0);
+	CU_ASSERT(odp_packet_copy_from_pkt(pkt, pkt_len, test_packet, 0,
+					   1) < 0);
+
+	/* Byte-at-a-time copy must also work */
+	for (i = 0; i < pkt_len / 2; i++) {
+		CU_ASSERT(odp_packet_copy_from_pkt(pkt, i, test_packet, i,
+						   1) == 0);
+	}
+
+	_packet_compare_offset(pkt, 0, test_packet, 0, pkt_len / 2);
+	odp_packet_free(pkt);
+
+	/* Copy from an offset inside the (possibly segmented) source */
+	pkt = odp_packet_alloc(odp_packet_pool(segmented_test_packet),
+			       odp_packet_len(segmented_test_packet) / 2);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+	CU_ASSERT(odp_packet_copy_from_pkt(pkt, 0, segmented_test_packet,
+					   odp_packet_len(pkt) / 4,
+					   odp_packet_len(pkt)) == 0);
+	_packet_compare_offset(pkt, 0, segmented_test_packet,
+			       odp_packet_len(pkt) / 4,
+			       odp_packet_len(pkt));
+	odp_packet_free(pkt);
+}
+
+/* Verify odp_packet_concat() and odp_packet_split(): self-concat, split at
+ * arbitrary offsets, and that concat of the split pieces (in reverse order
+ * of splitting) reconstructs the original data, for both a plain and a
+ * segmented packet. */
+void packet_test_concatsplit(void)
+{
+	odp_packet_t pkt, pkt2;
+	uint32_t pkt_len;
+	odp_packet_t splits[4];
+
+	pkt = odp_packet_copy(test_packet, odp_packet_pool(test_packet));
+	pkt_len = odp_packet_len(test_packet);
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+	/* Concatenating a packet to itself doubles its length and
+	 * duplicates its contents. */
+	CU_ASSERT(odp_packet_concat(&pkt, pkt) == 0);
+	CU_ASSERT(odp_packet_len(pkt) == pkt_len * 2);
+	_packet_compare_offset(pkt, 0, pkt, pkt_len, pkt_len);
+
+	/* Splitting at the midpoint yields two distinct equal halves. */
+	CU_ASSERT(odp_packet_split(&pkt, pkt_len, &pkt2) == 0);
+	CU_ASSERT(pkt != pkt2);
+	CU_ASSERT(odp_packet_data(pkt) != odp_packet_data(pkt2));
+	CU_ASSERT(odp_packet_len(pkt) == odp_packet_len(pkt2));
+	_packet_compare_data(pkt, pkt2);
+	_packet_compare_data(pkt, test_packet);
+
+	odp_packet_free(pkt);
+	odp_packet_free(pkt2);
+
+	/* Same exercise on a segmented packet. */
+	pkt = odp_packet_copy(segmented_test_packet,
+			      odp_packet_pool(segmented_test_packet));
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	pkt_len = odp_packet_len(pkt);
+
+	_packet_compare_data(pkt, segmented_test_packet);
+	CU_ASSERT(odp_packet_split(&pkt, pkt_len / 2, &splits[0]) == 0);
+	CU_ASSERT(pkt != splits[0]);
+	CU_ASSERT(odp_packet_data(pkt) != odp_packet_data(splits[0]));
+	CU_ASSERT(odp_packet_len(pkt) == pkt_len / 2);
+	CU_ASSERT(odp_packet_len(pkt) + odp_packet_len(splits[0]) == pkt_len);
+
+	_packet_compare_offset(pkt, 0, segmented_test_packet, 0, pkt_len / 2);
+	_packet_compare_offset(splits[0], 0, segmented_test_packet,
+			       pkt_len / 2, odp_packet_len(splits[0]));
+
+	/* Re-concat restores the original length and contents. */
+	CU_ASSERT(odp_packet_concat(&pkt, splits[0]) == 0);
+	_packet_compare_offset(pkt, 0, segmented_test_packet, 0, pkt_len / 2);
+	_packet_compare_offset(pkt, pkt_len / 2, segmented_test_packet,
+			       pkt_len / 2, pkt_len / 2);
+	_packet_compare_offset(pkt, 0, segmented_test_packet, 0,
+			       pkt_len);
+
+	CU_ASSERT(odp_packet_len(pkt) == odp_packet_len(segmented_test_packet));
+	_packet_compare_data(pkt, segmented_test_packet);
+
+	/* Split three times, then concat back in reverse order. */
+	CU_ASSERT(odp_packet_split(&pkt, pkt_len / 2, &splits[0]) == 0);
+	CU_ASSERT(odp_packet_split(&pkt, pkt_len / 4, &splits[1]) == 0);
+	CU_ASSERT(odp_packet_split(&pkt, pkt_len / 8, &splits[2]) == 0);
+
+	CU_ASSERT(odp_packet_len(splits[0]) + odp_packet_len(splits[1]) +
+		  odp_packet_len(splits[2]) + odp_packet_len(pkt) == pkt_len);
+
+	CU_ASSERT(odp_packet_concat(&pkt, splits[2]) == 0);
+	CU_ASSERT(odp_packet_concat(&pkt, splits[1]) == 0);
+	CU_ASSERT(odp_packet_concat(&pkt, splits[0]) == 0);
+
+	CU_ASSERT(odp_packet_len(pkt) == odp_packet_len(segmented_test_packet));
+	_packet_compare_data(pkt, segmented_test_packet);
+
+	odp_packet_free(pkt);
+}
+
+/* Verify odp_packet_align(): realignment for contiguous addressability
+ * across a segment boundary, and realignment to a requested power-of-two
+ * address alignment, neither changing packet length or contents. */
+void packet_test_align(void)
+{
+	odp_packet_t pkt;
+	uint32_t pkt_len, seg_len, offset, aligned_seglen;
+	void *pkt_data, *aligned_data;
+	const uint32_t max_align = 32;
+
+	pkt = odp_packet_copy_part(segmented_test_packet, 0,
+				   odp_packet_len(segmented_test_packet) / 2,
+				   odp_packet_pool(segmented_test_packet));
+	CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+	pkt_len = odp_packet_len(pkt);
+	seg_len = odp_packet_seg_len(pkt);
+
+	if (odp_packet_is_segmented(pkt)) {
+		/* Can't address across segment boundaries */
+		CU_ASSERT(odp_packet_align(&pkt, 0, pkt_len, 0) < 0);
+
+		/* Pick an offset near the end of the first segment so the
+		 * requested span straddles the boundary. */
+		offset = seg_len - 5;
+		(void)odp_packet_offset(pkt, offset, &seg_len, NULL);
+
+		/* Realign for addressability */
+		CU_ASSERT(odp_packet_align(&pkt, offset,
+					   seg_len + 2, 0) >= 0);
+
+		/* Alignment doesn't change packet length or contents */
+		CU_ASSERT(odp_packet_len(pkt) == pkt_len);
+		(void)odp_packet_offset(pkt, offset, &aligned_seglen, NULL);
+		_packet_compare_offset(pkt, offset,
+				       segmented_test_packet, offset,
+				       aligned_seglen);
+
+		/* Verify requested contiguous addressabilty */
+		CU_ASSERT(aligned_seglen >= seg_len + 2);
+	}
+
+	/* Get a misaligned address */
+	pkt_data = odp_packet_offset(pkt, 0, &seg_len, NULL);
+	offset = seg_len - 5;
+	pkt_data = odp_packet_offset(pkt, offset, &seg_len, NULL);
+	if ((uintptr_t)pkt_data % max_align == 0) {
+		/* Already aligned by chance: step back one byte to force
+		 * a misaligned starting address. */
+		offset--;
+		pkt_data = odp_packet_offset(pkt, offset, &seg_len, NULL);
+	}
+
+	/* Realign for alignment */
+	CU_ASSERT(odp_packet_align(&pkt, offset, 1, max_align) >= 0);
+	aligned_data = odp_packet_offset(pkt, offset, &aligned_seglen, NULL);
+
+	CU_ASSERT(odp_packet_len(pkt) == pkt_len);
+	_packet_compare_offset(pkt, offset, segmented_test_packet, offset,
+			       aligned_seglen);
+	CU_ASSERT((uintptr_t)aligned_data % max_align == 0);
+
+	odp_packet_free(pkt);
+}
+
+/* Verify odp_packet_offset() addressing: offset 0, mid-segment, last byte
+ * of a segment, last byte of the packet, and NULL [out] arguments. */
+void packet_test_offset(void)
+{
+	odp_packet_t pkt = test_packet;
+	uint32_t seg_len, full_seg_len;
+	odp_packet_seg_t seg;
+	uint8_t *ptr, *start_ptr;
+	uint32_t offset;
+
+	/* Offset 0 must match packet data / first-segment accessors. */
+	ptr = odp_packet_offset(pkt, 0, &seg_len, &seg);
+	CU_ASSERT(seg != ODP_PACKET_SEG_INVALID);
+	CU_ASSERT(seg_len > 1);
+	CU_ASSERT(seg_len == odp_packet_seg_len(pkt));
+	CU_ASSERT(seg_len == odp_packet_seg_data_len(pkt, seg));
+	CU_ASSERT_PTR_NOT_NULL(ptr);
+	CU_ASSERT(ptr == odp_packet_data(pkt));
+	CU_ASSERT(ptr == odp_packet_seg_data(pkt, seg));
+
+	/* Query a second byte */
+	start_ptr = ptr;
+	full_seg_len = seg_len;
+	offset = 1;
+
+	ptr = odp_packet_offset(pkt, offset, &seg_len, NULL);
+	CU_ASSERT_PTR_NOT_NULL(ptr);
+	CU_ASSERT(ptr == start_ptr + offset);
+	CU_ASSERT(seg_len == full_seg_len - offset);
+
+	/* Query the last byte in a segment */
+	offset = full_seg_len - 1;
+
+	ptr = odp_packet_offset(pkt, offset, &seg_len, NULL);
+	CU_ASSERT_PTR_NOT_NULL(ptr);
+	CU_ASSERT(ptr == start_ptr + offset);
+	CU_ASSERT(seg_len == full_seg_len - offset);
+
+	/* Query the last byte in a packet */
+	offset = odp_packet_len(pkt) - 1;
+	ptr = odp_packet_offset(pkt, offset, &seg_len, NULL);
+	CU_ASSERT_PTR_NOT_NULL(ptr);
+	CU_ASSERT(seg_len == 1);
+
+	/* Pass NULL to [out] arguments */
+	ptr = odp_packet_offset(pkt, 0, NULL, NULL);
+	CU_ASSERT_PTR_NOT_NULL(ptr);
+}
+
+/* CUnit registry: every packet test case, in execution order. */
+odp_testinfo_t packet_suite[] = {
+	ODP_TEST_INFO(packet_test_alloc_free),
+	ODP_TEST_INFO(packet_test_alloc_free_multi),
+	ODP_TEST_INFO(packet_test_alloc_segmented),
+	ODP_TEST_INFO(packet_test_basic_metadata),
+	ODP_TEST_INFO(packet_test_debug),
+	ODP_TEST_INFO(packet_test_segments),
+	ODP_TEST_INFO(packet_test_length),
+	ODP_TEST_INFO(packet_test_prefetch),
+	ODP_TEST_INFO(packet_test_headroom),
+	ODP_TEST_INFO(packet_test_tailroom),
+	ODP_TEST_INFO(packet_test_context),
+	ODP_TEST_INFO(packet_test_event_conversion),
+	ODP_TEST_INFO(packet_test_layer_offsets),
+	ODP_TEST_INFO(packet_test_segment_last),
+	ODP_TEST_INFO(packet_test_in_flags),
+	ODP_TEST_INFO(packet_test_error_flags),
+	ODP_TEST_INFO(packet_test_add_rem_data),
+	ODP_TEST_INFO(packet_test_copy),
+	ODP_TEST_INFO(packet_test_copydata),
+	ODP_TEST_INFO(packet_test_concatsplit),
+	ODP_TEST_INFO(packet_test_align),
+	ODP_TEST_INFO(packet_test_offset),
+	ODP_TEST_INFO_NULL,
+};
+
+/* Suite table: binds the test array to its init/term fixtures. */
+odp_suiteinfo_t packet_suites[] = {
+	{ .pName = "packet tests",
+	  .pTests = packet_suite,
+	  .pInitFunc = packet_suite_init,
+	  .pCleanupFunc = packet_suite_term,
+	},
+	ODP_SUITE_INFO_NULL,
+};
+
+/* Entry point for the packet validation program: parse common CUnit
+ * options, register the suites, and run them.
+ * Returns 0 on success, non-zero on option/registration/test failure. */
+int packet_main(int argc, char *argv[])
+{
+	int ret;
+
+	/* parse common options: */
+	if (odp_cunit_parse_options(argc, argv))
+		return -1;
+
+	ret = odp_cunit_register(packet_suites);
+
+	if (ret == 0)
+		ret = odp_cunit_run();
+
+	return ret;
+}
diff --git a/test/common_plat/validation/api/packet/packet.h b/test/common_plat/validation/api/packet/packet.h
new file mode 100644
index 000000000..10a377cf0
--- /dev/null
+++ b/test/common_plat/validation/api/packet/packet.h
@@ -0,0 +1,49 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _ODP_TEST_PACKET_H_
+#define _ODP_TEST_PACKET_H_
+
+#include <odp_cunit_common.h>
+
+/* test functions: */
+void packet_test_alloc_free(void);
+void packet_test_alloc_free_multi(void);
+void packet_test_alloc_segmented(void);
+void packet_test_event_conversion(void);
+void packet_test_basic_metadata(void);
+void packet_test_length(void);
+void packet_test_prefetch(void);
+void packet_test_debug(void);
+void packet_test_context(void);
+void packet_test_layer_offsets(void);
+void packet_test_headroom(void);
+void packet_test_tailroom(void);
+void packet_test_segments(void);
+void packet_test_segment_last(void);
+void packet_test_in_flags(void);
+void packet_test_error_flags(void);
+void packet_test_add_rem_data(void);
+void packet_test_copy(void);
+void packet_test_copydata(void);
+void packet_test_concatsplit(void);
+void packet_test_align(void);
+void packet_test_offset(void);
+
+/* test arrays: */
+extern odp_testinfo_t packet_suite[];
+
+/* test array init/term functions: */
+int packet_suite_init(void);
+int packet_suite_term(void);
+
+/* test registry: */
+extern odp_suiteinfo_t packet_suites[];
+
+/* main test program: */
+int packet_main(int argc, char *argv[]);
+
+#endif
diff --git a/test/common_plat/validation/api/packet/packet_main.c b/test/common_plat/validation/api/packet/packet_main.c
new file mode 100644
index 000000000..511bb104b
--- /dev/null
+++ b/test/common_plat/validation/api/packet/packet_main.c
@@ -0,0 +1,12 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "packet.h"
+
+int main(int argc, char *argv[])
+{
+ return packet_main(argc, argv);
+}
diff --git a/test/common_plat/validation/api/pktio/.gitignore b/test/common_plat/validation/api/pktio/.gitignore
new file mode 100644
index 000000000..1a5dd46e4
--- /dev/null
+++ b/test/common_plat/validation/api/pktio/.gitignore
@@ -0,0 +1 @@
+pktio_main
diff --git a/test/common_plat/validation/api/pktio/Makefile.am b/test/common_plat/validation/api/pktio/Makefile.am
new file mode 100644
index 000000000..466d690dc
--- /dev/null
+++ b/test/common_plat/validation/api/pktio/Makefile.am
@@ -0,0 +1,10 @@
+include ../Makefile.inc
+
+noinst_LTLIBRARIES = libtestpktio.la
+libtestpktio_la_SOURCES = pktio.c
+
+test_PROGRAMS = pktio_main$(EXEEXT)
+dist_pktio_main_SOURCES = pktio_main.c
+pktio_main_LDADD = libtestpktio.la $(LIBCUNIT_COMMON) $(LIBODP)
+
+EXTRA_DIST = pktio.h
diff --git a/test/common_plat/validation/api/pktio/pktio.c b/test/common_plat/validation/api/pktio/pktio.c
new file mode 100644
index 000000000..a6a18c352
--- /dev/null
+++ b/test/common_plat/validation/api/pktio/pktio.c
@@ -0,0 +1,2170 @@
+/* Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <odp_api.h>
+#include <odp_cunit_common.h>
+
+#include <odp/helper/eth.h>
+#include <odp/helper/ip.h>
+#include <odp/helper/udp.h>
+
+#include <stdlib.h>
+#include "pktio.h"
+
+#define PKT_BUF_NUM 32
+#define PKT_BUF_SIZE (9 * 1024)
+#define PKT_LEN_NORMAL 64
+#define PKT_LEN_MAX (PKT_BUF_SIZE - ODPH_ETHHDR_LEN - \
+ ODPH_IPV4HDR_LEN - ODPH_UDPHDR_LEN)
+
+#define USE_MTU 0
+#define MAX_NUM_IFACES 2
+#define TEST_SEQ_INVALID ((uint32_t)~0)
+#define TEST_SEQ_MAGIC 0x92749451
+#define TX_BATCH_LEN 4
+#define MAX_QUEUES 128
+
+#define PKTIN_TS_INTERVAL (50 * ODP_TIME_MSEC_IN_NS)
+#define PKTIN_TS_MIN_RES 1000
+#define PKTIN_TS_MAX_RES 10000000000
+#define PKTIN_TS_CMP_RES 1
+
+#undef DEBUG_STATS
+
+/** interface names used for testing */
+static const char *iface_name[MAX_NUM_IFACES];
+
+/** number of interfaces being used (1=loopback, 2=pair) */
+static int num_ifaces;
+
+/** while testing real-world interfaces additional time may be
+ needed for external network to enable link to pktio
+ interface that just become up.*/
+static bool wait_for_network;
+
+/** local container for pktio attributes */
+typedef struct {
+ const char *name;
+ odp_pktio_t id;
+ odp_pktout_queue_t pktout;
+ odp_queue_t queue_out;
+ odp_queue_t inq;
+ odp_pktin_mode_t in_mode;
+} pktio_info_t;
+
+/** magic number and sequence at start of UDP payload */
+typedef struct ODP_PACKED {
+ odp_u32be_t magic;
+ odp_u32be_t seq;
+} pkt_head_t;
+
+/** magic number at end of UDP payload */
+typedef struct ODP_PACKED {
+ odp_u32be_t magic;
+} pkt_tail_t;
+
+/** Run mode */
+typedef enum {
+ PKT_POOL_UNSEGMENTED,
+ PKT_POOL_SEGMENTED,
+} pkt_segmented_e;
+
+typedef enum {
+ TXRX_MODE_SINGLE,
+ TXRX_MODE_MULTI,
+ TXRX_MODE_MULTI_EVENT
+} txrx_mode_e;
+
+typedef enum {
+ RECV_TMO,
+ RECV_MQ_TMO,
+ RECV_MQ_TMO_NO_IDX,
+} recv_tmo_mode_e;
+
+/** size of transmitted packets */
+static uint32_t packet_len = PKT_LEN_NORMAL;
+
+/** default packet pool */
+odp_pool_t default_pkt_pool = ODP_POOL_INVALID;
+
+/** sequence number of IP packets */
+odp_atomic_u32_t ip_seq;
+
+/** Type of pool segmentation */
+pkt_segmented_e pool_segmentation = PKT_POOL_UNSEGMENTED;
+
+odp_pool_t pool[MAX_NUM_IFACES] = {ODP_POOL_INVALID, ODP_POOL_INVALID};
+
+/* Poll link status until up, for at most 100 x 10 ms = 1 s total.
+ * If the implementation reports link status as unsupported (-1 on the
+ * first call), no assertion is made; otherwise the link must be up. */
+static inline void _pktio_wait_linkup(odp_pktio_t pktio)
+{
+	/* wait 1 second for link up */
+	uint64_t wait_ns = (10 * ODP_TIME_MSEC_IN_NS);
+	int wait_num = 100;
+	int i;
+	int ret = -1;
+
+	for (i = 0; i < wait_num; i++) {
+		ret = odp_pktio_link_status(pktio);
+		if (ret < 0 || ret == 1)
+			break;
+		/* link is down, call status again after delay */
+		odp_time_wait_ns(wait_ns);
+	}
+
+	if (ret != -1) {
+		/* assert only if link state supported and
+		 * it's down. */
+		CU_ASSERT_FATAL(ret == 1);
+	}
+}
+
+/* Configure pool packet/segment lengths according to the global
+ * pool_segmentation mode: seg_len 0 lets the implementation pick its
+ * minimum segment size (forcing multi-segment packets); otherwise a
+ * single segment covers the whole buffer. */
+static void set_pool_len(odp_pool_param_t *params)
+{
+	switch (pool_segmentation) {
+	case PKT_POOL_SEGMENTED:
+		/* Force segment to minimum size */
+		params->pkt.seg_len = 0;
+		params->pkt.len = PKT_BUF_SIZE;
+		break;
+	case PKT_POOL_UNSEGMENTED:
+	default:
+		params->pkt.seg_len = PKT_BUF_SIZE;
+		params->pkt.len = PKT_BUF_SIZE;
+		break;
+	}
+}
+
+/* Write the real interface MAC addresses of src/dst pktios into the
+ * packet's Ethernet header (assumes L2 offset is already set). */
+static void pktio_pkt_set_macs(odp_packet_t pkt,
+			       odp_pktio_t src, odp_pktio_t dst)
+{
+	uint32_t len;
+	odph_ethhdr_t *eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, &len);
+	int ret;
+
+	ret = odp_pktio_mac_addr(src, &eth->src, ODP_PKTIO_MACADDR_MAXSIZE);
+	CU_ASSERT(ret == ODPH_ETHADDR_LEN);
+	CU_ASSERT(ret <= ODP_PKTIO_MACADDR_MAXSIZE);
+
+	ret = odp_pktio_mac_addr(dst, &eth->dst, ODP_PKTIO_MACADDR_MAXSIZE);
+	CU_ASSERT(ret == ODPH_ETHADDR_LEN);
+	CU_ASSERT(ret <= ODP_PKTIO_MACADDR_MAXSIZE);
+}
+
+/* Stamp a test packet with a magic+sequence header at the start of the
+ * UDP payload and a magic tail at the end of the packet, so received
+ * packets can be identified and matched by pktio_pkt_seq().
+ * Returns the sequence number written, or TEST_SEQ_INVALID on failure.
+ * NOTE(review): tstseq is function-static, so concurrent callers would
+ * race — callers in this file appear single-threaded; confirm. */
+static uint32_t pktio_pkt_set_seq(odp_packet_t pkt)
+{
+	static uint32_t tstseq;
+	size_t off;
+	pkt_head_t head;
+	pkt_tail_t tail;
+
+	off = odp_packet_l4_offset(pkt);
+	if (off == ODP_PACKET_OFFSET_INVALID) {
+		CU_FAIL("packet L4 offset not set");
+		return TEST_SEQ_INVALID;
+	}
+
+	head.magic = TEST_SEQ_MAGIC;
+	head.seq   = tstseq;
+
+	/* Place the header right after the UDP header. */
+	off += ODPH_UDPHDR_LEN;
+	if (odp_packet_copy_from_mem(pkt, off, sizeof(head), &head) != 0)
+		return TEST_SEQ_INVALID;
+
+	tail.magic = TEST_SEQ_MAGIC;
+	off = odp_packet_len(pkt) - sizeof(pkt_tail_t);
+	if (odp_packet_copy_from_mem(pkt, off, sizeof(tail), &tail) != 0)
+		return TEST_SEQ_INVALID;
+
+	tstseq++;
+
+	return head.seq;
+}
+
+/* Extract and validate the test sequence number stamped into a packet by
+ * pktio_pkt_set_seq(): checks packet handle, L4 offset, head magic, total
+ * length and tail magic. Returns the sequence number, or TEST_SEQ_INVALID
+ * with a diagnostic on stderr when any check fails. */
+static uint32_t pktio_pkt_seq(odp_packet_t pkt)
+{
+	size_t off;
+	uint32_t seq = TEST_SEQ_INVALID;
+	pkt_head_t head;
+	pkt_tail_t tail;
+
+	if (pkt == ODP_PACKET_INVALID) {
+		fprintf(stderr, "error: pkt invalid\n");
+		return TEST_SEQ_INVALID;
+	}
+
+	off = odp_packet_l4_offset(pkt);
+	if (off == ODP_PACKET_OFFSET_INVALID) {
+		fprintf(stderr, "error: offset invalid\n");
+		return TEST_SEQ_INVALID;
+	}
+
+	/* Head marker sits just after the UDP header. */
+	off += ODPH_UDPHDR_LEN;
+	if (odp_packet_copy_to_mem(pkt, off, sizeof(head), &head) != 0) {
+		fprintf(stderr, "error: header copy failed\n");
+		return TEST_SEQ_INVALID;
+	}
+
+	if (head.magic != TEST_SEQ_MAGIC) {
+		fprintf(stderr, "error: header magic invalid %u\n", head.magic);
+		return TEST_SEQ_INVALID;
+	}
+
+	/* Only a packet of the expected length can carry a valid tail. */
+	if (odp_packet_len(pkt) == packet_len) {
+		off = packet_len - sizeof(tail);
+		if (odp_packet_copy_to_mem(pkt, off, sizeof(tail),
+					   &tail) != 0) {
+			/* Fixed: this is the tail copy, not the header copy */
+			fprintf(stderr, "error: tail copy failed\n");
+			return TEST_SEQ_INVALID;
+		}
+
+		if (tail.magic == TEST_SEQ_MAGIC) {
+			seq = head.seq;
+			CU_ASSERT(seq != TEST_SEQ_INVALID);
+		} else {
+			fprintf(stderr, "error: tail magic invalid %u\n",
+				tail.magic);
+		}
+	} else {
+		fprintf(stderr, "error: packet length invalid: %u (%u)\n",
+			odp_packet_len(pkt), packet_len);
+	}
+
+	return seq;
+}
+
+/* Build a complete Ethernet/IPv4/UDP test frame in-place (zero MACs,
+ * fixed 10.0.0.1 -> 10.0.0.100 addresses, ports 12049 -> 12050), set the
+ * L2/L3/L4 offsets, and stamp the test sequence markers.
+ * Returns the packet's sequence number or TEST_SEQ_INVALID.
+ * NOTE(review): writes headers via odp_packet_data(); assumes the headers
+ * fit in the first segment — confirm for segmented pools. */
+static uint32_t pktio_init_packet(odp_packet_t pkt)
+{
+	odph_ethhdr_t *eth;
+	odph_ipv4hdr_t *ip;
+	odph_udphdr_t *udp;
+	char *buf;
+	uint16_t seq;
+	uint8_t mac[ODP_PKTIO_MACADDR_MAXSIZE] = {0};
+	int pkt_len = odp_packet_len(pkt);
+
+	buf = odp_packet_data(pkt);
+
+	/* Ethernet */
+	odp_packet_l2_offset_set(pkt, 0);
+	eth = (odph_ethhdr_t *)buf;
+	memcpy(eth->src.addr, mac, ODPH_ETHADDR_LEN);
+	memcpy(eth->dst.addr, mac, ODPH_ETHADDR_LEN);
+	eth->type = odp_cpu_to_be_16(ODPH_ETHTYPE_IPV4);
+
+	/* IP */
+	odp_packet_l3_offset_set(pkt, ODPH_ETHHDR_LEN);
+	ip = (odph_ipv4hdr_t *)(buf + ODPH_ETHHDR_LEN);
+	ip->dst_addr = odp_cpu_to_be_32(0x0a000064);
+	ip->src_addr = odp_cpu_to_be_32(0x0a000001);
+	ip->ver_ihl = ODPH_IPV4 << 4 | ODPH_IPV4HDR_IHL_MIN;
+	ip->tot_len = odp_cpu_to_be_16(pkt_len - ODPH_ETHHDR_LEN);
+	ip->ttl = 128;
+	ip->proto = ODPH_IPPROTO_UDP;
+	/* Unique IP id from the shared atomic counter. */
+	seq = odp_atomic_fetch_inc_u32(&ip_seq);
+	ip->id = odp_cpu_to_be_16(seq);
+	ip->chksum = 0;
+	odph_ipv4_csum_update(pkt);
+
+	/* UDP */
+	odp_packet_l4_offset_set(pkt, ODPH_ETHHDR_LEN + ODPH_IPV4HDR_LEN);
+	udp = (odph_udphdr_t *)(buf + ODPH_ETHHDR_LEN + ODPH_IPV4HDR_LEN);
+	udp->src_port = odp_cpu_to_be_16(12049);
+	udp->dst_port = odp_cpu_to_be_16(12050);
+	udp->length = odp_cpu_to_be_16(pkt_len -
+				       ODPH_ETHHDR_LEN - ODPH_IPV4HDR_LEN);
+	udp->chksum = 0;
+
+	return pktio_pkt_set_seq(pkt);
+}
+
+/* Recompute IPv4 and UDP checksums after the packet (MACs/seq markers)
+ * has been modified. Returns 0 on success, -1 if L4 is not UDP. */
+static int pktio_fixup_checksums(odp_packet_t pkt)
+{
+	odph_ipv4hdr_t *ip;
+	odph_udphdr_t *udp;
+	uint32_t len;
+
+	ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, &len);
+
+	if (ip->proto != ODPH_IPPROTO_UDP) {
+		CU_FAIL("unexpected L4 protocol");
+		return -1;
+	}
+
+	udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, &len);
+
+	/* Checksum fields must be zero while the new sums are computed. */
+	ip->chksum = 0;
+	odph_ipv4_csum_update(pkt);
+	udp->chksum = 0;
+	udp->chksum = odph_ipv4_udp_chksum(pkt);
+
+	return 0;
+}
+
+/* Create the shared default packet pool (named per segmentation mode).
+ * Returns 0 on success, -1 if it already exists or creation fails. */
+static int default_pool_create(void)
+{
+	odp_pool_param_t params;
+	char pool_name[ODP_POOL_NAME_LEN];
+
+	if (default_pkt_pool != ODP_POOL_INVALID)
+		return -1;
+
+	memset(&params, 0, sizeof(params));
+	set_pool_len(&params);
+	params.pkt.num     = PKT_BUF_NUM;
+	params.type        = ODP_POOL_PACKET;
+
+	/* Include segmentation mode in the name so reruns with a different
+	 * mode don't collide with a leftover pool. */
+	snprintf(pool_name, sizeof(pool_name),
+		 "pkt_pool_default_%d", pool_segmentation);
+	default_pkt_pool = odp_pool_create(pool_name, &params);
+	if (default_pkt_pool == ODP_POOL_INVALID)
+		return -1;
+
+	return 0;
+}
+
+/* Open interface iface_idx with the given in/out modes and configure a
+ * single default input and output queue (atomic sync for scheduled mode).
+ * Fatally asserts on open failure; optionally delays for external
+ * networks. Caller starts/stops/closes the returned pktio. */
+static odp_pktio_t create_pktio(int iface_idx, odp_pktin_mode_t imode,
+				odp_pktout_mode_t omode)
+{
+	odp_pktio_t pktio;
+	odp_pktio_param_t pktio_param;
+	odp_pktin_queue_param_t pktin_param;
+	const char *iface = iface_name[iface_idx];
+
+	odp_pktio_param_init(&pktio_param);
+
+	pktio_param.in_mode = imode;
+	pktio_param.out_mode = omode;
+
+	pktio = odp_pktio_open(iface, pool[iface_idx], &pktio_param);
+	CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+	CU_ASSERT(odp_pktio_to_u64(pktio) !=
+		  odp_pktio_to_u64(ODP_PKTIO_INVALID));
+
+	odp_pktin_queue_param_init(&pktin_param);
+
+	/* Atomic queue when in scheduled mode */
+	pktin_param.queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+
+	/* By default, single input and output queue in all modes. Config can
+	 * be overridden before starting the interface. */
+	CU_ASSERT(odp_pktin_queue_config(pktio, &pktin_param) == 0);
+	CU_ASSERT(odp_pktout_queue_config(pktio, NULL) == 0);
+
+	if (wait_for_network)
+		odp_time_wait_ns(ODP_TIME_SEC_IN_NS / 4);
+
+	return pktio;
+}
+
+/* Drain any events left pending on the pktio's input path: from its event
+ * queue in QUEUE mode, from the scheduler in SCHED mode; DIRECT mode has
+ * nothing to flush. Always returns 0. */
+static int flush_input_queue(odp_pktio_t pktio, odp_pktin_mode_t imode)
+{
+	odp_event_t ev;
+	odp_queue_t queue = ODP_QUEUE_INVALID;
+
+	if (imode == ODP_PKTIN_MODE_QUEUE) {
+		/* Assert breaks else-if without brackets */
+		CU_ASSERT_FATAL(odp_pktin_event_queue(pktio, &queue, 1) == 1);
+	} else if (imode == ODP_PKTIN_MODE_DIRECT) {
+		return 0;
+	}
+
+	/* flush any pending events */
+	while (1) {
+		if (queue != ODP_QUEUE_INVALID)
+			ev = odp_queue_deq(queue);
+		else
+			ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+
+		if (ev != ODP_EVENT_INVALID)
+			odp_event_free(ev);
+		else
+			break;
+	}
+
+	return 0;
+}
+
+/* Allocate and fully initialize up to num test packets (headers, MACs,
+ * sequence markers, checksums). Stops at the first failure, freeing that
+ * packet. Returns the count of packets successfully prepared; pkt_tbl
+ * and pkt_seq are filled for indices [0, return). */
+static int create_packets(odp_packet_t pkt_tbl[], uint32_t pkt_seq[], int num,
+			  odp_pktio_t pktio_src, odp_pktio_t pktio_dst)
+{
+	int i;
+
+	for (i = 0; i < num; i++) {
+		pkt_tbl[i] = odp_packet_alloc(default_pkt_pool, packet_len);
+		if (pkt_tbl[i] == ODP_PACKET_INVALID)
+			break;
+
+		pkt_seq[i] = pktio_init_packet(pkt_tbl[i]);
+		if (pkt_seq[i] == TEST_SEQ_INVALID) {
+			odp_packet_free(pkt_tbl[i]);
+			break;
+		}
+
+		pktio_pkt_set_macs(pkt_tbl[i], pktio_src, pktio_dst);
+
+		/* MACs changed after init, so checksums must be redone. */
+		if (pktio_fixup_checksums(pkt_tbl[i]) != 0) {
+			odp_packet_free(pkt_tbl[i]);
+			break;
+		}
+	}
+
+	return i;
+}
+
+/* Receive up to num packets from pktio_rx using the path matching its
+ * input mode: direct recv, queue dequeue or scheduler, single or multi
+ * depending on mode. Non-packet events are freed. Returns the number of
+ * packets stored in pkt_tbl (or the odp_pktin_recv() result in DIRECT
+ * mode, which may be negative on error). */
+static int get_packets(pktio_info_t *pktio_rx, odp_packet_t pkt_tbl[],
+		       int num, txrx_mode_e mode)
+{
+	odp_event_t evt_tbl[num];
+	int num_evts = 0;
+	int num_pkts = 0;
+	int i;
+
+	if (pktio_rx->in_mode == ODP_PKTIN_MODE_DIRECT) {
+		odp_pktin_queue_t pktin;
+
+		CU_ASSERT_FATAL(odp_pktin_queue(pktio_rx->id, &pktin, 1) == 1);
+		return odp_pktin_recv(pktin, pkt_tbl, num);
+	}
+
+	if (mode == TXRX_MODE_MULTI) {
+		if (pktio_rx->in_mode == ODP_PKTIN_MODE_QUEUE)
+			num_evts = odp_queue_deq_multi(pktio_rx->inq, evt_tbl,
+						       num);
+		else
+			num_evts = odp_schedule_multi(NULL, ODP_SCHED_NO_WAIT,
+						      evt_tbl, num);
+	} else {
+		odp_event_t evt_tmp;
+
+		if (pktio_rx->in_mode == ODP_PKTIN_MODE_QUEUE)
+			evt_tmp = odp_queue_deq(pktio_rx->inq);
+		else
+			evt_tmp = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+
+		if (evt_tmp != ODP_EVENT_INVALID)
+			evt_tbl[num_evts++] = evt_tmp;
+	}
+
+	/* convert events to packets, discarding any non-packet events */
+	for (i = 0; i < num_evts; ++i) {
+		if (odp_event_type(evt_tbl[i]) == ODP_EVENT_PACKET)
+			pkt_tbl[num_pkts++] = odp_packet_from_event(evt_tbl[i]);
+		else
+			odp_event_free(evt_tbl[i]);
+	}
+
+	return num_pkts;
+}
+
+/* Poll pktio_rx for up to ns nanoseconds, collecting only packets whose
+ * stamped sequence matches the next expected entry of seq_tbl (others,
+ * e.g. stray network traffic, are freed). Returns the number of expected
+ * packets received, which may be < num on timeout. */
+static int wait_for_packets(pktio_info_t *pktio_rx, odp_packet_t pkt_tbl[],
+			    uint32_t seq_tbl[], int num, txrx_mode_e mode,
+			    uint64_t ns)
+{
+	odp_time_t wait_time, end;
+	int num_rx = 0;
+	int i;
+	odp_packet_t pkt_tmp[num];
+
+	wait_time = odp_time_local_from_ns(ns);
+	end = odp_time_sum(odp_time_local(), wait_time);
+
+	do {
+		int n = get_packets(pktio_rx, pkt_tmp, num - num_rx, mode);
+
+		if (n < 0)
+			break;
+
+		for (i = 0; i < n; ++i) {
+			/* Packets must arrive in sequence order to count. */
+			if (pktio_pkt_seq(pkt_tmp[i]) == seq_tbl[num_rx])
+				pkt_tbl[num_rx++] = pkt_tmp[i];
+			else
+				odp_packet_free(pkt_tmp[i]);
+		}
+	} while (num_rx < num && odp_time_cmp(end, odp_time_local()) > 0);
+
+	return num_rx;
+}
+
+/* Receive num expected packets using the timeout receive APIs
+ * (odp_pktin_recv_tmo for RECV_TMO, odp_pktin_recv_mq_tmo otherwise,
+ * with or without the source-queue index per mode). Verifies that
+ * ODP_PKTIN_WAIT blocks until all packets arrive, and that a short
+ * receive took at least ns nanoseconds. Returns packets received. */
+static int recv_packets_tmo(odp_pktio_t pktio, odp_packet_t pkt_tbl[],
+			    uint32_t seq_tbl[], int num, recv_tmo_mode_e mode,
+			    uint64_t tmo, uint64_t ns)
+{
+	odp_packet_t pkt_tmp[num];
+	odp_pktin_queue_t pktin[MAX_QUEUES];
+	odp_time_t ts1, ts2;
+	int num_rx = 0;
+	int num_q;
+	int i;
+	int n;
+	unsigned from_val;
+	unsigned *from = NULL;
+
+	/* Only RECV_MQ_TMO asks for the index of the queue that delivered. */
+	if (mode == RECV_MQ_TMO)
+		from = &from_val;
+
+	num_q = odp_pktin_queue(pktio, pktin, MAX_QUEUES);
+	CU_ASSERT_FATAL(num_q > 0);
+
+	/** Multiple odp_pktin_recv_tmo()/odp_pktin_recv_mq_tmo() calls may be
+	 * required to discard possible non-test packets. */
+	do {
+		ts1 = odp_time_global();
+		if (mode == RECV_TMO)
+			n = odp_pktin_recv_tmo(pktin[0], pkt_tmp, num - num_rx,
+					       tmo);
+		else
+			n = odp_pktin_recv_mq_tmo(pktin, (unsigned)num_q,
+						  from, pkt_tmp,
+						  num - num_rx, tmo);
+		ts2 = odp_time_global();
+
+		if (n <= 0)
+			break;
+		for (i = 0; i < n; i++) {
+			if (pktio_pkt_seq(pkt_tmp[i]) == seq_tbl[num_rx])
+				pkt_tbl[num_rx++] = pkt_tmp[i];
+			else
+				odp_packet_free(pkt_tmp[i]);
+		}
+		if (mode == RECV_MQ_TMO)
+			CU_ASSERT(from_val < (unsigned)num_q);
+	} while (num_rx < num);
+
+	if (tmo == ODP_PKTIN_WAIT)
+		CU_ASSERT(num_rx == num);
+	if (num_rx < num)
+		CU_ASSERT(odp_time_to_ns(odp_time_diff(ts2, ts1)) >= ns);
+
+	return num_rx;
+}
+
+/* Send all pkts packets on pktout, retrying partial sends until every
+ * packet is accepted. Returns 0, or -1 after a fatal send failure. */
+static int send_packets(odp_pktout_queue_t pktout,
+			odp_packet_t *pkt_tbl, unsigned pkts)
+{
+	int ret;
+	unsigned sent = 0;
+
+	while (sent < pkts) {
+		/* odp_pktout_send() may accept only part of the burst. */
+		ret = odp_pktout_send(pktout, &pkt_tbl[sent], pkts - sent);
+
+		if (ret < 0) {
+			CU_FAIL_FATAL("failed to send test packet");
+			return -1;
+		}
+
+		sent += ret;
+	}
+
+	return 0;
+}
+
+/* Transmit pkts packets via an event queue (PKTOUT_MODE_QUEUE path):
+ * convert each packet to an event, then enqueue with retry on partial
+ * enqueue. Returns 0, or -1 after a fatal enqueue failure. */
+static int send_packet_events(odp_queue_t queue,
+			      odp_packet_t *pkt_tbl, unsigned pkts)
+{
+	int ret;
+	unsigned i;
+	unsigned sent = 0;
+	odp_event_t ev_tbl[pkts];
+
+	for (i = 0; i < pkts; i++)
+		ev_tbl[i] = odp_packet_to_event(pkt_tbl[i]);
+
+	while (sent < pkts) {
+		ret = odp_queue_enq_multi(queue, &ev_tbl[sent], pkts - sent);
+
+		if (ret < 0) {
+			CU_FAIL_FATAL("failed to send test packet as events");
+			return -1;
+		}
+
+		sent += ret;
+	}
+
+	return 0;
+}
+
+/* Core tx/rx exercise: create num_pkts test packets, send them from
+ * pktio_a (single send, burst send, or event enqueue per mode), then wait
+ * up to 1 s for them on pktio_b and validate input pktio / error flags.
+ * NOTE(review): when packet_len == USE_MTU the global packet_len is
+ * overwritten with the MTU and not restored afterwards — later tests see
+ * the new value; confirm this is intentional. */
+static void pktio_txrx_multi(pktio_info_t *pktio_a, pktio_info_t *pktio_b,
+			     int num_pkts, txrx_mode_e mode)
+{
+	odp_packet_t tx_pkt[num_pkts];
+	odp_packet_t rx_pkt[num_pkts];
+	uint32_t tx_seq[num_pkts];
+	int i, ret, num_rx;
+
+	if (packet_len == USE_MTU) {
+		uint32_t mtu;
+
+		/* Use the smaller MTU of the two interfaces, capped at
+		 * PKT_LEN_MAX so headers and markers still fit. */
+		mtu = odp_pktio_mtu(pktio_a->id);
+		if (odp_pktio_mtu(pktio_b->id) < mtu)
+			mtu = odp_pktio_mtu(pktio_b->id);
+		CU_ASSERT_FATAL(mtu > 0);
+		packet_len = mtu;
+		if (packet_len > PKT_LEN_MAX)
+			packet_len = PKT_LEN_MAX;
+	}
+
+	/* generate test packets to send */
+	ret = create_packets(tx_pkt, tx_seq, num_pkts, pktio_a->id,
+			     pktio_b->id);
+	if (ret != num_pkts) {
+		CU_FAIL("failed to generate test packets");
+		return;
+	}
+
+	/* send packet(s) out */
+	if (mode == TXRX_MODE_SINGLE) {
+		for (i = 0; i < num_pkts; ++i) {
+			ret = odp_pktout_send(pktio_a->pktout, &tx_pkt[i], 1);
+			if (ret != 1) {
+				CU_FAIL_FATAL("failed to send test packet");
+				odp_packet_free(tx_pkt[i]);
+				return;
+			}
+		}
+	} else if (mode == TXRX_MODE_MULTI) {
+		send_packets(pktio_a->pktout, tx_pkt, num_pkts);
+	} else {
+		send_packet_events(pktio_a->queue_out, tx_pkt, num_pkts);
+	}
+
+	/* and wait for them to arrive back */
+	num_rx = wait_for_packets(pktio_b, rx_pkt, tx_seq,
+				  num_pkts, mode, ODP_TIME_SEC_IN_NS);
+	CU_ASSERT(num_rx == num_pkts);
+
+	for (i = 0; i < num_rx; ++i) {
+		CU_ASSERT_FATAL(rx_pkt[i] != ODP_PACKET_INVALID);
+		CU_ASSERT(odp_packet_input(rx_pkt[i]) == pktio_b->id);
+		CU_ASSERT(odp_packet_has_error(rx_pkt[i]) == 0);
+		odp_packet_free(rx_pkt[i]);
+	}
+}
+
+/* Full tx/rx test driver: open all configured interfaces in the given
+ * input mode (output mode QUEUE only for MULTI_EVENT tx), wire up queues,
+ * start them, run pktio_txrx_multi() from interface 0 to interface 1 (or
+ * loopback when only one interface), then stop, flush and close. */
+static void test_txrx(odp_pktin_mode_t in_mode, int num_pkts,
+		      txrx_mode_e mode)
+{
+	int ret, i, if_b;
+	pktio_info_t pktios[MAX_NUM_IFACES];
+	pktio_info_t *io;
+
+	/* create pktios and associate input/output queues */
+	for (i = 0; i < num_ifaces; ++i) {
+		odp_pktout_queue_t pktout;
+		odp_queue_t queue;
+		odp_pktout_mode_t out_mode = ODP_PKTOUT_MODE_DIRECT;
+
+		if (mode == TXRX_MODE_MULTI_EVENT)
+			out_mode = ODP_PKTOUT_MODE_QUEUE;
+
+		io = &pktios[i];
+
+		io->name = iface_name[i];
+		io->id   = create_pktio(i, in_mode, out_mode);
+		if (io->id == ODP_PKTIO_INVALID) {
+			CU_FAIL("failed to open iface");
+			return;
+		}
+
+		/* Event-queue output needs the event queue handle; direct
+		 * output needs the pktout queue handle. */
+		if (mode == TXRX_MODE_MULTI_EVENT) {
+			CU_ASSERT_FATAL(odp_pktout_event_queue(io->id,
+							       &queue, 1) == 1);
+		} else {
+			CU_ASSERT_FATAL(odp_pktout_queue(io->id,
+							 &pktout, 1) == 1);
+			io->pktout = pktout;
+			queue = ODP_QUEUE_INVALID;
+		}
+
+		io->queue_out = queue;
+		io->in_mode   = in_mode;
+
+		if (in_mode == ODP_PKTIN_MODE_QUEUE) {
+			CU_ASSERT_FATAL(odp_pktin_event_queue(io->id, &queue, 1)
+					== 1);
+			io->inq = queue;
+		} else {
+			io->inq = ODP_QUEUE_INVALID;
+		}
+
+		ret = odp_pktio_start(io->id);
+		CU_ASSERT(ret == 0);
+
+		_pktio_wait_linkup(io->id);
+	}
+
+	/* if we have two interfaces then send through one and receive on
+	 * another but if there's only one assume it's a loopback */
+	if_b = (num_ifaces == 1) ? 0 : 1;
+	pktio_txrx_multi(&pktios[0], &pktios[if_b], num_pkts, mode);
+
+	for (i = 0; i < num_ifaces; ++i) {
+		ret = odp_pktio_stop(pktios[i].id);
+		CU_ASSERT_FATAL(ret == 0);
+		flush_input_queue(pktios[i].id, in_mode);
+		ret = odp_pktio_close(pktios[i].id);
+		CU_ASSERT(ret == 0);
+	}
+}
+
+/* Thin wrappers: run test_txrx() for each combination of input mode
+ * (QUEUE / SCHED / DIRECT), burst size (1 or TX_BATCH_LEN) and tx/rx
+ * mode (single, multi, multi-event). */
+void pktio_test_plain_queue(void)
+{
+	test_txrx(ODP_PKTIN_MODE_QUEUE, 1, TXRX_MODE_SINGLE);
+	test_txrx(ODP_PKTIN_MODE_QUEUE, TX_BATCH_LEN, TXRX_MODE_SINGLE);
+}
+
+void pktio_test_plain_multi(void)
+{
+	test_txrx(ODP_PKTIN_MODE_QUEUE, TX_BATCH_LEN, TXRX_MODE_MULTI);
+	test_txrx(ODP_PKTIN_MODE_QUEUE, 1, TXRX_MODE_MULTI);
+}
+
+void pktio_test_plain_multi_event(void)
+{
+	test_txrx(ODP_PKTIN_MODE_QUEUE, 1, TXRX_MODE_MULTI_EVENT);
+	test_txrx(ODP_PKTIN_MODE_QUEUE, TX_BATCH_LEN, TXRX_MODE_MULTI_EVENT);
+}
+
+void pktio_test_sched_queue(void)
+{
+	test_txrx(ODP_PKTIN_MODE_SCHED, 1, TXRX_MODE_SINGLE);
+	test_txrx(ODP_PKTIN_MODE_SCHED, TX_BATCH_LEN, TXRX_MODE_SINGLE);
+}
+
+void pktio_test_sched_multi(void)
+{
+	test_txrx(ODP_PKTIN_MODE_SCHED, TX_BATCH_LEN, TXRX_MODE_MULTI);
+	test_txrx(ODP_PKTIN_MODE_SCHED, 1, TXRX_MODE_MULTI);
+}
+
+void pktio_test_sched_multi_event(void)
+{
+	test_txrx(ODP_PKTIN_MODE_SCHED, 1, TXRX_MODE_MULTI_EVENT);
+	test_txrx(ODP_PKTIN_MODE_SCHED, TX_BATCH_LEN, TXRX_MODE_MULTI_EVENT);
+}
+
+void pktio_test_recv(void)
+{
+	test_txrx(ODP_PKTIN_MODE_DIRECT, 1, TXRX_MODE_SINGLE);
+}
+
+void pktio_test_recv_multi(void)
+{
+	test_txrx(ODP_PKTIN_MODE_DIRECT, TX_BATCH_LEN, TXRX_MODE_MULTI);
+}
+
+void pktio_test_recv_multi_event(void)
+{
+	test_txrx(ODP_PKTIN_MODE_DIRECT, 1, TXRX_MODE_MULTI_EVENT);
+	test_txrx(ODP_PKTIN_MODE_DIRECT, TX_BATCH_LEN, TXRX_MODE_MULTI_EVENT);
+}
+
+/* Exercise multi-queue direct I/O: configure the maximum number of input
+ * (with IPv4/UDP hashing when >1) and output queues, send a batch on the
+ * last output queue, then poll every input queue for up to 1 s until the
+ * whole batch is received in sequence order. */
+void pktio_test_recv_queue(void)
+{
+	odp_pktio_t pktio_tx, pktio_rx;
+	odp_pktio_t pktio[MAX_NUM_IFACES];
+	odp_pktio_capability_t capa;
+	odp_pktin_queue_param_t in_queue_param;
+	odp_pktout_queue_param_t out_queue_param;
+	odp_pktout_queue_t pktout_queue[MAX_QUEUES];
+	odp_pktin_queue_t pktin_queue[MAX_QUEUES];
+	odp_packet_t pkt_tbl[TX_BATCH_LEN];
+	odp_packet_t tmp_pkt[TX_BATCH_LEN];
+	uint32_t pkt_seq[TX_BATCH_LEN];
+	odp_time_t wait_time, end;
+	int num_rx = 0;
+	int num_queues;
+	int ret;
+	int i;
+
+	CU_ASSERT_FATAL(num_ifaces >= 1);
+
+	/* Open and configure interfaces */
+	for (i = 0; i < num_ifaces; ++i) {
+		pktio[i] = create_pktio(i, ODP_PKTIN_MODE_DIRECT,
+					ODP_PKTOUT_MODE_DIRECT);
+		CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
+
+		CU_ASSERT_FATAL(odp_pktio_capability(pktio[i], &capa) == 0);
+
+		odp_pktin_queue_param_init(&in_queue_param);
+		num_queues = capa.max_input_queues;
+		in_queue_param.num_queues  = num_queues;
+		/* Hashing only makes sense with more than one queue. */
+		in_queue_param.hash_enable = (num_queues > 1) ? 1 : 0;
+		in_queue_param.hash_proto.proto.ipv4_udp = 1;
+
+		ret = odp_pktin_queue_config(pktio[i], &in_queue_param);
+		CU_ASSERT_FATAL(ret == 0);
+
+		odp_pktout_queue_param_init(&out_queue_param);
+		out_queue_param.num_queues  = capa.max_output_queues;
+
+		ret = odp_pktout_queue_config(pktio[i], &out_queue_param);
+		CU_ASSERT_FATAL(ret == 0);
+
+		CU_ASSERT_FATAL(odp_pktio_start(pktio[i]) == 0);
+	}
+
+	for (i = 0; i < num_ifaces; ++i)
+		_pktio_wait_linkup(pktio[i]);
+
+	pktio_tx = pktio[0];
+	if (num_ifaces > 1)
+		pktio_rx = pktio[1];
+	else
+		pktio_rx = pktio_tx;
+
+	/* Allocate and initialize test packets */
+	ret = create_packets(pkt_tbl, pkt_seq, TX_BATCH_LEN, pktio_tx,
+			     pktio_rx);
+	if (ret != TX_BATCH_LEN) {
+		CU_FAIL("Failed to generate test packets");
+		return;
+	}
+
+	/* Send packets */
+	num_queues = odp_pktout_queue(pktio_tx, pktout_queue, MAX_QUEUES);
+	CU_ASSERT_FATAL(num_queues > 0);
+	if (num_queues > MAX_QUEUES)
+		num_queues = MAX_QUEUES;
+
+	/* Use the last queue to prove all configured queues work. */
+	ret = odp_pktout_send(pktout_queue[num_queues - 1], pkt_tbl,
+			      TX_BATCH_LEN);
+	CU_ASSERT_FATAL(ret == TX_BATCH_LEN);
+
+	/* Receive packets */
+	num_queues = odp_pktin_queue(pktio_rx, pktin_queue, MAX_QUEUES);
+	CU_ASSERT_FATAL(num_queues > 0);
+	if (num_queues > MAX_QUEUES)
+		num_queues = MAX_QUEUES;
+
+	wait_time = odp_time_local_from_ns(ODP_TIME_SEC_IN_NS);
+	end = odp_time_sum(odp_time_local(), wait_time);
+	do {
+		int n = 0;
+
+		/* Hashing may spread the batch over any input queue. */
+		for (i = 0; i < num_queues; i++) {
+			n = odp_pktin_recv(pktin_queue[i], tmp_pkt,
+					   TX_BATCH_LEN);
+			if (n != 0)
+				break;
+		}
+		if (n < 0)
+			break;
+		for (i = 0; i < n; i++) {
+			if (pktio_pkt_seq(tmp_pkt[i]) == pkt_seq[num_rx])
+				pkt_tbl[num_rx++] = tmp_pkt[i];
+			else
+				odp_packet_free(tmp_pkt[i]);
+		}
+	} while (num_rx < TX_BATCH_LEN &&
+		 odp_time_cmp(end, odp_time_local()) > 0);
+
+	CU_ASSERT(num_rx == TX_BATCH_LEN);
+
+	for (i = 0; i < num_rx; i++)
+		odp_packet_free(pkt_tbl[i]);
+
+	for (i = 0; i < num_ifaces; i++) {
+		CU_ASSERT_FATAL(odp_pktio_stop(pktio[i]) == 0);
+		CU_ASSERT_FATAL(odp_pktio_close(pktio[i]) == 0);
+	}
+}
+
/* Exercise timed packet input (odp_pktin_recv_tmo() family, driven through
 * the recv_packets_tmo() helper) in the given mode. Verifies that a receive
 * with a finite timeout returns 0 while no packets are in flight, then sends
 * test packets and receives them back using each wait flavor: ODP_PKTIN_WAIT,
 * ODP_PKTIN_NO_WAIT, a zero wait time and a non-zero wait time. */
static void test_recv_tmo(recv_tmo_mode_e mode)
{
	odp_pktio_t pktio_tx, pktio_rx;
	odp_pktio_t pktio[MAX_NUM_IFACES];
	odp_pktio_capability_t capa;
	odp_pktin_queue_param_t in_queue_param;
	odp_pktout_queue_t pktout_queue;
	int test_pkt_count = 6;
	odp_packet_t pkt_tbl[test_pkt_count];
	uint32_t pkt_seq[test_pkt_count];
	uint64_t ns;
	unsigned num_q;
	int ret;
	int i;

	CU_ASSERT_FATAL(num_ifaces >= 1);

	/* Open and configure interfaces */
	for (i = 0; i < num_ifaces; ++i) {
		pktio[i] = create_pktio(i, ODP_PKTIN_MODE_DIRECT,
					ODP_PKTOUT_MODE_DIRECT);
		CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);

		CU_ASSERT_FATAL(odp_pktio_capability(pktio[i], &capa) == 0);

		odp_pktin_queue_param_init(&in_queue_param);
		/* Single-queue mode uses one input queue; the multi-queue
		 * modes use as many as supported, capped at MAX_QUEUES. */
		if (mode == RECV_TMO)
			num_q = 1;
		else
			num_q = (capa.max_input_queues < MAX_QUEUES) ?
				capa.max_input_queues : MAX_QUEUES;
		in_queue_param.num_queues = num_q;
		in_queue_param.hash_enable = (num_q > 1) ? 1 : 0;
		in_queue_param.hash_proto.proto.ipv4_udp = 1;

		ret = odp_pktin_queue_config(pktio[i], &in_queue_param);
		CU_ASSERT_FATAL(ret == 0);

		CU_ASSERT_FATAL(odp_pktio_start(pktio[i]) == 0);
	}

	for (i = 0; i < num_ifaces; i++)
		_pktio_wait_linkup(pktio[i]);

	/* Loop back on a single interface, or TX on 0 / RX on 1 */
	pktio_tx = pktio[0];
	pktio_rx = (num_ifaces > 1) ? pktio[1] : pktio_tx;

	ret = odp_pktout_queue(pktio_tx, &pktout_queue, 1);
	CU_ASSERT_FATAL(ret > 0);

	memset(pkt_seq, 0, sizeof(pkt_seq));

	/* No packets sent yet, so should wait */
	ns = 100 * ODP_TIME_MSEC_IN_NS;
	ret = recv_packets_tmo(pktio_rx, &pkt_tbl[0], &pkt_seq[0], 1, mode,
			       odp_pktin_wait_time(ns), ns);
	CU_ASSERT(ret == 0);

	ret = create_packets(pkt_tbl, pkt_seq, test_pkt_count, pktio_tx,
			     pktio_rx);
	CU_ASSERT_FATAL(ret == test_pkt_count);

	ret = odp_pktout_send(pktout_queue, pkt_tbl, test_pkt_count);
	CU_ASSERT_FATAL(ret == test_pkt_count);

	/* Indefinite wait */
	ret = recv_packets_tmo(pktio_rx, &pkt_tbl[0], &pkt_seq[0], 1, mode,
			       ODP_PKTIN_WAIT, 0);
	CU_ASSERT_FATAL(ret == 1);

	/* Non-blocking poll; packets are already in flight */
	ret = recv_packets_tmo(pktio_rx, &pkt_tbl[1], &pkt_seq[1], 1, mode,
			       ODP_PKTIN_NO_WAIT, 0);
	CU_ASSERT_FATAL(ret == 1);

	/* Zero wait time */
	ret = recv_packets_tmo(pktio_rx, &pkt_tbl[2], &pkt_seq[2], 1, mode,
			       odp_pktin_wait_time(0), 0);
	CU_ASSERT_FATAL(ret == 1);

	/* Finite wait time for the remaining packets */
	ret = recv_packets_tmo(pktio_rx, &pkt_tbl[3], &pkt_seq[3], 3, mode,
			       odp_pktin_wait_time(ns), ns);
	CU_ASSERT_FATAL(ret == 3);

	for (i = 0; i < test_pkt_count; i++)
		odp_packet_free(pkt_tbl[i]);

	for (i = 0; i < num_ifaces; i++) {
		CU_ASSERT_FATAL(odp_pktio_stop(pktio[i]) == 0);
		CU_ASSERT_FATAL(odp_pktio_close(pktio[i]) == 0);
	}
}
+
/* Timed receive on a single input queue. */
void pktio_test_recv_tmo(void)
{
	test_recv_tmo(RECV_TMO);
}
+
/* Timed receive over multiple input queues, both with and without the
 * returned queue index. */
void pktio_test_recv_mq_tmo(void)
{
	test_recv_tmo(RECV_MQ_TMO);
	test_recv_tmo(RECV_MQ_TMO_NO_IDX);
}
+
/* Re-run the scheduled multi-receive test with MTU-sized packets.
 * packet_len is a file-level setting read by the packet creation helpers;
 * it is restored to the normal length afterwards. */
void pktio_test_recv_mtu(void)
{
	packet_len = USE_MTU;
	pktio_test_sched_multi();
	packet_len = PKT_LEN_NORMAL;
}
+
+void pktio_test_mtu(void)
+{
+ int ret;
+ uint32_t mtu;
+
+ odp_pktio_t pktio = create_pktio(0, ODP_PKTIN_MODE_SCHED,
+ ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+
+ mtu = odp_pktio_mtu(pktio);
+ CU_ASSERT(mtu > 0);
+
+ printf(" %" PRIu32 " ", mtu);
+
+ ret = odp_pktio_close(pktio);
+ CU_ASSERT(ret == 0);
+}
+
/* Toggle promiscuous mode on and off and verify the mode is read back
 * accordingly. Exits early (without failing) when the implementation
 * cannot set the mode. */
void pktio_test_promisc(void)
{
	int ret;
	odp_pktio_capability_t capa;

	odp_pktio_t pktio = create_pktio(0, ODP_PKTIN_MODE_SCHED,
					 ODP_PKTOUT_MODE_DIRECT);
	CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);

	/* Reading the current mode must always work */
	ret = odp_pktio_promisc_mode(pktio);
	CU_ASSERT(ret >= 0);

	CU_ASSERT_FATAL(odp_pktio_capability(pktio, &capa) == 0);
	if (!capa.set_op.op.promisc_mode) {
		printf("promiscuous mode not supported\n");
		ret = odp_pktio_close(pktio);
		CU_ASSERT(ret == 0);
		return;
	}

	ret = odp_pktio_promisc_mode_set(pktio, 1);
	CU_ASSERT(0 == ret);

	/* Verify that promisc mode set */
	ret = odp_pktio_promisc_mode(pktio);
	CU_ASSERT(1 == ret);

	ret = odp_pktio_promisc_mode_set(pktio, 0);
	CU_ASSERT(0 == ret);

	/* Verify that promisc mode is not set */
	ret = odp_pktio_promisc_mode(pktio);
	CU_ASSERT(0 == ret);

	ret = odp_pktio_close(pktio);
	CU_ASSERT(ret == 0);
}
+
/* Read the interface MAC address, verify the reported length, and check
 * that an undersized caller buffer is rejected with a negative return. */
void pktio_test_mac(void)
{
	unsigned char mac_addr[ODP_PKTIO_MACADDR_MAXSIZE];
	int mac_len;
	int ret;
	odp_pktio_t pktio;

	pktio = create_pktio(0, ODP_PKTIN_MODE_SCHED,
			     ODP_PKTOUT_MODE_DIRECT);
	CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);

	printf("testing mac for %s\n", iface_name[0]);

	mac_len = odp_pktio_mac_addr(pktio, mac_addr,
				     ODP_PKTIO_MACADDR_MAXSIZE);
	/* Ethernet address expected: 6 bytes, within the API maximum */
	CU_ASSERT(ODPH_ETHADDR_LEN == mac_len);
	CU_ASSERT(ODP_PKTIO_MACADDR_MAXSIZE >= mac_len);

	printf(" %X:%X:%X:%X:%X:%X ",
	       mac_addr[0], mac_addr[1], mac_addr[2],
	       mac_addr[3], mac_addr[4], mac_addr[5]);

	/* Fail case: wrong addr_size. Expected <0. */
	mac_len = odp_pktio_mac_addr(pktio, mac_addr, 2);
	CU_ASSERT(mac_len < 0);

	ret = odp_pktio_close(pktio);
	CU_ASSERT(0 == ret);
}
+
+void pktio_test_open(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_param_t pktio_param;
+ int i;
+
+ /* test the sequence open->close->open->close() */
+ for (i = 0; i < 2; ++i) {
+ pktio = create_pktio(0, ODP_PKTIN_MODE_SCHED,
+ ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+ CU_ASSERT(odp_pktio_close(pktio) == 0);
+ }
+
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
+
+ pktio = odp_pktio_open("nothere", default_pkt_pool, &pktio_param);
+ CU_ASSERT(pktio == ODP_PKTIO_INVALID);
+}
+
/* Verify odp_pktio_lookup() finds an open interface by name, that a second
 * open of the same device fails with errno set, and that lookup fails once
 * the interface is closed. */
void pktio_test_lookup(void)
{
	odp_pktio_t pktio, pktio_inval;
	odp_pktio_param_t pktio_param;

	odp_pktio_param_init(&pktio_param);
	pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;

	pktio = odp_pktio_open(iface_name[0], default_pkt_pool, &pktio_param);
	CU_ASSERT(pktio != ODP_PKTIO_INVALID);

	CU_ASSERT(odp_pktio_lookup(iface_name[0]) == pktio);

	/* Double open must fail */
	pktio_inval = odp_pktio_open(iface_name[0], default_pkt_pool,
				     &pktio_param);
	CU_ASSERT(odp_errno() != 0);
	CU_ASSERT(pktio_inval == ODP_PKTIO_INVALID);

	CU_ASSERT(odp_pktio_close(pktio) == 0);

	CU_ASSERT(odp_pktio_lookup(iface_name[0]) == ODP_PKTIO_INVALID);
}
+
+void pktio_test_index(void)
+{
+ odp_pktio_t pktio, pktio_inval = ODP_PKTIO_INVALID;
+ odp_pktio_param_t pktio_param;
+ int ndx;
+
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
+
+ pktio = odp_pktio_open(iface_name[0], default_pkt_pool, &pktio_param);
+ CU_ASSERT(pktio != ODP_PKTIO_INVALID);
+
+ ndx = odp_pktio_index(pktio);
+ CU_ASSERT(ndx >= 0);
+ CU_ASSERT(odp_pktio_index(pktio_inval) < 0);
+
+ CU_ASSERT(odp_pktio_close(pktio) == 0);
+ CU_ASSERT(odp_pktio_index(pktio) < 0);
+}
+
+static void pktio_test_print(void)
+{
+ odp_pktio_t pktio;
+ int i;
+
+ for (i = 0; i < num_ifaces; ++i) {
+ pktio = create_pktio(i, ODP_PKTIN_MODE_QUEUE,
+ ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+
+ /* Print pktio debug info and test that the
+ * odp_pktio_print() function is implemented. */
+ odp_pktio_print(pktio);
+
+ CU_ASSERT(odp_pktio_close(pktio) == 0);
+ }
+}
+
/* Exercise odp_pktio_config(): a NULL config (defaults), a
 * default-initialized config, and the interface's full capability set must
 * all be accepted. */
void pktio_test_pktio_config(void)
{
	odp_pktio_t pktio;
	odp_pktio_capability_t capa;
	odp_pktio_config_t config;

	pktio = create_pktio(0, ODP_PKTIN_MODE_DIRECT, ODP_PKTOUT_MODE_DIRECT);
	CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);

	odp_pktio_config_init(&config);

	/* NULL selects the default configuration */
	CU_ASSERT(odp_pktio_config(pktio, NULL) == 0);

	CU_ASSERT(odp_pktio_config(pktio, &config) == 0);

	CU_ASSERT_FATAL(odp_pktio_capability(pktio, &capa) == 0);

	/* Everything the interface claims to support must be configurable */
	config = capa.config;
	CU_ASSERT(odp_pktio_config(pktio, &config) == 0);

	CU_ASSERT_FATAL(odp_pktio_close(pktio) == 0);
}
+
/* Check that odp_pktio_info() reports the name, pool and I/O modes each
 * interface was opened with, and that an invalid handle is rejected. */
void pktio_test_info(void)
{
	odp_pktio_t pktio;
	odp_pktio_info_t pktio_info;
	int i;

	for (i = 0; i < num_ifaces; i++) {
		pktio = create_pktio(i, ODP_PKTIN_MODE_QUEUE,
				     ODP_PKTOUT_MODE_DIRECT);
		CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);

		CU_ASSERT_FATAL(odp_pktio_info(pktio, &pktio_info) == 0);

		printf("pktio %d\n name %s\n driver %s\n", i,
		       pktio_info.name, pktio_info.drv_name);

		CU_ASSERT(strcmp(pktio_info.name, iface_name[i]) == 0);
		CU_ASSERT(pktio_info.pool == pool[i]);
		CU_ASSERT(pktio_info.param.in_mode == ODP_PKTIN_MODE_QUEUE);
		CU_ASSERT(pktio_info.param.out_mode == ODP_PKTOUT_MODE_DIRECT);

		CU_ASSERT(odp_pktio_info(ODP_PKTIO_INVALID, &pktio_info) < 0);

		CU_ASSERT(odp_pktio_close(pktio) == 0);
	}
}
+
/* Input queue configuration in DIRECT mode: configure the maximum number of
 * hashed input queues, verify they are visible via odp_pktin_queue() but not
 * as event queues, reconfigure down to a single MT-unsafe queue, and check
 * that an invalid handle and an over-limit queue count are rejected. */
void pktio_test_pktin_queue_config_direct(void)
{
	odp_pktio_t pktio;
	odp_pktio_capability_t capa;
	odp_pktin_queue_param_t queue_param;
	odp_pktin_queue_t pktin_queues[MAX_QUEUES];
	odp_queue_t in_queues[MAX_QUEUES];
	int num_queues;

	pktio = create_pktio(0, ODP_PKTIN_MODE_DIRECT, ODP_PKTOUT_MODE_DIRECT);
	CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);

	/* Capability query must fail on an invalid handle */
	CU_ASSERT(odp_pktio_capability(ODP_PKTIO_INVALID, &capa) < 0);

	CU_ASSERT_FATAL(odp_pktio_capability(pktio, &capa) == 0 &&
			capa.max_input_queues > 0);
	num_queues = capa.max_input_queues;

	odp_pktin_queue_param_init(&queue_param);

	/* Hashing only makes sense with more than one queue */
	queue_param.hash_enable = (num_queues > 1) ? 1 : 0;
	queue_param.hash_proto.proto.ipv4_udp = 1;
	queue_param.num_queues = num_queues;
	CU_ASSERT_FATAL(odp_pktin_queue_config(pktio, &queue_param) == 0);

	CU_ASSERT(odp_pktin_queue(pktio, pktin_queues, MAX_QUEUES)
		  == num_queues);
	/* DIRECT mode has no event queues */
	CU_ASSERT(odp_pktin_event_queue(pktio, in_queues, MAX_QUEUES) < 0);

	queue_param.op_mode = ODP_PKTIO_OP_MT_UNSAFE;
	queue_param.num_queues = 1;
	CU_ASSERT_FATAL(odp_pktin_queue_config(pktio, &queue_param) == 0);

	CU_ASSERT(odp_pktin_queue_config(ODP_PKTIO_INVALID, &queue_param) < 0);

	queue_param.num_queues = capa.max_input_queues + 1;
	CU_ASSERT(odp_pktin_queue_config(pktio, &queue_param) < 0);

	CU_ASSERT_FATAL(odp_pktio_close(pktio) == 0);
}
+
/* Input queue configuration in SCHED mode: configure the maximum number of
 * hashed input queues with scheduler parameters, verify they are visible as
 * event queues but not as direct pktin queues, then reconfigure to one
 * queue and check the over-limit count is rejected. */
void pktio_test_pktin_queue_config_sched(void)
{
	odp_pktio_t pktio;
	odp_pktio_capability_t capa;
	odp_pktin_queue_param_t queue_param;
	odp_pktin_queue_t pktin_queues[MAX_QUEUES];
	odp_queue_t in_queues[MAX_QUEUES];
	int num_queues;

	pktio = create_pktio(0, ODP_PKTIN_MODE_SCHED, ODP_PKTOUT_MODE_DIRECT);
	CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);

	CU_ASSERT_FATAL(odp_pktio_capability(pktio, &capa) == 0 &&
			capa.max_input_queues > 0);
	num_queues = capa.max_input_queues;

	odp_pktin_queue_param_init(&queue_param);

	queue_param.hash_enable = (num_queues > 1) ? 1 : 0;
	queue_param.hash_proto.proto.ipv4_udp = 1;
	queue_param.num_queues = num_queues;
	queue_param.queue_param.sched.group = ODP_SCHED_GROUP_ALL;
	queue_param.queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
	CU_ASSERT_FATAL(odp_pktin_queue_config(pktio, &queue_param) == 0);

	CU_ASSERT(odp_pktin_event_queue(pktio, in_queues, MAX_QUEUES)
		  == num_queues);
	/* SCHED mode has no direct pktin queues */
	CU_ASSERT(odp_pktin_queue(pktio, pktin_queues, MAX_QUEUES) < 0);

	queue_param.num_queues = 1;
	CU_ASSERT_FATAL(odp_pktin_queue_config(pktio, &queue_param) == 0);

	queue_param.num_queues = capa.max_input_queues + 1;
	CU_ASSERT(odp_pktin_queue_config(pktio, &queue_param) < 0);

	CU_ASSERT_FATAL(odp_pktio_close(pktio) == 0);
}
+
/* Input queue configuration in QUEUE (plain event queue) mode: configure
 * the maximum number of hashed input queues, verify they are visible as
 * event queues but not as direct pktin queues, then reconfigure to one
 * queue and check the over-limit count is rejected. */
void pktio_test_pktin_queue_config_queue(void)
{
	odp_pktio_t pktio;
	odp_pktio_capability_t capa;
	odp_pktin_queue_param_t queue_param;
	odp_pktin_queue_t pktin_queues[MAX_QUEUES];
	odp_queue_t in_queues[MAX_QUEUES];
	int num_queues;

	pktio = create_pktio(0, ODP_PKTIN_MODE_QUEUE, ODP_PKTOUT_MODE_DIRECT);
	CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);

	CU_ASSERT_FATAL(odp_pktio_capability(pktio, &capa) == 0 &&
			capa.max_input_queues > 0);
	num_queues = capa.max_input_queues;

	odp_pktin_queue_param_init(&queue_param);

	queue_param.hash_enable = (num_queues > 1) ? 1 : 0;
	queue_param.hash_proto.proto.ipv4_udp = 1;
	queue_param.num_queues = num_queues;
	CU_ASSERT_FATAL(odp_pktin_queue_config(pktio, &queue_param) == 0);

	CU_ASSERT(odp_pktin_event_queue(pktio, in_queues, MAX_QUEUES)
		  == num_queues);
	/* QUEUE mode has no direct pktin queues */
	CU_ASSERT(odp_pktin_queue(pktio, pktin_queues, MAX_QUEUES) < 0);

	queue_param.num_queues = 1;
	CU_ASSERT_FATAL(odp_pktin_queue_config(pktio, &queue_param) == 0);

	queue_param.num_queues = capa.max_input_queues + 1;
	CU_ASSERT(odp_pktin_queue_config(pktio, &queue_param) < 0);

	CU_ASSERT(odp_pktio_close(pktio) == 0);
}
+
/* Output queue configuration: configure the maximum number of MT-unsafe
 * output queues, verify they are reported by odp_pktout_queue(),
 * reconfigure to a single MT-safe queue, and check that an invalid handle
 * and an over-limit queue count are rejected. */
void pktio_test_pktout_queue_config(void)
{
	odp_pktio_t pktio;
	odp_pktio_capability_t capa;
	odp_pktout_queue_param_t queue_param;
	odp_pktout_queue_t pktout_queues[MAX_QUEUES];
	int num_queues;

	pktio = create_pktio(0, ODP_PKTIN_MODE_DIRECT, ODP_PKTOUT_MODE_DIRECT);
	CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);

	CU_ASSERT_FATAL(odp_pktio_capability(pktio, &capa) == 0 &&
			capa.max_output_queues > 0);
	num_queues = capa.max_output_queues;

	odp_pktout_queue_param_init(&queue_param);

	queue_param.op_mode = ODP_PKTIO_OP_MT_UNSAFE;
	queue_param.num_queues = num_queues;
	CU_ASSERT(odp_pktout_queue_config(pktio, &queue_param) == 0);

	CU_ASSERT(odp_pktout_queue(pktio, pktout_queues, MAX_QUEUES)
		  == num_queues);

	queue_param.op_mode = ODP_PKTIO_OP_MT;
	queue_param.num_queues = 1;
	CU_ASSERT(odp_pktout_queue_config(pktio, &queue_param) == 0);

	CU_ASSERT(odp_pktout_queue_config(ODP_PKTIO_INVALID, &queue_param) < 0);

	queue_param.num_queues = capa.max_output_queues + 1;
	CU_ASSERT(odp_pktout_queue_config(pktio, &queue_param) < 0);

	CU_ASSERT(odp_pktio_close(pktio) == 0);
}
+
#ifdef DEBUG_STATS
/* Dump one interface's statistics counter set to stderr (debug builds
 * only; compiled in when DEBUG_STATS is defined). */
static void _print_pktio_stats(odp_pktio_stats_t *s, const char *name)
{
	fprintf(stderr, "\n%s:\n"
		"  in_octets %" PRIu64 "\n"
		"  in_ucast_pkts %" PRIu64 "\n"
		"  in_discards %" PRIu64 "\n"
		"  in_errors %" PRIu64 "\n"
		"  in_unknown_protos %" PRIu64 "\n"
		"  out_octets %" PRIu64 "\n"
		"  out_ucast_pkts %" PRIu64 "\n"
		"  out_discards %" PRIu64 "\n"
		"  out_errors %" PRIu64 "\n",
		name,
		s->in_octets,
		s->in_ucast_pkts,
		s->in_discards,
		s->in_errors,
		s->in_unknown_protos,
		s->out_octets,
		s->out_ucast_pkts,
		s->out_discards,
		s->out_errors);
}
#endif
+
/* Some pktio types (e.g. netmap) support various methods to read
 * statistics counters. ethtool strings are not standardised and sysfs may
 * not be supported, so run the statistics test only when odp_pktio_stats()
 * works on the test interface. */
int pktio_check_statistics_counters(void)
{
	odp_pktio_t pktio;
	odp_pktio_stats_t stats;
	int ret;
	odp_pktio_param_t pktio_param;
	const char *iface = iface_name[0];

	odp_pktio_param_init(&pktio_param);
	pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;

	pktio = odp_pktio_open(iface, pool[0], &pktio_param);
	if (pktio == ODP_PKTIO_INVALID)
		return ODP_TEST_INACTIVE;

	ret = odp_pktio_stats(pktio, &stats);
	(void)odp_pktio_close(pktio);

	if (ret == 0)
		return ODP_TEST_ACTIVE;

	return ODP_TEST_INACTIVE;
}
+
/* Send a batch of test packets and verify the statistics counters:
 * after a stats reset, the in/out packet and octet counters must either be
 * unsupported (reported as 0) or consistent with the traffic sent, and all
 * error/discard counters must be zero. */
void pktio_test_statistics_counters(void)
{
	odp_pktio_t pktio_rx, pktio_tx;
	odp_pktio_t pktio[MAX_NUM_IFACES];
	odp_packet_t pkt;
	odp_packet_t tx_pkt[1000];
	uint32_t pkt_seq[1000];
	odp_event_t ev;
	int i, pkts, tx_pkts, ret, alloc = 0;
	odp_pktout_queue_t pktout;
	uint64_t wait = odp_schedule_wait_time(ODP_TIME_MSEC_IN_NS);
	odp_pktio_stats_t stats[2];

	for (i = 0; i < num_ifaces; i++) {
		pktio[i] = create_pktio(i, ODP_PKTIN_MODE_SCHED,
					ODP_PKTOUT_MODE_DIRECT);

		CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
	}
	pktio_tx = pktio[0];
	pktio_rx = (num_ifaces > 1) ? pktio[1] : pktio_tx;

	CU_ASSERT(odp_pktout_queue(pktio_tx, &pktout, 1) == 1);

	ret = odp_pktio_start(pktio_tx);
	CU_ASSERT(ret == 0);
	if (num_ifaces > 1) {
		ret = odp_pktio_start(pktio_rx);
		CU_ASSERT(ret == 0);
	}

	/* flush packets with magic number in pipes */
	for (i = 0; i < 1000; i++) {
		ev = odp_schedule(NULL, wait);
		if (ev != ODP_EVENT_INVALID)
			odp_event_free(ev);
	}

	alloc = create_packets(tx_pkt, pkt_seq, 1000, pktio_tx, pktio_rx);

	/* Zero the counters so the checks below see only this batch */
	ret = odp_pktio_stats_reset(pktio_tx);
	CU_ASSERT(ret == 0);
	if (num_ifaces > 1) {
		ret = odp_pktio_stats_reset(pktio_rx);
		CU_ASSERT(ret == 0);
	}

	/* send */
	for (pkts = 0; pkts != alloc; ) {
		ret = odp_pktout_send(pktout, &tx_pkt[pkts], alloc - pkts);
		if (ret < 0) {
			CU_FAIL("unable to send packet\n");
			break;
		}
		pkts += ret;
	}
	tx_pkts = pkts;

	/* get; only count packets carrying the test sequence number */
	for (i = 0, pkts = 0; i < 1000 && pkts != tx_pkts; i++) {
		ev = odp_schedule(NULL, wait);
		if (ev != ODP_EVENT_INVALID) {
			if (odp_event_type(ev) == ODP_EVENT_PACKET) {
				pkt = odp_packet_from_event(ev);
				if (pktio_pkt_seq(pkt) != TEST_SEQ_INVALID)
					pkts++;
			}
			odp_event_free(ev);
		}
	}

	CU_ASSERT(pkts == tx_pkts);

	ret = odp_pktio_stats(pktio_tx, &stats[0]);
	CU_ASSERT(ret == 0);

	/* A counter value of 0 is taken to mean "not supported" */
	if (num_ifaces > 1) {
		ret = odp_pktio_stats(pktio_rx, &stats[1]);
		CU_ASSERT(ret == 0);
		CU_ASSERT((stats[1].in_ucast_pkts == 0) ||
			  (stats[1].in_ucast_pkts >= (uint64_t)pkts));
		CU_ASSERT((stats[0].out_octets == 0) ||
			  (stats[0].out_octets >=
			   (PKT_LEN_NORMAL * (uint64_t)pkts)));
	} else {
		CU_ASSERT((stats[0].in_ucast_pkts == 0) ||
			  (stats[0].in_ucast_pkts == (uint64_t)pkts));
		CU_ASSERT((stats[0].in_octets == 0) ||
			  (stats[0].in_octets ==
			   (PKT_LEN_NORMAL * (uint64_t)pkts)));
	}

	CU_ASSERT(0 == stats[0].in_discards);
	CU_ASSERT(0 == stats[0].in_errors);
	CU_ASSERT(0 == stats[0].in_unknown_protos);
	CU_ASSERT(0 == stats[0].out_discards);
	CU_ASSERT(0 == stats[0].out_errors);

	for (i = 0; i < num_ifaces; i++) {
		CU_ASSERT(odp_pktio_stop(pktio[i]) == 0);
#ifdef DEBUG_STATS
		_print_pktio_stats(&stats[i], iface_name[i]);
#endif
		flush_input_queue(pktio[i], ODP_PKTIN_MODE_SCHED);
		CU_ASSERT(odp_pktio_close(pktio[i]) == 0);
	}
}
+
/* Verify interface start/stop semantics: stop-when-stopped and
 * start-when-started must fail, packets sent towards a stopped receiver
 * must not be delivered, and after starting all interfaces traffic flows
 * normally. Finally check that scheduling after stop/close is harmless. */
void pktio_test_start_stop(void)
{
	odp_pktio_t pktio[MAX_NUM_IFACES];
	odp_pktio_t pktio_in;
	odp_packet_t pkt;
	odp_packet_t tx_pkt[1000];
	uint32_t pkt_seq[1000];
	odp_event_t ev;
	int i, pkts, ret, alloc = 0;
	odp_pktout_queue_t pktout;
	uint64_t wait = odp_schedule_wait_time(ODP_TIME_MSEC_IN_NS);

	for (i = 0; i < num_ifaces; i++) {
		pktio[i] = create_pktio(i, ODP_PKTIN_MODE_SCHED,
					ODP_PKTOUT_MODE_DIRECT);
		CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
	}

	CU_ASSERT(odp_pktout_queue(pktio[0], &pktout, 1) == 1);

	/* Interfaces are stopped by default,
	 * Check that stop when stopped generates an error */
	ret = odp_pktio_stop(pktio[0]);
	CU_ASSERT(ret < 0);

	/* start first */
	ret = odp_pktio_start(pktio[0]);
	CU_ASSERT(ret == 0);
	/* Check that start when started generates an error */
	ret = odp_pktio_start(pktio[0]);
	CU_ASSERT(ret < 0);

	_pktio_wait_linkup(pktio[0]);

	/* Test Rx on a stopped interface. Only works if there are 2 */
	if (num_ifaces > 1) {
		alloc = create_packets(tx_pkt, pkt_seq, 1000, pktio[0],
				       pktio[1]);

		for (pkts = 0; pkts != alloc; ) {
			ret = odp_pktout_send(pktout, &tx_pkt[pkts],
					      alloc - pkts);
			if (ret < 0) {
				CU_FAIL("unable to enqueue packet\n");
				break;
			}
			pkts += ret;
		}
		/* check that packets did not arrive */
		for (i = 0, pkts = 0; i < 1000; i++) {
			ev = odp_schedule(NULL, wait);
			if (ev == ODP_EVENT_INVALID)
				continue;

			if (odp_event_type(ev) == ODP_EVENT_PACKET) {
				pkt = odp_packet_from_event(ev);
				if (pktio_pkt_seq(pkt) != TEST_SEQ_INVALID)
					pkts++;
			}
			odp_event_free(ev);
		}
		if (pkts)
			CU_FAIL("pktio stopped, received unexpected events");

		/* start both, send and get packets */
		/* 0 already started */
		ret = odp_pktio_start(pktio[1]);
		CU_ASSERT(ret == 0);

		_pktio_wait_linkup(pktio[1]);

		/* flush packets with magic number in pipes */
		for (i = 0; i < 1000; i++) {
			ev = odp_schedule(NULL, wait);
			if (ev != ODP_EVENT_INVALID)
				odp_event_free(ev);
		}
	}

	if (num_ifaces > 1)
		pktio_in = pktio[1];
	else
		pktio_in = pktio[0];

	alloc = create_packets(tx_pkt, pkt_seq, 1000, pktio[0], pktio_in);

	/* send */
	for (pkts = 0; pkts != alloc; ) {
		ret = odp_pktout_send(pktout, &tx_pkt[pkts], alloc - pkts);
		if (ret < 0) {
			CU_FAIL("unable to enqueue packet\n");
			break;
		}
		pkts += ret;
	}

	/* get; count only packets carrying the test sequence number */
	for (i = 0, pkts = 0; i < 1000; i++) {
		ev = odp_schedule(NULL, wait);
		if (ev != ODP_EVENT_INVALID) {
			if (odp_event_type(ev) == ODP_EVENT_PACKET) {
				pkt = odp_packet_from_event(ev);
				if (pktio_pkt_seq(pkt) != TEST_SEQ_INVALID)
					pkts++;
			}
			odp_event_free(ev);
		}
	}
	CU_ASSERT(pkts == alloc);

	for (i = 0; i < num_ifaces; i++) {
		CU_ASSERT(odp_pktio_stop(pktio[i]) == 0);
		CU_ASSERT(odp_pktio_close(pktio[i]) == 0);
	}

	/* Verify that a schedule call after stop and close does not generate
	   errors. */
	ev = odp_schedule(NULL, wait);
	CU_ASSERT(ev == ODP_EVENT_INVALID);
	if (ev != ODP_EVENT_INVALID)
		odp_event_free(ev);
}
+
+/*
+ * This is a pre-condition check that the pktio_test_send_failure()
+ * test case can be run. If the TX interface MTU is larger that the
+ * biggest packet we can allocate then the test won't be able to
+ * attempt to send packets larger than the MTU, so skip the test.
+ */
+int pktio_check_send_failure(void)
+{
+ odp_pktio_t pktio_tx;
+ uint32_t mtu;
+ odp_pktio_param_t pktio_param;
+ int iface_idx = 0;
+ const char *iface = iface_name[iface_idx];
+ odp_pool_capability_t pool_capa;
+
+ if (odp_pool_capability(&pool_capa) < 0) {
+ fprintf(stderr, "%s: pool capability failed\n", __func__);
+ return ODP_TEST_INACTIVE;
+ };
+
+ memset(&pktio_param, 0, sizeof(pktio_param));
+
+ pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT;
+
+ pktio_tx = odp_pktio_open(iface, pool[iface_idx], &pktio_param);
+ if (pktio_tx == ODP_PKTIO_INVALID) {
+ fprintf(stderr, "%s: failed to open pktio\n", __func__);
+ return ODP_TEST_INACTIVE;
+ }
+
+ /* read the MTU from the transmit interface */
+ mtu = odp_pktio_mtu(pktio_tx);
+
+ odp_pktio_close(pktio_tx);
+
+ if (mtu <= pool_capa.pkt.max_len - 32)
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
+}
+
/* Verify partial-send semantics when a packet exceeds the interface MTU:
 * a batch with an oversized packet in the middle must transmit only the
 * packets before it, a send starting at the oversized packet must fail with
 * errno set, and after trimming the packet to a normal length the remainder
 * of the batch must transmit successfully. */
void pktio_test_send_failure(void)
{
	odp_pktio_t pktio_tx, pktio_rx;
	odp_packet_t pkt_tbl[TX_BATCH_LEN];
	uint32_t pkt_seq[TX_BATCH_LEN];
	int ret, i, alloc_pkts;
	uint32_t mtu;
	odp_pool_param_t pool_params;
	odp_pool_t pkt_pool;
	int long_pkt_idx = TX_BATCH_LEN / 2;
	pktio_info_t info_rx;
	odp_pktout_queue_t pktout;

	pktio_tx = create_pktio(0, ODP_PKTIN_MODE_DIRECT,
				ODP_PKTOUT_MODE_DIRECT);
	if (pktio_tx == ODP_PKTIO_INVALID) {
		CU_FAIL("failed to open pktio");
		return;
	}

	CU_ASSERT_FATAL(odp_pktout_queue(pktio_tx, &pktout, 1) == 1);

	/* read the MTU from the transmit interface */
	mtu = odp_pktio_mtu(pktio_tx);

	ret = odp_pktio_start(pktio_tx);
	CU_ASSERT_FATAL(ret == 0);

	_pktio_wait_linkup(pktio_tx);

	/* configure the pool so that we can generate test packets larger
	 * than the interface MTU */
	memset(&pool_params, 0, sizeof(pool_params));
	pool_params.pkt.len = mtu + 32;
	pool_params.pkt.seg_len = pool_params.pkt.len;
	pool_params.pkt.num = TX_BATCH_LEN + 1;
	pool_params.type = ODP_POOL_PACKET;
	pkt_pool = odp_pool_create("pkt_pool_oversize", &pool_params);
	CU_ASSERT_FATAL(pkt_pool != ODP_POOL_INVALID);

	if (num_ifaces > 1) {
		pktio_rx = create_pktio(1, ODP_PKTIN_MODE_DIRECT,
					ODP_PKTOUT_MODE_DIRECT);
		ret = odp_pktio_start(pktio_rx);
		CU_ASSERT_FATAL(ret == 0);

		_pktio_wait_linkup(pktio_rx);
	} else {
		pktio_rx = pktio_tx;
	}

	/* generate a batch of packets with a single overly long packet
	 * in the middle */
	for (i = 0; i < TX_BATCH_LEN; ++i) {
		uint32_t pkt_len;

		if (i == long_pkt_idx)
			pkt_len = pool_params.pkt.len;
		else
			pkt_len = PKT_LEN_NORMAL;

		pkt_tbl[i] = odp_packet_alloc(pkt_pool, pkt_len);
		if (pkt_tbl[i] == ODP_PACKET_INVALID)
			break;

		pkt_seq[i] = pktio_init_packet(pkt_tbl[i]);

		pktio_pkt_set_macs(pkt_tbl[i], pktio_tx, pktio_rx);
		if (pktio_fixup_checksums(pkt_tbl[i]) != 0) {
			odp_packet_free(pkt_tbl[i]);
			break;
		}

		if (pkt_seq[i] == TEST_SEQ_INVALID) {
			odp_packet_free(pkt_tbl[i]);
			break;
		}
	}
	alloc_pkts = i;

	if (alloc_pkts == TX_BATCH_LEN) {
		/* try to send the batch with the long packet in the middle,
		 * the initial short packets should be sent successfully */
		odp_errno_zero();
		ret = odp_pktout_send(pktout, pkt_tbl, TX_BATCH_LEN);
		CU_ASSERT_FATAL(ret == long_pkt_idx);
		CU_ASSERT(odp_errno() == 0);

		info_rx.id = pktio_rx;
		info_rx.inq = ODP_QUEUE_INVALID;
		info_rx.in_mode = ODP_PKTIN_MODE_DIRECT;

		i = wait_for_packets(&info_rx, pkt_tbl, pkt_seq, ret,
				     TXRX_MODE_MULTI, ODP_TIME_SEC_IN_NS);

		if (i == ret) {
			/* now try to send starting with the too-long packet
			 * and verify it fails */
			odp_errno_zero();
			ret = odp_pktout_send(pktout,
					      &pkt_tbl[long_pkt_idx],
					      TX_BATCH_LEN - long_pkt_idx);
			CU_ASSERT(ret == -1);
			CU_ASSERT(odp_errno() != 0);
		} else {
			CU_FAIL("failed to receive transmitted packets\n");
		}

		/* now reduce the size of the long packet and attempt to send
		 * again - should work this time */
		i = long_pkt_idx;
		odp_packet_pull_tail(pkt_tbl[i],
				     odp_packet_len(pkt_tbl[i]) -
				     PKT_LEN_NORMAL);
		pkt_seq[i] = pktio_init_packet(pkt_tbl[i]);

		pktio_pkt_set_macs(pkt_tbl[i], pktio_tx, pktio_rx);
		ret = pktio_fixup_checksums(pkt_tbl[i]);
		CU_ASSERT_FATAL(ret == 0);

		CU_ASSERT_FATAL(pkt_seq[i] != TEST_SEQ_INVALID);
		ret = odp_pktout_send(pktout, &pkt_tbl[i], TX_BATCH_LEN - i);
		CU_ASSERT_FATAL(ret == (TX_BATCH_LEN - i));

		i = wait_for_packets(&info_rx, &pkt_tbl[i], &pkt_seq[i], ret,
				     TXRX_MODE_MULTI, ODP_TIME_SEC_IN_NS);
		CU_ASSERT(i == ret);
	} else {
		CU_FAIL("failed to generate test packets\n");
	}

	for (i = 0; i < alloc_pkts; ++i) {
		if (pkt_tbl[i] != ODP_PACKET_INVALID)
			odp_packet_free(pkt_tbl[i]);
	}

	if (pktio_rx != pktio_tx) {
		CU_ASSERT(odp_pktio_stop(pktio_rx) == 0);
		CU_ASSERT(odp_pktio_close(pktio_rx) == 0);
	}
	CU_ASSERT(odp_pktio_stop(pktio_tx) == 0);
	CU_ASSERT(odp_pktio_close(pktio_tx) == 0);
	CU_ASSERT(odp_pool_destroy(pkt_pool) == 0);
}
+
+void pktio_test_recv_on_wonly(void)
+{
+ odp_pktio_t pktio;
+ int ret;
+ odp_pktin_queue_t pktin;
+
+ pktio = create_pktio(0, ODP_PKTIN_MODE_DISABLED,
+ ODP_PKTOUT_MODE_DIRECT);
+
+ if (pktio == ODP_PKTIO_INVALID) {
+ CU_FAIL("failed to open pktio");
+ return;
+ }
+
+ CU_ASSERT(odp_pktin_queue(pktio, &pktin, 1) == 0);
+
+ ret = odp_pktio_start(pktio);
+ CU_ASSERT_FATAL(ret == 0);
+
+ _pktio_wait_linkup(pktio);
+
+ ret = odp_pktio_stop(pktio);
+ CU_ASSERT_FATAL(ret == 0);
+
+ ret = odp_pktio_close(pktio);
+ CU_ASSERT_FATAL(ret == 0);
+}
+
+void pktio_test_send_on_ronly(void)
+{
+ odp_pktio_t pktio;
+ int ret;
+ odp_pktout_queue_t pktout;
+
+ pktio = create_pktio(0, ODP_PKTIN_MODE_DIRECT,
+ ODP_PKTOUT_MODE_DISABLED);
+
+ if (pktio == ODP_PKTIO_INVALID) {
+ CU_FAIL("failed to open pktio");
+ return;
+ }
+
+ CU_ASSERT(odp_pktout_queue(pktio, &pktout, 1) == 0);
+
+ ret = odp_pktio_start(pktio);
+ CU_ASSERT_FATAL(ret == 0);
+
+ _pktio_wait_linkup(pktio);
+
+ ret = odp_pktio_stop(pktio);
+ CU_ASSERT_FATAL(ret == 0);
+
+ ret = odp_pktio_close(pktio);
+ CU_ASSERT_FATAL(ret == 0);
+}
+
/* Activate the input timestamp test only when the interface reports the
 * ts_all capability (timestamping of all received packets). */
int pktio_check_pktin_ts(void)
{
	odp_pktio_t pktio;
	odp_pktio_capability_t capa;
	odp_pktio_param_t pktio_param;
	int ret;

	odp_pktio_param_init(&pktio_param);
	pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT;

	pktio = odp_pktio_open(iface_name[0], pool[0], &pktio_param);
	if (pktio == ODP_PKTIO_INVALID)
		return ODP_TEST_INACTIVE;

	ret = odp_pktio_capability(pktio, &capa);
	(void)odp_pktio_close(pktio);

	if (ret < 0 || !capa.config.pktin.bit.ts_all)
		return ODP_TEST_INACTIVE;

	return ODP_TEST_ACTIVE;
}
+
/* Verify packet input timestamping: sanity-check the timestamp resolution
 * and ns conversion helpers, then send packets one at a time with a delay
 * between them and check the received timestamps are strictly increasing. */
void pktio_test_pktin_ts(void)
{
	odp_pktio_t pktio_tx, pktio_rx;
	odp_pktio_t pktio[MAX_NUM_IFACES];
	pktio_info_t pktio_rx_info;
	odp_pktio_capability_t capa;
	odp_pktio_config_t config;
	odp_pktout_queue_t pktout_queue;
	odp_packet_t pkt_tbl[TX_BATCH_LEN];
	uint32_t pkt_seq[TX_BATCH_LEN];
	uint64_t ns1, ns2;
	uint64_t res;
	odp_time_t ts_prev;
	odp_time_t ts;
	int num_rx = 0;
	int ret;
	int i;

	CU_ASSERT_FATAL(num_ifaces >= 1);

	/* Open and configure interfaces with timestamping enabled */
	for (i = 0; i < num_ifaces; ++i) {
		pktio[i] = create_pktio(i, ODP_PKTIN_MODE_DIRECT,
					ODP_PKTOUT_MODE_DIRECT);
		CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);

		CU_ASSERT_FATAL(odp_pktio_capability(pktio[i], &capa) == 0);
		CU_ASSERT_FATAL(capa.config.pktin.bit.ts_all);

		odp_pktio_config_init(&config);
		config.pktin.bit.ts_all = 1;
		CU_ASSERT_FATAL(odp_pktio_config(pktio[i], &config) == 0);

		CU_ASSERT_FATAL(odp_pktio_start(pktio[i]) == 0);
	}

	for (i = 0; i < num_ifaces; i++)
		_pktio_wait_linkup(pktio[i]);

	pktio_tx = pktio[0];
	pktio_rx = (num_ifaces > 1) ? pktio[1] : pktio_tx;
	pktio_rx_info.id = pktio_rx;
	pktio_rx_info.inq = ODP_QUEUE_INVALID;
	pktio_rx_info.in_mode = ODP_PKTIN_MODE_DIRECT;

	/* Test odp_pktin_ts_res() and odp_pktin_ts_from_ns() */
	res = odp_pktin_ts_res(pktio_tx);
	CU_ASSERT(res > PKTIN_TS_MIN_RES);
	CU_ASSERT(res < PKTIN_TS_MAX_RES);
	ns1 = 100;
	ts = odp_pktin_ts_from_ns(pktio_tx, ns1);
	ns2 = odp_time_to_ns(ts);
	/* Allow some arithmetic tolerance */
	CU_ASSERT((ns2 <= (ns1 + PKTIN_TS_CMP_RES)) &&
		  (ns2 >= (ns1 - PKTIN_TS_CMP_RES)));

	ret = create_packets(pkt_tbl, pkt_seq, TX_BATCH_LEN, pktio_tx,
			     pktio_rx);
	CU_ASSERT_FATAL(ret == TX_BATCH_LEN);

	ret = odp_pktout_queue(pktio_tx, &pktout_queue, 1);
	CU_ASSERT_FATAL(ret > 0);

	/* Send packets one at a time and add delay between the packets */
	for (i = 0; i < TX_BATCH_LEN; i++) {
		CU_ASSERT_FATAL(odp_pktout_send(pktout_queue,
						&pkt_tbl[i], 1) == 1);
		ret = wait_for_packets(&pktio_rx_info, &pkt_tbl[i], &pkt_seq[i],
				       1, TXRX_MODE_SINGLE, ODP_TIME_SEC_IN_NS);
		if (ret != 1)
			break;
		odp_time_wait_ns(PKTIN_TS_INTERVAL);
	}
	num_rx = i;
	CU_ASSERT(num_rx == TX_BATCH_LEN);

	/* Timestamps must be strictly increasing */
	ts_prev = ODP_TIME_NULL;
	for (i = 0; i < num_rx; i++) {
		ts = odp_packet_ts(pkt_tbl[i]);

		CU_ASSERT(odp_time_cmp(ts, ts_prev) > 0);

		ts_prev = ts;
		odp_packet_free(pkt_tbl[i]);
	}

	for (i = 0; i < num_ifaces; i++) {
		CU_ASSERT_FATAL(odp_pktio_stop(pktio[i]) == 0);
		CU_ASSERT_FATAL(odp_pktio_close(pktio[i]) == 0);
	}
}
+
+static int create_pool(const char *iface, int num)
+{
+ char pool_name[ODP_POOL_NAME_LEN];
+ odp_pool_param_t params;
+
+ memset(&params, 0, sizeof(params));
+ set_pool_len(&params);
+ params.pkt.num = PKT_BUF_NUM;
+ params.type = ODP_POOL_PACKET;
+
+ snprintf(pool_name, sizeof(pool_name), "pkt_pool_%s_%d",
+ iface, pool_segmentation);
+
+ pool[num] = odp_pool_create(pool_name, &params);
+ if (ODP_POOL_INVALID == pool[num]) {
+ fprintf(stderr, "%s: failed to create pool: %d",
+ __func__, odp_errno());
+ return -1;
+ }
+
+ return 0;
+}
+
/* Common suite setup: read the test interface names from the
 * ODP_PKTIO_IF0/ODP_PKTIO_IF1 environment variables (defaulting to the
 * "loop" device), then create a packet pool per interface plus the default
 * pool. Returns 0 on success, -1 on failure. */
static int pktio_suite_init(void)
{
	int i;

	odp_atomic_init_u32(&ip_seq, 0);

	/* Optional knob for slow/virtual network setups */
	if (getenv("ODP_WAIT_FOR_NETWORK"))
		wait_for_network = true;

	iface_name[0] = getenv("ODP_PKTIO_IF0");
	iface_name[1] = getenv("ODP_PKTIO_IF1");
	num_ifaces = 1;

	if (!iface_name[0]) {
		printf("No interfaces specified, using default \"loop\".\n");
		iface_name[0] = "loop";
	} else if (!iface_name[1]) {
		printf("Using loopback interface: %s\n", iface_name[0]);
	} else {
		num_ifaces = 2;
		printf("Using paired interfaces: %s %s\n",
		       iface_name[0], iface_name[1]);
	}

	for (i = 0; i < num_ifaces; i++) {
		if (create_pool(iface_name[i], i) != 0)
			return -1;
	}

	if (default_pool_create() != 0) {
		fprintf(stderr, "error: failed to create default pool\n");
		return -1;
	}

	return 0;
}
+
/* Suite init variant: unsegmented (single-segment) packet pools. */
int pktio_suite_init_unsegmented(void)
{
	pool_segmentation = PKT_POOL_UNSEGMENTED;
	return pktio_suite_init();
}
+
/* Suite init variant: segmented (multi-segment) packet pools. */
int pktio_suite_init_segmented(void)
{
	pool_segmentation = PKT_POOL_SEGMENTED;
	return pktio_suite_init();
}
+
/* Suite teardown: destroy the per-interface pools (looked up by the name
 * they were created with) and the default pool. Returns 0 on success,
 * -1 if any destroy failed. */
int pktio_suite_term(void)
{
	char pool_name[ODP_POOL_NAME_LEN];
	odp_pool_t pool;
	int i;
	int ret = 0;

	for (i = 0; i < num_ifaces; ++i) {
		snprintf(pool_name, sizeof(pool_name),
			 "pkt_pool_%s_%d", iface_name[i], pool_segmentation);
		pool = odp_pool_lookup(pool_name);
		if (pool == ODP_POOL_INVALID)
			continue;

		if (odp_pool_destroy(pool) != 0) {
			fprintf(stderr, "error: failed to destroy pool %s\n",
				pool_name);
			ret = -1;
		}
	}

	if (odp_pool_destroy(default_pkt_pool) != 0) {
		fprintf(stderr, "error: failed to destroy default pool\n");
		ret = -1;
	}
	default_pkt_pool = ODP_POOL_INVALID;

	return ret;
}
+
/* Full test set, run against unsegmented packet pools. */
odp_testinfo_t pktio_suite_unsegmented[] = {
	ODP_TEST_INFO(pktio_test_open),
	ODP_TEST_INFO(pktio_test_lookup),
	ODP_TEST_INFO(pktio_test_index),
	ODP_TEST_INFO(pktio_test_print),
	ODP_TEST_INFO(pktio_test_pktio_config),
	ODP_TEST_INFO(pktio_test_info),
	ODP_TEST_INFO(pktio_test_pktin_queue_config_direct),
	ODP_TEST_INFO(pktio_test_pktin_queue_config_sched),
	ODP_TEST_INFO(pktio_test_pktin_queue_config_queue),
	ODP_TEST_INFO(pktio_test_pktout_queue_config),
	ODP_TEST_INFO(pktio_test_plain_queue),
	ODP_TEST_INFO(pktio_test_plain_multi),
	ODP_TEST_INFO(pktio_test_sched_queue),
	ODP_TEST_INFO(pktio_test_sched_multi),
	ODP_TEST_INFO(pktio_test_recv),
	ODP_TEST_INFO(pktio_test_recv_multi),
	ODP_TEST_INFO(pktio_test_recv_queue),
	ODP_TEST_INFO(pktio_test_recv_tmo),
	ODP_TEST_INFO(pktio_test_recv_mq_tmo),
	ODP_TEST_INFO(pktio_test_recv_mtu),
	ODP_TEST_INFO_CONDITIONAL(pktio_test_send_failure,
				  pktio_check_send_failure),
	ODP_TEST_INFO(pktio_test_mtu),
	ODP_TEST_INFO(pktio_test_promisc),
	ODP_TEST_INFO(pktio_test_mac),
	ODP_TEST_INFO(pktio_test_start_stop),
	ODP_TEST_INFO(pktio_test_recv_on_wonly),
	ODP_TEST_INFO(pktio_test_send_on_ronly),
	ODP_TEST_INFO(pktio_test_plain_multi_event),
	ODP_TEST_INFO(pktio_test_sched_multi_event),
	ODP_TEST_INFO(pktio_test_recv_multi_event),
	ODP_TEST_INFO_CONDITIONAL(pktio_test_statistics_counters,
				  pktio_check_statistics_counters),
	ODP_TEST_INFO_CONDITIONAL(pktio_test_pktin_ts,
				  pktio_check_pktin_ts),
	ODP_TEST_INFO_NULL
};

/* Subset of tests repeated with segmented packet pools. */
odp_testinfo_t pktio_suite_segmented[] = {
	ODP_TEST_INFO(pktio_test_plain_queue),
	ODP_TEST_INFO(pktio_test_plain_multi),
	ODP_TEST_INFO(pktio_test_sched_queue),
	ODP_TEST_INFO(pktio_test_sched_multi),
	ODP_TEST_INFO(pktio_test_recv),
	ODP_TEST_INFO(pktio_test_recv_multi),
	ODP_TEST_INFO(pktio_test_recv_mtu),
	ODP_TEST_INFO_CONDITIONAL(pktio_test_send_failure,
				  pktio_check_send_failure),
	ODP_TEST_INFO_NULL
};

/* CUnit suite registry: both pool variants share the teardown. */
odp_suiteinfo_t pktio_suites[] = {
	{"Packet I/O Unsegmented", pktio_suite_init_unsegmented,
	 pktio_suite_term, pktio_suite_unsegmented},
	{"Packet I/O Segmented", pktio_suite_init_segmented,
	 pktio_suite_term, pktio_suite_segmented},
	ODP_SUITE_INFO_NULL
};
+
+int pktio_main(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(argc, argv))
+ return -1;
+
+ ret = odp_cunit_register(pktio_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/common_plat/validation/api/pktio/pktio.h b/test/common_plat/validation/api/pktio/pktio.h
new file mode 100644
index 000000000..8131d05fe
--- /dev/null
+++ b/test/common_plat/validation/api/pktio/pktio.h
@@ -0,0 +1,64 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _ODP_TEST_PKTIO_H_
+#define _ODP_TEST_PKTIO_H_
+
+#include <odp_cunit_common.h>
+
+/* test functions: */
+void pktio_test_plain_queue(void);
+void pktio_test_plain_multi(void);
+void pktio_test_sched_queue(void);
+void pktio_test_sched_multi(void);
+void pktio_test_recv(void);
+void pktio_test_recv_multi(void);
+void pktio_test_recv_queue(void);
+void pktio_test_recv_tmo(void);
+void pktio_test_recv_mq_tmo(void);
+void pktio_test_recv_mtu(void);
+void pktio_test_mtu(void);
+void pktio_test_promisc(void);
+void pktio_test_mac(void);
+void pktio_test_inq_remdef(void);
+void pktio_test_open(void);
+void pktio_test_lookup(void);
+void pktio_test_index(void);
+void pktio_test_info(void);
+void pktio_test_inq(void);
+void pktio_test_pktio_config(void);
+void pktio_test_pktin_queue_config_direct(void);
+void pktio_test_pktin_queue_config_sched(void);
+void pktio_test_pktin_queue_config_queue(void);
+void pktio_test_pktout_queue_config(void);
+void pktio_test_start_stop(void);
+int pktio_check_send_failure(void);
+void pktio_test_send_failure(void);
+void pktio_test_recv_on_wonly(void);
+void pktio_test_send_on_ronly(void);
+void pktio_test_plain_multi_event(void);
+void pktio_test_sched_multi_event(void);
+void pktio_test_recv_multi_event(void);
+int pktio_check_statistics_counters(void);
+void pktio_test_statistics_counters(void);
+int pktio_check_pktin_ts(void);
+void pktio_test_pktin_ts(void);
+
+/* test arrays: */
+extern odp_testinfo_t pktio_suite[];
+
+/* test array init/term functions: */
+int pktio_suite_term(void);
+int pktio_suite_init_segmented(void);
+int pktio_suite_init_unsegmented(void);
+
+/* test registry: */
+extern odp_suiteinfo_t pktio_suites[];
+
+/* main test program: */
+int pktio_main(int argc, char *argv[]);
+
+#endif
diff --git a/test/common_plat/validation/api/pktio/pktio_main.c b/test/common_plat/validation/api/pktio/pktio_main.c
new file mode 100644
index 000000000..2928e1b8a
--- /dev/null
+++ b/test/common_plat/validation/api/pktio/pktio_main.c
@@ -0,0 +1,12 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "pktio.h"
+
+int main(int argc, char *argv[])
+{
+ return pktio_main(argc, argv);
+}
diff --git a/test/common_plat/validation/api/pool/.gitignore b/test/common_plat/validation/api/pool/.gitignore
new file mode 100644
index 000000000..fc91b28d6
--- /dev/null
+++ b/test/common_plat/validation/api/pool/.gitignore
@@ -0,0 +1 @@
+pool_main
diff --git a/test/common_plat/validation/api/pool/Makefile.am b/test/common_plat/validation/api/pool/Makefile.am
new file mode 100644
index 000000000..1eb8d714b
--- /dev/null
+++ b/test/common_plat/validation/api/pool/Makefile.am
@@ -0,0 +1,10 @@
+include ../Makefile.inc
+
+noinst_LTLIBRARIES = libtestpool.la
+libtestpool_la_SOURCES = pool.c
+
+test_PROGRAMS = pool_main$(EXEEXT)
+dist_pool_main_SOURCES = pool_main.c
+pool_main_LDADD = libtestpool.la $(LIBCUNIT_COMMON) $(LIBODP)
+
+EXTRA_DIST = pool.h
diff --git a/test/common_plat/validation/api/pool/pool.c b/test/common_plat/validation/api/pool/pool.c
new file mode 100644
index 000000000..d48ac2a34
--- /dev/null
+++ b/test/common_plat/validation/api/pool/pool.c
@@ -0,0 +1,131 @@
+/* Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+#include "odp_cunit_common.h"
+#include "pool.h"
+
+static int pool_name_number = 1;
+static const int default_buffer_size = 1500;
+static const int default_buffer_num = 1000;
+
+static void pool_create_destroy(odp_pool_param_t *params)
+{
+ odp_pool_t pool;
+ char pool_name[ODP_POOL_NAME_LEN];
+
+ snprintf(pool_name, sizeof(pool_name),
+ "test_pool-%d", pool_name_number++);
+
+ pool = odp_pool_create(pool_name, params);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+ CU_ASSERT(odp_pool_to_u64(pool) !=
+ odp_pool_to_u64(ODP_POOL_INVALID));
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+void pool_test_create_destroy_buffer(void)
+{
+ odp_pool_param_t params = {
+ .buf = {
+ .size = default_buffer_size,
+ .align = ODP_CACHE_LINE_SIZE,
+ .num = default_buffer_num,
+ },
+ .type = ODP_POOL_BUFFER,
+ };
+
+ pool_create_destroy(&params);
+}
+
+void pool_test_create_destroy_packet(void)
+{
+ odp_pool_param_t params = {
+ .pkt = {
+ .seg_len = 0,
+ .len = default_buffer_size,
+ .num = default_buffer_num,
+ },
+ .type = ODP_POOL_PACKET,
+ };
+
+ pool_create_destroy(&params);
+}
+
+void pool_test_create_destroy_timeout(void)
+{
+ odp_pool_param_t params = {
+ .tmo = {
+ .num = default_buffer_num,
+ },
+ .type = ODP_POOL_TIMEOUT,
+ };
+
+ pool_create_destroy(&params);
+}
+
+void pool_test_lookup_info_print(void)
+{
+ odp_pool_t pool;
+ const char pool_name[] = "pool_for_lookup_test";
+ odp_pool_info_t info;
+ odp_pool_param_t params = {
+ .buf = {
+ .size = default_buffer_size,
+ .align = ODP_CACHE_LINE_SIZE,
+ .num = default_buffer_num,
+ },
+ .type = ODP_POOL_BUFFER,
+ };
+
+ pool = odp_pool_create(pool_name, &params);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ pool = odp_pool_lookup(pool_name);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ CU_ASSERT_FATAL(odp_pool_info(pool, &info) == 0);
+ CU_ASSERT(strncmp(pool_name, info.name, sizeof(pool_name)) == 0);
+ CU_ASSERT(params.buf.size <= info.params.buf.size);
+ CU_ASSERT(params.buf.align <= info.params.buf.align);
+ CU_ASSERT(params.buf.num <= info.params.buf.num);
+ CU_ASSERT(params.type == info.params.type);
+
+ odp_pool_print(pool);
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+odp_testinfo_t pool_suite[] = {
+ ODP_TEST_INFO(pool_test_create_destroy_buffer),
+ ODP_TEST_INFO(pool_test_create_destroy_packet),
+ ODP_TEST_INFO(pool_test_create_destroy_timeout),
+ ODP_TEST_INFO(pool_test_lookup_info_print),
+ ODP_TEST_INFO_NULL,
+};
+
+odp_suiteinfo_t pool_suites[] = {
+ { .pName = "Pool tests",
+ .pTests = pool_suite,
+ },
+ ODP_SUITE_INFO_NULL,
+};
+
+int pool_main(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(argc, argv))
+ return -1;
+
+ ret = odp_cunit_register(pool_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/common_plat/validation/api/pool/pool.h b/test/common_plat/validation/api/pool/pool.h
new file mode 100644
index 000000000..29e517633
--- /dev/null
+++ b/test/common_plat/validation/api/pool/pool.h
@@ -0,0 +1,28 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _ODP_TEST_POOL_H_
+#define _ODP_TEST_POOL_H_
+
+#include <odp_cunit_common.h>
+
+/* test functions: */
+void pool_test_create_destroy_buffer(void);
+void pool_test_create_destroy_packet(void);
+void pool_test_create_destroy_timeout(void);
+void pool_test_create_destroy_buffer_shm(void);
+void pool_test_lookup_info_print(void);
+
+/* test arrays: */
+extern odp_testinfo_t pool_suite[];
+
+/* test registry: */
+extern odp_suiteinfo_t pool_suites[];
+
+/* main test program: */
+int pool_main(int argc, char *argv[]);
+
+#endif
diff --git a/test/common_plat/validation/api/pool/pool_main.c b/test/common_plat/validation/api/pool/pool_main.c
new file mode 100644
index 000000000..bf06585b5
--- /dev/null
+++ b/test/common_plat/validation/api/pool/pool_main.c
@@ -0,0 +1,12 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "pool.h"
+
+int main(int argc, char *argv[])
+{
+ return pool_main(argc, argv);
+}
diff --git a/test/common_plat/validation/api/queue/.gitignore b/test/common_plat/validation/api/queue/.gitignore
new file mode 100644
index 000000000..469506a13
--- /dev/null
+++ b/test/common_plat/validation/api/queue/.gitignore
@@ -0,0 +1 @@
+queue_main
diff --git a/test/common_plat/validation/api/queue/Makefile.am b/test/common_plat/validation/api/queue/Makefile.am
new file mode 100644
index 000000000..a477e3c56
--- /dev/null
+++ b/test/common_plat/validation/api/queue/Makefile.am
@@ -0,0 +1,10 @@
+include ../Makefile.inc
+
+noinst_LTLIBRARIES = libtestqueue.la
+libtestqueue_la_SOURCES = queue.c
+
+test_PROGRAMS = queue_main$(EXEEXT)
+dist_queue_main_SOURCES = queue_main.c
+queue_main_LDADD = libtestqueue.la $(LIBCUNIT_COMMON) $(LIBODP)
+
+EXTRA_DIST = queue.h
diff --git a/test/common_plat/validation/api/queue/queue.c b/test/common_plat/validation/api/queue/queue.c
new file mode 100644
index 000000000..dc3a977cb
--- /dev/null
+++ b/test/common_plat/validation/api/queue/queue.c
@@ -0,0 +1,321 @@
+/* Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+#include <odp_cunit_common.h>
+#include "queue.h"
+
+#define MAX_BUFFER_QUEUE (8)
+#define MSG_POOL_SIZE (4 * 1024 * 1024)
+#define CONFIG_MAX_ITERATION (100)
+#define MAX_QUEUES (64 * 1024)
+
+static int queue_context = 0xff;
+static odp_pool_t pool;
+
+static void generate_name(char *name, uint32_t index)
+{
+	/* Unique name for up to 300M queues */
+ name[0] = 'A' + ((index / (26 * 26 * 26 * 26 * 26)) % 26);
+ name[1] = 'A' + ((index / (26 * 26 * 26 * 26)) % 26);
+ name[2] = 'A' + ((index / (26 * 26 * 26)) % 26);
+ name[3] = 'A' + ((index / (26 * 26)) % 26);
+ name[4] = 'A' + ((index / 26) % 26);
+ name[5] = 'A' + (index % 26);
+}
+
+int queue_suite_init(void)
+{
+ odp_pool_param_t params;
+
+ params.buf.size = 0;
+ params.buf.align = ODP_CACHE_LINE_SIZE;
+ params.buf.num = 1024 * 10;
+ params.type = ODP_POOL_BUFFER;
+
+ pool = odp_pool_create("msg_pool", &params);
+
+ if (ODP_POOL_INVALID == pool) {
+ printf("Pool create failed.\n");
+ return -1;
+ }
+ return 0;
+}
+
+int queue_suite_term(void)
+{
+ return odp_pool_destroy(pool);
+}
+
+void queue_test_capa(void)
+{
+ odp_queue_capability_t capa;
+ odp_queue_param_t qparams;
+ char name[ODP_QUEUE_NAME_LEN];
+ odp_queue_t queue[MAX_QUEUES];
+ uint32_t num_queues, i;
+
+ memset(&capa, 0, sizeof(odp_queue_capability_t));
+ CU_ASSERT(odp_queue_capability(&capa) == 0);
+
+ CU_ASSERT(capa.max_queues != 0);
+ CU_ASSERT(capa.max_ordered_locks != 0);
+ CU_ASSERT(capa.max_sched_groups != 0);
+ CU_ASSERT(capa.sched_prios != 0);
+
+ for (i = 0; i < ODP_QUEUE_NAME_LEN; i++)
+ name[i] = 'A' + (i % 26);
+
+ name[ODP_QUEUE_NAME_LEN - 1] = 0;
+
+ if (capa.max_queues > MAX_QUEUES)
+ num_queues = MAX_QUEUES;
+ else
+ num_queues = capa.max_queues;
+
+ odp_queue_param_init(&qparams);
+
+ for (i = 0; i < num_queues; i++) {
+ generate_name(name, i);
+ queue[i] = odp_queue_create(name, &qparams);
+
+ if (queue[i] == ODP_QUEUE_INVALID) {
+ CU_FAIL("Queue create failed");
+ num_queues = i;
+ break;
+ }
+
+ CU_ASSERT(odp_queue_lookup(name) != ODP_QUEUE_INVALID);
+ }
+
+ for (i = 0; i < num_queues; i++)
+ CU_ASSERT(odp_queue_destroy(queue[i]) == 0);
+}
+
+void queue_test_mode(void)
+{
+ odp_queue_param_t qparams;
+ odp_queue_t queue;
+ int i, j;
+ odp_queue_op_mode_t mode[3] = { ODP_QUEUE_OP_MT,
+ ODP_QUEUE_OP_MT_UNSAFE,
+ ODP_QUEUE_OP_DISABLED };
+
+ odp_queue_param_init(&qparams);
+
+ /* Plain queue modes */
+ for (i = 0; i < 3; i++) {
+ for (j = 0; j < 3; j++) {
+ /* Should not disable both enq and deq */
+ if (i == 2 && j == 2)
+ break;
+
+ qparams.enq_mode = mode[i];
+ qparams.deq_mode = mode[j];
+ queue = odp_queue_create("test_queue", &qparams);
+ CU_ASSERT(queue != ODP_QUEUE_INVALID);
+ if (queue != ODP_QUEUE_INVALID)
+ CU_ASSERT(odp_queue_destroy(queue) == 0);
+ }
+ }
+
+ odp_queue_param_init(&qparams);
+ qparams.type = ODP_QUEUE_TYPE_SCHED;
+
+ /* Scheduled queue modes. Dequeue mode is fixed. */
+ for (i = 0; i < 3; i++) {
+ qparams.enq_mode = mode[i];
+ queue = odp_queue_create("test_queue", &qparams);
+ CU_ASSERT(queue != ODP_QUEUE_INVALID);
+ if (queue != ODP_QUEUE_INVALID)
+ CU_ASSERT(odp_queue_destroy(queue) == 0);
+ }
+}
+
+void queue_test_param(void)
+{
+ odp_queue_t queue;
+ odp_event_t enev[MAX_BUFFER_QUEUE];
+ odp_event_t deev[MAX_BUFFER_QUEUE];
+ odp_buffer_t buf;
+ odp_event_t ev;
+ odp_pool_t msg_pool;
+ odp_event_t *pev_tmp;
+ int i, deq_ret, ret;
+ int nr_deq_entries = 0;
+ int max_iteration = CONFIG_MAX_ITERATION;
+ odp_queue_param_t qparams;
+ odp_buffer_t enbuf;
+
+ /* Schedule type queue */
+ odp_queue_param_init(&qparams);
+ qparams.type = ODP_QUEUE_TYPE_SCHED;
+ qparams.sched.prio = ODP_SCHED_PRIO_LOWEST;
+ qparams.sched.sync = ODP_SCHED_SYNC_PARALLEL;
+ qparams.sched.group = ODP_SCHED_GROUP_WORKER;
+
+ queue = odp_queue_create("test_queue", &qparams);
+ CU_ASSERT(ODP_QUEUE_INVALID != queue);
+ CU_ASSERT(odp_queue_to_u64(queue) !=
+ odp_queue_to_u64(ODP_QUEUE_INVALID));
+ CU_ASSERT(queue == odp_queue_lookup("test_queue"));
+ CU_ASSERT(ODP_QUEUE_TYPE_SCHED == odp_queue_type(queue));
+ CU_ASSERT(ODP_SCHED_PRIO_LOWEST == odp_queue_sched_prio(queue));
+ CU_ASSERT(ODP_SCHED_SYNC_PARALLEL == odp_queue_sched_type(queue));
+ CU_ASSERT(ODP_SCHED_GROUP_WORKER == odp_queue_sched_group(queue));
+
+ CU_ASSERT(0 == odp_queue_context_set(queue, &queue_context,
+ sizeof(queue_context)));
+
+ CU_ASSERT(&queue_context == odp_queue_context(queue));
+ CU_ASSERT(odp_queue_destroy(queue) == 0);
+
+ /* Plain type queue */
+ odp_queue_param_init(&qparams);
+ qparams.type = ODP_QUEUE_TYPE_PLAIN;
+ qparams.context = &queue_context;
+ qparams.context_len = sizeof(queue_context);
+
+ queue = odp_queue_create("test_queue", &qparams);
+ CU_ASSERT(ODP_QUEUE_INVALID != queue);
+ CU_ASSERT(queue == odp_queue_lookup("test_queue"));
+ CU_ASSERT(ODP_QUEUE_TYPE_PLAIN == odp_queue_type(queue));
+ CU_ASSERT(&queue_context == odp_queue_context(queue));
+
+ msg_pool = odp_pool_lookup("msg_pool");
+ buf = odp_buffer_alloc(msg_pool);
+ CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+ ev = odp_buffer_to_event(buf);
+
+ if (!(CU_ASSERT(odp_queue_enq(queue, ev) == 0))) {
+ odp_buffer_free(buf);
+ } else {
+ CU_ASSERT(ev == odp_queue_deq(queue));
+ odp_buffer_free(buf);
+ }
+
+ for (i = 0; i < MAX_BUFFER_QUEUE; i++) {
+ buf = odp_buffer_alloc(msg_pool);
+ enev[i] = odp_buffer_to_event(buf);
+ }
+
+ /*
+ * odp_queue_enq_multi may return 0..n buffers due to the resource
+	 * constraints in the implementation at that point in time.
+ * But here we assume that we succeed in enqueuing all buffers.
+ */
+ ret = odp_queue_enq_multi(queue, enev, MAX_BUFFER_QUEUE);
+ CU_ASSERT(MAX_BUFFER_QUEUE == ret);
+ i = ret < 0 ? 0 : ret;
+ for ( ; i < MAX_BUFFER_QUEUE; i++)
+ odp_event_free(enev[i]);
+
+ pev_tmp = deev;
+ do {
+ deq_ret = odp_queue_deq_multi(queue, pev_tmp,
+ MAX_BUFFER_QUEUE);
+ nr_deq_entries += deq_ret;
+ max_iteration--;
+ pev_tmp += deq_ret;
+ CU_ASSERT(max_iteration >= 0);
+ } while (nr_deq_entries < MAX_BUFFER_QUEUE);
+
+ for (i = 0; i < MAX_BUFFER_QUEUE; i++) {
+ enbuf = odp_buffer_from_event(enev[i]);
+ CU_ASSERT(enev[i] == deev[i]);
+ odp_buffer_free(enbuf);
+ }
+
+ CU_ASSERT(odp_queue_destroy(queue) == 0);
+}
+
+void queue_test_info(void)
+{
+ odp_queue_t q_plain, q_order;
+ const char *const nq_plain = "test_q_plain";
+ const char *const nq_order = "test_q_order";
+ odp_queue_info_t info;
+ odp_queue_param_t param;
+ char q_plain_ctx[] = "test_q_plain context data";
+ char q_order_ctx[] = "test_q_order context data";
+ unsigned lock_count;
+ char *ctx;
+ int ret;
+
+ /* Create a plain queue and set context */
+ q_plain = odp_queue_create(nq_plain, NULL);
+ CU_ASSERT(ODP_QUEUE_INVALID != q_plain);
+ CU_ASSERT(odp_queue_context_set(q_plain, q_plain_ctx,
+ sizeof(q_plain_ctx)) == 0);
+
+ /* Create a scheduled ordered queue with explicitly set params */
+ odp_queue_param_init(&param);
+ param.type = ODP_QUEUE_TYPE_SCHED;
+ param.sched.prio = ODP_SCHED_PRIO_NORMAL;
+ param.sched.sync = ODP_SCHED_SYNC_ORDERED;
+ param.sched.group = ODP_SCHED_GROUP_ALL;
+ param.sched.lock_count = 1;
+ param.context = q_order_ctx;
+ q_order = odp_queue_create(nq_order, &param);
+ CU_ASSERT(ODP_QUEUE_INVALID != q_order);
+
+ /* Check info for the plain queue */
+ CU_ASSERT(odp_queue_info(q_plain, &info) == 0);
+ CU_ASSERT(strcmp(nq_plain, info.name) == 0);
+ CU_ASSERT(info.param.type == ODP_QUEUE_TYPE_PLAIN);
+ CU_ASSERT(info.param.type == odp_queue_type(q_plain));
+ ctx = info.param.context; /* 'char' context ptr */
+ CU_ASSERT(ctx == q_plain_ctx);
+ CU_ASSERT(info.param.context == odp_queue_context(q_plain));
+
+ /* Check info for the scheduled ordered queue */
+ CU_ASSERT(odp_queue_info(q_order, &info) == 0);
+ CU_ASSERT(strcmp(nq_order, info.name) == 0);
+ CU_ASSERT(info.param.type == ODP_QUEUE_TYPE_SCHED);
+ CU_ASSERT(info.param.type == odp_queue_type(q_order));
+ ctx = info.param.context; /* 'char' context ptr */
+ CU_ASSERT(ctx == q_order_ctx);
+ CU_ASSERT(info.param.context == odp_queue_context(q_order));
+ CU_ASSERT(info.param.sched.prio == odp_queue_sched_prio(q_order));
+ CU_ASSERT(info.param.sched.sync == odp_queue_sched_type(q_order));
+ CU_ASSERT(info.param.sched.group == odp_queue_sched_group(q_order));
+ ret = odp_queue_lock_count(q_order);
+ CU_ASSERT(ret >= 0);
+ lock_count = (unsigned)ret;
+ CU_ASSERT(info.param.sched.lock_count == lock_count);
+
+ CU_ASSERT(odp_queue_destroy(q_plain) == 0);
+ CU_ASSERT(odp_queue_destroy(q_order) == 0);
+}
+
+odp_testinfo_t queue_suite[] = {
+ ODP_TEST_INFO(queue_test_capa),
+ ODP_TEST_INFO(queue_test_mode),
+ ODP_TEST_INFO(queue_test_param),
+ ODP_TEST_INFO(queue_test_info),
+ ODP_TEST_INFO_NULL,
+};
+
+odp_suiteinfo_t queue_suites[] = {
+ {"Queue", queue_suite_init, queue_suite_term, queue_suite},
+ ODP_SUITE_INFO_NULL,
+};
+
+int queue_main(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(argc, argv))
+ return -1;
+
+ ret = odp_cunit_register(queue_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/common_plat/validation/api/queue/queue.h b/test/common_plat/validation/api/queue/queue.h
new file mode 100644
index 000000000..6b787b1d6
--- /dev/null
+++ b/test/common_plat/validation/api/queue/queue.h
@@ -0,0 +1,31 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _ODP_TEST_QUEUE_H_
+#define _ODP_TEST_QUEUE_H_
+
+#include <odp_cunit_common.h>
+
+/* test functions: */
+void queue_test_capa(void);
+void queue_test_mode(void);
+void queue_test_param(void);
+void queue_test_info(void);
+
+/* test arrays: */
+extern odp_testinfo_t queue_suite[];
+
+/* test array init/term functions: */
+int queue_suite_init(void);
+int queue_suite_term(void);
+
+/* test registry: */
+extern odp_suiteinfo_t queue_suites[];
+
+/* main test program: */
+int queue_main(int argc, char *argv[]);
+
+#endif
diff --git a/test/common_plat/validation/api/queue/queue_main.c b/test/common_plat/validation/api/queue/queue_main.c
new file mode 100644
index 000000000..b461b860a
--- /dev/null
+++ b/test/common_plat/validation/api/queue/queue_main.c
@@ -0,0 +1,12 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "queue.h"
+
+int main(int argc, char *argv[])
+{
+ return queue_main(argc, argv);
+}
diff --git a/test/common_plat/validation/api/random/.gitignore b/test/common_plat/validation/api/random/.gitignore
new file mode 100644
index 000000000..2c88ec0b8
--- /dev/null
+++ b/test/common_plat/validation/api/random/.gitignore
@@ -0,0 +1 @@
+random_main
diff --git a/test/common_plat/validation/api/random/Makefile.am b/test/common_plat/validation/api/random/Makefile.am
new file mode 100644
index 000000000..69259a4db
--- /dev/null
+++ b/test/common_plat/validation/api/random/Makefile.am
@@ -0,0 +1,10 @@
+include ../Makefile.inc
+
+noinst_LTLIBRARIES = libtestrandom.la
+libtestrandom_la_SOURCES = random.c
+
+test_PROGRAMS = random_main$(EXEEXT)
+dist_random_main_SOURCES = random_main.c
+random_main_LDADD = libtestrandom.la $(LIBCUNIT_COMMON) $(LIBODP)
+
+EXTRA_DIST = random.h
diff --git a/test/common_plat/validation/api/random/random.c b/test/common_plat/validation/api/random/random.c
new file mode 100644
index 000000000..7572366c2
--- /dev/null
+++ b/test/common_plat/validation/api/random/random.c
@@ -0,0 +1,44 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+#include <odp_cunit_common.h>
+#include "random.h"
+
+void random_test_get_size(void)
+{
+ int32_t ret;
+ uint8_t buf[32];
+
+ ret = odp_random_data(buf, sizeof(buf), false);
+ CU_ASSERT(ret == sizeof(buf));
+}
+
+odp_testinfo_t random_suite[] = {
+ ODP_TEST_INFO(random_test_get_size),
+ ODP_TEST_INFO_NULL,
+};
+
+odp_suiteinfo_t random_suites[] = {
+ {"Random", NULL, NULL, random_suite},
+ ODP_SUITE_INFO_NULL,
+};
+
+int random_main(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(argc, argv))
+ return -1;
+
+ ret = odp_cunit_register(random_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/common_plat/validation/api/random/random.h b/test/common_plat/validation/api/random/random.h
new file mode 100644
index 000000000..26202cc37
--- /dev/null
+++ b/test/common_plat/validation/api/random/random.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _ODP_TEST_RANDOM_H_
+#define _ODP_TEST_RANDOM_H_
+
+#include <odp_cunit_common.h>
+
+/* test functions: */
+void random_test_get_size(void);
+
+/* test arrays: */
+extern odp_testinfo_t random_suite[];
+
+/* test registry: */
+extern odp_suiteinfo_t random_suites[];
+
+/* main test program: */
+int random_main(int argc, char *argv[]);
+
+#endif
diff --git a/test/common_plat/validation/api/random/random_main.c b/test/common_plat/validation/api/random/random_main.c
new file mode 100644
index 000000000..8f38a84c6
--- /dev/null
+++ b/test/common_plat/validation/api/random/random_main.c
@@ -0,0 +1,12 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "random.h"
+
+int main(int argc, char *argv[])
+{
+ return random_main(argc, argv);
+}
diff --git a/test/common_plat/validation/api/scheduler/.gitignore b/test/common_plat/validation/api/scheduler/.gitignore
new file mode 100644
index 000000000..b4eb30091
--- /dev/null
+++ b/test/common_plat/validation/api/scheduler/.gitignore
@@ -0,0 +1 @@
+scheduler_main
diff --git a/test/common_plat/validation/api/scheduler/Makefile.am b/test/common_plat/validation/api/scheduler/Makefile.am
new file mode 100644
index 000000000..2555cab81
--- /dev/null
+++ b/test/common_plat/validation/api/scheduler/Makefile.am
@@ -0,0 +1,10 @@
+include ../Makefile.inc
+
+noinst_LTLIBRARIES = libtestscheduler.la
+libtestscheduler_la_SOURCES = scheduler.c
+
+test_PROGRAMS = scheduler_main$(EXEEXT)
+dist_scheduler_main_SOURCES = scheduler_main.c
+scheduler_main_LDADD = libtestscheduler.la $(LIBCUNIT_COMMON) $(LIBODP)
+
+EXTRA_DIST = scheduler.h
diff --git a/test/common_plat/validation/api/scheduler/scheduler.c b/test/common_plat/validation/api/scheduler/scheduler.c
new file mode 100644
index 000000000..919cfb6ce
--- /dev/null
+++ b/test/common_plat/validation/api/scheduler/scheduler.c
@@ -0,0 +1,1653 @@
+/* Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+#include "odp_cunit_common.h"
+#include "scheduler.h"
+
+#define MAX_WORKERS_THREADS 32
+#define MAX_ORDERED_LOCKS 2
+#define MSG_POOL_SIZE (64 * 1024)
+#define QUEUES_PER_PRIO 16
+#define BUF_SIZE 64
+#define BUFS_PER_QUEUE 100
+#define BUFS_PER_QUEUE_EXCL 10000
+#define BURST_BUF_SIZE 4
+#define NUM_BUFS_PAUSE 1000
+#define NUM_BUFS_BEFORE_PAUSE 10
+#define NUM_GROUPS 2
+
+#define GLOBALS_SHM_NAME "test_globals"
+#define MSG_POOL_NAME "msg_pool"
+#define QUEUE_CTX_POOL_NAME "queue_ctx_pool"
+#define SHM_THR_ARGS_NAME "shm_thr_args"
+
+#define ONE_Q 1
+#define MANY_QS QUEUES_PER_PRIO
+
+#define ONE_PRIO 1
+
+#define SCHD_ONE 0
+#define SCHD_MULTI 1
+
+#define DISABLE_EXCL_ATOMIC 0
+#define ENABLE_EXCL_ATOMIC 1
+
+#define MAGIC 0xdeadbeef
+#define MAGIC1 0xdeadbeef
+#define MAGIC2 0xcafef00d
+
+#define CHAOS_NUM_QUEUES 6
+#define CHAOS_NUM_BUFS_PER_QUEUE 6
+#define CHAOS_NUM_ROUNDS 1000
+#define CHAOS_NUM_EVENTS (CHAOS_NUM_QUEUES * CHAOS_NUM_BUFS_PER_QUEUE)
+#define CHAOS_DEBUG (CHAOS_NUM_ROUNDS < 1000)
+#define CHAOS_PTR_TO_NDX(p) ((uint64_t)(uint32_t)(uintptr_t)p)
+#define CHAOS_NDX_TO_PTR(n) ((void *)(uintptr_t)n)
+
+#define ODP_WAIT_TOLERANCE (60 * ODP_TIME_MSEC_IN_NS)
+
+/* Test global variables */
+typedef struct {
+ int num_workers;
+ odp_barrier_t barrier;
+ int buf_count;
+ int buf_count_cpy;
+ odp_ticketlock_t lock;
+ odp_spinlock_t atomic_lock;
+ struct {
+ odp_queue_t handle;
+ char name[ODP_QUEUE_NAME_LEN];
+ } chaos_q[CHAOS_NUM_QUEUES];
+} test_globals_t;
+
+typedef struct {
+ pthrd_arg cu_thr;
+ test_globals_t *globals;
+ odp_schedule_sync_t sync;
+ int num_queues;
+ int num_prio;
+ int num_bufs;
+ int num_workers;
+ int enable_schd_multi;
+ int enable_excl_atomic;
+} thread_args_t;
+
+typedef struct {
+ uint64_t sequence;
+ uint64_t lock_sequence[MAX_ORDERED_LOCKS];
+ uint64_t output_sequence;
+} buf_contents;
+
+typedef struct {
+ odp_buffer_t ctx_handle;
+ odp_queue_t pq_handle;
+ uint64_t sequence;
+ uint64_t lock_sequence[MAX_ORDERED_LOCKS];
+} queue_context;
+
+typedef struct {
+ uint64_t evno;
+ uint64_t seqno;
+} chaos_buf;
+
+odp_pool_t pool;
+odp_pool_t queue_ctx_pool;
+
+static int drain_queues(void)
+{
+ odp_event_t ev;
+ uint64_t wait = odp_schedule_wait_time(100 * ODP_TIME_MSEC_IN_NS);
+ int ret = 0;
+
+ while ((ev = odp_schedule(NULL, wait)) != ODP_EVENT_INVALID) {
+ odp_event_free(ev);
+ ret++;
+ }
+
+ return ret;
+}
+
+static int exit_schedule_loop(void)
+{
+ odp_event_t ev;
+ int ret = 0;
+
+ odp_schedule_pause();
+
+ while ((ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT))
+ != ODP_EVENT_INVALID) {
+ odp_event_free(ev);
+ ret++;
+ }
+
+ odp_schedule_resume();
+
+ return ret;
+}
+
+void scheduler_test_wait_time(void)
+{
+ int i;
+ odp_queue_t queue;
+ uint64_t wait_time;
+ odp_queue_param_t qp;
+ odp_time_t lower_limit, upper_limit;
+ odp_time_t start_time, end_time, diff;
+
+ /* check on read */
+ wait_time = odp_schedule_wait_time(0);
+ wait_time = odp_schedule_wait_time(1);
+
+ /* check ODP_SCHED_NO_WAIT */
+ odp_queue_param_init(&qp);
+ qp.type = ODP_QUEUE_TYPE_SCHED;
+ qp.sched.sync = ODP_SCHED_SYNC_PARALLEL;
+ qp.sched.prio = ODP_SCHED_PRIO_NORMAL;
+ qp.sched.group = ODP_SCHED_GROUP_ALL;
+ queue = odp_queue_create("dummy_queue", &qp);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ wait_time = odp_schedule_wait_time(ODP_TIME_SEC_IN_NS);
+ start_time = odp_time_local();
+ odp_schedule(&queue, ODP_SCHED_NO_WAIT);
+ end_time = odp_time_local();
+
+ diff = odp_time_diff(end_time, start_time);
+ lower_limit = ODP_TIME_NULL;
+ upper_limit = odp_time_local_from_ns(ODP_WAIT_TOLERANCE);
+
+ CU_ASSERT(odp_time_cmp(diff, lower_limit) >= 0);
+ CU_ASSERT(odp_time_cmp(diff, upper_limit) <= 0);
+
+ /* check time correctness */
+ start_time = odp_time_local();
+ for (i = 1; i < 6; i++) {
+ odp_schedule(&queue, wait_time);
+ printf("%d..", i);
+ }
+ end_time = odp_time_local();
+
+ diff = odp_time_diff(end_time, start_time);
+ lower_limit = odp_time_local_from_ns(5 * ODP_TIME_SEC_IN_NS -
+ ODP_WAIT_TOLERANCE);
+ upper_limit = odp_time_local_from_ns(5 * ODP_TIME_SEC_IN_NS +
+ ODP_WAIT_TOLERANCE);
+
+ CU_ASSERT(odp_time_cmp(diff, lower_limit) >= 0);
+ CU_ASSERT(odp_time_cmp(diff, upper_limit) <= 0);
+
+ CU_ASSERT_FATAL(odp_queue_destroy(queue) == 0);
+}
+
+void scheduler_test_num_prio(void)
+{
+ int prio;
+
+ prio = odp_schedule_num_prio();
+
+ CU_ASSERT(prio > 0);
+ CU_ASSERT(prio == odp_schedule_num_prio());
+}
+
+void scheduler_test_queue_destroy(void)
+{
+ odp_pool_t p;
+ odp_pool_param_t params;
+ odp_queue_param_t qp;
+ odp_queue_t queue, from;
+ odp_buffer_t buf;
+ odp_event_t ev;
+ uint32_t *u32;
+ int i;
+ odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_PARALLEL,
+ ODP_SCHED_SYNC_ATOMIC,
+ ODP_SCHED_SYNC_ORDERED};
+
+ odp_queue_param_init(&qp);
+ odp_pool_param_init(&params);
+ params.buf.size = 100;
+ params.buf.align = 0;
+ params.buf.num = 1;
+ params.type = ODP_POOL_BUFFER;
+
+ p = odp_pool_create("sched_destroy_pool", &params);
+
+ CU_ASSERT_FATAL(p != ODP_POOL_INVALID);
+
+ for (i = 0; i < 3; i++) {
+ qp.type = ODP_QUEUE_TYPE_SCHED;
+ qp.sched.prio = ODP_SCHED_PRIO_DEFAULT;
+ qp.sched.sync = sync[i];
+ qp.sched.group = ODP_SCHED_GROUP_ALL;
+
+ queue = odp_queue_create("sched_destroy_queue", &qp);
+
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ buf = odp_buffer_alloc(p);
+
+ CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+
+ u32 = odp_buffer_addr(buf);
+ u32[0] = MAGIC;
+
+ ev = odp_buffer_to_event(buf);
+ if (!(CU_ASSERT(odp_queue_enq(queue, ev) == 0)))
+ odp_buffer_free(buf);
+
+ ev = odp_schedule(&from, ODP_SCHED_WAIT);
+
+ CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
+
+ CU_ASSERT_FATAL(from == queue);
+
+ buf = odp_buffer_from_event(ev);
+ u32 = odp_buffer_addr(buf);
+
+ CU_ASSERT_FATAL(u32[0] == MAGIC);
+
+ odp_buffer_free(buf);
+ odp_schedule_release_ordered();
+
+ CU_ASSERT_FATAL(odp_queue_destroy(queue) == 0);
+ }
+
+ CU_ASSERT_FATAL(odp_pool_destroy(p) == 0);
+}
+
+void scheduler_test_groups(void)
+{
+ odp_pool_t p;
+ odp_pool_param_t params;
+ odp_queue_t queue_grp1, queue_grp2;
+ odp_buffer_t buf;
+ odp_event_t ev;
+ uint32_t *u32;
+ int i, j, rc;
+ odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_PARALLEL,
+ ODP_SCHED_SYNC_ATOMIC,
+ ODP_SCHED_SYNC_ORDERED};
+ int thr_id = odp_thread_id();
+ odp_thrmask_t zeromask, mymask, testmask;
+ odp_schedule_group_t mygrp1, mygrp2, lookup;
+ odp_schedule_group_info_t info;
+
+ odp_thrmask_zero(&zeromask);
+ odp_thrmask_zero(&mymask);
+ odp_thrmask_set(&mymask, thr_id);
+
+ /* Can't find a group before we create it */
+ lookup = odp_schedule_group_lookup("Test Group 1");
+ CU_ASSERT(lookup == ODP_SCHED_GROUP_INVALID);
+
+ /* Now create the group */
+ mygrp1 = odp_schedule_group_create("Test Group 1", &zeromask);
+ CU_ASSERT_FATAL(mygrp1 != ODP_SCHED_GROUP_INVALID);
+
+ /* Verify we can now find it */
+ lookup = odp_schedule_group_lookup("Test Group 1");
+ CU_ASSERT(lookup == mygrp1);
+
+ /* Threadmask should be retrievable and be what we expect */
+ rc = odp_schedule_group_thrmask(mygrp1, &testmask);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(!odp_thrmask_isset(&testmask, thr_id));
+
+ /* Now join the group and verify we're part of it */
+ rc = odp_schedule_group_join(mygrp1, &mymask);
+ CU_ASSERT(rc == 0);
+
+ rc = odp_schedule_group_thrmask(mygrp1, &testmask);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(odp_thrmask_isset(&testmask, thr_id));
+
+ /* Info struct */
+ memset(&info, 0, sizeof(odp_schedule_group_info_t));
+ rc = odp_schedule_group_info(mygrp1, &info);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(odp_thrmask_equal(&info.thrmask, &mymask) != 0);
+ CU_ASSERT(strcmp(info.name, "Test Group 1") == 0);
+
+ /* We can't join or leave an unknown group */
+ rc = odp_schedule_group_join(ODP_SCHED_GROUP_INVALID, &mymask);
+ CU_ASSERT(rc != 0);
+
+ rc = odp_schedule_group_leave(ODP_SCHED_GROUP_INVALID, &mymask);
+ CU_ASSERT(rc != 0);
+
+ /* But we can leave our group */
+ rc = odp_schedule_group_leave(mygrp1, &mymask);
+ CU_ASSERT(rc == 0);
+
+ rc = odp_schedule_group_thrmask(mygrp1, &testmask);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(!odp_thrmask_isset(&testmask, thr_id));
+
+ /* We shouldn't be able to find our second group before creating it */
+ lookup = odp_schedule_group_lookup("Test Group 2");
+ CU_ASSERT(lookup == ODP_SCHED_GROUP_INVALID);
+
+ /* Now create it and verify we can find it */
+ mygrp2 = odp_schedule_group_create("Test Group 2", &zeromask);
+ CU_ASSERT_FATAL(mygrp2 != ODP_SCHED_GROUP_INVALID);
+
+ lookup = odp_schedule_group_lookup("Test Group 2");
+ CU_ASSERT(lookup == mygrp2);
+
+ /* Verify we're not part of it */
+ rc = odp_schedule_group_thrmask(mygrp2, &testmask);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(!odp_thrmask_isset(&testmask, thr_id));
+
+ /* Now join the group and verify we're part of it */
+ rc = odp_schedule_group_join(mygrp2, &mymask);
+ CU_ASSERT(rc == 0);
+
+ rc = odp_schedule_group_thrmask(mygrp2, &testmask);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(odp_thrmask_isset(&testmask, thr_id));
+
+ /* Now verify scheduler adherence to groups */
+ odp_pool_param_init(&params);
+ params.buf.size = 100;
+ params.buf.align = 0;
+ params.buf.num = 2;
+ params.type = ODP_POOL_BUFFER;
+
+ p = odp_pool_create("sched_group_pool", &params);
+
+ CU_ASSERT_FATAL(p != ODP_POOL_INVALID);
+
+ for (i = 0; i < 3; i++) {
+ odp_queue_param_t qp;
+ odp_queue_t queue, from;
+ odp_schedule_group_t mygrp[NUM_GROUPS];
+ odp_queue_t queue_grp[NUM_GROUPS];
+ int num = NUM_GROUPS;
+
+ odp_queue_param_init(&qp);
+ qp.type = ODP_QUEUE_TYPE_SCHED;
+ qp.sched.prio = ODP_SCHED_PRIO_DEFAULT;
+ qp.sched.sync = sync[i];
+ qp.sched.group = mygrp1;
+
+ /* Create and populate a group in group 1 */
+ queue_grp1 = odp_queue_create("sched_group_test_queue_1", &qp);
+ CU_ASSERT_FATAL(queue_grp1 != ODP_QUEUE_INVALID);
+ CU_ASSERT_FATAL(odp_queue_sched_group(queue_grp1) == mygrp1);
+
+ buf = odp_buffer_alloc(p);
+
+ CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+
+ u32 = odp_buffer_addr(buf);
+ u32[0] = MAGIC1;
+
+ ev = odp_buffer_to_event(buf);
+ rc = odp_queue_enq(queue_grp1, ev);
+ CU_ASSERT(rc == 0);
+ if (rc)
+ odp_buffer_free(buf);
+
+ /* Now create and populate a queue in group 2 */
+ qp.sched.group = mygrp2;
+ queue_grp2 = odp_queue_create("sched_group_test_queue_2", &qp);
+ CU_ASSERT_FATAL(queue_grp2 != ODP_QUEUE_INVALID);
+ CU_ASSERT_FATAL(odp_queue_sched_group(queue_grp2) == mygrp2);
+
+ buf = odp_buffer_alloc(p);
+ CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+
+ u32 = odp_buffer_addr(buf);
+ u32[0] = MAGIC2;
+
+ ev = odp_buffer_to_event(buf);
+ rc = odp_queue_enq(queue_grp2, ev);
+ CU_ASSERT(rc == 0);
+ if (rc)
+ odp_buffer_free(buf);
+
+ /* Swap between two groups. Application should serve both
+ * groups to avoid potential head of line blocking in
+ * scheduler. */
+ mygrp[0] = mygrp1;
+ mygrp[1] = mygrp2;
+ queue_grp[0] = queue_grp1;
+ queue_grp[1] = queue_grp2;
+ j = 0;
+
+ /* Ensure that each test run starts from mygrp1 */
+ odp_schedule_group_leave(mygrp1, &mymask);
+ odp_schedule_group_leave(mygrp2, &mymask);
+ odp_schedule_group_join(mygrp1, &mymask);
+
+ while (num) {
+ queue = queue_grp[j];
+ ev = odp_schedule(&from, ODP_SCHED_NO_WAIT);
+
+ if (ev == ODP_EVENT_INVALID) {
+ /* change group */
+ rc = odp_schedule_group_leave(mygrp[j],
+ &mymask);
+ CU_ASSERT_FATAL(rc == 0);
+
+ j = (j + 1) % NUM_GROUPS;
+ rc = odp_schedule_group_join(mygrp[j],
+ &mymask);
+ CU_ASSERT_FATAL(rc == 0);
+ continue;
+ }
+
+ CU_ASSERT_FATAL(from == queue);
+
+ buf = odp_buffer_from_event(ev);
+ u32 = odp_buffer_addr(buf);
+
+ if (from == queue_grp1) {
+ /* CU_ASSERT_FATAL needs these brackets */
+ CU_ASSERT_FATAL(u32[0] == MAGIC1);
+ } else {
+ CU_ASSERT_FATAL(u32[0] == MAGIC2);
+ }
+
+ odp_buffer_free(buf);
+
+ /* Tell scheduler we're about to request an event.
+ * Not needed, but a convenient place to test this API.
+ */
+ odp_schedule_prefetch(1);
+
+ num--;
+ }
+
+ /* Release scheduler context and leave groups */
+ odp_schedule_group_join(mygrp1, &mymask);
+ odp_schedule_group_join(mygrp2, &mymask);
+ CU_ASSERT(exit_schedule_loop() == 0);
+ odp_schedule_group_leave(mygrp1, &mymask);
+ odp_schedule_group_leave(mygrp2, &mymask);
+
+ /* Done with queues for this round */
+ CU_ASSERT_FATAL(odp_queue_destroy(queue_grp1) == 0);
+ CU_ASSERT_FATAL(odp_queue_destroy(queue_grp2) == 0);
+
+ /* Verify we can no longer find our queues */
+ CU_ASSERT_FATAL(odp_queue_lookup("sched_group_test_queue_1") ==
+ ODP_QUEUE_INVALID);
+ CU_ASSERT_FATAL(odp_queue_lookup("sched_group_test_queue_2") ==
+ ODP_QUEUE_INVALID);
+ }
+
+ CU_ASSERT_FATAL(odp_schedule_group_destroy(mygrp1) == 0);
+ CU_ASSERT_FATAL(odp_schedule_group_destroy(mygrp2) == 0);
+ CU_ASSERT_FATAL(odp_pool_destroy(p) == 0);
+}
+
+/* Worker body of the chaos tests: repeatedly schedule an event and
+ * re-enqueue it to the queue selected by the event's own sequence
+ * number, bouncing events between all chaos queues for
+ * CHAOS_NUM_ROUNDS scheduling attempts. Failures are reported via
+ * CUnit asserts; always returns 0. */
+static int chaos_thread(void *arg)
+{
+ uint64_t i, wait;
+ int rc;
+ chaos_buf *cbuf;
+ odp_event_t ev;
+ odp_queue_t from;
+ thread_args_t *args = (thread_args_t *)arg;
+ test_globals_t *globals = args->globals;
+ int me = odp_thread_id();
+ odp_time_t start_time, end_time, diff;
+
+ if (CHAOS_DEBUG)
+ printf("Chaos thread %d starting...\n", me);
+
+ /* Wait for all threads to start */
+ odp_barrier_wait(&globals->barrier);
+ start_time = odp_time_local();
+
+ /* Run the test */
+ wait = odp_schedule_wait_time(5 * ODP_TIME_MSEC_IN_NS);
+ for (i = 0; i < CHAOS_NUM_ROUNDS; i++) {
+ ev = odp_schedule(&from, wait);
+ if (ev == ODP_EVENT_INVALID)
+ continue;
+
+ cbuf = odp_buffer_addr(odp_buffer_from_event(ev));
+ CU_ASSERT_FATAL(cbuf != NULL);
+ if (CHAOS_DEBUG)
+ printf("Thread %d received event %" PRIu64
+ " seq %" PRIu64
+ " from Q %s, sending to Q %s\n",
+ me, cbuf->evno, cbuf->seqno,
+ globals->
+ chaos_q
+ [CHAOS_PTR_TO_NDX(odp_queue_context(from))].name,
+ globals->
+ chaos_q[cbuf->seqno % CHAOS_NUM_QUEUES].name);
+
+ /* seqno++ rotates the target queue on every hop */
+ rc = odp_queue_enq(
+ globals->
+ chaos_q[cbuf->seqno++ % CHAOS_NUM_QUEUES].handle,
+ ev);
+ CU_ASSERT_FATAL(rc == 0);
+ }
+
+ /* Fix: the loop above runs CHAOS_NUM_ROUNDS times, not
+ * CHAOS_NUM_EVENTS, so report the correct constant. */
+ if (CHAOS_DEBUG)
+ printf("Thread %d completed %d rounds...terminating\n",
+ odp_thread_id(), CHAOS_NUM_ROUNDS);
+
+ exit_schedule_loop();
+
+ end_time = odp_time_local();
+ diff = odp_time_diff(end_time, start_time);
+
+ printf("Thread %d ends, elapsed time = %" PRIu64 "us\n",
+ odp_thread_id(), odp_time_to_ns(diff) / 1000);
+
+ return 0;
+}
+
+/* Core of the chaos tests: create CHAOS_NUM_QUEUES scheduled queues of
+ * the requested sync type (qtype == num_sync selects a round-robin mix
+ * of all three types), seed them with CHAOS_NUM_EVENTS events and let
+ * worker threads bounce the events between queues via chaos_thread(). */
+static void chaos_run(unsigned int qtype)
+{
+ odp_pool_t pool;
+ odp_pool_param_t params;
+ odp_queue_param_t qp;
+ odp_buffer_t buf;
+ chaos_buf *cbuf;
+ test_globals_t *globals;
+ thread_args_t *args;
+ odp_shm_t shm;
+ int i, rc;
+ odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_PARALLEL,
+ ODP_SCHED_SYNC_ATOMIC,
+ ODP_SCHED_SYNC_ORDERED};
+ const unsigned num_sync = (sizeof(sync) / sizeof(odp_schedule_sync_t));
+ const char *const qtypes[] = {"parallel", "atomic", "ordered"};
+
+ /* Set up the scheduling environment */
+ shm = odp_shm_lookup(GLOBALS_SHM_NAME);
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+ globals = odp_shm_addr(shm);
+ CU_ASSERT_PTR_NOT_NULL_FATAL(globals);
+
+ shm = odp_shm_lookup(SHM_THR_ARGS_NAME);
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+ args = odp_shm_addr(shm);
+ CU_ASSERT_PTR_NOT_NULL_FATAL(args);
+
+ args->globals = globals;
+ args->cu_thr.numthrds = globals->num_workers;
+
+ odp_queue_param_init(&qp);
+ odp_pool_param_init(&params);
+ params.buf.size = sizeof(chaos_buf);
+ params.buf.align = 0;
+ params.buf.num = CHAOS_NUM_EVENTS;
+ params.type = ODP_POOL_BUFFER;
+
+ pool = odp_pool_create("sched_chaos_pool", &params);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+ qp.type = ODP_QUEUE_TYPE_SCHED;
+ qp.sched.prio = ODP_SCHED_PRIO_DEFAULT;
+ qp.sched.group = ODP_SCHED_GROUP_ALL;
+
+ /* qtype == num_sync means alternate over all sync types */
+ for (i = 0; i < CHAOS_NUM_QUEUES; i++) {
+ uint32_t ndx = (qtype == num_sync ? i % num_sync : qtype);
+
+ qp.sched.sync = sync[ndx];
+ snprintf(globals->chaos_q[i].name,
+ sizeof(globals->chaos_q[i].name),
+ "chaos queue %d - %s", i,
+ qtypes[ndx]);
+
+ globals->chaos_q[i].handle =
+ odp_queue_create(globals->chaos_q[i].name, &qp);
+ CU_ASSERT_FATAL(globals->chaos_q[i].handle !=
+ ODP_QUEUE_INVALID);
+ /* Queue context stores the queue's own index for debug */
+ rc = odp_queue_context_set(globals->chaos_q[i].handle,
+ CHAOS_NDX_TO_PTR(i), 0);
+ CU_ASSERT_FATAL(rc == 0);
+ }
+
+ /* Now populate the queues with the initial seed elements */
+ for (i = 0; i < CHAOS_NUM_EVENTS; i++) {
+ buf = odp_buffer_alloc(pool);
+ CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+ cbuf = odp_buffer_addr(buf);
+ cbuf->evno = i;
+ cbuf->seqno = 0;
+ rc = odp_queue_enq(
+ globals->chaos_q[i % CHAOS_NUM_QUEUES].handle,
+ odp_buffer_to_event(buf));
+ CU_ASSERT_FATAL(rc == 0);
+ }
+
+ /* Run the test */
+ odp_cunit_thread_create(chaos_thread, &args->cu_thr);
+ odp_cunit_thread_exit(&args->cu_thr);
+
+ if (CHAOS_DEBUG)
+ printf("Thread %d returning from chaos threads..cleaning up\n",
+ odp_thread_id());
+
+ /* Events are still circulating; drain them before teardown */
+ drain_queues();
+ exit_schedule_loop();
+
+ for (i = 0; i < CHAOS_NUM_QUEUES; i++) {
+ if (CHAOS_DEBUG)
+ printf("Destroying queue %s\n",
+ globals->chaos_q[i].name);
+ rc = odp_queue_destroy(globals->chaos_q[i].handle);
+ CU_ASSERT(rc == 0);
+ }
+
+ rc = odp_pool_destroy(pool);
+ CU_ASSERT(rc == 0);
+}
+
+/* Chaos test entry points. The argument selects the queue sync type:
+ * 0 = parallel, 1 = atomic, 2 = ordered, 3 = mix of all three
+ * (see sync[]/num_sync in chaos_run()). */
+void scheduler_test_parallel(void)
+{
+ chaos_run(0);
+}
+
+void scheduler_test_atomic(void)
+{
+ chaos_run(1);
+}
+
+void scheduler_test_ordered(void)
+{
+ chaos_run(2);
+}
+
+void scheduler_test_chaos(void)
+{
+ chaos_run(3);
+}
+
+/* Worker body shared by all schedule_common()/parallel_execute() tests.
+ * Dequeues events (single or multi mode per args->enable_schd_multi)
+ * until globals->buf_count reaches zero. For ORDERED sync each event is
+ * copied to the paired plain queue and the per-lock sequence counters
+ * are checked under odp_schedule_order_lock(); after the run one thread
+ * drains the plain queues and verifies the output order. */
+static int schedule_common_(void *arg)
+{
+ thread_args_t *args = (thread_args_t *)arg;
+ odp_schedule_sync_t sync;
+ test_globals_t *globals;
+ queue_context *qctx;
+ buf_contents *bctx, *bctx_cpy;
+ odp_pool_t pool;
+ int locked;
+ int num;
+ odp_event_t ev;
+ odp_buffer_t buf, buf_cpy;
+ odp_queue_t from;
+
+ globals = args->globals;
+ sync = args->sync;
+
+ pool = odp_pool_lookup(MSG_POOL_NAME);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ if (args->num_workers > 1)
+ odp_barrier_wait(&globals->barrier);
+
+ while (1) {
+ from = ODP_QUEUE_INVALID;
+ num = 0;
+
+ /* Exit when all buffers have been accounted for */
+ odp_ticketlock_lock(&globals->lock);
+ if (globals->buf_count == 0) {
+ odp_ticketlock_unlock(&globals->lock);
+ break;
+ }
+ odp_ticketlock_unlock(&globals->lock);
+
+ if (args->enable_schd_multi) {
+ /* Multi-event dequeue path */
+ odp_event_t events[BURST_BUF_SIZE],
+ ev_cpy[BURST_BUF_SIZE];
+ odp_buffer_t buf_cpy[BURST_BUF_SIZE];
+ int j;
+
+ num = odp_schedule_multi(&from, ODP_SCHED_NO_WAIT,
+ events, BURST_BUF_SIZE);
+ CU_ASSERT(num >= 0);
+ CU_ASSERT(num <= BURST_BUF_SIZE);
+ if (num == 0)
+ continue;
+
+ if (sync == ODP_SCHED_SYNC_ORDERED) {
+ int ndx;
+ int ndx_max;
+ int rc;
+
+ ndx_max = odp_queue_lock_count(from);
+ CU_ASSERT_FATAL(ndx_max >= 0);
+
+ qctx = odp_queue_context(from);
+
+ /* Copy each event so the ordering can be
+ * verified later on the plain queue */
+ for (j = 0; j < num; j++) {
+ bctx = odp_buffer_addr(
+ odp_buffer_from_event
+ (events[j]));
+
+ buf_cpy[j] = odp_buffer_alloc(pool);
+ CU_ASSERT_FATAL(buf_cpy[j] !=
+ ODP_BUFFER_INVALID);
+ bctx_cpy = odp_buffer_addr(buf_cpy[j]);
+ memcpy(bctx_cpy, bctx,
+ sizeof(buf_contents));
+ bctx_cpy->output_sequence =
+ bctx_cpy->sequence;
+ ev_cpy[j] =
+ odp_buffer_to_event(buf_cpy[j]);
+ }
+
+ rc = odp_queue_enq_multi(qctx->pq_handle,
+ ev_cpy, num);
+ CU_ASSERT(rc == num);
+
+ /* Under each ordered lock the first event's
+ * sequence must match the lock's counter */
+ bctx = odp_buffer_addr(
+ odp_buffer_from_event(events[0]));
+ for (ndx = 0; ndx < ndx_max; ndx++) {
+ odp_schedule_order_lock(ndx);
+ CU_ASSERT(bctx->sequence ==
+ qctx->lock_sequence[ndx]);
+ qctx->lock_sequence[ndx] += num;
+ odp_schedule_order_unlock(ndx);
+ }
+ }
+
+ for (j = 0; j < num; j++)
+ odp_event_free(events[j]);
+ } else {
+ /* Single-event dequeue path */
+ ev = odp_schedule(&from, ODP_SCHED_NO_WAIT);
+ if (ev == ODP_EVENT_INVALID)
+ continue;
+
+ buf = odp_buffer_from_event(ev);
+ num = 1;
+ if (sync == ODP_SCHED_SYNC_ORDERED) {
+ int ndx;
+ int ndx_max;
+ int rc;
+
+ ndx_max = odp_queue_lock_count(from);
+ CU_ASSERT_FATAL(ndx_max >= 0);
+
+ qctx = odp_queue_context(from);
+ bctx = odp_buffer_addr(buf);
+ buf_cpy = odp_buffer_alloc(pool);
+ CU_ASSERT_FATAL(buf_cpy != ODP_BUFFER_INVALID);
+ bctx_cpy = odp_buffer_addr(buf_cpy);
+ memcpy(bctx_cpy, bctx, sizeof(buf_contents));
+ bctx_cpy->output_sequence = bctx_cpy->sequence;
+
+ rc = odp_queue_enq(qctx->pq_handle,
+ odp_buffer_to_event
+ (buf_cpy));
+ CU_ASSERT(rc == 0);
+
+ for (ndx = 0; ndx < ndx_max; ndx++) {
+ odp_schedule_order_lock(ndx);
+ CU_ASSERT(bctx->sequence ==
+ qctx->lock_sequence[ndx]);
+ qctx->lock_sequence[ndx] += num;
+ odp_schedule_order_unlock(ndx);
+ }
+ }
+
+ odp_buffer_free(buf);
+ }
+
+ /* Exclusive-access check: with an atomic context held no
+ * other thread may be in this section concurrently */
+ if (args->enable_excl_atomic) {
+ locked = odp_spinlock_trylock(&globals->atomic_lock);
+ CU_ASSERT(locked != 0);
+ CU_ASSERT(from != ODP_QUEUE_INVALID);
+ if (locked) {
+ int cnt;
+ odp_time_t time = ODP_TIME_NULL;
+ /* Do some work here to keep the thread busy */
+ for (cnt = 0; cnt < 1000; cnt++)
+ time = odp_time_sum(time,
+ odp_time_local());
+
+ odp_spinlock_unlock(&globals->atomic_lock);
+ }
+ }
+
+ if (sync == ODP_SCHED_SYNC_ATOMIC)
+ odp_schedule_release_atomic();
+
+ if (sync == ODP_SCHED_SYNC_ORDERED)
+ odp_schedule_release_ordered();
+
+ odp_ticketlock_lock(&globals->lock);
+
+ globals->buf_count -= num;
+
+ if (globals->buf_count < 0) {
+ odp_ticketlock_unlock(&globals->lock);
+ CU_FAIL_FATAL("Buffer counting failed");
+ }
+
+ odp_ticketlock_unlock(&globals->lock);
+ }
+
+ if (args->num_workers > 1)
+ odp_barrier_wait(&globals->barrier);
+
+ /* Ordered verification: the single thread that wins the lock
+ * drains the plain queues and checks the output sequence */
+ if (sync == ODP_SCHED_SYNC_ORDERED)
+ locked = odp_ticketlock_trylock(&globals->lock);
+ else
+ locked = 0;
+
+ if (locked && globals->buf_count_cpy > 0) {
+ odp_event_t ev;
+ odp_queue_t pq;
+ uint64_t seq;
+ uint64_t bcount = 0;
+ int i, j;
+ char name[32];
+ uint64_t num_bufs = args->num_bufs;
+ uint64_t buf_count = globals->buf_count_cpy;
+
+ for (i = 0; i < args->num_prio; i++) {
+ for (j = 0; j < args->num_queues; j++) {
+ snprintf(name, sizeof(name),
+ "plain_%d_%d_o", i, j);
+ pq = odp_queue_lookup(name);
+ CU_ASSERT_FATAL(pq != ODP_QUEUE_INVALID);
+
+ seq = 0;
+ while (1) {
+ ev = odp_queue_deq(pq);
+
+ if (ev == ODP_EVENT_INVALID) {
+ CU_ASSERT(seq == num_bufs);
+ break;
+ }
+
+ bctx = odp_buffer_addr(
+ odp_buffer_from_event(ev));
+
+ CU_ASSERT(bctx->sequence == seq);
+ seq++;
+ bcount++;
+ odp_event_free(ev);
+ }
+ }
+ }
+ CU_ASSERT(bcount == buf_count);
+ globals->buf_count_cpy = 0;
+ }
+
+ if (locked)
+ odp_ticketlock_unlock(&globals->lock);
+
+ /* Clear scheduler atomic / ordered context between tests */
+ num = exit_schedule_loop();
+
+ CU_ASSERT(num == 0);
+
+ if (num)
+ printf("\nDROPPED %i events\n\n", num);
+
+ return 0;
+}
+
+/* Populate each test queue of the requested sync type with
+ * args->num_bufs buffers. For ordered queues every buffer is stamped
+ * with the queue context's running sequence number so the consumer can
+ * verify ordering. Updates globals->buf_count and buf_count_cpy with
+ * the number of buffers actually enqueued. */
+static void fill_queues(thread_args_t *args)
+{
+ odp_schedule_sync_t sync;
+ int num_queues, num_prio;
+ odp_pool_t pool;
+ int i, j, k;
+ int buf_count = 0;
+ test_globals_t *globals;
+ char name[32];
+ int ret;
+ odp_buffer_t buf;
+ odp_event_t ev;
+
+ globals = args->globals;
+ sync = args->sync;
+ num_queues = args->num_queues;
+ num_prio = args->num_prio;
+
+ pool = odp_pool_lookup(MSG_POOL_NAME);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ for (i = 0; i < num_prio; i++) {
+ for (j = 0; j < num_queues; j++) {
+ odp_queue_t queue;
+
+ /* Queue name suffix encodes the sync type:
+ * _n parallel, _a atomic, _o ordered */
+ switch (sync) {
+ case ODP_SCHED_SYNC_PARALLEL:
+ snprintf(name, sizeof(name),
+ "sched_%d_%d_n", i, j);
+ break;
+ case ODP_SCHED_SYNC_ATOMIC:
+ snprintf(name, sizeof(name),
+ "sched_%d_%d_a", i, j);
+ break;
+ case ODP_SCHED_SYNC_ORDERED:
+ snprintf(name, sizeof(name),
+ "sched_%d_%d_o", i, j);
+ break;
+ default:
+ CU_ASSERT_FATAL(0);
+ break;
+ }
+
+ queue = odp_queue_lookup(name);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ for (k = 0; k < args->num_bufs; k++) {
+ buf = odp_buffer_alloc(pool);
+ CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+ ev = odp_buffer_to_event(buf);
+ if (sync == ODP_SCHED_SYNC_ORDERED) {
+ queue_context *qctx =
+ odp_queue_context(queue);
+ buf_contents *bctx =
+ odp_buffer_addr(buf);
+ bctx->sequence = qctx->sequence++;
+ }
+
+ ret = odp_queue_enq(queue, ev);
+ CU_ASSERT_FATAL(ret == 0);
+
+ if (ret)
+ odp_buffer_free(buf);
+ else
+ buf_count++;
+ }
+ }
+ }
+
+ globals->buf_count = buf_count;
+ globals->buf_count_cpy = buf_count;
+}
+
+/* Reset the sequence counter and per-lock sequence counters of every
+ * ordered queue's context so the ordered tests can run again from a
+ * clean state. */
+static void reset_queues(thread_args_t *args)
+{
+ int i, j;
+ int num_prio = args->num_prio;
+ int num_queues = args->num_queues;
+ char name[32];
+
+ for (i = 0; i < num_prio; i++) {
+ for (j = 0; j < num_queues; j++) {
+ odp_queue_t queue;
+ queue_context *qctx;
+ int ndx;
+ int ndx_max;
+
+ snprintf(name, sizeof(name),
+ "sched_%d_%d_o", i, j);
+ queue = odp_queue_lookup(name);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ /* The reset does not depend on the buffer count,
+ * so do it once per queue rather than repeating
+ * it num_bufs times as before. */
+ qctx = odp_queue_context(queue);
+ ndx_max = odp_queue_lock_count(queue);
+ CU_ASSERT_FATAL(ndx_max >= 0);
+ qctx->sequence = 0;
+ for (ndx = 0; ndx < ndx_max; ndx++)
+ qctx->lock_sequence[ndx] = 0;
+ }
+ }
+}
+
+/* Run a scheduling test on the calling thread only: fill the queues,
+ * execute the common worker body inline and, for ordered sync, reset
+ * the queue contexts for the next test round. */
+static void schedule_common(odp_schedule_sync_t sync, int num_queues,
+ int num_prio, int enable_schd_multi)
+{
+ thread_args_t args;
+ odp_shm_t shm;
+ test_globals_t *globals;
+
+ shm = odp_shm_lookup(GLOBALS_SHM_NAME);
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+ globals = odp_shm_addr(shm);
+ CU_ASSERT_PTR_NOT_NULL_FATAL(globals);
+
+ memset(&args, 0, sizeof(thread_args_t));
+ args.globals = globals;
+ args.sync = sync;
+ args.num_queues = num_queues;
+ args.num_prio = num_prio;
+ args.num_bufs = BUFS_PER_QUEUE;
+ args.num_workers = 1;
+ args.enable_schd_multi = enable_schd_multi;
+ args.enable_excl_atomic = 0; /* Not needed with a single CPU */
+
+ fill_queues(&args);
+
+ schedule_common_(&args);
+ if (sync == ODP_SCHED_SYNC_ORDERED)
+ reset_queues(&args);
+}
+
+/* Run a scheduling test on globals->num_workers worker threads: fill
+ * the queues, launch schedule_common_() on the workers, wait for them
+ * to finish, and for ordered sync reset the queue state afterwards. */
+static void parallel_execute(odp_schedule_sync_t sync, int num_queues,
+ int num_prio, int enable_schd_multi,
+ int enable_excl_atomic)
+{
+ odp_shm_t shm;
+ test_globals_t *globals;
+ thread_args_t *args;
+
+ shm = odp_shm_lookup(GLOBALS_SHM_NAME);
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+ globals = odp_shm_addr(shm);
+ CU_ASSERT_PTR_NOT_NULL_FATAL(globals);
+
+ /* Thread args live in shared memory so workers can read them */
+ shm = odp_shm_lookup(SHM_THR_ARGS_NAME);
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+ args = odp_shm_addr(shm);
+ CU_ASSERT_PTR_NOT_NULL_FATAL(args);
+
+ args->globals = globals;
+ args->sync = sync;
+ args->num_queues = num_queues;
+ args->num_prio = num_prio;
+ if (enable_excl_atomic)
+ args->num_bufs = BUFS_PER_QUEUE_EXCL;
+ else
+ args->num_bufs = BUFS_PER_QUEUE;
+ args->num_workers = globals->num_workers;
+ args->enable_schd_multi = enable_schd_multi;
+ args->enable_excl_atomic = enable_excl_atomic;
+
+ fill_queues(args);
+
+ /* Create and launch worker threads */
+ args->cu_thr.numthrds = globals->num_workers;
+ odp_cunit_thread_create(schedule_common_, &args->cu_thr);
+
+ /* Wait for worker threads to terminate */
+ odp_cunit_thread_exit(&args->cu_thr);
+
+ /* Cleanup ordered queues for next pass */
+ if (sync == ODP_SCHED_SYNC_ORDERED)
+ reset_queues(args);
+}
+
+/* Single-event dequeue (SCHD_ONE) test matrix: the cases below vary
+ * queue count, priority count, sync type and thread count. */
+
+/* 1 queue 1 thread ODP_SCHED_SYNC_PARALLEL */
+void scheduler_test_1q_1t_n(void)
+{
+ schedule_common(ODP_SCHED_SYNC_PARALLEL, ONE_Q, ONE_PRIO, SCHD_ONE);
+}
+
+/* 1 queue 1 thread ODP_SCHED_SYNC_ATOMIC */
+void scheduler_test_1q_1t_a(void)
+{
+ schedule_common(ODP_SCHED_SYNC_ATOMIC, ONE_Q, ONE_PRIO, SCHD_ONE);
+}
+
+/* 1 queue 1 thread ODP_SCHED_SYNC_ORDERED */
+void scheduler_test_1q_1t_o(void)
+{
+ schedule_common(ODP_SCHED_SYNC_ORDERED, ONE_Q, ONE_PRIO, SCHD_ONE);
+}
+
+/* Many queues 1 thread ODP_SCHED_SYNC_PARALLEL */
+void scheduler_test_mq_1t_n(void)
+{
+ /* Only one priority involved in these tests, but use
+ the same number of queues the more general case uses */
+ schedule_common(ODP_SCHED_SYNC_PARALLEL, MANY_QS, ONE_PRIO, SCHD_ONE);
+}
+
+/* Many queues 1 thread ODP_SCHED_SYNC_ATOMIC */
+void scheduler_test_mq_1t_a(void)
+{
+ schedule_common(ODP_SCHED_SYNC_ATOMIC, MANY_QS, ONE_PRIO, SCHD_ONE);
+}
+
+/* Many queues 1 thread ODP_SCHED_SYNC_ORDERED */
+void scheduler_test_mq_1t_o(void)
+{
+ schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, ONE_PRIO, SCHD_ONE);
+}
+
+/* Many queues 1 thread check priority ODP_SCHED_SYNC_PARALLEL */
+void scheduler_test_mq_1t_prio_n(void)
+{
+ int prio = odp_schedule_num_prio();
+
+ schedule_common(ODP_SCHED_SYNC_PARALLEL, MANY_QS, prio, SCHD_ONE);
+}
+
+/* Many queues 1 thread check priority ODP_SCHED_SYNC_ATOMIC */
+void scheduler_test_mq_1t_prio_a(void)
+{
+ int prio = odp_schedule_num_prio();
+
+ schedule_common(ODP_SCHED_SYNC_ATOMIC, MANY_QS, prio, SCHD_ONE);
+}
+
+/* Many queues 1 thread check priority ODP_SCHED_SYNC_ORDERED */
+void scheduler_test_mq_1t_prio_o(void)
+{
+ int prio = odp_schedule_num_prio();
+
+ schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, prio, SCHD_ONE);
+}
+
+/* Many queues many threads check priority ODP_SCHED_SYNC_PARALLEL */
+void scheduler_test_mq_mt_prio_n(void)
+{
+ int prio = odp_schedule_num_prio();
+
+ parallel_execute(ODP_SCHED_SYNC_PARALLEL, MANY_QS, prio, SCHD_ONE,
+ DISABLE_EXCL_ATOMIC);
+}
+
+/* Many queues many threads check priority ODP_SCHED_SYNC_ATOMIC */
+void scheduler_test_mq_mt_prio_a(void)
+{
+ int prio = odp_schedule_num_prio();
+
+ parallel_execute(ODP_SCHED_SYNC_ATOMIC, MANY_QS, prio, SCHD_ONE,
+ DISABLE_EXCL_ATOMIC);
+}
+
+/* Many queues many threads check priority ODP_SCHED_SYNC_ORDERED */
+void scheduler_test_mq_mt_prio_o(void)
+{
+ int prio = odp_schedule_num_prio();
+
+ parallel_execute(ODP_SCHED_SYNC_ORDERED, MANY_QS, prio, SCHD_ONE,
+ DISABLE_EXCL_ATOMIC);
+}
+
+/* 1 queue many threads check exclusive access on ATOMIC queues */
+void scheduler_test_1q_mt_a_excl(void)
+{
+ parallel_execute(ODP_SCHED_SYNC_ATOMIC, ONE_Q, ONE_PRIO, SCHD_ONE,
+ ENABLE_EXCL_ATOMIC);
+}
+
+/* Multi-event dequeue (SCHD_MULTI) variants of the same test matrix. */
+
+/* 1 queue 1 thread ODP_SCHED_SYNC_PARALLEL multi */
+void scheduler_test_multi_1q_1t_n(void)
+{
+ schedule_common(ODP_SCHED_SYNC_PARALLEL, ONE_Q, ONE_PRIO, SCHD_MULTI);
+}
+
+/* 1 queue 1 thread ODP_SCHED_SYNC_ATOMIC multi */
+void scheduler_test_multi_1q_1t_a(void)
+{
+ schedule_common(ODP_SCHED_SYNC_ATOMIC, ONE_Q, ONE_PRIO, SCHD_MULTI);
+}
+
+/* 1 queue 1 thread ODP_SCHED_SYNC_ORDERED multi */
+void scheduler_test_multi_1q_1t_o(void)
+{
+ schedule_common(ODP_SCHED_SYNC_ORDERED, ONE_Q, ONE_PRIO, SCHD_MULTI);
+}
+
+/* Many queues 1 thread ODP_SCHED_SYNC_PARALLEL multi */
+void scheduler_test_multi_mq_1t_n(void)
+{
+ /* Only one priority involved in these tests, but use
+ the same number of queues the more general case uses */
+ schedule_common(ODP_SCHED_SYNC_PARALLEL, MANY_QS, ONE_PRIO, SCHD_MULTI);
+}
+
+/* Many queues 1 thread ODP_SCHED_SYNC_ATOMIC multi */
+void scheduler_test_multi_mq_1t_a(void)
+{
+ schedule_common(ODP_SCHED_SYNC_ATOMIC, MANY_QS, ONE_PRIO, SCHD_MULTI);
+}
+
+/* Many queues 1 thread ODP_SCHED_SYNC_ORDERED multi */
+void scheduler_test_multi_mq_1t_o(void)
+{
+ schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, ONE_PRIO, SCHD_MULTI);
+}
+
+/* Many queues 1 thread check priority ODP_SCHED_SYNC_PARALLEL multi */
+void scheduler_test_multi_mq_1t_prio_n(void)
+{
+ int prio = odp_schedule_num_prio();
+
+ schedule_common(ODP_SCHED_SYNC_PARALLEL, MANY_QS, prio, SCHD_MULTI);
+}
+
+/* Many queues 1 thread check priority ODP_SCHED_SYNC_ATOMIC multi */
+void scheduler_test_multi_mq_1t_prio_a(void)
+{
+ int prio = odp_schedule_num_prio();
+
+ schedule_common(ODP_SCHED_SYNC_ATOMIC, MANY_QS, prio, SCHD_MULTI);
+}
+
+/* Many queues 1 thread check priority ODP_SCHED_SYNC_ORDERED multi */
+void scheduler_test_multi_mq_1t_prio_o(void)
+{
+ int prio = odp_schedule_num_prio();
+
+ schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, prio, SCHD_MULTI);
+}
+
+/* Many queues many threads check priority ODP_SCHED_SYNC_PARALLEL multi */
+void scheduler_test_multi_mq_mt_prio_n(void)
+{
+ int prio = odp_schedule_num_prio();
+
+ parallel_execute(ODP_SCHED_SYNC_PARALLEL, MANY_QS, prio, SCHD_MULTI, 0);
+}
+
+/* Many queues many threads check priority ODP_SCHED_SYNC_ATOMIC multi */
+void scheduler_test_multi_mq_mt_prio_a(void)
+{
+ int prio = odp_schedule_num_prio();
+
+ parallel_execute(ODP_SCHED_SYNC_ATOMIC, MANY_QS, prio, SCHD_MULTI, 0);
+}
+
+/* Many queues many threads check priority ODP_SCHED_SYNC_ORDERED multi */
+void scheduler_test_multi_mq_mt_prio_o(void)
+{
+ int prio = odp_schedule_num_prio();
+
+ parallel_execute(ODP_SCHED_SYNC_ORDERED, MANY_QS, prio, SCHD_MULTI, 0);
+}
+
+/* 1 queue many threads check exclusive access on ATOMIC queues multi */
+void scheduler_test_multi_1q_mt_a_excl(void)
+{
+ parallel_execute(ODP_SCHED_SYNC_ATOMIC, ONE_Q, ONE_PRIO, SCHD_MULTI,
+ ENABLE_EXCL_ATOMIC);
+}
+
+/* Verify odp_schedule_pause()/odp_schedule_resume(): after pause the
+ * scheduler must stop serving new events to this thread (at most the
+ * locally stashed ones are returned), and after resume the remaining
+ * events must all be delivered. */
+void scheduler_test_pause_resume(void)
+{
+ odp_queue_t queue;
+ odp_buffer_t buf;
+ odp_event_t ev;
+ odp_queue_t from;
+ int i;
+ int local_bufs = 0;
+ int ret;
+
+ queue = odp_queue_lookup("sched_0_0_n");
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ /* NOTE(review): 'pool' is not declared in this function -
+ * presumably a file-scope handle declared above this chunk;
+ * confirm. */
+ pool = odp_pool_lookup(MSG_POOL_NAME);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ for (i = 0; i < NUM_BUFS_PAUSE; i++) {
+ buf = odp_buffer_alloc(pool);
+ CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+ ev = odp_buffer_to_event(buf);
+ ret = odp_queue_enq(queue, ev);
+ CU_ASSERT(ret == 0);
+
+ if (ret)
+ odp_buffer_free(buf);
+ }
+
+ /* Consume a few events before pausing */
+ for (i = 0; i < NUM_BUFS_BEFORE_PAUSE; i++) {
+ from = ODP_QUEUE_INVALID;
+ ev = odp_schedule(&from, ODP_SCHED_WAIT);
+ CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
+ CU_ASSERT(from == queue);
+ buf = odp_buffer_from_event(ev);
+ odp_buffer_free(buf);
+ }
+
+ odp_schedule_pause();
+
+ /* While paused, only locally pre-fetched events may appear */
+ while (1) {
+ ev = odp_schedule(&from, ODP_SCHED_NO_WAIT);
+ if (ev == ODP_EVENT_INVALID)
+ break;
+
+ CU_ASSERT(from == queue);
+ buf = odp_buffer_from_event(ev);
+ odp_buffer_free(buf);
+ local_bufs++;
+ }
+
+ /* Pause must not have delivered everything that was left */
+ CU_ASSERT(local_bufs < NUM_BUFS_PAUSE - NUM_BUFS_BEFORE_PAUSE);
+
+ odp_schedule_resume();
+
+ /* Drain the remainder; ODP_SCHED_WAIT blocks until an event
+ * arrives. NOTE(review): ev is not validity-checked here
+ * before use - confirm ODP_SCHED_WAIT guarantees a valid
+ * event. */
+ for (i = local_bufs + NUM_BUFS_BEFORE_PAUSE; i < NUM_BUFS_PAUSE; i++) {
+ ev = odp_schedule(&from, ODP_SCHED_WAIT);
+ CU_ASSERT(from == queue);
+ buf = odp_buffer_from_event(ev);
+ odp_buffer_free(buf);
+ }
+
+ ret = exit_schedule_loop();
+
+ CU_ASSERT(ret == 0);
+}
+
+/* Create the full queue fixture used by the scheduling tests: for each
+ * priority and queue index, one parallel, one atomic and one ordered
+ * scheduled queue plus a plain companion queue that receives copies of
+ * ordered events for later verification. Queue contexts are allocated
+ * from a dedicated pool. Returns 0 on success, -1 on any failure. */
+static int create_queues(void)
+{
+ int i, j, prios, rc;
+ odp_queue_capability_t capa;
+ odp_pool_param_t params;
+ odp_buffer_t queue_ctx_buf;
+ queue_context *qctx, *pqctx;
+ uint32_t ndx;
+ odp_queue_param_t p;
+
+ if (odp_queue_capability(&capa) < 0) {
+ printf("Queue capability query failed\n");
+ return -1;
+ }
+
+ /* Limit to test maximum */
+ if (capa.max_ordered_locks > MAX_ORDERED_LOCKS) {
+ capa.max_ordered_locks = MAX_ORDERED_LOCKS;
+ printf("Testing only %u ordered locks\n",
+ capa.max_ordered_locks);
+ }
+
+ prios = odp_schedule_num_prio();
+ odp_pool_param_init(&params);
+ params.buf.size = sizeof(queue_context);
+ /* One context per ordered queue and one per plain queue */
+ params.buf.num = prios * QUEUES_PER_PRIO * 2;
+ params.type = ODP_POOL_BUFFER;
+
+ queue_ctx_pool = odp_pool_create(QUEUE_CTX_POOL_NAME, &params);
+
+ if (queue_ctx_pool == ODP_POOL_INVALID) {
+ printf("Pool creation failed (queue ctx).\n");
+ return -1;
+ }
+
+ for (i = 0; i < prios; i++) {
+ odp_queue_param_init(&p);
+ p.type = ODP_QUEUE_TYPE_SCHED;
+ p.sched.prio = i;
+
+ for (j = 0; j < QUEUES_PER_PRIO; j++) {
+ /* Per sched sync type */
+ char name[32];
+ odp_queue_t q, pq;
+
+ snprintf(name, sizeof(name), "sched_%d_%d_n", i, j);
+ p.sched.sync = ODP_SCHED_SYNC_PARALLEL;
+ q = odp_queue_create(name, &p);
+
+ if (q == ODP_QUEUE_INVALID) {
+ printf("Schedule queue create failed.\n");
+ return -1;
+ }
+
+ snprintf(name, sizeof(name), "sched_%d_%d_a", i, j);
+ p.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+ q = odp_queue_create(name, &p);
+
+ if (q == ODP_QUEUE_INVALID) {
+ printf("Schedule queue create failed.\n");
+ return -1;
+ }
+
+ snprintf(name, sizeof(name), "plain_%d_%d_o", i, j);
+ pq = odp_queue_create(name, NULL);
+ if (pq == ODP_QUEUE_INVALID) {
+ printf("Plain queue create failed.\n");
+ return -1;
+ }
+
+ queue_ctx_buf = odp_buffer_alloc(queue_ctx_pool);
+
+ if (queue_ctx_buf == ODP_BUFFER_INVALID) {
+ printf("Cannot allocate plain queue ctx buf\n");
+ return -1;
+ }
+
+ pqctx = odp_buffer_addr(queue_ctx_buf);
+ pqctx->ctx_handle = queue_ctx_buf;
+ pqctx->sequence = 0;
+
+ rc = odp_queue_context_set(pq, pqctx, 0);
+
+ if (rc != 0) {
+ printf("Cannot set plain queue context\n");
+ return -1;
+ }
+
+ snprintf(name, sizeof(name), "sched_%d_%d_o", i, j);
+ p.sched.sync = ODP_SCHED_SYNC_ORDERED;
+ p.sched.lock_count = capa.max_ordered_locks;
+ q = odp_queue_create(name, &p);
+
+ if (q == ODP_QUEUE_INVALID) {
+ printf("Schedule queue create failed.\n");
+ return -1;
+ }
+ if (odp_queue_lock_count(q) !=
+ (int)capa.max_ordered_locks) {
+ /* Fix: max_ordered_locks is uint32_t, so
+ * print it with %u (as done above), not %d */
+ printf("Queue %" PRIu64 " created with "
+ "%d locks instead of expected %u\n",
+ odp_queue_to_u64(q),
+ odp_queue_lock_count(q),
+ capa.max_ordered_locks);
+ return -1;
+ }
+
+ queue_ctx_buf = odp_buffer_alloc(queue_ctx_pool);
+
+ if (queue_ctx_buf == ODP_BUFFER_INVALID) {
+ printf("Cannot allocate queue ctx buf\n");
+ return -1;
+ }
+
+ qctx = odp_buffer_addr(queue_ctx_buf);
+ qctx->ctx_handle = queue_ctx_buf;
+ qctx->pq_handle = pq;
+ qctx->sequence = 0;
+
+ for (ndx = 0;
+ ndx < capa.max_ordered_locks;
+ ndx++) {
+ qctx->lock_sequence[ndx] = 0;
+ }
+
+ rc = odp_queue_context_set(q, qctx, 0);
+
+ if (rc != 0) {
+ printf("Cannot set queue context\n");
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/* Suite init (run once by CUnit): create the message pool, the shared
+ * global state and thread-argument areas, the synchronization
+ * primitives and all test queues. Returns 0 on success, -1 on any
+ * failure. */
+int scheduler_suite_init(void)
+{
+ odp_cpumask_t mask;
+ odp_shm_t shm;
+ odp_pool_t pool;
+ test_globals_t *globals;
+ thread_args_t *args;
+ odp_pool_param_t params;
+
+ odp_pool_param_init(&params);
+ params.buf.size = BUF_SIZE;
+ params.buf.align = 0;
+ params.buf.num = MSG_POOL_SIZE;
+ params.type = ODP_POOL_BUFFER;
+
+ pool = odp_pool_create(MSG_POOL_NAME, &params);
+
+ if (pool == ODP_POOL_INVALID) {
+ printf("Pool creation failed (msg).\n");
+ return -1;
+ }
+
+ shm = odp_shm_reserve(GLOBALS_SHM_NAME,
+ sizeof(test_globals_t), ODP_CACHE_LINE_SIZE, 0);
+
+ /* A failed reserve is detected via the NULL address below */
+ globals = odp_shm_addr(shm);
+
+ if (!globals) {
+ printf("Shared memory reserve failed (globals).\n");
+ return -1;
+ }
+
+ memset(globals, 0, sizeof(test_globals_t));
+
+ globals->num_workers = odp_cpumask_default_worker(&mask, 0);
+ if (globals->num_workers > MAX_WORKERS)
+ globals->num_workers = MAX_WORKERS;
+
+ shm = odp_shm_reserve(SHM_THR_ARGS_NAME, sizeof(thread_args_t),
+ ODP_CACHE_LINE_SIZE, 0);
+ args = odp_shm_addr(shm);
+
+ if (!args) {
+ printf("Shared memory reserve failed (args).\n");
+ return -1;
+ }
+
+ memset(args, 0, sizeof(thread_args_t));
+
+ /* Barrier to sync test case execution */
+ odp_barrier_init(&globals->barrier, globals->num_workers);
+ odp_ticketlock_init(&globals->lock);
+ odp_spinlock_init(&globals->atomic_lock);
+
+ if (create_queues() != 0)
+ return -1;
+
+ return 0;
+}
+
+/* Look up a queue by name, free its context buffer (if one was set)
+ * and destroy the queue. Returns odp_queue_destroy()'s result, or -1
+ * if no queue with that name exists. */
+static int destroy_queue(const char *name)
+{
+ odp_queue_t q;
+ queue_context *qctx;
+
+ q = odp_queue_lookup(name);
+
+ if (q == ODP_QUEUE_INVALID)
+ return -1;
+ qctx = odp_queue_context(q);
+ if (qctx)
+ odp_buffer_free(qctx->ctx_handle);
+
+ return odp_queue_destroy(q);
+}
+
+/* Destroy every queue created by create_queues() (parallel, atomic,
+ * ordered and plain companions) and then the queue context pool.
+ * Returns 0 on success, -1 on the first failure. */
+static int destroy_queues(void)
+{
+ int i, j, prios;
+
+ prios = odp_schedule_num_prio();
+
+ for (i = 0; i < prios; i++) {
+ for (j = 0; j < QUEUES_PER_PRIO; j++) {
+ char name[32];
+
+ snprintf(name, sizeof(name), "sched_%d_%d_n", i, j);
+ if (destroy_queue(name) != 0)
+ return -1;
+
+ snprintf(name, sizeof(name), "sched_%d_%d_a", i, j);
+ if (destroy_queue(name) != 0)
+ return -1;
+
+ snprintf(name, sizeof(name), "sched_%d_%d_o", i, j);
+ if (destroy_queue(name) != 0)
+ return -1;
+
+ snprintf(name, sizeof(name), "plain_%d_%d_o", i, j);
+ if (destroy_queue(name) != 0)
+ return -1;
+ }
+ }
+
+ if (odp_pool_destroy(queue_ctx_pool) != 0) {
+ fprintf(stderr, "error: failed to destroy queue ctx pool\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Suite teardown: destroy the test queues and the message pool.
+ * NOTE(review): a pool destroy failure is only logged and 0 is still
+ * returned, unlike the queue path which returns -1 - confirm this
+ * asymmetry is intentional. */
+int scheduler_suite_term(void)
+{
+ odp_pool_t pool;
+
+ if (destroy_queues() != 0) {
+ fprintf(stderr, "error: failed to destroy queues\n");
+ return -1;
+ }
+
+ pool = odp_pool_lookup(MSG_POOL_NAME);
+ if (odp_pool_destroy(pool) != 0)
+ fprintf(stderr, "error: failed to destroy pool\n");
+
+ return 0;
+}
+
+/* Registered test cases, executed by CUnit in this order */
+odp_testinfo_t scheduler_suite[] = {
+ ODP_TEST_INFO(scheduler_test_wait_time),
+ ODP_TEST_INFO(scheduler_test_num_prio),
+ ODP_TEST_INFO(scheduler_test_queue_destroy),
+ ODP_TEST_INFO(scheduler_test_groups),
+ ODP_TEST_INFO(scheduler_test_pause_resume),
+ ODP_TEST_INFO(scheduler_test_parallel),
+ ODP_TEST_INFO(scheduler_test_atomic),
+ ODP_TEST_INFO(scheduler_test_ordered),
+ ODP_TEST_INFO(scheduler_test_chaos),
+ ODP_TEST_INFO(scheduler_test_1q_1t_n),
+ ODP_TEST_INFO(scheduler_test_1q_1t_a),
+ ODP_TEST_INFO(scheduler_test_1q_1t_o),
+ ODP_TEST_INFO(scheduler_test_mq_1t_n),
+ ODP_TEST_INFO(scheduler_test_mq_1t_a),
+ ODP_TEST_INFO(scheduler_test_mq_1t_o),
+ ODP_TEST_INFO(scheduler_test_mq_1t_prio_n),
+ ODP_TEST_INFO(scheduler_test_mq_1t_prio_a),
+ ODP_TEST_INFO(scheduler_test_mq_1t_prio_o),
+ ODP_TEST_INFO(scheduler_test_mq_mt_prio_n),
+ ODP_TEST_INFO(scheduler_test_mq_mt_prio_a),
+ ODP_TEST_INFO(scheduler_test_mq_mt_prio_o),
+ ODP_TEST_INFO(scheduler_test_1q_mt_a_excl),
+ ODP_TEST_INFO(scheduler_test_multi_1q_1t_n),
+ ODP_TEST_INFO(scheduler_test_multi_1q_1t_a),
+ ODP_TEST_INFO(scheduler_test_multi_1q_1t_o),
+ ODP_TEST_INFO(scheduler_test_multi_mq_1t_n),
+ ODP_TEST_INFO(scheduler_test_multi_mq_1t_a),
+ ODP_TEST_INFO(scheduler_test_multi_mq_1t_o),
+ ODP_TEST_INFO(scheduler_test_multi_mq_1t_prio_n),
+ ODP_TEST_INFO(scheduler_test_multi_mq_1t_prio_a),
+ ODP_TEST_INFO(scheduler_test_multi_mq_1t_prio_o),
+ ODP_TEST_INFO(scheduler_test_multi_mq_mt_prio_n),
+ ODP_TEST_INFO(scheduler_test_multi_mq_mt_prio_a),
+ ODP_TEST_INFO(scheduler_test_multi_mq_mt_prio_o),
+ ODP_TEST_INFO(scheduler_test_multi_1q_mt_a_excl),
+ ODP_TEST_INFO_NULL,
+};
+
+/* CUnit suite registry: one suite with its init/term hooks */
+odp_suiteinfo_t scheduler_suites[] = {
+ {"Scheduler",
+ scheduler_suite_init, scheduler_suite_term, scheduler_suite
+ },
+ ODP_SUITE_INFO_NULL,
+};
+
+/* Test program entry point: parse the common CUnit options, register
+ * the scheduler suite and run it. Returns non-zero on failure. */
+int scheduler_main(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(argc, argv))
+ return -1;
+
+ ret = odp_cunit_register(scheduler_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/common_plat/validation/api/scheduler/scheduler.h b/test/common_plat/validation/api/scheduler/scheduler.h
new file mode 100644
index 000000000..a619d89b2
--- /dev/null
+++ b/test/common_plat/validation/api/scheduler/scheduler.h
@@ -0,0 +1,62 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _ODP_TEST_SCHEDULER_H_
+#define _ODP_TEST_SCHEDULER_H_
+
+#include <odp_cunit_common.h>
+
+/* test functions: */
+void scheduler_test_wait_time(void);
+void scheduler_test_num_prio(void);
+void scheduler_test_queue_destroy(void);
+void scheduler_test_groups(void);
+void scheduler_test_chaos(void);
+void scheduler_test_parallel(void);
+void scheduler_test_atomic(void);
+void scheduler_test_ordered(void);
+void scheduler_test_1q_1t_n(void);
+void scheduler_test_1q_1t_a(void);
+void scheduler_test_1q_1t_o(void);
+void scheduler_test_mq_1t_n(void);
+void scheduler_test_mq_1t_a(void);
+void scheduler_test_mq_1t_o(void);
+void scheduler_test_mq_1t_prio_n(void);
+void scheduler_test_mq_1t_prio_a(void);
+void scheduler_test_mq_1t_prio_o(void);
+void scheduler_test_mq_mt_prio_n(void);
+void scheduler_test_mq_mt_prio_a(void);
+void scheduler_test_mq_mt_prio_o(void);
+void scheduler_test_1q_mt_a_excl(void);
+void scheduler_test_multi_1q_1t_n(void);
+void scheduler_test_multi_1q_1t_a(void);
+void scheduler_test_multi_1q_1t_o(void);
+void scheduler_test_multi_mq_1t_n(void);
+void scheduler_test_multi_mq_1t_a(void);
+void scheduler_test_multi_mq_1t_o(void);
+void scheduler_test_multi_mq_1t_prio_n(void);
+void scheduler_test_multi_mq_1t_prio_a(void);
+void scheduler_test_multi_mq_1t_prio_o(void);
+void scheduler_test_multi_mq_mt_prio_n(void);
+void scheduler_test_multi_mq_mt_prio_a(void);
+void scheduler_test_multi_mq_mt_prio_o(void);
+void scheduler_test_multi_1q_mt_a_excl(void);
+void scheduler_test_pause_resume(void);
+
+/* test arrays: */
+extern odp_testinfo_t scheduler_suite[];
+
+/* test array init/term functions: */
+int scheduler_suite_init(void);
+int scheduler_suite_term(void);
+
+/* test registry: */
+extern odp_suiteinfo_t scheduler_suites[];
+
+/* main test program: */
+int scheduler_main(int argc, char *argv[]);
+
+#endif
diff --git a/test/common_plat/validation/api/scheduler/scheduler_main.c b/test/common_plat/validation/api/scheduler/scheduler_main.c
new file mode 100644
index 000000000..57cfa5fc5
--- /dev/null
+++ b/test/common_plat/validation/api/scheduler/scheduler_main.c
@@ -0,0 +1,12 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "scheduler.h"
+
/* Stand-alone executable entry point: delegate to the test driver. */
int main(int argc, char *argv[])
{
	return scheduler_main(argc, argv);
}
diff --git a/test/common_plat/validation/api/shmem/.gitignore b/test/common_plat/validation/api/shmem/.gitignore
new file mode 100644
index 000000000..4d82fd53a
--- /dev/null
+++ b/test/common_plat/validation/api/shmem/.gitignore
@@ -0,0 +1 @@
+shmem_main
diff --git a/test/common_plat/validation/api/shmem/Makefile.am b/test/common_plat/validation/api/shmem/Makefile.am
new file mode 100644
index 000000000..da88af662
--- /dev/null
+++ b/test/common_plat/validation/api/shmem/Makefile.am
@@ -0,0 +1,10 @@
+include ../Makefile.inc
+
+noinst_LTLIBRARIES = libtestshmem.la
+libtestshmem_la_SOURCES = shmem.c
+
+test_PROGRAMS = shmem_main$(EXEEXT)
+dist_shmem_main_SOURCES = shmem_main.c
+shmem_main_LDADD = libtestshmem.la $(LIBCUNIT_COMMON) $(LIBODP)
+
+EXTRA_DIST = shmem.h
diff --git a/test/common_plat/validation/api/shmem/shmem.c b/test/common_plat/validation/api/shmem/shmem.c
new file mode 100644
index 000000000..cbff6738c
--- /dev/null
+++ b/test/common_plat/validation/api/shmem/shmem.c
@@ -0,0 +1,108 @@
+/* Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+#include <odp_cunit_common.h>
+#include "shmem.h"
+
+#define ALIGE_SIZE (128)
+#define TESTNAME "cunit_test_shared_data"
+#define TEST_SHARE_FOO (0xf0f0f0f0)
+#define TEST_SHARE_BAR (0xf0f0f0f)
+
+static odp_barrier_t test_barrier;
+
/*
 * Worker thread body: look up the shared block reserved by the main
 * thread and verify both its contents and the odp_shm_info() metadata.
 * Returns the CUnit failure count so the parent can detect errors.
 */
static int run_shm_thread(void *arg ODP_UNUSED)
{
	odp_shm_info_t info;
	odp_shm_t shm;
	test_shared_data_t *test_shared_data;
	int thr;

	/* wait until every worker (and the shared block) is ready */
	odp_barrier_wait(&test_barrier);
	thr = odp_thread_id();
	printf("Thread %i starts\n", thr);

	shm = odp_shm_lookup(TESTNAME);
	CU_ASSERT(ODP_SHM_INVALID != shm);
	test_shared_data = odp_shm_addr(shm);
	/* values written by shmem_test_odp_shm_sunnyday() before the barrier */
	CU_ASSERT(TEST_SHARE_FOO == test_shared_data->foo);
	CU_ASSERT(TEST_SHARE_BAR == test_shared_data->bar);
	CU_ASSERT(0 == odp_shm_info(shm, &info));
	CU_ASSERT(0 == strcmp(TESTNAME, info.name));
	CU_ASSERT(0 == info.flags);
	CU_ASSERT(test_shared_data == info.addr);
	CU_ASSERT(sizeof(test_shared_data_t) <= info.size);
#ifdef MAP_HUGETLB
	/* NOTE(review): assumes the implementation backs plain reserves with
	 * huge pages whenever MAP_HUGETLB exists at compile time — confirm
	 * against the platform's shm implementation. */
	CU_ASSERT(odp_sys_huge_page_size() == info.page_size);
#else
	CU_ASSERT(odp_sys_page_size() == info.page_size);
#endif
	odp_shm_print_all();

	fflush(stdout);
	return CU_get_number_of_failures();
}
+
+void shmem_test_odp_shm_sunnyday(void)
+{
+ pthrd_arg thrdarg;
+ odp_shm_t shm;
+ test_shared_data_t *test_shared_data;
+ odp_cpumask_t unused;
+
+ shm = odp_shm_reserve(TESTNAME,
+ sizeof(test_shared_data_t), ALIGE_SIZE, 0);
+ CU_ASSERT(ODP_SHM_INVALID != shm);
+ CU_ASSERT(odp_shm_to_u64(shm) != odp_shm_to_u64(ODP_SHM_INVALID));
+
+ CU_ASSERT(0 == odp_shm_free(shm));
+ CU_ASSERT(ODP_SHM_INVALID == odp_shm_lookup(TESTNAME));
+
+ shm = odp_shm_reserve(TESTNAME,
+ sizeof(test_shared_data_t), ALIGE_SIZE, 0);
+ CU_ASSERT(ODP_SHM_INVALID != shm);
+
+ test_shared_data = odp_shm_addr(shm);
+ CU_ASSERT_FATAL(NULL != test_shared_data);
+ test_shared_data->foo = TEST_SHARE_FOO;
+ test_shared_data->bar = TEST_SHARE_BAR;
+
+ thrdarg.numthrds = odp_cpumask_default_worker(&unused, 0);
+
+ if (thrdarg.numthrds > MAX_WORKERS)
+ thrdarg.numthrds = MAX_WORKERS;
+
+ odp_barrier_init(&test_barrier, thrdarg.numthrds);
+ odp_cunit_thread_create(run_shm_thread, &thrdarg);
+ CU_ASSERT(odp_cunit_thread_exit(&thrdarg) >= 0);
+}
+
/* Shared memory test cases */
odp_testinfo_t shmem_suite[] = {
	ODP_TEST_INFO(shmem_test_odp_shm_sunnyday),
	ODP_TEST_INFO_NULL,
};

/* Test registry: single suite, no per-suite init/term needed. */
odp_suiteinfo_t shmem_suites[] = {
	{"Shared Memory", NULL, NULL, shmem_suite},
	ODP_SUITE_INFO_NULL,
};
+
+int shmem_main(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(argc, argv))
+ return -1;
+
+ ret = odp_cunit_register(shmem_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/common_plat/validation/api/shmem/shmem.h b/test/common_plat/validation/api/shmem/shmem.h
new file mode 100644
index 000000000..a5893d931
--- /dev/null
+++ b/test/common_plat/validation/api/shmem/shmem.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _ODP_TEST_SHMEM_H_
+#define _ODP_TEST_SHMEM_H_
+
+#include <odp_cunit_common.h>
+
+/* test functions: */
+void shmem_test_odp_shm_sunnyday(void);
+
+/* test arrays: */
+extern odp_testinfo_t shmem_suite[];
+
+/* test registry: */
+extern odp_suiteinfo_t shmem_suites[];
+
+/* main test program: */
+int shmem_main(int argc, char *argv[]);
+
+#endif
diff --git a/test/common_plat/validation/api/shmem/shmem_main.c b/test/common_plat/validation/api/shmem/shmem_main.c
new file mode 100644
index 000000000..4c6913051
--- /dev/null
+++ b/test/common_plat/validation/api/shmem/shmem_main.c
@@ -0,0 +1,12 @@
+/* Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "shmem.h"
+
/* Stand-alone executable entry point: delegate to the test driver. */
int main(int argc, char *argv[])
{
	return shmem_main(argc, argv);
}
diff --git a/test/common_plat/validation/api/std_clib/.gitignore b/test/common_plat/validation/api/std_clib/.gitignore
new file mode 100644
index 000000000..37828330a
--- /dev/null
+++ b/test/common_plat/validation/api/std_clib/.gitignore
@@ -0,0 +1 @@
+std_clib_main
diff --git a/test/common_plat/validation/api/std_clib/Makefile.am b/test/common_plat/validation/api/std_clib/Makefile.am
new file mode 100644
index 000000000..e2fc0ccf3
--- /dev/null
+++ b/test/common_plat/validation/api/std_clib/Makefile.am
@@ -0,0 +1,10 @@
+include ../Makefile.inc
+
+noinst_LTLIBRARIES = libteststd_clib.la
+libteststd_clib_la_SOURCES = std_clib.c
+
+test_PROGRAMS = std_clib_main$(EXEEXT)
+dist_std_clib_main_SOURCES = std_clib_main.c
+std_clib_main_LDADD = libteststd_clib.la $(LIBCUNIT_COMMON) $(LIBODP)
+
+EXTRA_DIST = std_clib.h
diff --git a/test/common_plat/validation/api/std_clib/std_clib.c b/test/common_plat/validation/api/std_clib/std_clib.c
new file mode 100644
index 000000000..7f089eabb
--- /dev/null
+++ b/test/common_plat/validation/api/std_clib/std_clib.c
@@ -0,0 +1,110 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+#include <odp_cunit_common.h>
+#include "std_clib.h"
+
+#include <string.h>
+
+#define PATTERN 0x5e
+
/* odp_memcpy() must copy bytes exactly like libc memcpy(). */
static void std_clib_test_memcpy(void)
{
	uint8_t src[] = {0, 1, 2, 3, 4, 5, 6, 7,
			 8, 9, 10, 11, 12, 13, 14, 15};
	uint8_t dst[16] = {0};

	odp_memcpy(dst, src, sizeof(dst));

	CU_ASSERT(memcmp(dst, src, sizeof(dst)) == 0);
}
+
+static void std_clib_test_memset(void)
+{
+ uint8_t data[] = {0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15};
+ uint8_t ref[16];
+ int ret;
+
+ odp_memset(data, PATTERN, sizeof(data));
+
+ memset(ref, PATTERN, sizeof(ref));
+
+ ret = memcmp(data, ref, sizeof(data));
+
+ CU_ASSERT(ret == 0);
+}
+
/*
 * Verify odp_memcmp(): equal buffers, buffers differing at a known
 * offset (compared range ending before, at and after the difference),
 * and single-byte perturbations at every offset.
 */
static void std_clib_test_memcmp(void)
{
	uint8_t data[] = {1, 2, 3, 4, 5, 6, 7, 8,
			  9, 10, 11, 12, 13, 14, 15, 16};
	uint8_t equal[] = {1, 2, 3, 4, 5, 6, 7, 8,
			   9, 10, 11, 12, 13, 14, 15, 16};
	/* differs from data at index 10 (99 > 11) */
	uint8_t greater_11[] = {1, 2, 3, 4, 5, 6, 7, 8,
				9, 10, 99, 12, 13, 14, 15, 16};
	/* differs from data at index 5 (2 < 6) */
	uint8_t less_6[] = {1, 2, 3, 4, 5, 2, 7, 8,
			    9, 10, 11, 12, 13, 14, 15, 16};
	size_t i;

	/* zero length always compares equal */
	CU_ASSERT(odp_memcmp(data, equal, 0) == 0);
	CU_ASSERT(odp_memcmp(data, equal, sizeof(data)) == 0);
	CU_ASSERT(odp_memcmp(data, equal, sizeof(data) - 3) == 0);

	/* difference inside the compared range vs. cut off just before it */
	CU_ASSERT(odp_memcmp(greater_11, data, sizeof(data)) > 0);
	CU_ASSERT(odp_memcmp(greater_11, data, 11) > 0);
	CU_ASSERT(odp_memcmp(greater_11, data, 10) == 0);

	CU_ASSERT(odp_memcmp(less_6, data, sizeof(data)) < 0);
	CU_ASSERT(odp_memcmp(less_6, data, 6) < 0);
	CU_ASSERT(odp_memcmp(less_6, data, 5) == 0);

	/* flip each byte to a larger (88) then smaller (0) value in turn;
	 * data bytes are 1..16, so 88 sorts above and 0 below all of them */
	for (i = 0; i < sizeof(data); i++) {
		uint8_t tmp;

		CU_ASSERT(odp_memcmp(data, equal, i + 1) == 0);
		tmp = equal[i];
		equal[i] = 88;
		CU_ASSERT(odp_memcmp(data, equal, i + 1) < 0);
		equal[i] = 0;
		CU_ASSERT(odp_memcmp(data, equal, i + 1) > 0);
		equal[i] = tmp;
	}
}
+
/* Standard C library wrapper test cases */
odp_testinfo_t std_clib_suite[] = {
	ODP_TEST_INFO(std_clib_test_memcpy),
	ODP_TEST_INFO(std_clib_test_memset),
	ODP_TEST_INFO(std_clib_test_memcmp),
	ODP_TEST_INFO_NULL,
};

/* Test registry: single suite, no per-suite init/term needed. */
odp_suiteinfo_t std_clib_suites[] = {
	{"Std C library", NULL, NULL, std_clib_suite},
	ODP_SUITE_INFO_NULL
};
+
+int std_clib_main(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(argc, argv))
+ return -1;
+
+ ret = odp_cunit_register(std_clib_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/common_plat/validation/api/std_clib/std_clib.h b/test/common_plat/validation/api/std_clib/std_clib.h
new file mode 100644
index 000000000..2804f27e2
--- /dev/null
+++ b/test/common_plat/validation/api/std_clib/std_clib.h
@@ -0,0 +1,21 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _ODP_TEST_STD_CLIB_H_
+#define _ODP_TEST_STD_CLIB_H_
+
+#include <odp_cunit_common.h>
+
+/* test arrays: */
+extern odp_testinfo_t std_clib_suite[];
+
+/* test registry: */
+extern odp_suiteinfo_t std_clib_suites[];
+
+/* main test program: */
+int std_clib_main(int argc, char *argv[]);
+
+#endif
diff --git a/test/common_plat/validation/api/std_clib/std_clib_main.c b/test/common_plat/validation/api/std_clib/std_clib_main.c
new file mode 100644
index 000000000..ef6f2736f
--- /dev/null
+++ b/test/common_plat/validation/api/std_clib/std_clib_main.c
@@ -0,0 +1,12 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "std_clib.h"
+
/* Stand-alone executable entry point: delegate to the test driver. */
int main(int argc, char *argv[])
{
	return std_clib_main(argc, argv);
}
diff --git a/test/common_plat/validation/api/system/.gitignore b/test/common_plat/validation/api/system/.gitignore
new file mode 100644
index 000000000..347b1ee21
--- /dev/null
+++ b/test/common_plat/validation/api/system/.gitignore
@@ -0,0 +1 @@
+system_main
diff --git a/test/common_plat/validation/api/system/Makefile.am b/test/common_plat/validation/api/system/Makefile.am
new file mode 100644
index 000000000..3789c36c2
--- /dev/null
+++ b/test/common_plat/validation/api/system/Makefile.am
@@ -0,0 +1,10 @@
+include ../Makefile.inc
+
+noinst_LTLIBRARIES = libtestsystem.la
+libtestsystem_la_SOURCES = system.c
+
+test_PROGRAMS = system_main$(EXEEXT)
+dist_system_main_SOURCES = system_main.c
+system_main_LDADD = libtestsystem.la $(LIBCUNIT_COMMON) $(LIBODP)
+
+EXTRA_DIST = system.h
diff --git a/test/common_plat/validation/api/system/system.c b/test/common_plat/validation/api/system/system.c
new file mode 100644
index 000000000..57ff34eb9
--- /dev/null
+++ b/test/common_plat/validation/api/system/system.c
@@ -0,0 +1,344 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
#include <ctype.h>
#include <stdio.h>
#include <string.h>

#include <odp_api.h>
#include <odp/api/cpumask.h>
#include "odp_cunit_common.h"
#include "test_debug.h"
#include "system.h"
+
+#define DIFF_TRY_NUM 160
+#define RES_TRY_NUM 10
+
/*
 * Verify that the API version string contains only digits and dots
 * (e.g. "1.10"). An empty string also fails the check.
 */
void system_test_odp_version_numbers(void)
{
	int char_ok = 0;
	char version_string[128];
	char *s = version_string;

	/* snprintf, unlike the original strncpy into an uninitialized
	 * buffer, guarantees NUL-termination even when the source does
	 * not fit */
	snprintf(version_string, sizeof(version_string), "%s",
		 odp_version_api_str());

	while (*s) {
		if (isdigit((int)*s) || (strncmp(s, ".", 1) == 0)) {
			char_ok = 1;
			s++;
		} else {
			char_ok = 0;
			LOG_DBG("\nBAD VERSION=%s\n", version_string);
			break;
		}
	}
	CU_ASSERT(char_ok);
}
+
/* odp_cpu_count() must report at least one CPU. */
void system_test_odp_cpu_count(void)
{
	CU_ASSERT(odp_cpu_count() > 0);
}
+
/* The cycle counter must advance over a 100 ns wait. */
void system_test_odp_cpu_cycles(void)
{
	uint64_t before, after;

	before = odp_cpu_cycles();
	odp_time_wait_ns(100);
	after = odp_cpu_cycles();

	CU_ASSERT(after != before);
}
+
/* odp_cpu_cycles_max() must be constant, cover at least half of the
 * 32-bit range, and bound every counter sample. */
void system_test_odp_cpu_cycles_max(void)
{
	uint64_t sample_1, sample_2;
	uint64_t max_a, max_b;

	max_a = odp_cpu_cycles_max();
	odp_time_wait_ns(100);
	max_b = odp_cpu_cycles_max();

	CU_ASSERT(max_a >= UINT32_MAX / 2);
	CU_ASSERT(max_a == max_b);

	sample_1 = odp_cpu_cycles();
	odp_time_wait_ns(1000);
	sample_2 = odp_cpu_cycles();

	CU_ASSERT(sample_1 <= max_a && sample_2 <= max_a);
}
+
/*
 * odp_cpu_cycles_resolution() must be non-zero, small relative to the
 * counter range, and every counter sample must be a multiple of it.
 */
void system_test_odp_cpu_cycles_resolution(void)
{
	int i;
	uint64_t res;
	uint64_t c2, c1, max;

	max = odp_cpu_cycles_max();

	res = odp_cpu_cycles_resolution();
	CU_ASSERT(res != 0);
	/* the counter must provide at least ~1024 distinct values */
	CU_ASSERT(res < max / 1024);

	for (i = 0; i < RES_TRY_NUM; i++) {
		c1 = odp_cpu_cycles();
		/* vary the delay slightly on each round */
		odp_time_wait_ns(100 * ODP_TIME_MSEC_IN_NS + i);
		c2 = odp_cpu_cycles();

		CU_ASSERT(c1 % res == 0);
		CU_ASSERT(c2 % res == 0);
	}
}
+
/*
 * Verify odp_cpu_cycles_diff() with and without counter wrap-around.
 * First a wrap is forced with a synthetic "old" value near max, then
 * the live counter is sampled repeatedly and each diff is cross-checked
 * against a manually computed expectation.
 */
void system_test_odp_cpu_cycles_diff(void)
{
	int i;
	uint64_t c2, c1, c3, max;
	uint64_t tmp, diff, res;

	res = odp_cpu_cycles_resolution();
	max = odp_cpu_cycles_max();

	/* pick a synthetic "old" value near max and re-sample until the
	 * live reading is at or below it, so the c2->c1 diff below is a
	 * genuine wrap case */
	c1 = max - 2 * res;
	do
		c2 = odp_cpu_cycles();
	while (c1 < c2);

	/* identical samples must yield a zero diff */
	diff = odp_cpu_cycles_diff(c1, c1);
	CU_ASSERT(diff == 0);

	/* wrap: expected diff runs from c1 up to max and on to c2 */
	tmp = c2 + (max - c1) + res;
	diff = odp_cpu_cycles_diff(c2, c1);
	CU_ASSERT(diff == tmp);
	CU_ASSERT(diff % res == 0);

	/* no wrap, revert args */
	tmp = c1 - c2;
	diff = odp_cpu_cycles_diff(c1, c2);
	CU_ASSERT(diff == tmp);
	CU_ASSERT(diff % res == 0);

	/* remember where live sampling began, for the final range check */
	c3 = odp_cpu_cycles();
	for (i = 0; i < DIFF_TRY_NUM; i++) {
		c1 = odp_cpu_cycles();
		odp_time_wait_ns(100 * ODP_TIME_MSEC_IN_NS + i);
		c2 = odp_cpu_cycles();

		CU_ASSERT(c2 != c1);
		CU_ASSERT(c1 % res == 0);
		CU_ASSERT(c2 % res == 0);
		CU_ASSERT(c1 <= max && c2 <= max);

		if (c2 > c1)
			tmp = c2 - c1;
		else
			tmp = c2 + (max - c1) + res;

		diff = odp_cpu_cycles_diff(c2, c1);
		CU_ASSERT(diff == tmp);
		CU_ASSERT(diff % res == 0);

		/* wrap is detected and verified */
		if (c2 < c1)
			break;
	}

	/* wrap was detected, no need to continue */
	if (i < DIFF_TRY_NUM)
		return;

	/* no wrap seen: acceptable only when the counter range is too
	 * large to wrap within the loop's total delay */
	CU_ASSERT(max > UINT32_MAX);
	CU_ASSERT((max - c3) > UINT32_MAX);

	printf("wrap was not detected...");
}
+
+void system_test_odp_sys_cache_line_size(void)
+{
+ uint64_t cache_size;
+
+ cache_size = odp_sys_cache_line_size();
+ CU_ASSERT(0 < cache_size);
+ CU_ASSERT(ODP_CACHE_LINE_SIZE == cache_size);
+}
+
/* The CPU model string must be non-empty and fit in 127 characters. */
void system_test_odp_cpu_model_str(void)
{
	char buf[128];
	size_t len;

	snprintf(buf, sizeof(buf), "%s", odp_cpu_model_str());
	len = strlen(buf);

	CU_ASSERT(len > 0);
	CU_ASSERT(len < 127);
}
+
+void system_test_odp_cpu_model_str_id(void)
+{
+ char model[128];
+ odp_cpumask_t mask;
+ int i, num, cpu;
+
+ num = odp_cpumask_all_available(&mask);
+ cpu = odp_cpumask_first(&mask);
+
+ for (i = 0; i < num; i++) {
+ snprintf(model, 128, "%s", odp_cpu_model_str_id(cpu));
+ CU_ASSERT(strlen(model) > 0);
+ CU_ASSERT(strlen(model) < 127);
+ cpu = odp_cpumask_next(&mask, cpu);
+ }
+}
+
+void system_test_odp_sys_page_size(void)
+{
+ uint64_t page;
+
+ page = odp_sys_page_size();
+ CU_ASSERT(0 < page);
+ CU_ASSERT(ODP_PAGE_SIZE == page);
+}
+
/* Huge page size must be reported as a positive value. */
void system_test_odp_sys_huge_page_size(void)
{
	CU_ASSERT(odp_sys_huge_page_size() > 0);
}
+
+int system_check_odp_cpu_hz(void)
+{
+ if (odp_cpu_hz() == 0) {
+ fprintf(stderr, "odp_cpu_hz is not supported, skipping\n");
+ return ODP_TEST_INACTIVE;
+ }
+
+ return ODP_TEST_ACTIVE;
+}
+
+void system_test_odp_cpu_hz(void)
+{
+ uint64_t hz = odp_cpu_hz();
+
+ /* Test value sanity: less than 10GHz */
+ CU_ASSERT(hz < 10 * GIGA_HZ);
+
+ /* larger than 1kHz */
+ CU_ASSERT(hz > 1 * KILO_HZ);
+}
+
+int system_check_odp_cpu_hz_id(void)
+{
+ uint64_t hz;
+ odp_cpumask_t mask;
+ int i, num, cpu;
+
+ num = odp_cpumask_all_available(&mask);
+ cpu = odp_cpumask_first(&mask);
+
+ for (i = 0; i < num; i++) {
+ hz = odp_cpu_hz_id(cpu);
+ if (hz == 0) {
+ fprintf(stderr, "cpu %d does not support"
+ " odp_cpu_hz_id(),"
+ "skip that test\n", cpu);
+ return ODP_TEST_INACTIVE;
+ }
+ cpu = odp_cpumask_next(&mask, cpu);
+ }
+
+ return ODP_TEST_ACTIVE;
+}
+
+void system_test_odp_cpu_hz_id(void)
+{
+ uint64_t hz;
+ odp_cpumask_t mask;
+ int i, num, cpu;
+
+ num = odp_cpumask_all_available(&mask);
+ cpu = odp_cpumask_first(&mask);
+
+ for (i = 0; i < num; i++) {
+ hz = odp_cpu_hz_id(cpu);
+ /* Test value sanity: less than 10GHz */
+ CU_ASSERT(hz < 10 * GIGA_HZ);
+ /* larger than 1kHz */
+ CU_ASSERT(hz > 1 * KILO_HZ);
+ cpu = odp_cpumask_next(&mask, cpu);
+ }
+}
+
/* The maximum CPU frequency must be reported as a positive value. */
void system_test_odp_cpu_hz_max(void)
{
	CU_ASSERT(odp_cpu_hz_max() > 0);
}
+
+void system_test_odp_cpu_hz_max_id(void)
+{
+ uint64_t hz;
+ odp_cpumask_t mask;
+ int i, num, cpu;
+
+ num = odp_cpumask_all_available(&mask);
+ cpu = odp_cpumask_first(&mask);
+
+ for (i = 0; i < num; i++) {
+ hz = odp_cpu_hz_max_id(cpu);
+ CU_ASSERT(0 < hz);
+ cpu = odp_cpumask_next(&mask, cpu);
+ }
+}
+
/* System info test cases; the cpu_hz tests are conditional because
 * frequency reporting is optional for an implementation. */
odp_testinfo_t system_suite[] = {
	ODP_TEST_INFO(system_test_odp_version_numbers),
	ODP_TEST_INFO(system_test_odp_cpu_count),
	ODP_TEST_INFO(system_test_odp_sys_cache_line_size),
	ODP_TEST_INFO(system_test_odp_cpu_model_str),
	ODP_TEST_INFO(system_test_odp_cpu_model_str_id),
	ODP_TEST_INFO(system_test_odp_sys_page_size),
	ODP_TEST_INFO(system_test_odp_sys_huge_page_size),
	ODP_TEST_INFO_CONDITIONAL(system_test_odp_cpu_hz,
				  system_check_odp_cpu_hz),
	ODP_TEST_INFO_CONDITIONAL(system_test_odp_cpu_hz_id,
				  system_check_odp_cpu_hz_id),
	ODP_TEST_INFO(system_test_odp_cpu_hz_max),
	ODP_TEST_INFO(system_test_odp_cpu_hz_max_id),
	ODP_TEST_INFO(system_test_odp_cpu_cycles),
	ODP_TEST_INFO(system_test_odp_cpu_cycles_max),
	ODP_TEST_INFO(system_test_odp_cpu_cycles_resolution),
	ODP_TEST_INFO(system_test_odp_cpu_cycles_diff),
	ODP_TEST_INFO_NULL,
};

/* Test registry: single suite, no per-suite init/term needed. */
odp_suiteinfo_t system_suites[] = {
	{"System Info", NULL, NULL, system_suite},
	ODP_SUITE_INFO_NULL,
};
+
+int system_main(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(argc, argv))
+ return -1;
+
+ ret = odp_cunit_register(system_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/common_plat/validation/api/system/system.h b/test/common_plat/validation/api/system/system.h
new file mode 100644
index 000000000..cbb994eb0
--- /dev/null
+++ b/test/common_plat/validation/api/system/system.h
@@ -0,0 +1,43 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _ODP_TEST_SYSTEM_H_
+#define _ODP_TEST_SYSTEM_H_
+
+#include <odp_cunit_common.h>
+
+#define GIGA_HZ 1000000000ULL
+#define KILO_HZ 1000ULL
+
+/* test functions: */
+void system_test_odp_version_numbers(void);
+void system_test_odp_cpu_count(void);
+void system_test_odp_sys_cache_line_size(void);
+void system_test_odp_cpu_model_str(void);
+void system_test_odp_cpu_model_str_id(void);
+void system_test_odp_sys_page_size(void);
+void system_test_odp_sys_huge_page_size(void);
+int system_check_odp_cpu_hz(void);
+void system_test_odp_cpu_hz(void);
+int system_check_odp_cpu_hz_id(void);
+void system_test_odp_cpu_hz_id(void);
+void system_test_odp_cpu_hz_max(void);
+void system_test_odp_cpu_hz_max_id(void);
+void system_test_odp_cpu_cycles_max(void);
+void system_test_odp_cpu_cycles(void);
+void system_test_odp_cpu_cycles_diff(void);
+void system_test_odp_cpu_cycles_resolution(void);
+
+/* test arrays: */
+extern odp_testinfo_t system_suite[];
+
+/* test registry: */
+extern odp_suiteinfo_t system_suites[];
+
+/* main test program: */
+int system_main(int argc, char *argv[]);
+
+#endif
diff --git a/test/common_plat/validation/api/system/system_main.c b/test/common_plat/validation/api/system/system_main.c
new file mode 100644
index 000000000..50d202a84
--- /dev/null
+++ b/test/common_plat/validation/api/system/system_main.c
@@ -0,0 +1,12 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "system.h"
+
/* Stand-alone executable entry point: delegate to the test driver. */
int main(int argc, char *argv[])
{
	return system_main(argc, argv);
}
diff --git a/test/common_plat/validation/api/thread/.gitignore b/test/common_plat/validation/api/thread/.gitignore
new file mode 100644
index 000000000..ab1787d97
--- /dev/null
+++ b/test/common_plat/validation/api/thread/.gitignore
@@ -0,0 +1 @@
+thread_main
diff --git a/test/common_plat/validation/api/thread/Makefile.am b/test/common_plat/validation/api/thread/Makefile.am
new file mode 100644
index 000000000..eaf680cf5
--- /dev/null
+++ b/test/common_plat/validation/api/thread/Makefile.am
@@ -0,0 +1,12 @@
+include ../Makefile.inc
+
+noinst_LTLIBRARIES = libtestthread.la
+libtestthread_la_SOURCES = thread.c
+libtestthread_la_CFLAGS = $(AM_CFLAGS) -DTEST_THRMASK
+libtestthread_la_LIBADD = $(LIBTHRMASK_COMMON)
+
+test_PROGRAMS = thread_main$(EXEEXT)
+dist_thread_main_SOURCES = thread_main.c
+thread_main_LDADD = libtestthread.la $(LIBCUNIT_COMMON) $(LIBODP)
+
+EXTRA_DIST = thread.h
diff --git a/test/common_plat/validation/api/thread/thread.c b/test/common_plat/validation/api/thread/thread.c
new file mode 100644
index 000000000..24f1c4580
--- /dev/null
+++ b/test/common_plat/validation/api/thread/thread.c
@@ -0,0 +1,140 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+#include <odp_cunit_common.h>
+#include <mask_common.h>
+#include <test_debug.h>
+#include "thread.h"
+
+/* Test thread entry and exit synchronization barriers */
+odp_barrier_t bar_entry;
+odp_barrier_t bar_exit;
+
/* Smoke test: odp_cpu_id() must be callable; its value is not checked. */
void thread_test_odp_cpu_id(void)
{
	(void)odp_cpu_id();
	CU_PASS();
}

/* Smoke test: odp_thread_id() must be callable; value not checked. */
void thread_test_odp_thread_id(void)
{
	(void)odp_thread_id();
	CU_PASS();
}

/* Smoke test: odp_thread_count() must be callable; value not checked. */
void thread_test_odp_thread_count(void)
{
	(void)odp_thread_count();
	CU_PASS();
}
+
/*
 * Worker body: signal start via bar_entry, verify its own thread type,
 * then park on bar_exit until the main thread releases it.
 * Returns the CUnit failure count.
 */
static int thread_func(void *arg TEST_UNUSED)
{
	/* indicate that thread has started */
	odp_barrier_wait(&bar_entry);

	CU_ASSERT(odp_thread_type() == ODP_THREAD_WORKER);

	/* wait for indication that we can exit */
	odp_barrier_wait(&bar_exit);

	return CU_get_number_of_failures();
}
+
/*
 * Check that odp_thrmask_worker() tracks worker threads: zero before
 * any are created, args.numthrds while they are running.
 */
void thread_test_odp_thrmask_worker(void)
{
	odp_thrmask_t mask;
	int ret;
	pthrd_arg args = { .testcase = 0, .numthrds = 1 };

	CU_ASSERT_FATAL(odp_thread_type() == ODP_THREAD_CONTROL);

	/* barriers include this control thread, hence the +1 */
	odp_barrier_init(&bar_entry, args.numthrds + 1);
	odp_barrier_init(&bar_exit, args.numthrds + 1);

	/* should start out with 0 worker threads */
	ret = odp_thrmask_worker(&mask);
	CU_ASSERT(ret == odp_thrmask_count(&mask));
	CU_ASSERT(ret == 0);

	/* start the test thread(s) */
	ret = odp_cunit_thread_create(thread_func, &args);
	CU_ASSERT(ret == args.numthrds);

	/* NOTE(review): on partial creation this returns without joining
	 * any threads that did start; they would block on bar_entry
	 * forever — consider adding a cleanup path. */
	if (ret != args.numthrds)
		return;

	/* wait for thread(s) to start */
	odp_barrier_wait(&bar_entry);

	ret = odp_thrmask_worker(&mask);
	CU_ASSERT(ret == odp_thrmask_count(&mask));
	CU_ASSERT(ret == args.numthrds);
	CU_ASSERT(ret <= odp_thread_count_max());

	/* allow thread(s) to exit */
	odp_barrier_wait(&bar_exit);

	odp_cunit_thread_exit(&args);
}
+
/*
 * The test process runs as the only control thread, so the control
 * mask must contain exactly one thread.
 */
void thread_test_odp_thrmask_control(void)
{
	odp_thrmask_t mask;
	int ret;

	CU_ASSERT(odp_thread_type() == ODP_THREAD_CONTROL);

	/* should start out with 1 control thread (this one); the
	 * original comment wrongly said "worker" */
	ret = odp_thrmask_control(&mask);
	CU_ASSERT(ret == odp_thrmask_count(&mask));
	CU_ASSERT(ret == 1);
}
+
/* Thread and thrmask test cases; the thrmask helpers come from the
 * shared mask_common tests (TEST_THRMASK). */
odp_testinfo_t thread_suite[] = {
	ODP_TEST_INFO(thread_test_odp_cpu_id),
	ODP_TEST_INFO(thread_test_odp_thread_id),
	ODP_TEST_INFO(thread_test_odp_thread_count),
	ODP_TEST_INFO(thread_test_odp_thrmask_to_from_str),
	ODP_TEST_INFO(thread_test_odp_thrmask_equal),
	ODP_TEST_INFO(thread_test_odp_thrmask_zero),
	ODP_TEST_INFO(thread_test_odp_thrmask_set),
	ODP_TEST_INFO(thread_test_odp_thrmask_clr),
	ODP_TEST_INFO(thread_test_odp_thrmask_isset),
	ODP_TEST_INFO(thread_test_odp_thrmask_count),
	ODP_TEST_INFO(thread_test_odp_thrmask_and),
	ODP_TEST_INFO(thread_test_odp_thrmask_or),
	ODP_TEST_INFO(thread_test_odp_thrmask_xor),
	ODP_TEST_INFO(thread_test_odp_thrmask_copy),
	ODP_TEST_INFO(thread_test_odp_thrmask_first),
	ODP_TEST_INFO(thread_test_odp_thrmask_last),
	ODP_TEST_INFO(thread_test_odp_thrmask_next),
	ODP_TEST_INFO(thread_test_odp_thrmask_worker),
	ODP_TEST_INFO(thread_test_odp_thrmask_control),
	ODP_TEST_INFO_NULL,
};

/* Test registry: single suite, no per-suite init/term needed. */
odp_suiteinfo_t thread_suites[] = {
	{"thread", NULL, NULL, thread_suite},
	ODP_SUITE_INFO_NULL,
};
+
+int thread_main(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(argc, argv))
+ return -1;
+
+ ret = odp_cunit_register(thread_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/common_plat/validation/api/thread/thread.h b/test/common_plat/validation/api/thread/thread.h
new file mode 100644
index 000000000..d511c9259
--- /dev/null
+++ b/test/common_plat/validation/api/thread/thread.h
@@ -0,0 +1,33 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _ODP_TEST_THREAD_H_
+#define _ODP_TEST_THREAD_H_
+
+#include <odp_api.h>
+#include <odp_cunit_common.h>
+
+/* test functions: */
+#ifndef TEST_THRMASK
+#define TEST_THRMASK
+#endif
+#include "mask_common.h"
+void thread_test_odp_cpu_id(void);
+void thread_test_odp_thread_id(void);
+void thread_test_odp_thread_count(void);
+void thread_test_odp_thrmask_control(void);
+void thread_test_odp_thrmask_worker(void);
+
+/* test arrays: */
+extern odp_testinfo_t thread_suite[];
+
+/* test registry: */
+extern odp_suiteinfo_t thread_suites[];
+
+/* main test program: */
+int thread_main(int argc, char *argv[]);
+
+#endif
diff --git a/test/common_plat/validation/api/thread/thread_main.c b/test/common_plat/validation/api/thread/thread_main.c
new file mode 100644
index 000000000..53c756551
--- /dev/null
+++ b/test/common_plat/validation/api/thread/thread_main.c
@@ -0,0 +1,12 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "thread.h"
+
/* Stand-alone executable entry point: delegate to the test driver. */
int main(int argc, char *argv[])
{
	return thread_main(argc, argv);
}
diff --git a/test/common_plat/validation/api/time/.gitignore b/test/common_plat/validation/api/time/.gitignore
new file mode 100644
index 000000000..0ef3e6162
--- /dev/null
+++ b/test/common_plat/validation/api/time/.gitignore
@@ -0,0 +1 @@
+time_main
diff --git a/test/common_plat/validation/api/time/Makefile.am b/test/common_plat/validation/api/time/Makefile.am
new file mode 100644
index 000000000..bf2d0268c
--- /dev/null
+++ b/test/common_plat/validation/api/time/Makefile.am
@@ -0,0 +1,10 @@
+include ../Makefile.inc
+
+noinst_LTLIBRARIES = libtesttime.la
+libtesttime_la_SOURCES = time.c
+
+test_PROGRAMS = time_main$(EXEEXT)
+dist_time_main_SOURCES = time_main.c
+time_main_LDADD = libtesttime.la $(LIBCUNIT_COMMON) $(LIBODP)
+
+EXTRA_DIST = time.h
diff --git a/test/common_plat/validation/api/time/time.c b/test/common_plat/validation/api/time/time.c
new file mode 100644
index 000000000..530d5c07a
--- /dev/null
+++ b/test/common_plat/validation/api/time/time.c
@@ -0,0 +1,476 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+#include "odp_cunit_common.h"
+#include "time.h"
+
+#define BUSY_LOOP_CNT 30000000 /* used for t > min resolution */
+#define BUSY_LOOP_CNT_LONG 6000000000 /* used for t > 4 sec */
+#define MIN_TIME_RATE 32000
+#define MAX_TIME_RATE 15000000000
+#define DELAY_TOLERANCE 20000000 /* deviation for delay */
+#define WAIT_SECONDS 3
+
+/* Max conversion error (ns per tick) of the local time source,
+ * measured by time_test_local_res() and reused by later tests */
+static uint64_t local_res;
+/* Max conversion error (ns per tick) of the global time source */
+static uint64_t global_res;
+
+/* Callback types so the local and global variants of each test
+ * can share a single parameterized test body */
+typedef odp_time_t time_cb(void);
+typedef uint64_t time_res_cb(void);
+typedef odp_time_t time_from_ns_cb(uint64_t ns);
+
+/* Verify the documented relationship between ODP time unit constants:
+ * 1 s = 1000 ms and 1 ms = 1000 us, all expressed in nanoseconds. */
+void time_test_constants(void)
+{
+	uint64_t ns = ODP_TIME_SEC_IN_NS / 1000;
+
+	CU_ASSERT(ns == ODP_TIME_MSEC_IN_NS);
+	ns = ns / 1000;
+	CU_ASSERT(ns == ODP_TIME_USEC_IN_NS);
+}
+
+/* Query a time source rate (Hz), sanity-check it against the expected
+ * bounds, and store the worst-case conversion error in *res:
+ * the duration of one tick in nanoseconds, rounded up. */
+static void time_test_res(time_res_cb time_res, uint64_t *res)
+{
+	uint64_t hz = time_res();
+
+	CU_ASSERT(hz > MIN_TIME_RATE);
+	CU_ASSERT(hz < MAX_TIME_RATE);
+
+	/* ceiling division: ns per tick, rounded up */
+	*res = (ODP_TIME_SEC_IN_NS + hz - 1) / hz;
+}
+
+/* Measure the local time source resolution into 'local_res' (ns/tick) */
+void time_test_local_res(void)
+{
+	time_test_res(odp_time_local_res, &local_res);
+}
+
+/* Measure the global time source resolution into 'global_res' (ns/tick) */
+void time_test_global_res(void)
+{
+	time_test_res(odp_time_global_res, &global_res);
+}
+
+/* check that related conversions come back to the same value */
+static void time_test_conversion(time_from_ns_cb time_from_ns, uint64_t res)
+{
+	uint64_t ns1, ns2;
+	odp_time_t time;
+	uint64_t upper_limit, lower_limit;
+
+	/* small value, possibly below one tick of the time source */
+	ns1 = 100;
+	time = time_from_ns(ns1);
+
+	ns2 = odp_time_to_ns(time);
+
+	/* need to check within arithmetic tolerance that the same
+	 * value in ns is returned after conversions */
+	upper_limit = ns1 + res;
+	lower_limit = ns1 - res;
+	CU_ASSERT((ns2 <= upper_limit) && (ns2 >= lower_limit));
+
+	/* large value: 11 minutes in nanoseconds */
+	ns1 = 60 * 11 * ODP_TIME_SEC_IN_NS;
+	time = time_from_ns(ns1);
+
+	ns2 = odp_time_to_ns(time);
+
+	/* need to check within arithmetic tolerance that the same
+	 * value in ns is returned after conversions */
+	upper_limit = ns1 + res;
+	lower_limit = ns1 - res;
+	CU_ASSERT((ns2 <= upper_limit) && (ns2 >= lower_limit));
+
+	/* test on 0: the null time must convert to exactly 0 ns */
+	ns1 = odp_time_to_ns(ODP_TIME_NULL);
+	CU_ASSERT(ns1 == 0);
+}
+
+/* Round-trip ns->time->ns conversion check for the local time source */
+void time_test_local_conversion(void)
+{
+	time_test_conversion(odp_time_local_from_ns, local_res);
+}
+
+/* Round-trip ns->time->ns conversion check for the global time source */
+void time_test_global_conversion(void)
+{
+	time_test_conversion(odp_time_global_from_ns, global_res);
+}
+
+/* Check that both local and global time advance strictly monotonically
+ * across two busy-wait periods (BUSY_LOOP_CNT, then BUSY_LOOP_CNT_LONG). */
+void time_test_monotony(void)
+{
+	/* volatile to stop the compiler optimizing the busy loops away */
+	volatile uint64_t cnt = 0;
+	odp_time_t local[3], global[3];
+	uint64_t a, b, c;
+
+	local[0] = odp_time_local();
+	global[0] = odp_time_global();
+
+	while (cnt < BUSY_LOOP_CNT)
+		cnt++;
+
+	local[1] = odp_time_local();
+	global[1] = odp_time_global();
+
+	while (cnt < BUSY_LOOP_CNT_LONG)
+		cnt++;
+
+	local[2] = odp_time_local();
+	global[2] = odp_time_global();
+
+	a = odp_time_to_ns(local[0]);
+	b = odp_time_to_ns(local[1]);
+	c = odp_time_to_ns(local[2]);
+
+	/* Local time assertions: strictly increasing */
+	CU_ASSERT(b > a);
+	CU_ASSERT(c > b);
+
+	a = odp_time_to_ns(global[0]);
+	b = odp_time_to_ns(global[1]);
+	c = odp_time_to_ns(global[2]);
+
+	/* Global time assertions: strictly increasing */
+	CU_ASSERT(b > a);
+	CU_ASSERT(c > b);
+}
+
+/* Exercise odp_time_cmp(): ordering of timestamps taken from a running
+ * clock, ordering of constructed intervals, equality with self, and
+ * equality of a zero interval with ODP_TIME_NULL. */
+static void time_test_cmp(time_cb time, time_from_ns_cb time_from_ns)
+{
+	/* volatile to stop optimization of busy loop */
+	volatile int count = 0;
+	odp_time_t t1, t2, t3;
+
+	t1 = time();
+
+	while (count < BUSY_LOOP_CNT) {
+		count++;
+	};
+
+	t2 = time();
+
+	while (count < BUSY_LOOP_CNT * 2) {
+		count++;
+	};
+
+	t3 = time();
+
+	/* later timestamps must compare greater, earlier ones less,
+	 * and every timestamp must compare equal to itself */
+	CU_ASSERT(odp_time_cmp(t2, t1) > 0);
+	CU_ASSERT(odp_time_cmp(t3, t2) > 0);
+	CU_ASSERT(odp_time_cmp(t3, t1) > 0);
+	CU_ASSERT(odp_time_cmp(t1, t2) < 0);
+	CU_ASSERT(odp_time_cmp(t2, t3) < 0);
+	CU_ASSERT(odp_time_cmp(t1, t3) < 0);
+	CU_ASSERT(odp_time_cmp(t1, t1) == 0);
+	CU_ASSERT(odp_time_cmp(t2, t2) == 0);
+	CU_ASSERT(odp_time_cmp(t3, t3) == 0);
+
+	/* compare two constructed intervals: 10 minutes vs 3 ns */
+	t2 = time_from_ns(60 * 10 * ODP_TIME_SEC_IN_NS);
+	t1 = time_from_ns(3);
+
+	CU_ASSERT(odp_time_cmp(t2, t1) > 0);
+	CU_ASSERT(odp_time_cmp(t1, t2) < 0);
+
+	/* a zero interval must equal the null time */
+	t1 = time_from_ns(0);
+	CU_ASSERT(odp_time_cmp(t1, ODP_TIME_NULL) == 0);
+}
+
+/* odp_time_cmp() checks with the local time source */
+void time_test_local_cmp(void)
+{
+	time_test_cmp(odp_time_local, odp_time_local_from_ns);
+}
+
+/* odp_time_cmp() checks with the global time source */
+void time_test_global_cmp(void)
+{
+	time_test_cmp(odp_time_global, odp_time_global_from_ns);
+}
+
+/* check that a time difference gives a reasonable result */
+static void time_test_diff(time_cb time,
+			   time_from_ns_cb time_from_ns,
+			   uint64_t res)
+{
+	/* volatile to stop optimization of busy loop */
+	volatile int count = 0;
+	odp_time_t diff, t1, t2;
+	uint64_t nsdiff, ns1, ns2, ns;
+	uint64_t upper_limit, lower_limit;
+
+	/* test timestamp diff */
+	t1 = time();
+
+	while (count < BUSY_LOOP_CNT) {
+		count++;
+	};
+
+	t2 = time();
+	CU_ASSERT(odp_time_cmp(t2, t1) > 0);
+
+	diff = odp_time_diff(t2, t1);
+	CU_ASSERT(odp_time_cmp(diff, ODP_TIME_NULL) > 0);
+
+	ns1 = odp_time_to_ns(t1);
+	ns2 = odp_time_to_ns(t2);
+	ns = ns2 - ns1;
+	nsdiff = odp_time_to_ns(diff);
+
+	/* each timestamp conversion may be off by up to 'res' ns,
+	 * so the diff may deviate by up to 2 * res */
+	upper_limit = ns + 2 * res;
+	lower_limit = ns - 2 * res;
+	CU_ASSERT((nsdiff <= upper_limit) && (nsdiff >= lower_limit));
+
+	/* test timestamp and interval diff */
+	ns1 = 54;
+	t1 = time_from_ns(ns1);
+	ns = ns2 - ns1;
+
+	diff = odp_time_diff(t2, t1);
+	CU_ASSERT(odp_time_cmp(diff, ODP_TIME_NULL) > 0);
+	nsdiff = odp_time_to_ns(diff);
+
+	upper_limit = ns + 2 * res;
+	lower_limit = ns - 2 * res;
+	CU_ASSERT((nsdiff <= upper_limit) && (nsdiff >= lower_limit));
+
+	/* test interval diff */
+	ns2 = 60 * 10 * ODP_TIME_SEC_IN_NS;
+	ns = ns2 - ns1;
+
+	t2 = time_from_ns(ns2);
+	diff = odp_time_diff(t2, t1);
+	CU_ASSERT(odp_time_cmp(diff, ODP_TIME_NULL) > 0);
+	nsdiff = odp_time_to_ns(diff);
+
+	upper_limit = ns + 2 * res;
+	lower_limit = ns - 2 * res;
+	CU_ASSERT((nsdiff <= upper_limit) && (nsdiff >= lower_limit));
+
+	/* same time has to diff to 0 */
+	diff = odp_time_diff(t2, t2);
+	CU_ASSERT(odp_time_cmp(diff, ODP_TIME_NULL) == 0);
+
+	/* diffing against the null time must return the time unchanged */
+	diff = odp_time_diff(t2, ODP_TIME_NULL);
+	CU_ASSERT(odp_time_cmp(t2, diff) == 0);
+}
+
+/* odp_time_diff() checks with the local time source */
+void time_test_local_diff(void)
+{
+	time_test_diff(odp_time_local, odp_time_local_from_ns, local_res);
+}
+
+/* odp_time_diff() checks with the global time source */
+void time_test_global_diff(void)
+{
+	time_test_diff(odp_time_global, odp_time_global_from_ns, global_res);
+}
+
+/* check that a time sum gives a reasonable result */
+static void time_test_sum(time_cb time,
+			  time_from_ns_cb time_from_ns,
+			  uint64_t res)
+{
+	odp_time_t sum, t1, t2;
+	uint64_t nssum, ns1, ns2, ns;
+	uint64_t upper_limit, lower_limit;
+
+	/* sum timestamp and interval */
+	t1 = time();
+	ns2 = 103;
+	t2 = time_from_ns(ns2);
+	ns1 = odp_time_to_ns(t1);
+	ns = ns1 + ns2;
+
+	sum = odp_time_sum(t2, t1);
+	CU_ASSERT(odp_time_cmp(sum, ODP_TIME_NULL) > 0);
+	nssum = odp_time_to_ns(sum);
+
+	/* each conversion may be off by up to 'res' ns,
+	 * so the sum may deviate by up to 2 * res */
+	upper_limit = ns + 2 * res;
+	lower_limit = ns - 2 * res;
+	CU_ASSERT((nssum <= upper_limit) && (nssum >= lower_limit));
+
+	/* sum intervals */
+	ns1 = 60 * 13 * ODP_TIME_SEC_IN_NS;
+	t1 = time_from_ns(ns1);
+	ns = ns1 + ns2;
+
+	sum = odp_time_sum(t2, t1);
+	CU_ASSERT(odp_time_cmp(sum, ODP_TIME_NULL) > 0);
+	nssum = odp_time_to_ns(sum);
+
+	upper_limit = ns + 2 * res;
+	lower_limit = ns - 2 * res;
+	CU_ASSERT((nssum <= upper_limit) && (nssum >= lower_limit));
+
+	/* test on 0: adding the null time must leave the value unchanged */
+	sum = odp_time_sum(t2, ODP_TIME_NULL);
+	CU_ASSERT(odp_time_cmp(t2, sum) == 0);
+}
+
+/* odp_time_sum() checks with the local time source */
+void time_test_local_sum(void)
+{
+	time_test_sum(odp_time_local, odp_time_local_from_ns, local_res);
+}
+
+/* odp_time_sum() checks with the global time source */
+void time_test_global_sum(void)
+{
+	time_test_sum(odp_time_global, odp_time_global_from_ns, global_res);
+}
+
+/* Sleep WAIT_SECONDS using odp_time_wait_until() in one-second steps and
+ * verify the elapsed time stays within DELAY_TOLERANCE of the target. */
+static void time_test_wait_until(time_cb time, time_from_ns_cb time_from_ns)
+{
+	int i;
+	odp_time_t lower_limit, upper_limit;
+	odp_time_t start_time, end_time, wait;
+	odp_time_t second = time_from_ns(ODP_TIME_SEC_IN_NS);
+
+	start_time = time();
+	wait = start_time;
+	for (i = 0; i < WAIT_SECONDS; i++) {
+		wait = odp_time_sum(wait, second);
+		odp_time_wait_until(wait);
+	}
+	end_time = time();
+
+	/* reuse 'wait' for the measured elapsed time */
+	wait = odp_time_diff(end_time, start_time);
+	lower_limit = time_from_ns(WAIT_SECONDS * ODP_TIME_SEC_IN_NS -
+				   DELAY_TOLERANCE);
+	upper_limit = time_from_ns(WAIT_SECONDS * ODP_TIME_SEC_IN_NS +
+				   DELAY_TOLERANCE);
+
+	if (odp_time_cmp(wait, lower_limit) < 0) {
+		fprintf(stderr, "Exceed lower limit: "
+			"wait is %" PRIu64 ", lower_limit %" PRIu64 "\n",
+			odp_time_to_ns(wait), odp_time_to_ns(lower_limit));
+		CU_FAIL("Exceed lower limit\n");
+	}
+
+	if (odp_time_cmp(wait, upper_limit) > 0) {
+		/* Fix: print upper_limit here; the original printed
+		 * lower_limit in the upper-limit message */
+		fprintf(stderr, "Exceed upper limit: "
+			"wait is %" PRIu64 ", upper_limit %" PRIu64 "\n",
+			odp_time_to_ns(wait), odp_time_to_ns(upper_limit));
+		CU_FAIL("Exceed upper limit\n");
+	}
+}
+
+/* odp_time_wait_until() timing check with the local time source */
+void time_test_local_wait_until(void)
+{
+	time_test_wait_until(odp_time_local, odp_time_local_from_ns);
+}
+
+/* odp_time_wait_until() timing check with the global time source */
+void time_test_global_wait_until(void)
+{
+	time_test_wait_until(odp_time_global, odp_time_global_from_ns);
+}
+
+/* Sleep WAIT_SECONDS using odp_time_wait_ns() one second at a time and
+ * verify the elapsed local time stays within DELAY_TOLERANCE. */
+void time_test_wait_ns(void)
+{
+	int i;
+	odp_time_t lower_limit, upper_limit;
+	odp_time_t start_time, end_time, diff;
+
+	start_time = odp_time_local();
+	for (i = 0; i < WAIT_SECONDS; i++)
+		odp_time_wait_ns(ODP_TIME_SEC_IN_NS);
+	end_time = odp_time_local();
+
+	diff = odp_time_diff(end_time, start_time);
+
+	lower_limit = odp_time_local_from_ns(WAIT_SECONDS * ODP_TIME_SEC_IN_NS -
+					     DELAY_TOLERANCE);
+	upper_limit = odp_time_local_from_ns(WAIT_SECONDS * ODP_TIME_SEC_IN_NS +
+					     DELAY_TOLERANCE);
+
+	if (odp_time_cmp(diff, lower_limit) < 0) {
+		fprintf(stderr, "Exceed lower limit: "
+			"diff is %" PRIu64 ", lower_limit %" PRIu64 "\n",
+			odp_time_to_ns(diff), odp_time_to_ns(lower_limit));
+		CU_FAIL("Exceed lower limit\n");
+	}
+
+	if (odp_time_cmp(diff, upper_limit) > 0) {
+		/* Fix: print upper_limit here; the original printed
+		 * lower_limit in the upper-limit message */
+		fprintf(stderr, "Exceed upper limit: "
+			"diff is %" PRIu64 ", upper_limit %" PRIu64 "\n",
+			odp_time_to_ns(diff), odp_time_to_ns(upper_limit));
+		CU_FAIL("Exceed upper limit\n");
+	}
+}
+
+/* Check odp_time_to_u64(): nonzero for real timestamps, increasing
+ * over time, and exactly zero for ODP_TIME_NULL. */
+static void time_test_to_u64(time_cb time)
+{
+	/* volatile to stop optimization of the busy loop */
+	volatile int spin = 0;
+	odp_time_t before, after;
+	uint64_t u64_before, u64_after;
+
+	before = time();
+	u64_before = odp_time_to_u64(before);
+	CU_ASSERT(u64_before > 0);
+
+	while (spin < BUSY_LOOP_CNT)
+		spin++;
+
+	after = time();
+	u64_after = odp_time_to_u64(after);
+	CU_ASSERT(u64_after > 0);
+
+	/* u64 representation must be monotonic like the clock itself */
+	CU_ASSERT(u64_after > u64_before);
+
+	/* the null time must map to the value zero */
+	u64_before = odp_time_to_u64(ODP_TIME_NULL);
+	CU_ASSERT(u64_before == 0);
+}
+
+/* odp_time_to_u64() checks with the local time source */
+void time_test_local_to_u64(void)
+{
+	time_test_to_u64(odp_time_local);
+}
+
+/* odp_time_to_u64() checks with the global time source */
+void time_test_global_to_u64(void)
+{
+	time_test_to_u64(odp_time_global);
+}
+
+/* Test array: resolution tests run first so that local_res/global_res
+ * are populated before the conversion/diff/sum tests that use them */
+odp_testinfo_t time_suite_time[] = {
+	ODP_TEST_INFO(time_test_constants),
+	ODP_TEST_INFO(time_test_local_res),
+	ODP_TEST_INFO(time_test_local_conversion),
+	ODP_TEST_INFO(time_test_monotony),
+	ODP_TEST_INFO(time_test_local_cmp),
+	ODP_TEST_INFO(time_test_local_diff),
+	ODP_TEST_INFO(time_test_local_sum),
+	ODP_TEST_INFO(time_test_local_wait_until),
+	ODP_TEST_INFO(time_test_wait_ns),
+	ODP_TEST_INFO(time_test_local_to_u64),
+	ODP_TEST_INFO(time_test_global_res),
+	ODP_TEST_INFO(time_test_global_conversion),
+	ODP_TEST_INFO(time_test_global_cmp),
+	ODP_TEST_INFO(time_test_global_diff),
+	ODP_TEST_INFO(time_test_global_sum),
+	ODP_TEST_INFO(time_test_global_wait_until),
+	ODP_TEST_INFO(time_test_global_to_u64),
+	ODP_TEST_INFO_NULL
+};
+
+/* Suite registry consumed by odp_cunit_register() */
+odp_suiteinfo_t time_suites[] = {
+	{"Time", NULL, NULL, time_suite_time},
+	ODP_SUITE_INFO_NULL
+};
+
+/* Test program entry: parse common CUnit options, then register and
+ * run the time test suites. Returns nonzero on any failure. */
+int time_main(int argc, char *argv[])
+{
+	int ret;
+
+	/* parse common options: */
+	if (odp_cunit_parse_options(argc, argv))
+		return -1;
+
+	ret = odp_cunit_register(time_suites);
+	if (ret)
+		return ret;
+
+	return odp_cunit_run();
+}
diff --git a/test/common_plat/validation/api/time/time.h b/test/common_plat/validation/api/time/time.h
new file mode 100644
index 000000000..e5132a494
--- /dev/null
+++ b/test/common_plat/validation/api/time/time.h
@@ -0,0 +1,40 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _ODP_TEST_TIME_H_
+#define _ODP_TEST_TIME_H_
+
+#include <odp_cunit_common.h>
+
+/* test functions: */
+void time_test_constants(void);
+void time_test_local_res(void);
+void time_test_global_res(void);
+void time_test_local_conversion(void);
+void time_test_global_conversion(void);
+void time_test_local_cmp(void);
+void time_test_global_cmp(void);
+void time_test_local_diff(void);
+void time_test_global_diff(void);
+void time_test_local_sum(void);
+void time_test_global_sum(void);
+void time_test_local_wait_until(void);
+void time_test_global_wait_until(void);
+void time_test_wait_ns(void);
+void time_test_local_to_u64(void);
+void time_test_global_to_u64(void);
+void time_test_monotony(void);
+
+/* test arrays: */
+extern odp_testinfo_t time_suite_time[];
+
+/* test registry: */
+extern odp_suiteinfo_t time_suites[];
+
+/* main test program: */
+int time_main(int argc, char *argv[]);
+
+#endif
diff --git a/test/common_plat/validation/api/time/time_main.c b/test/common_plat/validation/api/time/time_main.c
new file mode 100644
index 000000000..f86d638a5
--- /dev/null
+++ b/test/common_plat/validation/api/time/time_main.c
@@ -0,0 +1,12 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "time.h"
+
+/* Standalone entry point: delegate to the time test driver */
+int main(int argc, char *argv[])
+{
+	return time_main(argc, argv);
+}
diff --git a/test/common_plat/validation/api/timer/.gitignore b/test/common_plat/validation/api/timer/.gitignore
new file mode 100644
index 000000000..74e8fa992
--- /dev/null
+++ b/test/common_plat/validation/api/timer/.gitignore
@@ -0,0 +1 @@
+timer_main
diff --git a/test/common_plat/validation/api/timer/Makefile.am b/test/common_plat/validation/api/timer/Makefile.am
new file mode 100644
index 000000000..fe6872f41
--- /dev/null
+++ b/test/common_plat/validation/api/timer/Makefile.am
@@ -0,0 +1,10 @@
+include ../Makefile.inc
+
+noinst_LTLIBRARIES = libtesttimer.la
+libtesttimer_la_SOURCES = timer.c
+
+test_PROGRAMS = timer_main$(EXEEXT)
+dist_timer_main_SOURCES = timer_main.c
+timer_main_LDADD = libtesttimer.la $(LIBCUNIT_COMMON) $(LIBODP)
+
+EXTRA_DIST = timer.h
diff --git a/test/common_plat/validation/api/timer/timer.c b/test/common_plat/validation/api/timer/timer.c
new file mode 100644
index 000000000..0007639cc
--- /dev/null
+++ b/test/common_plat/validation/api/timer/timer.c
@@ -0,0 +1,605 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ */
+
+/* For rand_r and nanosleep */
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+
+#include <time.h>
+#include <odp.h>
+#include <odp/helper/linux.h>
+#include "odp_cunit_common.h"
+#include "test_debug.h"
+#include "timer.h"
+
+/** @private Timeout range in milliseconds (ms) */
+#define RANGE_MS 2000
+
+/** @private Number of timers per thread */
+#define NTIMERS 2000
+
+/** @private Barrier for thread synchronisation */
+static odp_barrier_t test_barrier;
+
+/** @private Timeout pool handle used by all threads */
+static odp_pool_t tbp;
+
+/** @private Timer pool handle used by all threads */
+static odp_timer_pool_t tp;
+
+/** @private Count of timeouts delivered too late */
+static odp_atomic_u32_t ndelivtoolate;
+
+/** @private Sum of all allocated timers from all threads. Thread-local
+ * caches may make this number lower than the capacity of the pool */
+static odp_atomic_u32_t timers_allocated;
+
+/* @private Timer helper structure. 'ev' holds the timeout event while
+ * the thread owns it and is ODP_EVENT_INVALID while the event has been
+ * handed to an armed timer; 'ev2' keeps a copy of the handle so received
+ * timeouts can be matched back to their timer. */
+struct test_timer {
+	odp_timer_t tim; /* Timer handle */
+	odp_event_t ev; /* Timeout event */
+	odp_event_t ev2; /* Copy of event handle */
+	uint64_t tick; /* Expiration tick or TICK_INVALID */
+};
+
+/* @private Marker: timer has no expected expiration tick (inactive) */
+#define TICK_INVALID (~(uint64_t)0)
+
+/* Allocate all timeouts from a pool of known size, verify their event
+ * type, then free them and destroy the pool. */
+void timer_test_timeout_pool_alloc(void)
+{
+	odp_pool_t pool;
+	const int num = 3;
+	odp_timeout_t tmo[num];
+	odp_event_t ev;
+	int index;
+	char wrong_type = 0;
+	odp_pool_param_t params;
+
+	odp_pool_param_init(&params);
+	params.type    = ODP_POOL_TIMEOUT;
+	params.tmo.num = num;
+
+	pool = odp_pool_create("timeout_pool_alloc", &params);
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+	odp_pool_print(pool);
+
+	/* Try to allocate num items from the pool */
+	for (index = 0; index < num; index++) {
+		tmo[index] = odp_timeout_alloc(pool);
+
+		if (tmo[index] == ODP_TIMEOUT_INVALID)
+			break;
+
+		ev = odp_timeout_to_event(tmo[index]);
+		if (odp_event_type(ev) != ODP_EVENT_TIMEOUT)
+			wrong_type = 1;
+	}
+
+	/* Check that the pool had at least num items */
+	CU_ASSERT(index == num);
+	/* index points out of buffer[] or it point to an invalid buffer */
+	index--;
+
+	/* Check that the pool had correct buffers */
+	CU_ASSERT(wrong_type == 0);
+
+	/* free in reverse order; 'index' is the last valid slot */
+	for (; index >= 0; index--)
+		odp_timeout_free(tmo[index]);
+
+	CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Verify a one-element timeout pool can be exhausted and that a freed
+ * timeout is returned to the pool for reallocation. */
+void timer_test_timeout_pool_free(void)
+{
+	odp_pool_t pool;
+	odp_timeout_t tmo;
+	odp_pool_param_t params;
+
+	odp_pool_param_init(&params);
+	params.type    = ODP_POOL_TIMEOUT;
+	params.tmo.num = 1;
+
+	pool = odp_pool_create("timeout_pool_free", &params);
+	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+	odp_pool_print(pool);
+
+	/* Allocate the only timeout from the pool */
+	tmo = odp_timeout_alloc(pool);
+	CU_ASSERT_FATAL(tmo != ODP_TIMEOUT_INVALID);
+
+	/* Pool should have only one timeout, so a second alloc must fail.
+	 * (Fix: the statement was missing its terminating semicolon.) */
+	CU_ASSERT_FATAL(odp_timeout_alloc(pool) == ODP_TIMEOUT_INVALID);
+
+	odp_timeout_free(tmo);
+
+	/* Check that the timeout was returned back to the pool */
+	tmo = odp_timeout_alloc(pool);
+	CU_ASSERT_FATAL(tmo != ODP_TIMEOUT_INVALID);
+
+	odp_timeout_free(tmo);
+	CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Arm a single relative timer, cancel it before expiration, and verify
+ * the returned timeout carries the right timer handle and user pointer.
+ * All resources (timer, queue, pools) are torn down at the end. */
+void timer_test_odp_timer_cancel(void)
+{
+	odp_pool_t pool;
+	odp_pool_param_t params;
+	odp_timer_pool_param_t tparam;
+	odp_timer_pool_t tp;
+	odp_queue_t queue;
+	odp_timer_t tim;
+	odp_event_t ev;
+	odp_timeout_t tmo;
+	odp_timer_set_t rc;
+	uint64_t tick;
+
+	odp_pool_param_init(&params);
+	params.type    = ODP_POOL_TIMEOUT;
+	params.tmo.num = 1;
+
+	pool = odp_pool_create("tmo_pool_for_cancel", &params);
+
+	if (pool == ODP_POOL_INVALID)
+		CU_FAIL_FATAL("Timeout pool create failed");
+
+	/* 100 ms resolution, timeouts between 1 and 10 seconds */
+	tparam.res_ns = 100 * ODP_TIME_MSEC_IN_NS;
+	tparam.min_tmo = 1 * ODP_TIME_SEC_IN_NS;
+	tparam.max_tmo = 10 * ODP_TIME_SEC_IN_NS;
+	tparam.num_timers = 1;
+	tparam.priv = 0;
+	tparam.clk_src = ODP_CLOCK_CPU;
+	tp = odp_timer_pool_create("timer_pool0", &tparam);
+	if (tp == ODP_TIMER_POOL_INVALID)
+		CU_FAIL_FATAL("Timer pool create failed");
+
+	/* Start all created timer pools */
+	odp_timer_pool_start();
+
+	queue = odp_queue_create("timer_queue", NULL);
+	if (queue == ODP_QUEUE_INVALID)
+		CU_FAIL_FATAL("Queue create failed");
+
+	/* Distinctive user pointer, checked again after cancel */
+	#define USER_PTR ((void *)0xdead)
+	tim = odp_timer_alloc(tp, queue, USER_PTR);
+	if (tim == ODP_TIMER_INVALID)
+		CU_FAIL_FATAL("Failed to allocate timer");
+	LOG_DBG("Timer handle: %" PRIu64 "\n", odp_timer_to_u64(tim));
+
+	ev = odp_timeout_to_event(odp_timeout_alloc(pool));
+	if (ev == ODP_EVENT_INVALID)
+		CU_FAIL_FATAL("Failed to allocate timeout");
+
+	/* Arm for 2 s; cancel below must win the race comfortably */
+	tick = odp_timer_ns_to_tick(tp, 2 * ODP_TIME_SEC_IN_NS);
+
+	rc = odp_timer_set_rel(tim, tick, &ev);
+	if (rc != ODP_TIMER_SUCCESS)
+		CU_FAIL_FATAL("Failed to set timer (relative time)");
+
+	/* A successful cancel must hand the event back through 'ev' */
+	ev = ODP_EVENT_INVALID;
+	if (odp_timer_cancel(tim, &ev) != 0)
+		CU_FAIL_FATAL("Failed to cancel timer (relative time)");
+
+	if (ev == ODP_EVENT_INVALID)
+		CU_FAIL_FATAL("Cancel did not return event");
+
+	tmo = odp_timeout_from_event(ev);
+	if (tmo == ODP_TIMEOUT_INVALID)
+		CU_FAIL_FATAL("Cancel did not return timeout");
+	LOG_DBG("Timeout handle: %" PRIu64 "\n", odp_timeout_to_u64(tmo));
+
+	if (odp_timeout_timer(tmo) != tim)
+		CU_FAIL("Cancel invalid tmo.timer");
+
+	if (odp_timeout_user_ptr(tmo) != USER_PTR)
+		CU_FAIL("Cancel invalid tmo.user_ptr");
+
+	odp_timeout_free(tmo);
+
+	/* Freeing an inactive timer must not return an event */
+	ev = odp_timer_free(tim);
+	if (ev != ODP_EVENT_INVALID)
+		CU_FAIL_FATAL("Free returned event");
+
+	odp_timer_pool_destroy(tp);
+
+	if (odp_queue_destroy(queue) != 0)
+		CU_FAIL_FATAL("Failed to destroy queue");
+
+	if (odp_pool_destroy(pool) != 0)
+		CU_FAIL_FATAL("Failed to destroy pool");
+}
+
+/* @private Handle a received (timeout) event */
+/* Validate a received timeout event against the per-timer bookkeeping.
+ * 'stale' tells whether the timeout was cancelled/reset before delivery;
+ * 'prev_tick' is the earliest tick a fresh timeout may legally carry.
+ * On success the event is handed back to the owning test_timer slot. */
+static void handle_tmo(odp_event_t ev, bool stale, uint64_t prev_tick)
+{
+	CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID); /* Internal error */
+	if (odp_event_type(ev) != ODP_EVENT_TIMEOUT) {
+		/* Not a timeout event */
+		CU_FAIL("Unexpected event type received");
+		return;
+	}
+	/* Read the metadata from the timeout */
+	odp_timeout_t tmo = odp_timeout_from_event(ev);
+	odp_timer_t tim = odp_timeout_timer(tmo);
+	uint64_t tick = odp_timeout_tick(tmo);
+	struct test_timer *ttp = odp_timeout_user_ptr(tmo);
+
+	if (tim == ODP_TIMER_INVALID)
+		CU_FAIL("odp_timeout_timer() invalid timer");
+	if (!ttp)
+		CU_FAIL("odp_timeout_user_ptr() null user ptr");
+
+	/* ev2 is the copy saved at alloc time; it must match the event
+	 * actually delivered for this timer */
+	if (ttp && ttp->ev2 != ev)
+		CU_FAIL("odp_timeout_user_ptr() wrong user ptr");
+	if (ttp && ttp->tim != tim)
+		CU_FAIL("odp_timeout_timer() wrong timer");
+	if (stale) {
+		if (odp_timeout_fresh(tmo))
+			CU_FAIL("Wrong status (fresh) for stale timeout");
+		/* Stale timeout => local timer must have invalid tick */
+		if (ttp && ttp->tick != TICK_INVALID)
+			CU_FAIL("Stale timeout for active timer");
+	} else {
+		if (!odp_timeout_fresh(tmo))
+			CU_FAIL("Wrong status (stale) for fresh timeout");
+		/* Fresh timeout => local timer must have matching tick */
+		if (ttp && ttp->tick != tick) {
+			LOG_DBG("Wrong tick: expected %" PRIu64
+				" actual %" PRIu64 "\n",
+				ttp->tick, tick);
+			CU_FAIL("odp_timeout_tick() wrong tick");
+		}
+		/* Check that timeout was delivered 'timely' */
+		if (tick > odp_timer_current_tick(tp))
+			CU_FAIL("Timeout delivered early");
+		if (tick < prev_tick) {
+			LOG_DBG("Too late tick: %" PRIu64
+				" prev_tick %" PRIu64"\n",
+				tick, prev_tick);
+			/* We don't report late timeouts using CU_FAIL */
+			odp_atomic_inc_u32(&ndelivtoolate);
+		}
+	}
+
+	if (ttp) {
+		/* Internal error: slot must not already hold an event */
+		CU_ASSERT_FATAL(ttp->ev == ODP_EVENT_INVALID);
+		ttp->ev = ev;
+	}
+}
+
+/* @private Worker thread entrypoint which performs timer alloc/set/cancel/free
+ * tests */
+static int worker_entrypoint(void *arg TEST_UNUSED)
+{
+	int thr = odp_thread_id();
+	uint32_t i, allocated;
+	unsigned seed = thr;	/* per-thread PRNG state for rand_r() */
+	int rc;
+	odp_queue_t queue;
+	struct test_timer *tt;
+	uint32_t nset;
+	uint64_t tck;
+	uint32_t nrcv;
+	uint32_t nreset;
+	uint32_t ncancel;
+	uint32_t ntoolate;
+	uint32_t ms;
+	uint64_t prev_tick;
+	odp_event_t ev;
+	struct timespec ts;
+	uint32_t nstale;
+	odp_timer_set_t timer_rc;
+
+	/* Per-thread destination queue for this thread's timeouts */
+	queue = odp_queue_create("timer_queue", NULL);
+	if (queue == ODP_QUEUE_INVALID)
+		CU_FAIL_FATAL("Queue create failed");
+
+	tt = malloc(sizeof(struct test_timer) * NTIMERS);
+	if (!tt)
+		CU_FAIL_FATAL("malloc failed");
+
+	/* Prepare all timers. Allocation may stop short of NTIMERS when
+	 * the shared pools run out; 'allocated' records how far we got. */
+	for (i = 0; i < NTIMERS; i++) {
+		tt[i].ev = odp_timeout_to_event(odp_timeout_alloc(tbp));
+		if (tt[i].ev == ODP_EVENT_INVALID) {
+			LOG_DBG("Failed to allocate timeout (%" PRIu32 "/%d)\n",
+				i, NTIMERS);
+			break;
+		}
+		tt[i].tim = odp_timer_alloc(tp, queue, &tt[i]);
+		if (tt[i].tim == ODP_TIMER_INVALID) {
+			LOG_DBG("Failed to allocate timer (%" PRIu32 "/%d)\n",
+				i, NTIMERS);
+			odp_event_free(tt[i].ev);
+			break;
+		}
+		tt[i].ev2 = tt[i].ev;
+		tt[i].tick = TICK_INVALID;
+	}
+	allocated = i;
+	if (allocated == 0)
+		CU_FAIL_FATAL("unable to alloc a timer");
+	odp_atomic_fetch_add_u32(&timers_allocated, allocated);
+
+	/* Start the timer storm on all workers simultaneously */
+	odp_barrier_wait(&test_barrier);
+
+	/* Initial set all timers with a random expiration time */
+	nset = 0;
+	for (i = 0; i < allocated; i++) {
+		tck = odp_timer_current_tick(tp) + 1 +
+		      odp_timer_ns_to_tick(tp, (rand_r(&seed) % RANGE_MS)
+					   * 1000000ULL);
+		timer_rc = odp_timer_set_abs(tt[i].tim, tck, &tt[i].ev);
+		if (timer_rc != ODP_TIMER_SUCCESS) {
+			CU_FAIL("Failed to set timer");
+		} else {
+			tt[i].tick = tck;
+			nset++;
+		}
+	}
+
+	/* Step through wall time, 1ms at a time and check for expired timers */
+	nrcv = 0;
+	nreset = 0;
+	ncancel = 0;
+	ntoolate = 0;
+	prev_tick = odp_timer_current_tick(tp);
+
+	for (ms = 0; ms < 7 * RANGE_MS / 10 && allocated > 0; ms++) {
+		/* Drain and validate all delivered timeouts */
+		while ((ev = odp_queue_deq(queue)) != ODP_EVENT_INVALID) {
+			/* Subtract one from prev_tick to allow for timeouts
+			 * to be delivered a tick late */
+			handle_tmo(ev, false, prev_tick - 1);
+			nrcv++;
+		}
+		prev_tick = odp_timer_current_tick(tp);
+		i = rand_r(&seed) % allocated;
+		/* ev == ODP_EVENT_INVALID means the event is currently owned
+		 * by an armed timer, i.e. the timer is active */
+		if (tt[i].ev == ODP_EVENT_INVALID &&
+		    (rand_r(&seed) % 2 == 0)) {
+			/* Timer active, cancel it */
+			rc = odp_timer_cancel(tt[i].tim, &tt[i].ev);
+			if (rc != 0)
+				/* Cancel failed, timer already expired */
+				ntoolate++;
+			tt[i].tick = TICK_INVALID;
+			ncancel++;
+		} else {
+			if (tt[i].ev != ODP_EVENT_INVALID)
+				/* Timer inactive => set */
+				nset++;
+			else
+				/* Timer active => reset */
+				nreset++;
+			uint64_t tck = 1 + odp_timer_ns_to_tick(tp,
+				       (rand_r(&seed) % RANGE_MS) * 1000000ULL);
+			odp_timer_set_t rc;
+			uint64_t cur_tick;
+			/* Loop until we manage to read cur_tick and set a
+			 * relative timer in the same tick */
+			do {
+				cur_tick = odp_timer_current_tick(tp);
+				rc = odp_timer_set_rel(tt[i].tim,
+						       tck, &tt[i].ev);
+			} while (cur_tick != odp_timer_current_tick(tp));
+			if (rc == ODP_TIMER_TOOEARLY ||
+			    rc == ODP_TIMER_TOOLATE) {
+				CU_FAIL("Failed to set timer (tooearly/toolate)");
+			} else if (rc != ODP_TIMER_SUCCESS) {
+				/* Set/reset failed, timer already expired */
+				ntoolate++;
+			} else if (rc == ODP_TIMER_SUCCESS) {
+				/* Save expected expiration tick on success */
+				tt[i].tick = cur_tick + tck;
+			}
+		}
+		ts.tv_sec = 0;
+		ts.tv_nsec = 1000000; /* 1ms */
+		if (nanosleep(&ts, NULL) < 0)
+			CU_FAIL_FATAL("nanosleep failed");
+	}
+
+	/* Cancel and free all timers */
+	nstale = 0;
+	for (i = 0; i < allocated; i++) {
+		(void)odp_timer_cancel(tt[i].tim, &tt[i].ev);
+		tt[i].tick = TICK_INVALID;
+		if (tt[i].ev == ODP_EVENT_INVALID)
+			/* Cancel too late, timer already expired and
+			 * timeout enqueued */
+			nstale++;
+	}
+
+	/* NOTE(review): 'thr' is int but printed with %u — harmless on
+	 * common ABIs, but %d would match the type exactly */
+	LOG_DBG("Thread %u: %" PRIu32 " timers set\n", thr, nset);
+	LOG_DBG("Thread %u: %" PRIu32 " timers reset\n", thr, nreset);
+	LOG_DBG("Thread %u: %" PRIu32 " timers cancelled\n", thr, ncancel);
+	LOG_DBG("Thread %u: %" PRIu32 " timers reset/cancelled too late\n",
+		thr, ntoolate);
+	LOG_DBG("Thread %u: %" PRIu32 " timeouts received\n", thr, nrcv);
+	LOG_DBG("Thread %u: %" PRIu32
+		" stale timeout(s) after odp_timer_free()\n",
+		thr, nstale);
+
+	/* Delay some more to ensure timeouts for expired timers can be
+	 * received. Can not use busy loop here to make background timer
+	 * thread finish their work. */
+	ts.tv_sec = 0;
+	ts.tv_nsec = (3 * RANGE_MS / 10 + 50) * ODP_TIME_MSEC_IN_NS;
+	if (nanosleep(&ts, NULL) < 0)
+		CU_FAIL_FATAL("nanosleep failed");
+
+	/* Collect the stale timeouts that were already in flight */
+	while (nstale != 0) {
+		ev = odp_queue_deq(queue);
+		if (ev != ODP_EVENT_INVALID) {
+			handle_tmo(ev, true, 0/*Don't care for stale tmo's*/);
+			nstale--;
+		} else {
+			CU_FAIL("Failed to receive stale timeout");
+			break;
+		}
+	}
+
+	/* All timers are inactive now; free must not return an event */
+	for (i = 0; i < allocated; i++) {
+		if (odp_timer_free(tt[i].tim) != ODP_EVENT_INVALID)
+			CU_FAIL("odp_timer_free");
+	}
+
+	/* Check if there any more (unexpected) events */
+	ev = odp_queue_deq(queue);
+	if (ev != ODP_EVENT_INVALID)
+		CU_FAIL("Unexpected event received");
+
+	rc = odp_queue_destroy(queue);
+	CU_ASSERT(rc == 0);
+	/* Return the thread-owned timeout events to the pool */
+	for (i = 0; i < allocated; i++) {
+		if (tt[i].ev != ODP_EVENT_INVALID)
+			odp_event_free(tt[i].ev);
+	}
+
+	free(tt);
+	LOG_DBG("Thread %u: exiting\n", thr);
+	return CU_get_number_of_failures();
+}
+
+/* @private Timer test case entrypoint */
+/* Main timer stress test: create shared timeout/timer pools, verify
+ * pool info and tick<->ns conversions, then run worker_entrypoint() on
+ * every worker thread and check pool statistics afterwards. */
+void timer_test_odp_timer_all(void)
+{
+	int rc;
+	odp_pool_param_t params;
+	odp_timer_pool_param_t tparam;
+	odp_cpumask_t unused;
+	odp_timer_pool_info_t tpinfo;
+	uint64_t tick;
+	uint64_t ns;
+	uint64_t t2;
+	pthrd_arg thrdarg;
+
+	/* Reserve at least one core for running other processes so the timer
+	 * test hopefully can run undisturbed and thus get better timing
+	 * results. */
+	int num_workers = odp_cpumask_default_worker(&unused, 0);
+
+	/* force to max CPU count */
+	if (num_workers > MAX_WORKERS)
+		num_workers = MAX_WORKERS;
+
+	/* On a single-CPU machine run at least one thread */
+	if (num_workers < 1)
+		num_workers = 1;
+
+	/* Create timeout pools: NTIMERS per worker plus one spare each */
+	odp_pool_param_init(&params);
+	params.type    = ODP_POOL_TIMEOUT;
+	params.tmo.num = (NTIMERS + 1) * num_workers;
+
+	tbp = odp_pool_create("tmo_pool", &params);
+	if (tbp == ODP_POOL_INVALID)
+		CU_FAIL_FATAL("Timeout pool create failed");
+
+#define NAME "timer_pool"
+#define RES (10 * ODP_TIME_MSEC_IN_NS / 3)
+#define MIN (10 * ODP_TIME_MSEC_IN_NS / 3)
+#define MAX (1000000 * ODP_TIME_MSEC_IN_NS)
+	/* Create a timer pool */
+	tparam.res_ns = RES;
+	tparam.min_tmo = MIN;
+	tparam.max_tmo = MAX;
+	tparam.num_timers = num_workers * NTIMERS;
+	tparam.priv = 0;
+	tparam.clk_src = ODP_CLOCK_CPU;
+	tp = odp_timer_pool_create(NAME, &tparam);
+	if (tp == ODP_TIMER_POOL_INVALID)
+		CU_FAIL_FATAL("Timer pool create failed");
+
+	/* Start all created timer pools */
+	odp_timer_pool_start();
+
+	/* Pool info must echo back the creation parameters */
+	if (odp_timer_pool_info(tp, &tpinfo) != 0)
+		CU_FAIL("odp_timer_pool_info");
+	CU_ASSERT(strcmp(tpinfo.name, NAME) == 0);
+	CU_ASSERT(tpinfo.param.res_ns == RES);
+	CU_ASSERT(tpinfo.param.min_tmo == MIN);
+	CU_ASSERT(tpinfo.param.max_tmo == MAX);
+	CU_ASSERT(strcmp(tpinfo.name, NAME) == 0);
+
+	LOG_DBG("Timer pool handle: %" PRIu64 "\n", odp_timer_pool_to_u64(tp));
+	LOG_DBG("#timers..: %u\n", NTIMERS);
+	LOG_DBG("Tmo range: %u ms (%" PRIu64 " ticks)\n", RANGE_MS,
+		odp_timer_ns_to_tick(tp, 1000000ULL * RANGE_MS));
+
+	/* tick -> ns -> tick must round-trip exactly for sampled ticks */
+	for (tick = 0; tick < 1000000000000ULL; tick += 1000000ULL) {
+		ns = odp_timer_tick_to_ns(tp, tick);
+		t2 = odp_timer_ns_to_tick(tp, ns);
+		if (tick != t2)
+			CU_FAIL("Invalid conversion tick->ns->tick");
+	}
+
+	/* Initialize barrier used by worker threads for synchronization */
+	odp_barrier_init(&test_barrier, num_workers);
+
+	/* Initialize the shared timeout counter */
+	odp_atomic_init_u32(&ndelivtoolate, 0);
+
+	/* Initialize the number of finally allocated elements */
+	odp_atomic_init_u32(&timers_allocated, 0);
+
+	/* Create and start worker threads */
+	thrdarg.testcase = 0;
+	thrdarg.numthrds = num_workers;
+	odp_cunit_thread_create(worker_entrypoint, &thrdarg);
+
+	/* Wait for worker threads to exit */
+	odp_cunit_thread_exit(&thrdarg);
+	LOG_DBG("Number of timeouts delivered/received too late: %" PRIu32 "\n",
+		odp_atomic_load_u32(&ndelivtoolate));
+
+	/* Check some statistics after the test */
+	if (odp_timer_pool_info(tp, &tpinfo) != 0)
+		CU_FAIL("odp_timer_pool_info");
+	CU_ASSERT(tpinfo.param.num_timers == (unsigned)num_workers * NTIMERS);
+	CU_ASSERT(tpinfo.cur_timers == 0);
+	CU_ASSERT(tpinfo.hwm_timers == odp_atomic_load_u32(&timers_allocated));
+
+	/* Destroy timer pool, all timers must have been freed */
+	odp_timer_pool_destroy(tp);
+
+	/* Destroy timeout pool, all timeouts must have been freed */
+	rc = odp_pool_destroy(tbp);
+	CU_ASSERT(rc == 0);
+
+	CU_PASS("ODP timer test");
+}
+
+/* Test array for the timer suite */
+odp_testinfo_t timer_suite[] = {
+	ODP_TEST_INFO(timer_test_timeout_pool_alloc),
+	ODP_TEST_INFO(timer_test_timeout_pool_free),
+	ODP_TEST_INFO(timer_test_odp_timer_cancel),
+	ODP_TEST_INFO(timer_test_odp_timer_all),
+	ODP_TEST_INFO_NULL,
+};
+
+/* Suite registry consumed by odp_cunit_register() */
+odp_suiteinfo_t timer_suites[] = {
+	{"Timer", NULL, NULL, timer_suite},
+	ODP_SUITE_INFO_NULL,
+};
+
+/* Test program entry: parse common CUnit options, then register and
+ * run the timer test suites. Returns nonzero on any failure. */
+int timer_main(int argc, char *argv[])
+{
+	int ret;
+
+	/* parse common options: */
+	if (odp_cunit_parse_options(argc, argv))
+		return -1;
+
+	ret = odp_cunit_register(timer_suites);
+	if (ret == 0)
+		ret = odp_cunit_run();
+
+	return ret;
+}
diff --git a/test/common_plat/validation/api/timer/timer.h b/test/common_plat/validation/api/timer/timer.h
new file mode 100644
index 000000000..bd304fffd
--- /dev/null
+++ b/test/common_plat/validation/api/timer/timer.h
@@ -0,0 +1,27 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _ODP_TEST_TIMER_H_
+#define _ODP_TEST_TIMER_H_
+
+#include <odp_cunit_common.h>
+
+/* test functions: */
+void timer_test_timeout_pool_alloc(void);
+void timer_test_timeout_pool_free(void);
+void timer_test_odp_timer_cancel(void);
+void timer_test_odp_timer_all(void);
+
+/* test arrays: */
+extern odp_testinfo_t timer_suite[];
+
+/* test registry: */
+extern odp_suiteinfo_t timer_suites[];
+
+/* main test program: */
+int timer_main(int argc, char *argv[]);
+
+#endif
diff --git a/test/common_plat/validation/api/timer/timer_main.c b/test/common_plat/validation/api/timer/timer_main.c
new file mode 100644
index 000000000..c318763fa
--- /dev/null
+++ b/test/common_plat/validation/api/timer/timer_main.c
@@ -0,0 +1,12 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "timer.h"
+
+/* Thin wrapper so the timer validation suite builds as a standalone
+ * executable; all work is done by timer_main(). */
+int main(int argc, char *argv[])
+{
+	return timer_main(argc, argv);
+}
diff --git a/test/common_plat/validation/api/traffic_mngr/.gitignore b/test/common_plat/validation/api/traffic_mngr/.gitignore
new file mode 100644
index 000000000..efd07a27d
--- /dev/null
+++ b/test/common_plat/validation/api/traffic_mngr/.gitignore
@@ -0,0 +1 @@
+traffic_mngr_main
diff --git a/test/common_plat/validation/api/traffic_mngr/Makefile.am b/test/common_plat/validation/api/traffic_mngr/Makefile.am
new file mode 100644
index 000000000..35e689a02
--- /dev/null
+++ b/test/common_plat/validation/api/traffic_mngr/Makefile.am
@@ -0,0 +1,10 @@
+include ../Makefile.inc
+
+noinst_LTLIBRARIES = libtesttraffic_mngr.la
+libtesttraffic_mngr_la_SOURCES = traffic_mngr.c
+
+test_PROGRAMS = traffic_mngr_main$(EXEEXT)
+dist_traffic_mngr_main_SOURCES = traffic_mngr_main.c
+traffic_mngr_main_LDADD = libtesttraffic_mngr.la -lm $(LIBCUNIT_COMMON) $(LIBODP)
+
+EXTRA_DIST = traffic_mngr.h
diff --git a/test/common_plat/validation/api/traffic_mngr/traffic_mngr.c b/test/common_plat/validation/api/traffic_mngr/traffic_mngr.c
new file mode 100644
index 000000000..1c4e90bf3
--- /dev/null
+++ b/test/common_plat/validation/api/traffic_mngr/traffic_mngr.c
@@ -0,0 +1,4009 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#define _GNU_SOURCE
+
+#include <stdlib.h>
+#include <stddef.h>
+#include <string.h>
+#include <unistd.h>
+#include <math.h>
+#include <odp.h>
+#include <odp/helper/eth.h>
+#include <odp/helper/ip.h>
+#include <odp/helper/udp.h>
+#include <odp/helper/tcp.h>
+#include <odp/helper/chksum.h>
+#include <test_debug.h>
+#include "odp_cunit_common.h"
+#include "traffic_mngr.h"
+
+#define TM_DEBUG 0
+
+#define MAX_CAPABILITIES 16
+#define MAX_NUM_IFACES 2
+#define MAX_TM_SYSTEMS 3
+#define NUM_LEVELS 3
+#define NUM_PRIORITIES 4
+#define NUM_QUEUES_PER_NODE NUM_PRIORITIES
+#define FANIN_RATIO 8
+#define NUM_LEVEL0_TM_NODES 1
+#define NUM_LEVEL1_TM_NODES FANIN_RATIO
+#define NUM_LEVEL2_TM_NODES (FANIN_RATIO * FANIN_RATIO)
+#define NUM_TM_QUEUES (NUM_LEVEL2_TM_NODES * NUM_QUEUES_PER_NODE)
+#define NUM_SHAPER_PROFILES 64
+#define NUM_SCHED_PROFILES 64
+#define NUM_THRESHOLD_PROFILES 64
+#define NUM_WRED_PROFILES 64
+#define NUM_SHAPER_TEST_PROFILES 8
+#define NUM_SCHED_TEST_PROFILES 8
+#define NUM_THRESH_TEST_PROFILES 8
+#define NUM_WRED_TEST_PROFILES 8
+
+#define ODP_NUM_PKT_COLORS ODP_NUM_PACKET_COLORS
+#define PKT_GREEN ODP_PACKET_GREEN
+#define PKT_YELLOW ODP_PACKET_YELLOW
+#define PKT_RED ODP_PACKET_RED
+
+#define MIN_COMMIT_BW (64 * 1024)
+#define MIN_COMMIT_BURST 8000
+#define MIN_PEAK_BW 2000000
+#define MIN_PEAK_BURST 16000
+
+#define INITIAL_RCV_GAP_DROP 10 /* This is a percent of rcvd pkts */
+#define ENDING_RCV_GAP_DROP 20 /* This is a percent of rcvd pkts */
+
+#define MIN_SHAPER_BW_RCV_GAP 80 /* Percent of expected_rcv_gap */
+#define MAX_SHAPER_BW_RCV_GAP 125 /* Percent of expected_rcv_gap */
+
+#define MIN_PKT_THRESHOLD 10
+#define MIN_BYTE_THRESHOLD 2048
+
+#define MIN_WRED_THRESH 5
+#define MED_WRED_THRESH 10
+#define MED_DROP_PROB 4
+#define MAX_DROP_PROB 8
+
+#define MAX_PKTS 1000
+#define PKT_BUF_SIZE 1460
+#define MAX_PAYLOAD 1400
+#define USE_IPV4 false
+#define USE_IPV6 true
+#define USE_UDP false
+#define USE_TCP true
+#define LOW_DROP_PRECEDENCE 0x02
+#define MEDIUM_DROP_PRECEDENCE 0x04
+#define HIGH_DROP_PRECEDENCE 0x06
+#define DROP_PRECEDENCE_MASK 0x06
+#define DSCP_CLASS1 0x08
+#define DSCP_CLASS2 0x10
+#define DSCP_CLASS3 0x18
+#define DSCP_CLASS4 0x20
+#define DEFAULT_DSCP (DSCP_CLASS2 | LOW_DROP_PRECEDENCE)
+#define DEFAULT_ECN ODPH_IP_ECN_ECT0
+#define DEFAULT_TOS ((DEFAULT_DSCP << ODPH_IP_TOS_DSCP_SHIFT) | \
+ DEFAULT_ECN)
+#define DEFAULT_TTL 128
+#define DEFAULT_UDP_SRC_PORT 12049
+#define DEFAULT_UDP_DST_PORT 12050
+#define DEFAULT_TCP_SRC_PORT 0xDEAD
+#define DEFAULT_TCP_DST_PORT 0xBABE
+#define DEFAULT_TCP_SEQ_NUM 0x12345678
+#define DEFAULT_TCP_ACK_NUM 0x12340000
+#define DEFAULT_TCP_WINDOW 0x4000
+#define VLAN_PRIORITY_BK 1 /* Background - lowest priority */
+#define VLAN_PRIORITY_BE 0 /* Best Effort */
+#define VLAN_PRIORITY_EE 2 /* Excellent Effort */
+#define VLAN_PRIORITY_NC 7 /* Network Control - highest priority */
+#define VLAN_DEFAULT_VID 12
+#define VLAN_NO_DEI ((VLAN_PRIORITY_EE << 13) | VLAN_DEFAULT_VID)
+#define ETHERNET_IFG 12 /* Ethernet Interframe Gap */
+#define ETHERNET_PREAMBLE 8
+#define ETHERNET_OVHD_LEN (ETHERNET_IFG + ETHERNET_PREAMBLE)
+#define CRC_LEN 4
+#define SHAPER_LEN_ADJ ETHERNET_OVHD_LEN
+#define TM_NAME_LEN 32
+#define BILLION 1000000000ULL
+#define MS 1000000 /* Millisecond in units of NS */
+#define MBPS 1000000
+#define GBPS 1000000000
+
+#define MIN(a, b) (((a) <= (b)) ? (a) : (b))
+#define MAX(a, b) (((a) <= (b)) ? (b) : (a))
+
+#define TM_PERCENT(percent) ((uint32_t)(100 * percent))
+
+typedef enum {
+ SHAPER_PROFILE, SCHED_PROFILE, THRESHOLD_PROFILE, WRED_PROFILE
+} profile_kind_t;
+
+typedef struct {
+ uint32_t num_queues;
+ odp_tm_queue_t tm_queues[0];
+} tm_queue_desc_t;
+
+typedef struct tm_node_desc_s tm_node_desc_t;
+
+struct tm_node_desc_s {
+ uint32_t level;
+ uint32_t node_idx;
+ uint32_t num_children;
+ char *node_name;
+ odp_tm_node_t node;
+ odp_tm_node_t parent_node;
+ tm_queue_desc_t *queue_desc;
+ tm_node_desc_t *children[0];
+};
+
+typedef struct {
+ uint32_t num_samples;
+ uint32_t min_rcv_gap;
+ uint32_t max_rcv_gap;
+ uint32_t total_rcv_gap;
+ uint64_t total_rcv_gap_squared;
+ uint32_t avg_rcv_gap;
+ uint32_t std_dev_gap;
+} rcv_stats_t;
+
+typedef struct {
+ odp_time_t xmt_time;
+ odp_time_t rcv_time;
+ uint64_t delta_ns;
+ odp_tm_queue_t tm_queue;
+ uint16_t pkt_len;
+ uint16_t xmt_unique_id;
+ uint16_t xmt_idx;
+ uint8_t pkt_class;
+ uint8_t was_rcvd;
+} xmt_pkt_desc_t;
+
+typedef struct {
+ odp_time_t rcv_time;
+ xmt_pkt_desc_t *xmt_pkt_desc;
+ uint16_t rcv_unique_id;
+ uint16_t xmt_idx;
+ uint8_t errors;
+ uint8_t matched;
+ uint8_t pkt_class;
+ uint8_t is_ipv4_pkt;
+} rcv_pkt_desc_t;
+
+typedef struct {
+ odp_tm_percent_t confidence_percent;
+ odp_tm_percent_t drop_percent;
+ uint32_t min_cnt;
+ uint32_t max_cnt;
+} wred_pkt_cnts_t;
+
+typedef struct {
+ uint32_t num_queues;
+ uint32_t priority;
+ odp_tm_queue_t tm_queues[NUM_LEVEL2_TM_NODES];
+} queue_array_t;
+
+typedef struct {
+ queue_array_t queue_array[NUM_PRIORITIES];
+} queues_set_t;
+
+typedef struct {
+ uint16_t vlan_tci;
+ uint8_t pkt_class;
+ uint8_t ip_tos; /* TOS for IPv4 and TC for IPv6 */
+ odp_packet_color_t pkt_color;
+ odp_bool_t drop_eligible;
+ odp_bool_t use_vlan; /* Else no VLAN header */
+ odp_bool_t use_ipv6; /* Else use IPv4 */
+ odp_bool_t use_tcp; /* Else use UDP */
+} pkt_info_t;
+
+static const char ALPHABET[] =
+ "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
+
+/* The following constant table determines the minimum and maximum number of
+ * pkts that will be received when sending 100 pkts through a system with a
+ * drop probability of p% (using a uniform probability distribution), with a
+ * confidence of 99%, 99.9% and 99.99%. The confidence is interpreted as
+ * follows: a 99.99% confidence says that receiving LESS pkts than the given
+ * minimum or receiving MORE pkts than the given maximum (assuming a uniform
+ * drop percent of p) will happen less than 1 time in 10,000 trials.
+ * Mathematically the minimum pkt cnt is the largest value of cnt
+ * that satisfies the following equation:
+ * "(1 - cf/100)/2 <= Sum(binomial(100,k) * (1-p)^k * p^(100-k), k=0..cnt)",
+ * where cf is the confidence, caret (^) represents exponentiation,
+ * binomial(n,k) is the binomial coefficient defined as n! / (k! * (n-k)!).
+ * and p is the drop probability. Similarly the maximum pkt cnt is the
+ * smallest value of cnt that satisfies the equation:
+ * "(1 - cf/100)/2 <= Sum(binomial(100,k) * (1-p)^k * p^(100-k), k=cnt..100)".
+ * As a consequence of this, it should be the case that:
+ * cf/100 <= Sum(binomial(100,k) * (1-p)^k * p^(100-k), k=min..max)".
+ */
+static wred_pkt_cnts_t EXPECTED_PKT_RCVD[] = {
+ { TM_PERCENT(99.0), TM_PERCENT(10.0), 82, 97 },
+ { TM_PERCENT(99.0), TM_PERCENT(20.0), 69, 90 },
+ { TM_PERCENT(99.0), TM_PERCENT(30.0), 58, 81 },
+ { TM_PERCENT(99.0), TM_PERCENT(40.0), 47, 72 },
+ { TM_PERCENT(99.0), TM_PERCENT(50.0), 37, 63 },
+ { TM_PERCENT(99.0), TM_PERCENT(60.0), 28, 53 },
+ { TM_PERCENT(99.0), TM_PERCENT(70.0), 19, 42 },
+ { TM_PERCENT(99.0), TM_PERCENT(80.0), 10, 31 },
+ { TM_PERCENT(99.0), TM_PERCENT(90.0), 3, 18 },
+
+ { TM_PERCENT(99.9), TM_PERCENT(10.0), 79, 98 },
+ { TM_PERCENT(99.9), TM_PERCENT(20.0), 66, 92 },
+ { TM_PERCENT(99.9), TM_PERCENT(30.0), 54, 84 },
+ { TM_PERCENT(99.9), TM_PERCENT(40.0), 44, 76 },
+ { TM_PERCENT(99.9), TM_PERCENT(50.0), 34, 66 },
+ { TM_PERCENT(99.9), TM_PERCENT(60.0), 24, 56 },
+ { TM_PERCENT(99.9), TM_PERCENT(70.0), 16, 46 },
+ { TM_PERCENT(99.9), TM_PERCENT(80.0), 8, 34 },
+ { TM_PERCENT(99.9), TM_PERCENT(90.0), 2, 21 },
+
+ { TM_PERCENT(99.99), TM_PERCENT(10.0), 77, 99 },
+ { TM_PERCENT(99.99), TM_PERCENT(20.0), 63, 94 },
+ { TM_PERCENT(99.99), TM_PERCENT(30.0), 51, 87 },
+ { TM_PERCENT(99.99), TM_PERCENT(40.0), 41, 78 },
+ { TM_PERCENT(99.99), TM_PERCENT(50.0), 31, 69 },
+ { TM_PERCENT(99.99), TM_PERCENT(60.0), 22, 59 },
+ { TM_PERCENT(99.99), TM_PERCENT(70.0), 13, 49 },
+ { TM_PERCENT(99.99), TM_PERCENT(80.0), 6, 37 },
+ { TM_PERCENT(99.99), TM_PERCENT(90.0), 1, 23 },
+};
+
+static uint8_t EQUAL_WEIGHTS[FANIN_RATIO] = {
+ 16, 16, 16, 16, 16, 16, 16, 16
+};
+
+static uint8_t INCREASING_WEIGHTS[FANIN_RATIO] = {
+ 8, 12, 16, 24, 32, 48, 64, 96
+};
+
+static uint8_t IPV4_SRC_ADDR[ODPH_IPV4ADDR_LEN] = {
+ 10, 0, 0, 1 /* I.e. 10.0.0.1 */
+};
+
+static uint8_t IPV4_DST_ADDR[ODPH_IPV4ADDR_LEN] = {
+ 10, 0, 0, 100 /* I.e. 10.0.0.100 */
+};
+
+static uint8_t IPV6_SRC_ADDR[ODPH_IPV6ADDR_LEN] = {
+ /* I.e. ::ffff:10.0.0.1 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, 10, 0, 0, 1
+};
+
+static uint8_t IPV6_DST_ADDR[ODPH_IPV6ADDR_LEN] = {
+ /* I.e. ::ffff:10.0.0.100 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, 10, 0, 0, 100
+};
+
+static odp_tm_t odp_tm_systems[MAX_TM_SYSTEMS];
+static tm_node_desc_t *root_node_descs[MAX_TM_SYSTEMS];
+static uint32_t num_odp_tm_systems;
+
+static odp_tm_capabilities_t tm_capabilities;
+
+static odp_tm_shaper_t shaper_profiles[NUM_SHAPER_PROFILES];
+static odp_tm_sched_t sched_profiles[NUM_SCHED_PROFILES];
+static odp_tm_threshold_t threshold_profiles[NUM_THRESHOLD_PROFILES];
+static odp_tm_wred_t wred_profiles[NUM_WRED_PROFILES][ODP_NUM_PKT_COLORS];
+
+static uint32_t num_shaper_profiles;
+static uint32_t num_sched_profiles;
+static uint32_t num_threshold_profiles;
+static uint32_t num_wred_profiles;
+
+static uint8_t payload_data[MAX_PAYLOAD];
+
+static odp_packet_t xmt_pkts[MAX_PKTS];
+static xmt_pkt_desc_t xmt_pkt_descs[MAX_PKTS];
+static uint32_t num_pkts_made;
+static uint32_t num_pkts_sent;
+
+static odp_packet_t rcv_pkts[MAX_PKTS];
+static rcv_pkt_desc_t rcv_pkt_descs[MAX_PKTS];
+static uint32_t num_rcv_pkts;
+
+static uint32_t rcv_gaps[MAX_PKTS];
+static uint32_t rcv_gap_cnt;
+
+static queues_set_t queues_set;
+static uint32_t unique_id_list[MAX_PKTS];
+
+/* interface names used for testing */
+static const char *iface_name[MAX_NUM_IFACES];
+
+/** number of interfaces being used (1=loopback, 2=pair) */
+static uint32_t num_ifaces;
+
+static odp_pool_t pools[MAX_NUM_IFACES] = {ODP_POOL_INVALID, ODP_POOL_INVALID};
+
+static odp_pktio_t pktios[MAX_NUM_IFACES];
+static odp_pktin_queue_t pktins[MAX_NUM_IFACES];
+static odp_pktout_queue_t pktouts[MAX_NUM_IFACES];
+static odp_pktin_queue_t rcv_pktin;
+static odp_pktout_queue_t xmt_pktout;
+
+static odph_ethaddr_t src_mac;
+static odph_ethaddr_t dst_mac;
+
+static uint32_t cpu_unique_id;
+static uint32_t cpu_tcp_seq_num;
+
+/* Spin (without sleeping or yielding) for the requested number of
+ * nanoseconds, measured with the ODP local time source.  odp_cpu_pause()
+ * keeps the busy loop friendly to SMT siblings. */
+static void busy_wait(uint64_t nanoseconds)
+{
+	odp_time_t start_time, end_time;
+
+	start_time = odp_time_local();
+	end_time = odp_time_sum(start_time,
+				odp_time_local_from_ns(nanoseconds));
+
+	while (odp_time_cmp(odp_time_local(), end_time) < 0)
+		odp_cpu_pause();
+}
+
+/* Return true iff val is within +/-2% of correct (exact match included).
+ * The comparison is done on 100x-scaled 64-bit values so no precision is
+ * lost to integer division. */
+static odp_bool_t approx_eq32(uint32_t val, uint32_t correct)
+{
+	uint64_t scaled_val;
+
+	if (val == correct)
+		return true;
+
+	/* 98 * correct <= 100 * val <= 102 * correct */
+	scaled_val = 100 * (uint64_t)val;
+	return (98 * (uint64_t)correct <= scaled_val) &&
+	       (scaled_val <= 102 * (uint64_t)correct);
+}
+
+/* 64-bit variant of approx_eq32: true iff val is within +/-2% of correct.
+ * NOTE(review): 98/100/102 * a uint64_t can wrap for values above roughly
+ * UINT64_MAX/102; assumed fine for the rates/lengths used in these tests —
+ * confirm if ever fed raw 64-bit counters. */
+static odp_bool_t approx_eq64(uint64_t val, uint64_t correct)
+{
+	uint64_t low_bound, val_times_100, high_bound;
+
+	if (val == correct)
+		return true;
+
+	low_bound = 98 * correct;
+	val_times_100 = 100 * val;
+	high_bound = 102 * correct;
+
+	if ((low_bound <= val_times_100) && (val_times_100 <= high_bound))
+		return true;
+	else
+		return false;
+}
+
+/* Sanity-check odp_tm_capabilities(): the call must succeed, return at
+ * least one record, and every record (and each of its per-level entries)
+ * must be non-trivial.  Each error path records a CUnit failure by
+ * asserting the condition that was violated, then returns -1; returns 0
+ * on success. */
+static int test_overall_capabilities(void)
+{
+	odp_tm_level_capabilities_t *per_level;
+	odp_tm_capabilities_t capabilities_array[MAX_CAPABILITIES];
+	odp_tm_capabilities_t *cap_ptr;
+	uint32_t num_records, idx, num_levels, level;
+	int rc;
+
+	rc = odp_tm_capabilities(capabilities_array, MAX_CAPABILITIES);
+	if (rc < 0) {
+		/* Assert the violated condition (rc >= 0) so CUnit records
+		 * a failure; the original asserted rc < 0, which is true
+		 * here and therefore recorded a pass. */
+		CU_ASSERT(rc >= 0);
+		return -1;
+	}
+
+	/* Now test the return code (which did not indicate a failure code)
+	 * to make sure that there is at least ONE capabilities record
+	 * returned */
+	if (rc == 0) {
+		CU_ASSERT(rc != 0);
+		return -1;
+	}
+
+	/* Now test the return code to see if there were more capabilities
+	 * records than the call above allowed for. This is not an error,
+	 * just an interesting fact.
+	 */
+	num_records = MAX_CAPABILITIES;
+	if (MAX_CAPABILITIES < rc)
+		LOG_DBG("There were more than %u capabilities (%u)\n",
+			MAX_CAPABILITIES, (unsigned)rc);
+	else
+		num_records = rc;
+
+	/* Loop through the returned capabilities (there MUST be at least one)
+	 * and do some basic checks to prove that it isn't just an empty
+	 * record. */
+	for (idx = 0; idx < num_records; idx++) {
+		cap_ptr = &capabilities_array[idx];
+		if (cap_ptr->max_tm_queues == 0) {
+			CU_ASSERT(cap_ptr->max_tm_queues != 0);
+			return -1;
+		}
+
+		if (cap_ptr->max_levels == 0) {
+			CU_ASSERT(cap_ptr->max_levels != 0);
+			return -1;
+		}
+
+		num_levels = cap_ptr->max_levels;
+		for (level = 0; level < num_levels; level++) {
+			per_level = &cap_ptr->per_level[level];
+
+			if (per_level->max_num_tm_nodes == 0) {
+				CU_ASSERT(per_level->max_num_tm_nodes != 0);
+				return -1;
+			}
+
+			if (per_level->max_fanin_per_node == 0) {
+				CU_ASSERT(per_level->max_fanin_per_node != 0);
+				return -1;
+			}
+
+			if (per_level->max_priority == 0) {
+				CU_ASSERT(per_level->max_priority != 0);
+				return -1;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/* Poll the pktio link status until it comes up, an error occurs, or the
+ * total wait expires (100 polls x 10 ms = 1 second).  Returns 1 when the
+ * link is up, 0 if still down at timeout, negative on query error. */
+static int wait_linkup(odp_pktio_t pktio)
+{
+	/* wait 1 second for link up */
+	uint64_t wait_ns = (10 * ODP_TIME_MSEC_IN_NS);
+	int wait_num = 100;
+	int i;
+	int ret = -1;
+
+	for (i = 0; i < wait_num; i++) {
+		ret = odp_pktio_link_status(pktio);
+		if (ret < 0 || ret == 1)
+			break;
+		/* link is down, call status again after delay */
+		odp_time_wait_ns(wait_ns);
+	}
+
+	return ret;
+}
+
+/* Create one packet pool and open/start one pktio per test interface.
+ * With two interfaces, pkts are transmitted on iface 0 and received on
+ * iface 1; with a single interface, loopback is used for both directions.
+ * Also captures the src/dst MAC addresses used by make_pkt() and waits
+ * for each link to come up.  Returns 0 on success, -1 on any failure. */
+static int open_pktios(void)
+{
+	odp_pktio_param_t pktio_param;
+	odp_pool_param_t pool_param;
+	odp_pktio_t pktio;
+	odp_pool_t pkt_pool;
+	uint32_t iface;
+	char pool_name[ODP_POOL_NAME_LEN];
+	int rc, ret;
+
+	odp_pool_param_init(&pool_param);
+	pool_param.pkt.num = 10 * MAX_PKTS;
+	pool_param.type = ODP_POOL_PACKET;
+
+	odp_pktio_param_init(&pktio_param);
+	pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT;
+	pktio_param.out_mode = ODP_PKTOUT_MODE_DIRECT;
+
+	for (iface = 0; iface < num_ifaces; iface++) {
+		snprintf(pool_name, sizeof(pool_name), "pkt_pool_%s",
+			 iface_name[iface]);
+
+		pkt_pool = odp_pool_create(pool_name, &pool_param);
+		if (pkt_pool == ODP_POOL_INVALID) {
+			CU_FAIL("unable to create pool");
+			return -1;
+		}
+
+		pools[iface] = pkt_pool;
+		pktio = odp_pktio_open(iface_name[iface], pkt_pool,
+				       &pktio_param);
+		if (pktio == ODP_PKTIO_INVALID)
+			pktio = odp_pktio_lookup(iface_name[iface]);
+		if (pktio == ODP_PKTIO_INVALID) {
+			LOG_ERR("odp_pktio_open() failed\n");
+			return -1;
+		}
+
+		/* Set defaults for PktIn and PktOut queues */
+		(void)odp_pktin_queue_config(pktio, NULL);
+		(void)odp_pktout_queue_config(pktio, NULL);
+		rc = odp_pktio_promisc_mode_set(pktio, true);
+		if (rc != 0)
+			printf("****** promisc_mode_set failed ******\n");
+
+		pktios[iface] = pktio;
+
+		if (odp_pktin_queue(pktio, &pktins[iface], 1) != 1) {
+			odp_pktio_close(pktio);
+			LOG_ERR("odp_pktio_open() failed: no pktin queue\n");
+			return -1;
+		}
+
+		if (odp_pktout_queue(pktio, &pktouts[iface], 1) != 1) {
+			odp_pktio_close(pktio);
+			LOG_ERR("odp_pktio_open() failed: no pktout queue\n");
+			return -1;
+		}
+
+		/* Iface 0 supplies the src MAC; iface 1 (or iface 0 again
+		 * in the loopback case) supplies the dst MAC. */
+		rc = -1;
+		if (iface == 0)
+			rc = odp_pktio_mac_addr(pktio, &src_mac,
+						ODPH_ETHADDR_LEN);
+
+		if ((iface == 1) || (num_ifaces == 1))
+			rc = odp_pktio_mac_addr(pktio, &dst_mac,
+						ODPH_ETHADDR_LEN);
+
+		if (rc != ODPH_ETHADDR_LEN) {
+			LOG_ERR("odp_pktio_mac_addr() failed\n");
+			return -1;
+		}
+	}
+
+	if (2 <= num_ifaces) {
+		xmt_pktout = pktouts[0];
+		rcv_pktin = pktins[1];
+		ret = odp_pktio_start(pktios[1]);
+		if (ret != 0) {
+			LOG_ERR("odp_pktio_start() failed\n");
+			return -1;
+		}
+	} else {
+		xmt_pktout = pktouts[0];
+		rcv_pktin = pktins[0];
+	}
+
+	ret = odp_pktio_start(pktios[0]);
+	if (ret != 0) {
+		LOG_ERR("odp_pktio_start() failed\n");
+		return -1;
+	}
+
+	/* Now wait until the link or links are up. */
+	rc = wait_linkup(pktios[0]);
+	if (rc != 1) {
+		LOG_ERR("link %" PRIX64 " not up\n",
+			odp_pktio_to_u64(pktios[0]));
+		return -1;
+	}
+
+	if (num_ifaces < 2)
+		return 0;
+
+	/* Wait for 2nd link to be up */
+	rc = wait_linkup(pktios[1]);
+	if (rc != 1) {
+		/* Fix: report the interface actually being waited on
+		 * (pktios[1]); the original printed pktios[0] here. */
+		LOG_ERR("link %" PRIX64 " not up\n",
+			odp_pktio_to_u64(pktios[1]));
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Recover the test's per-pkt unique id from a received pkt.  IPv4 pkts
+ * carry it in the IP ident field; IPv6 pkts carry it in the flow label
+ * (matching where make_pkt() stored it).  Optionally reports which IP
+ * version was seen.  Returns 0 on success, -1 for non-IP pkts. */
+static int get_unique_id(odp_packet_t odp_pkt,
+			 uint16_t *unique_id_ptr,
+			 uint8_t *is_ipv4_pkt_ptr)
+{
+	odp_u32be_t be_ver_tc_flow;
+	odp_u16be_t be_ip_ident;
+	odp_bool_t is_ipv4;
+	uint32_t l3_offset, ident_offset, flow_offset, ver_tc_flow;
+	uint16_t unique_id;
+
+	l3_offset = odp_packet_l3_offset(odp_pkt);
+
+	if (odp_packet_has_ipv4(odp_pkt)) {
+		/* For IPv4 pkts use the ident field to store the unique_id. */
+		ident_offset = l3_offset + offsetof(odph_ipv4hdr_t, id);
+
+		odp_packet_copy_to_mem(odp_pkt, ident_offset, 2, &be_ip_ident);
+		unique_id = odp_be_to_cpu_16(be_ip_ident);
+		is_ipv4 = true;
+	} else if (odp_packet_has_ipv6(odp_pkt)) {
+		/* For IPv6 pkts use the flow field to store the unique_id. */
+		flow_offset = l3_offset + offsetof(odph_ipv6hdr_t, ver_tc_flow);
+
+		odp_packet_copy_to_mem(odp_pkt, flow_offset, 4,
+				       &be_ver_tc_flow);
+		ver_tc_flow = odp_be_to_cpu_32(be_ver_tc_flow);
+		unique_id = ver_tc_flow & ODPH_IPV6HDR_FLOW_LABEL_MASK;
+		is_ipv4 = false;
+	} else {
+		return -1;
+	}
+
+	if (unique_id_ptr != NULL)
+		*unique_id_ptr = unique_id;
+
+	if (is_ipv4_pkt_ptr != NULL)
+		*is_ipv4_pkt_ptr = is_ipv4;
+
+	return 0;
+}
+
+/* Read the VLAN TCI (converted to host byte order) from the first VLAN
+ * tag, assumed to sit directly after the Ethernet header.  Returns -1
+ * when the pkt carries no VLAN header, 0 otherwise. */
+static int get_vlan_tci(odp_packet_t odp_pkt, uint16_t *vlan_tci_ptr)
+{
+	odph_vlanhdr_t *vlan_hdr;
+	odph_ethhdr_t *ether_hdr;
+	uint32_t hdr_len;
+	uint16_t vlan_tci;
+
+	/* *TBD* check value of hdr_len? */
+	ether_hdr = odp_packet_l2_ptr(odp_pkt, &hdr_len);
+	vlan_hdr = (odph_vlanhdr_t *)(ether_hdr + 1);
+	vlan_tci = odp_be_to_cpu_16(vlan_hdr->tci);
+	if (vlan_tci_ptr != NULL)
+		*vlan_tci_ptr = vlan_tci;
+
+	return 0;
+}
+
+/* Returns either the TOS field for IPv4 pkts or the TC field for IPv6 pkts.
+ * The hdr_len guards reject pkts whose contiguous L3 header is too short to
+ * contain the field being read.  Returns 0 on success, -1 for non-IP pkts
+ * or truncated headers. */
+static int get_ip_tos(odp_packet_t odp_pkt, uint8_t *tos_ptr)
+{
+	odph_ipv4hdr_t *ipv4_hdr;
+	odph_ipv6hdr_t *ipv6_hdr;
+	uint32_t hdr_len, ver_tc_flow;
+	uint8_t tos, tc;
+
+	if (odp_packet_has_ipv4(odp_pkt)) {
+		ipv4_hdr = odp_packet_l3_ptr(odp_pkt, &hdr_len);
+		if (hdr_len < 12)
+			return -1;
+
+		tos = ipv4_hdr->tos;
+	} else if (odp_packet_has_ipv6(odp_pkt)) {
+		ipv6_hdr = odp_packet_l3_ptr(odp_pkt, &hdr_len);
+		if (hdr_len < 4)
+			return -1;
+
+		ver_tc_flow = odp_be_to_cpu_32(ipv6_hdr->ver_tc_flow);
+		tc = (ver_tc_flow & ODPH_IPV6HDR_TC_MASK)
+			>> ODPH_IPV6HDR_TC_SHIFT;
+		tos = tc;
+	} else {
+		return -1;
+	}
+
+	if (tos_ptr != NULL)
+		*tos_ptr = tos;
+
+	return 0;
+}
+
+/* Build one complete test pkt in a freshly allocated buffer: Ethernet
+ * (optionally + VLAN), IPv4 or IPv6, then TCP or UDP, then payload_data
+ * with pkt_info->pkt_class stamped into the first payload byte.  The
+ * unique_id is stored in the IPv4 ident field or the IPv6 flow label so
+ * the receiver can match pkts back to their transmit descriptors
+ * (see get_unique_id()).  unique_id 0 is reserved/invalid.  Returns the
+ * pkt, or ODP_PACKET_INVALID on bad args or allocation failure. */
+static odp_packet_t make_pkt(odp_pool_t pkt_pool,
+			     uint32_t payload_len,
+			     uint16_t unique_id,
+			     pkt_info_t *pkt_info)
+{
+	odph_vlanhdr_t *vlan_hdr;
+	odph_ipv4hdr_t *ipv4_hdr;
+	odph_ipv6hdr_t *ipv6_hdr;
+	odph_ethhdr_t *eth_hdr;
+	odph_udphdr_t *udp_hdr;
+	odph_tcphdr_t *tcp_hdr;
+	odp_packet_t odp_pkt;
+	uint32_t l4_hdr_len, l3_hdr_len, vlan_hdr_len, l2_hdr_len;
+	uint32_t l4_len, l3_len, l2_len, pkt_len, l3_offset, l4_offset;
+	uint32_t version, tc, flow, ver_tc_flow, app_offset;
+	uint16_t final_ether_type;
+	uint8_t *buf, *pkt_class_ptr, next_hdr;
+	int rc;
+
+	/* Total pkt length is the sum of the selected header sizes plus
+	 * the payload. */
+	l4_hdr_len = pkt_info->use_tcp ? ODPH_TCPHDR_LEN : ODPH_UDPHDR_LEN;
+	l3_hdr_len = pkt_info->use_ipv6 ? ODPH_IPV6HDR_LEN : ODPH_IPV4HDR_LEN;
+	vlan_hdr_len = pkt_info->use_vlan ? ODPH_VLANHDR_LEN : 0;
+	l2_hdr_len = ODPH_ETHHDR_LEN + vlan_hdr_len;
+	l4_len = l4_hdr_len + payload_len;
+	l3_len = l3_hdr_len + l4_len;
+	l2_len = l2_hdr_len + l3_len;
+	pkt_len = l2_len;
+	if (unique_id == 0) {
+		LOG_ERR("make_pkt called with invalid unique_id of 0\n");
+		return ODP_PACKET_INVALID;
+	}
+
+	odp_pkt = odp_packet_alloc(pkt_pool, pkt_len);
+	if (odp_pkt == ODP_PACKET_INVALID)
+		return ODP_PACKET_INVALID;
+
+	/* NOTE(review): headers are written through odp_packet_data(),
+	 * which assumes the whole pkt fits in the first segment —
+	 * confirm against the pool's segment size. */
+	buf = odp_packet_data(odp_pkt);
+
+	/* Ethernet Header */
+	odp_packet_l2_offset_set(odp_pkt, 0);
+	eth_hdr = (odph_ethhdr_t *)buf;
+	final_ether_type = pkt_info->use_ipv6 ? ODPH_ETHTYPE_IPV6
+		: ODPH_ETHTYPE_IPV4;
+	memcpy(eth_hdr->src.addr, &src_mac, ODPH_ETHADDR_LEN);
+	memcpy(eth_hdr->dst.addr, &dst_mac, ODPH_ETHADDR_LEN);
+
+	/* Vlan Header */
+	if (pkt_info->use_vlan) {
+		odp_packet_has_vlan_set(odp_pkt, 1);
+		eth_hdr->type = odp_cpu_to_be_16(ODPH_ETHTYPE_VLAN);
+		vlan_hdr = (odph_vlanhdr_t *)(eth_hdr + 1);
+		vlan_hdr->tci = odp_cpu_to_be_16(pkt_info->vlan_tci);
+		vlan_hdr->type = odp_cpu_to_be_16(final_ether_type);
+	} else {
+		eth_hdr->type = odp_cpu_to_be_16(final_ether_type);
+	}
+
+	l3_offset = l2_hdr_len;
+	next_hdr = pkt_info->use_tcp ? ODPH_IPPROTO_TCP : ODPH_IPPROTO_UDP;
+	odp_packet_l3_offset_set(odp_pkt, l3_offset);
+	if (pkt_info->use_ipv6) {
+		/* IPv6 Header - unique_id goes into the flow label. */
+		odp_packet_has_ipv6_set(odp_pkt, 1);
+		version = ODPH_IPV6 << ODPH_IPV6HDR_VERSION_SHIFT;
+		tc = pkt_info->ip_tos << ODPH_IPV6HDR_TC_SHIFT;
+		flow = unique_id << ODPH_IPV6HDR_FLOW_LABEL_SHIFT;
+		ver_tc_flow = version | tc | flow;
+
+		ipv6_hdr = (odph_ipv6hdr_t *)(buf + l3_offset);
+		ipv6_hdr->ver_tc_flow = odp_cpu_to_be_32(ver_tc_flow);
+		ipv6_hdr->payload_len = odp_cpu_to_be_16(l4_len);
+		ipv6_hdr->next_hdr = next_hdr;
+		ipv6_hdr->hop_limit = DEFAULT_TTL;
+		memcpy(ipv6_hdr->src_addr, IPV6_SRC_ADDR, ODPH_IPV6ADDR_LEN);
+		memcpy(ipv6_hdr->dst_addr, IPV6_DST_ADDR, ODPH_IPV6ADDR_LEN);
+	} else {
+		/* IPv4 Header - unique_id goes into the ident field. */
+		odp_packet_has_ipv4_set(odp_pkt, 1);
+		ipv4_hdr = (odph_ipv4hdr_t *)(buf + l3_offset);
+		ipv4_hdr->ver_ihl = (ODPH_IPV4 << 4) | ODPH_IPV4HDR_IHL_MIN;
+		ipv4_hdr->tos = pkt_info->ip_tos;
+		ipv4_hdr->tot_len = odp_cpu_to_be_16(l3_len);
+		ipv4_hdr->id = odp_cpu_to_be_16(unique_id);
+		ipv4_hdr->frag_offset = 0;
+		ipv4_hdr->ttl = DEFAULT_TTL;
+		ipv4_hdr->proto = next_hdr;
+		ipv4_hdr->chksum = 0;
+		memcpy(&ipv4_hdr->src_addr, IPV4_SRC_ADDR, ODPH_IPV4ADDR_LEN);
+		memcpy(&ipv4_hdr->dst_addr, IPV4_DST_ADDR, ODPH_IPV4ADDR_LEN);
+	}
+
+	l4_offset = l3_offset + l3_hdr_len;
+	odp_packet_l4_offset_set(odp_pkt, l4_offset);
+	tcp_hdr = (odph_tcphdr_t *)(buf + l4_offset);
+	udp_hdr = (odph_udphdr_t *)(buf + l4_offset);
+
+	if (pkt_info->use_tcp) {
+		/* TCP Header */
+		odp_packet_has_tcp_set(odp_pkt, 1);
+		tcp_hdr->src_port = odp_cpu_to_be_16(DEFAULT_TCP_SRC_PORT);
+		tcp_hdr->dst_port = odp_cpu_to_be_16(DEFAULT_TCP_DST_PORT);
+		tcp_hdr->seq_no = odp_cpu_to_be_32(cpu_tcp_seq_num);
+		tcp_hdr->ack_no = odp_cpu_to_be_32(DEFAULT_TCP_ACK_NUM);
+		tcp_hdr->window = odp_cpu_to_be_16(DEFAULT_TCP_WINDOW);
+		tcp_hdr->cksm = 0;
+		tcp_hdr->urgptr = 0;
+
+		tcp_hdr->doffset_flags = 0;
+		tcp_hdr->hl = 5;
+		tcp_hdr->ack = 1;
+		/* Advance the global sequence number as a real sender would. */
+		cpu_tcp_seq_num += payload_len;
+	} else {
+		/* UDP Header */
+		odp_packet_has_udp_set(odp_pkt, 1);
+		udp_hdr->src_port = odp_cpu_to_be_16(DEFAULT_UDP_SRC_PORT);
+		udp_hdr->dst_port = odp_cpu_to_be_16(DEFAULT_UDP_DST_PORT);
+		udp_hdr->length = odp_cpu_to_be_16(l4_len);
+		udp_hdr->chksum = 0;
+	}
+
+	app_offset = l4_offset + l4_hdr_len;
+	rc = odp_packet_copy_from_mem(odp_pkt, app_offset, payload_len,
+				      payload_data);
+	CU_ASSERT_FATAL(rc == 0);
+
+	/* Tag the first payload byte with the pkt class so the receiver
+	 * can classify without parsing. */
+	pkt_class_ptr = odp_packet_offset(odp_pkt, app_offset, NULL, NULL);
+	CU_ASSERT_FATAL(pkt_class_ptr != NULL);
+	*pkt_class_ptr = pkt_info->pkt_class;
+
+	/* Calculate and insert checksums. First the IPv4 header checksum. */
+	if (!pkt_info->use_ipv6)
+		odph_ipv4_csum_update(odp_pkt);
+
+	/* Next the UDP/TCP checksum. */
+	if (odph_udp_tcp_chksum(odp_pkt, ODPH_CHKSUM_GENERATE, NULL) != 0)
+		LOG_ERR("odph_udp_tcp_chksum failed\n");
+
+	return odp_pkt;
+}
+
+/* Linear search over the sent-pkt descriptors for the one carrying the
+ * given unique id.  Returns NULL for id 0 (never assigned by make_pkts)
+ * or when no sent pkt matches. */
+static xmt_pkt_desc_t *find_matching_xmt_pkt_desc(uint16_t unique_id)
+{
+	uint32_t idx;
+
+	if (unique_id == 0)
+		return NULL;
+
+	for (idx = 0; idx < num_pkts_sent; idx++)
+		if (xmt_pkt_descs[idx].xmt_unique_id == unique_id)
+			return &xmt_pkt_descs[idx];
+
+	return NULL;
+}
+
+/* Receive pkts from pktin until num_pkts have arrived and the TM system
+ * is idle, or a rate-derived timeout expires.  The loop only timestamps
+ * arrivals; all matching of rcv pkts back to their xmt descriptors (via
+ * the unique id) is deferred to a second pass so the rcv timestamps stay
+ * as accurate as possible.  Returns the number of pkts received, or a
+ * negative value on a receive or parse error. */
+static int receive_pkts(odp_tm_t odp_tm,
+			odp_pktin_queue_t pktin,
+			uint32_t num_pkts,
+			uint64_t rate_bps)
+{
+	xmt_pkt_desc_t *xmt_pkt_desc;
+	rcv_pkt_desc_t *rcv_pkt_desc;
+	odp_packet_t rcv_pkt;
+	odp_time_t start_time, current_time, duration, xmt_time;
+	odp_time_t rcv_time, delta_time;
+	uint64_t temp1, timeout_ns, duration_ns, delta_ns;
+	uint32_t pkts_rcvd, rcv_idx, l4_offset, l4_hdr_len, app_offset;
+	uint16_t unique_id;
+	uint8_t *pkt_class_ptr, pkt_class, is_ipv4_pkt;
+	int rc;
+
+	/* Timeout scales with the expected wire time of num_pkts at
+	 * rate_bps, with a 4x margin plus a fixed floor. */
+	temp1 = (1000000ULL * 10000ULL * (uint64_t)num_pkts) / rate_bps;
+	timeout_ns = 1000ULL * ((4ULL * temp1) + 10000ULL);
+
+	pkts_rcvd = 0;
+	start_time = odp_time_local();
+	duration_ns = 0;
+
+	while ((pkts_rcvd < num_pkts) || (!odp_tm_is_idle(odp_tm))) {
+		rc = odp_pktin_recv(pktin, &rcv_pkts[pkts_rcvd], 1);
+		if (rc < 0)
+			return rc;
+
+		current_time = odp_time_local();
+		duration = odp_time_diff(current_time, start_time);
+		duration_ns = odp_time_to_ns(duration);
+		if (rc == 1)
+			rcv_pkt_descs[pkts_rcvd++].rcv_time = current_time;
+		else if (timeout_ns < duration_ns)
+			break;
+	}
+
+	/* Now go through matching the rcv pkts to the xmt pkts, determining
+	 * which xmt_pkts were lost and for the ones that did arrive, how
+	 * long did they take. We don't do this work while receiving the pkts
+	 * in the loop above because we want to try to get as accurate a
+	 * rcv timestamp as possible. */
+	for (rcv_idx = 0; rcv_idx < pkts_rcvd; rcv_idx++) {
+		rcv_pkt = rcv_pkts[rcv_idx];
+		rcv_pkt_desc = &rcv_pkt_descs[rcv_idx];
+
+		if (odp_packet_has_error(rcv_pkt)) {
+			/* Pack the error kinds into one flags byte:
+			 * bit0=any, bit1=l2, bit2=l3, bit3=l4. */
+			rcv_pkt_desc->errors = 0x01 |
+				(odp_packet_has_l2_error(rcv_pkt) << 1) |
+				(odp_packet_has_l3_error(rcv_pkt) << 2) |
+				(odp_packet_has_l4_error(rcv_pkt) << 3);
+
+			LOG_ERR("received a pkt with the following errors\n");
+			LOG_ERR("    l2_err=%u l3_err=%u l4_err=%u. Skipping\n",
+				(rcv_pkt_desc->errors >> 1) & 0x1,
+				(rcv_pkt_desc->errors >> 2) & 0x1,
+				(rcv_pkt_desc->errors >> 3) & 0x1);
+		}
+
+		unique_id = 0;
+		rc = get_unique_id(rcv_pkt, &unique_id, &is_ipv4_pkt);
+		if (rc != 0) {
+			LOG_ERR("received a non IPv4/IPv6 pkt\n");
+			return -1;
+		}
+
+		rcv_pkt_desc->rcv_unique_id = unique_id;
+		rcv_pkt_desc->is_ipv4_pkt = is_ipv4_pkt;
+		if (odp_packet_has_udp(rcv_pkt))
+			l4_hdr_len = ODPH_UDPHDR_LEN;
+		else if (odp_packet_has_tcp(rcv_pkt))
+			l4_hdr_len = ODPH_TCPHDR_LEN;
+		else
+			l4_hdr_len = 0;
+
+		/* The pkt class byte sits at the start of the payload. */
+		l4_offset = odp_packet_l4_offset(rcv_pkt);
+		app_offset = l4_offset + l4_hdr_len;
+		pkt_class_ptr = odp_packet_offset(rcv_pkt, app_offset,
+						  NULL, NULL);
+		if (pkt_class_ptr != NULL)
+			rcv_pkt_desc->pkt_class = *pkt_class_ptr;
+
+		xmt_pkt_desc = find_matching_xmt_pkt_desc(unique_id);
+		if (xmt_pkt_desc != NULL) {
+			/* Cross-link rcv and xmt descriptors and record
+			 * the measured one-way latency. */
+			rcv_pkt_desc->xmt_pkt_desc = xmt_pkt_desc;
+			rcv_pkt_desc->matched = true;
+
+			xmt_time = xmt_pkt_desc->xmt_time;
+			rcv_time = rcv_pkt_desc->rcv_time;
+			pkt_class = rcv_pkt_desc->pkt_class;
+			delta_time = odp_time_diff(rcv_time, xmt_time);
+			delta_ns = odp_time_to_ns(delta_time);
+
+			rcv_pkt_desc->xmt_idx = xmt_pkt_desc->xmt_idx;
+			xmt_pkt_desc->rcv_time = rcv_time;
+			xmt_pkt_desc->delta_ns = delta_ns;
+			xmt_pkt_desc->pkt_class = pkt_class;
+			xmt_pkt_desc->was_rcvd = 1;
+		}
+	}
+
+	return pkts_rcvd;
+}
+
+/* Debug helper: print a one-line summary (pkt handle, matched xmt index,
+ * pkt class, IP version, unique id) for each received pkt in the given
+ * inclusive index range.  xmt_idx is -1 for unmatched pkts. */
+static void dump_rcvd_pkts(uint32_t first_rcv_idx, uint32_t last_rcv_idx)
+{
+	rcv_pkt_desc_t *rcv_pkt_desc;
+	odp_packet_t rcv_pkt;
+	uint32_t rcv_idx;
+	int32_t xmt_idx;
+	uint16_t unique_id;
+	uint8_t is_ipv4;
+	int rc;
+
+	for (rcv_idx = first_rcv_idx; rcv_idx <= last_rcv_idx; rcv_idx++) {
+		rcv_pkt = rcv_pkts[rcv_idx];
+		rcv_pkt_desc = &rcv_pkt_descs[rcv_idx];
+		rc = get_unique_id(rcv_pkt, &unique_id, &is_ipv4);
+		xmt_idx = -1;
+		if (rcv_pkt_desc->matched)
+			xmt_idx = rcv_pkt_desc->xmt_pkt_desc->xmt_idx;
+
+		printf("rcv_idx=%u odp_pkt=0x%" PRIX64 " xmt_idx=%d "
+		       "pkt_class=%u is_ipv4=%u unique_id=0x%X (rc=%d)\n",
+		       rcv_idx, odp_packet_to_u64(rcv_pkt), xmt_idx,
+		       rcv_pkt_desc->pkt_class, is_ipv4, unique_id, rc);
+	}
+}
+
+/* Release every still-held received pkt and clear its slot so a later
+ * pass (or a second call) cannot double-free it. */
+static void free_rcvd_pkts(void)
+{
+	uint32_t idx;
+
+	for (idx = 0; idx < num_rcv_pkts; idx++) {
+		odp_packet_t pkt = rcv_pkts[idx];
+
+		if (pkt == ODP_PACKET_INVALID)
+			continue;
+
+		odp_packet_free(pkt);
+		rcv_pkts[idx] = ODP_PACKET_INVALID;
+	}
+}
+
+/* Drain and free any pkts still in flight after a test: keep polling
+ * pktin for at least 10 ms, and stop early (after that minimum) once the
+ * TM system is idle and nothing more arrives, but never poll longer than
+ * 100 ms total. */
+static void flush_leftover_pkts(odp_tm_t odp_tm, odp_pktin_queue_t pktin)
+{
+	odp_packet_t rcv_pkt;
+	odp_time_t start_time, current_time, duration;
+	uint64_t min_timeout_ns, max_timeout_ns, duration_ns;
+	int rc;
+
+	/* Set the timeout to be at least 10 milliseconds and at most 100
+	 * milliseconds */
+	min_timeout_ns = 10 * ODP_TIME_MSEC_IN_NS;
+	max_timeout_ns = 100 * ODP_TIME_MSEC_IN_NS;
+	start_time = odp_time_local();
+
+	while (true) {
+		rc = odp_pktin_recv(pktin, &rcv_pkt, 1);
+		if (rc == 1)
+			odp_packet_free(rcv_pkt);
+
+		current_time = odp_time_local();
+		duration = odp_time_diff(current_time, start_time);
+		duration_ns = odp_time_to_ns(duration);
+
+		if (max_timeout_ns <= duration_ns)
+			break;
+		else if (duration_ns < min_timeout_ns)
+			;	/* keep polling for the minimum window */
+		else if ((odp_tm_is_idle(odp_tm)) && (rc == 0))
+			break;
+
+		/* Busy wait here a little bit to prevent overwhelming the
+		 * odp_pktin_recv logic. */
+		busy_wait(10000);
+	}
+}
+
+/* Reset all per-test transmit/receive bookkeeping: frees any pkts left
+ * from the previous test, zeroes the xmt/rcv arrays and counters, and
+ * zero-initializes *pkt_info with the default TOS (all other fields,
+ * including use_vlan/use_ipv6/use_tcp, default to 0/false). */
+static void init_xmt_pkts(pkt_info_t *pkt_info)
+{
+	memset(xmt_pkts, 0, sizeof(xmt_pkts));
+	memset(xmt_pkt_descs, 0, sizeof(xmt_pkt_descs));
+	num_pkts_made = 0;
+	num_pkts_sent = 0;
+
+	free_rcvd_pkts();
+	memset(rcv_pkts, 0, sizeof(rcv_pkts));
+	memset(rcv_pkt_descs, 0, sizeof(rcv_pkt_descs));
+	num_rcv_pkts = 0;
+
+	memset(rcv_gaps, 0, sizeof(rcv_gaps));
+	rcv_gap_cnt = 0;
+	memset(pkt_info, 0, sizeof(pkt_info_t));
+	pkt_info->ip_tos = DEFAULT_TOS;
+}
+
+/* Build num_pkts test pkts of total length pkt_len each, assigning every
+ * pkt a fresh unique id and recording it in a new xmt descriptor, then
+ * stamp the TM metadata (color, drop eligibility, shaper length adjust)
+ * on each pkt.  Returns 0 on success, -1 if any allocation fails.
+ * NOTE(review): payload_len = pkt_len - hdrs_len underflows if pkt_len is
+ * smaller than the combined header length — callers are assumed to pass
+ * pkt_len >= hdrs_len; confirm at call sites. */
+static int make_pkts(uint32_t num_pkts,
+		     uint32_t pkt_len,
+		     pkt_info_t *pkt_info)
+{
+	xmt_pkt_desc_t *xmt_pkt_desc;
+	odp_packet_t odp_pkt;
+	uint32_t l4_hdr_len, l3_hdr_len, vlan_hdr_len, l2_hdr_len;
+	uint32_t hdrs_len, payload_len, idx, unique_id, xmt_pkt_idx;
+
+	l4_hdr_len = pkt_info->use_tcp ? ODPH_TCPHDR_LEN : ODPH_UDPHDR_LEN;
+	l3_hdr_len = pkt_info->use_ipv6 ? ODPH_IPV6HDR_LEN : ODPH_IPV4HDR_LEN;
+	vlan_hdr_len = pkt_info->use_vlan ? ODPH_VLANHDR_LEN : 0;
+	l2_hdr_len = ODPH_ETHHDR_LEN + vlan_hdr_len;
+
+	hdrs_len = l2_hdr_len + l3_hdr_len + l4_hdr_len;
+	payload_len = pkt_len - hdrs_len;
+
+	for (idx = 0; idx < num_pkts; idx++) {
+		unique_id = cpu_unique_id++;
+		xmt_pkt_idx = num_pkts_made++;
+		xmt_pkt_desc = &xmt_pkt_descs[xmt_pkt_idx];
+		xmt_pkt_desc->pkt_len = pkt_len;
+		xmt_pkt_desc->xmt_unique_id = unique_id;
+		xmt_pkt_desc->pkt_class = pkt_info->pkt_class;
+
+		odp_pkt = make_pkt(pools[0], payload_len, unique_id, pkt_info);
+		if (odp_pkt == ODP_PACKET_INVALID)
+			return -1;
+
+		odp_packet_color_set(odp_pkt, pkt_info->pkt_color);
+		odp_packet_drop_eligible_set(odp_pkt, pkt_info->drop_eligible);
+		odp_packet_shaper_len_adjust_set(odp_pkt, SHAPER_LEN_ADJ);
+
+		xmt_pkts[xmt_pkt_idx] = odp_pkt;
+	}
+
+	return 0;
+}
+
/* Transmit the next num_pkts previously-made packets on the given tm_queue,
 * stamping each accepted packet's descriptor with its xmt time and queue.
 * Rejected packets are freed and their xmt_pkts[] slot invalidated.
 * Returns the number of packets the TM system accepted. */
static uint32_t send_pkts(odp_tm_queue_t tm_queue, uint32_t num_pkts)
{
	xmt_pkt_desc_t *xmt_pkt_desc;
	odp_packet_t odp_pkt;
	uint32_t idx, xmt_pkt_idx, pkts_sent;
	int rc;

	/* Now send the pkts as fast as we can. */
	pkts_sent = 0;
	for (idx = 0; idx < num_pkts; idx++) {
		/* num_pkts_sent is a global cursor into xmt_pkts[], so
		 * repeated calls continue where the last one stopped. */
		xmt_pkt_idx = num_pkts_sent;
		odp_pkt = xmt_pkts[xmt_pkt_idx];
		xmt_pkt_desc = &xmt_pkt_descs[xmt_pkt_idx];

		/* Alternate calling with odp_tm_enq and odp_tm_enq_with_cnt */
		if ((idx & 1) == 0)
			rc = odp_tm_enq(tm_queue, odp_pkt);
		else
			rc = odp_tm_enq_with_cnt(tm_queue, odp_pkt);

		xmt_pkt_desc->xmt_idx = xmt_pkt_idx;
		/* Both enq flavors report success with a non-negative rc. */
		if (0 <= rc) {
			xmt_pkt_desc->xmt_time = odp_time_local();
			xmt_pkt_desc->tm_queue = tm_queue;
			pkts_sent++;
		} else {
			odp_packet_free(odp_pkt);
			xmt_pkts[xmt_pkt_idx] = ODP_PACKET_INVALID;
		}

		num_pkts_sent++;
	}

	return pkts_sent;
}
+
/* Count how many of the sent packets were received, verifying that their
 * receive timestamps are monotonically non-decreasing in send order.
 * Returns the received count, or 0 as soon as any packet is found to have
 * arrived out of order. */
static uint32_t pkts_rcvd_in_send_order(void)
{
	xmt_pkt_desc_t *xmt_pkt_desc;
	odp_time_t last_rcv_time, rcv_time;
	uint32_t xmt_pkt_idx, pkts_rcvd;

	pkts_rcvd = 0;
	last_rcv_time = ODP_TIME_NULL;
	for (xmt_pkt_idx = 0; xmt_pkt_idx < num_pkts_sent; xmt_pkt_idx++) {
		xmt_pkt_desc = &xmt_pkt_descs[xmt_pkt_idx];
		rcv_time = xmt_pkt_desc->rcv_time;
		if (xmt_pkt_desc->was_rcvd != 0) {
			/* Skip the ordering test for the first rcvd pkt
			 * since there is no previous timestamp yet. */
			if ((pkts_rcvd != 0) &&
			    (odp_time_cmp(rcv_time, last_rcv_time) < 0))
				return 0;

			pkts_rcvd++;
			last_rcv_time = xmt_pkt_desc->rcv_time;
		}
	}

	return pkts_rcvd;
}
+
/* Linear search for unique_id in unique_id_list.  Returns its index, or
 * -1 when the id is not present (including when the list is empty). */
static int unique_id_list_idx(uint32_t unique_id,
			      uint32_t unique_id_list[],
			      uint32_t unique_id_list_len)
{
	uint32_t pos = 0;

	while (pos < unique_id_list_len) {
		if (unique_id_list[pos] == unique_id)
			return pos;

		pos++;
	}

	return -1;
}
+
/* Walk the received-packet descriptors (optionally filtered by pkt_class)
 * and count how many consecutive matched packets appear in the same
 * relative order as in unique_id_list[].  The count starts at 1 because the
 * first matched packet has no predecessor to compare against.
 * NOTE(review): pkts_out_of_order is tallied but never returned or used —
 * presumably kept for debugging; confirm before removing. */
static uint32_t pkts_rcvd_in_given_order(uint32_t unique_id_list[],
					 uint32_t unique_id_list_len,
					 uint8_t pkt_class,
					 odp_bool_t match_pkt_class,
					 odp_bool_t ignore_pkt_class)
{
	rcv_pkt_desc_t *rcv_pkt_desc;
	odp_bool_t is_match;
	uint32_t rcv_pkt_idx, pkts_in_order, pkts_out_of_order;
	uint32_t rcv_unique_id;
	int last_pkt_idx, pkt_idx;

	pkts_in_order = 1;
	pkts_out_of_order = 0;
	last_pkt_idx = -1;
	pkt_idx = -1;

	for (rcv_pkt_idx = 0; rcv_pkt_idx < num_rcv_pkts; rcv_pkt_idx++) {
		rcv_pkt_desc = &rcv_pkt_descs[rcv_pkt_idx];

		/* ignore_pkt_class matches everything; otherwise
		 * match_pkt_class selects equality vs inequality. */
		if (ignore_pkt_class)
			is_match = true;
		else if (match_pkt_class)
			is_match = rcv_pkt_desc->pkt_class == pkt_class;
		else
			is_match = rcv_pkt_desc->pkt_class != pkt_class;

		if (is_match) {
			rcv_unique_id = rcv_pkt_desc->rcv_unique_id;
			pkt_idx = unique_id_list_idx(rcv_unique_id,
						     unique_id_list,
						     unique_id_list_len);
			if (0 <= pkt_idx) {
				if (0 <= last_pkt_idx) {
					if (last_pkt_idx < pkt_idx)
						pkts_in_order++;
					else
						pkts_out_of_order++;
				}

				last_pkt_idx = pkt_idx;
			}
		}
	}

	return pkts_in_order;
}
+
/* Append the inter-arrival gap (in microseconds) between two consecutive
 * receive timestamps to the global rcv_gaps[] array.  A gap of 0 is
 * recorded if the timestamps are not in increasing order.
 * NOTE(review): no bounds check on rcv_gap_cnt — assumes rcv_gaps[] is
 * sized for the max number of received pkts; confirm against its decl. */
static inline void record_rcv_gap(odp_time_t rcv_time, odp_time_t last_rcv_time)
{
	odp_time_t delta_time;
	uint64_t delta_ns;
	uint32_t rcv_gap;

	rcv_gap = 0;
	if (odp_time_cmp(last_rcv_time, rcv_time) <= 0) {
		delta_time = odp_time_diff(rcv_time, last_rcv_time);
		delta_ns = odp_time_to_ns(delta_time);
		rcv_gap = delta_ns / 1000;
	}

	/* Note that rcv_gap is in units of microseconds. */
	rcv_gaps[rcv_gap_cnt++] = rcv_gap;
}
+
/* qsort() comparator for uint32_t rcv gap values: ascending order. */
static int rcv_gap_cmp(const void *left_ptr, const void *right_ptr)
{
	const uint32_t lhs = *(const uint32_t *)left_ptr;
	const uint32_t rhs = *(const uint32_t *)right_ptr;

	/* (a > b) - (a < b) yields exactly -1, 0 or 1 without any
	 * subtraction overflow risk. */
	return (lhs > rhs) - (lhs < rhs);
}
+
+static inline void calc_rcv_stats(rcv_stats_t *rcv_stats,
+ uint32_t initial_drop_percent,
+ uint32_t ending_drop_percent)
+{
+ uint32_t first_rcv_gap_idx, last_rcv_gap_idx, idx, rcv_gap;
+
+ /* Sort the rcv_gaps, and then drop the outlying x values before doing
+ * doing the rcv stats on the remaining */
+ qsort(&rcv_gaps[0], rcv_gap_cnt, sizeof(uint32_t), rcv_gap_cmp);
+
+ /* Next we drop the outlying values before doing doing the rcv stats
+ * on the remaining rcv_gap values. The number of initial (very low)
+ * rcv_gaps dropped and the number of ending (very high) rcv_gaps
+ * drops is based on the percentages passed in. */
+ first_rcv_gap_idx = (rcv_gap_cnt * initial_drop_percent) / 100;
+ last_rcv_gap_idx = (rcv_gap_cnt * (100 - ending_drop_percent)) / 100;
+ for (idx = first_rcv_gap_idx; idx <= last_rcv_gap_idx; idx++) {
+ rcv_gap = rcv_gaps[idx];
+ rcv_stats->min_rcv_gap = MIN(rcv_stats->min_rcv_gap, rcv_gap);
+ rcv_stats->max_rcv_gap = MAX(rcv_stats->max_rcv_gap, rcv_gap);
+ rcv_stats->total_rcv_gap += rcv_gap;
+ rcv_stats->total_rcv_gap_squared += rcv_gap * rcv_gap;
+ rcv_stats->num_samples++;
+ }
+}
+
+static int rcv_rate_stats(rcv_stats_t *rcv_stats, uint8_t pkt_class)
+{
+ xmt_pkt_desc_t *xmt_pkt_desc;
+ odp_time_t last_rcv_time, rcv_time;
+ uint32_t pkt_idx, pkts_rcvd, num;
+ uint32_t avg, variance, std_dev;
+
+ pkts_rcvd = 0;
+ last_rcv_time = ODP_TIME_NULL;
+ memset(rcv_stats, 0, sizeof(rcv_stats_t));
+ rcv_stats->min_rcv_gap = 1000000000;
+
+ for (pkt_idx = 0; pkt_idx < num_pkts_sent; pkt_idx++) {
+ xmt_pkt_desc = &xmt_pkt_descs[pkt_idx];
+ if ((xmt_pkt_desc->was_rcvd != 0) &&
+ (xmt_pkt_desc->pkt_class == pkt_class)) {
+ rcv_time = xmt_pkt_desc->rcv_time;
+ if (pkts_rcvd != 0)
+ record_rcv_gap(rcv_time, last_rcv_time);
+ pkts_rcvd++;
+ last_rcv_time = rcv_time;
+ }
+ }
+
+ if (pkts_rcvd == 0)
+ return -1;
+
+ calc_rcv_stats(rcv_stats, INITIAL_RCV_GAP_DROP, ENDING_RCV_GAP_DROP);
+ num = rcv_stats->num_samples;
+ if (num == 0)
+ return -1;
+
+ avg = rcv_stats->total_rcv_gap / num;
+ variance = (rcv_stats->total_rcv_gap_squared / num) - avg * avg;
+ std_dev = (uint32_t)sqrt((double)variance);
+
+ rcv_stats->avg_rcv_gap = avg;
+ rcv_stats->std_dev_gap = std_dev;
+ return 0;
+}
+
/* Create one tm_queue with the given priority, attach shaper/threshold/wred
 * profiles to it when priority == 0, store its handle in
 * queue_desc->tm_queues[priority] and connect it to tm_node.
 * Returns 0 on success, -1 on failure (the queue is destroyed if the
 * connect step fails). */
static int create_tm_queue(odp_tm_t odp_tm,
			   odp_tm_node_t tm_node,
			   uint32_t node_idx,
			   tm_queue_desc_t *queue_desc,
			   uint32_t priority)
{
	odp_tm_queue_params_t queue_params;
	odp_tm_queue_t tm_queue;
	odp_tm_wred_t green_profile, yellow_profile, red_profile;
	int rc;

	odp_tm_queue_params_init(&queue_params);
	queue_params.priority = priority;
	/* Only the highest priority (0) queue gets the full profile set. */
	if (priority == 0) {
		green_profile = wred_profiles[node_idx][PKT_GREEN];
		yellow_profile = wred_profiles[node_idx][PKT_YELLOW];
		red_profile = wred_profiles[node_idx][PKT_RED];

		queue_params.shaper_profile = shaper_profiles[0];
		queue_params.threshold_profile = threshold_profiles[0];
		queue_params.wred_profile[PKT_GREEN] = green_profile;
		queue_params.wred_profile[PKT_YELLOW] = yellow_profile;
		queue_params.wred_profile[PKT_RED] = red_profile;
	}

	tm_queue = odp_tm_queue_create(odp_tm, &queue_params);
	if (tm_queue == ODP_TM_INVALID) {
		LOG_ERR("odp_tm_queue_create() failed\n");
		return -1;
	}

	queue_desc->tm_queues[priority] = tm_queue;
	rc = odp_tm_queue_connect(tm_queue, tm_node);
	if (rc != 0) {
		LOG_ERR("odp_tm_queue_connect() failed\n");
		odp_tm_queue_destroy(tm_queue);
		return -1;
	}

	return 0;
}
+
+static int destroy_tm_queue(odp_tm_queue_t tm_queue)
+{
+ odp_tm_queue_disconnect(tm_queue);
+ return odp_tm_queue_destroy(tm_queue);
+}
+
+static tm_node_desc_t *create_tm_node(odp_tm_t odp_tm,
+ uint32_t level,
+ uint32_t num_levels,
+ uint32_t node_idx,
+ tm_node_desc_t *parent_node_desc)
+{
+ odp_tm_node_params_t node_params;
+ tm_queue_desc_t *queue_desc;
+ tm_node_desc_t *node_desc;
+ odp_tm_wred_t green_profile, yellow_profile, red_profile;
+ odp_tm_node_t tm_node, parent_node;
+ uint32_t node_desc_size, queue_desc_size, priority;
+ char node_name[TM_NAME_LEN];
+ int rc;
+
+ odp_tm_node_params_init(&node_params);
+ node_params.shaper_profile = ODP_TM_INVALID;
+ node_params.threshold_profile = ODP_TM_INVALID;
+ node_params.wred_profile[PKT_GREEN] = ODP_TM_INVALID;
+ node_params.wred_profile[PKT_YELLOW] = ODP_TM_INVALID;
+ node_params.wred_profile[PKT_RED] = ODP_TM_INVALID;
+ if (node_idx == 0) {
+ node_params.shaper_profile = shaper_profiles[0];
+ node_params.threshold_profile = threshold_profiles[0];
+ if (level == num_levels) {
+ green_profile = wred_profiles[node_idx][PKT_GREEN];
+ yellow_profile = wred_profiles[node_idx][PKT_YELLOW];
+ red_profile = wred_profiles[node_idx][PKT_RED];
+
+ node_params.wred_profile[PKT_GREEN] = green_profile;
+ node_params.wred_profile[PKT_YELLOW] = yellow_profile;
+ node_params.wred_profile[PKT_RED] = red_profile;
+ }
+ }
+
+ node_params.max_fanin = FANIN_RATIO;
+ node_params.level = level;
+ if (parent_node_desc == NULL)
+ snprintf(node_name, sizeof(node_name), "node_%u",
+ node_idx + 1);
+ else
+ snprintf(node_name, sizeof(node_name), "%s_%u",
+ parent_node_desc->node_name, node_idx + 1);
+
+ tm_node = odp_tm_node_create(odp_tm, node_name, &node_params);
+ if (tm_node == ODP_TM_INVALID) {
+ LOG_ERR("odp_tm_node_create() failed @ level=%u\n",
+ level);
+ return NULL;
+ }
+
+ /* Now connect this node to the lower level "parent" node. */
+ if (level == 0 || !parent_node_desc)
+ parent_node = ODP_TM_ROOT;
+ else
+ parent_node = parent_node_desc->node;
+
+ rc = odp_tm_node_connect(tm_node, parent_node);
+ if (rc != 0) {
+ LOG_ERR("odp_tm_node_connect() failed @ level=%u\n",
+ level);
+ odp_tm_node_destroy(tm_node);
+ return NULL;
+ }
+
+ node_desc_size = sizeof(tm_node_desc_t) +
+ sizeof(odp_tm_node_t) * FANIN_RATIO;
+ node_desc = malloc(node_desc_size);
+ memset(node_desc, 0, node_desc_size);
+ node_desc->level = level;
+ node_desc->node_idx = node_idx;
+ node_desc->num_children = FANIN_RATIO;
+ node_desc->node = tm_node;
+ node_desc->parent_node = parent_node;
+ node_desc->node_name = strdup(node_name);
+
+ /* Finally if the level is the highest then make fanin_ratio tm_queues
+ * feeding this node. */
+ if (level < (num_levels - 1))
+ return node_desc;
+
+ node_desc->num_children = 0;
+ queue_desc_size = sizeof(tm_queue_desc_t) +
+ sizeof(odp_tm_queue_t) * NUM_QUEUES_PER_NODE;
+ queue_desc = malloc(queue_desc_size);
+ memset(queue_desc, 0, queue_desc_size);
+ queue_desc->num_queues = NUM_QUEUES_PER_NODE;
+ node_desc->queue_desc = queue_desc;
+
+ for (priority = 0; priority < NUM_QUEUES_PER_NODE; priority++) {
+ rc = create_tm_queue(odp_tm, tm_node, node_idx, queue_desc,
+ priority);
+ if (rc != 0) {
+ LOG_ERR("create_tm_queue() failed @ level=%u\n",
+ level);
+ while (priority > 0)
+ (void)destroy_tm_queue
+ (queue_desc->tm_queues[--priority]);
+ free(queue_desc);
+ free(node_desc);
+ return NULL;
+ }
+ }
+
+ return node_desc;
+}
+
/* Recursively build a tm_node subtree rooted at a new node at the given
 * level, creating FANIN_RATIO children per non-leaf node.  Returns the
 * root descriptor of the subtree or NULL on failure.
 * NOTE(review): on a child-creation failure the partially built subtree
 * (and node_desc) is not torn down here — presumably the caller aborts the
 * whole test run; confirm before reusing in other contexts. */
static tm_node_desc_t *create_tm_subtree(odp_tm_t odp_tm,
					 uint32_t level,
					 uint32_t num_levels,
					 uint32_t node_idx,
					 tm_node_desc_t *parent_node)
{
	tm_node_desc_t *node_desc, *child_desc;
	uint32_t child_idx;

	node_desc = create_tm_node(odp_tm, level, num_levels,
				   node_idx, parent_node);
	if (node_desc == NULL) {
		LOG_ERR("create_tm_node() failed @ level=%u\n", level);
		return NULL;
	}

	if (level < (num_levels - 1)) {
		for (child_idx = 0; child_idx < FANIN_RATIO; child_idx++) {
			child_desc = create_tm_subtree(odp_tm, level + 1,
						       num_levels, child_idx,
						       node_desc);
			if (child_desc == NULL) {
				LOG_ERR("create_tm_subtree failed level=%u\n",
					level);

				return NULL;
			}

			node_desc->children[child_idx] = child_desc;
		}
	}

	return node_desc;
}
+
+static odp_tm_node_t find_tm_node(uint8_t tm_system_idx, const char *node_name)
+{
+ return odp_tm_node_lookup(odp_tm_systems[tm_system_idx], node_name);
+}
+
+static tm_node_desc_t *find_node_desc(uint8_t tm_system_idx,
+ const char *node_name)
+{
+ tm_node_desc_t *node_desc;
+ uint32_t child_num;
+ char *name_ptr;
+
+ /* Assume node_name is "node_" followed by a sequence of integers
+ * separated by underscores, where each integer is the child number to
+ * get to the next level node. */
+ node_desc = root_node_descs[tm_system_idx];
+ name_ptr = strchr(node_name, '_');
+ if (name_ptr == NULL)
+ return NULL;
+
+ /* Skip over the first integer */
+ name_ptr++;
+ name_ptr = strchr(name_ptr, '_');
+ if (name_ptr != NULL)
+ name_ptr++;
+
+ while (node_desc != NULL) {
+ if (strcmp(node_desc->node_name, node_name) == 0)
+ return node_desc;
+
+ if (name_ptr == NULL)
+ return NULL;
+
+ child_num = atoi(name_ptr);
+ if (node_desc->num_children < child_num)
+ return NULL;
+
+ node_desc = node_desc->children[child_num - 1];
+ name_ptr = strchr(name_ptr, '_');
+ if (name_ptr != NULL)
+ name_ptr++;
+ }
+
+ return NULL;
+}
+
+static odp_tm_queue_t find_tm_queue(uint8_t tm_system_idx,
+ const char *node_name,
+ uint8_t priority)
+{
+ tm_queue_desc_t *queue_desc;
+ tm_node_desc_t *node_desc;
+
+ node_desc = find_node_desc(tm_system_idx, node_name);
+ if (node_desc == NULL)
+ return ODP_TM_INVALID;
+
+ queue_desc = node_desc->queue_desc;
+ if (queue_desc == NULL)
+ return ODP_TM_INVALID;
+
+ return queue_desc->tm_queues[priority];
+}
+
/* Recursively collect into tm_queues[] (up to max_queues) the tm_queue of
 * the given priority from every leaf node beneath node_desc.  A node that
 * owns queues is a leaf and contributes exactly one queue.  Returns the
 * number of queues collected. */
static uint32_t find_child_queues(uint8_t tm_system_idx,
				  tm_node_desc_t *node_desc,
				  uint8_t priority,
				  odp_tm_queue_t tm_queues[],
				  uint32_t max_queues)
{
	tm_queue_desc_t *queue_desc;
	tm_node_desc_t *child_node_desc;
	uint32_t num_children, num_queues, child_idx, rem_queues;

	if (max_queues == 0)
		return 0;

	/* Leaf case: this node owns queues directly. */
	queue_desc = node_desc->queue_desc;
	if (queue_desc != NULL) {
		tm_queues[0] = queue_desc->tm_queues[priority];
		return 1;
	}

	num_children = node_desc->num_children;
	num_queues = 0;

	for (child_idx = 0; child_idx < num_children; child_idx++) {
		child_node_desc = node_desc->children[child_idx];
		/* Each recursion gets only the remaining array space. */
		rem_queues = max_queues - num_queues;
		num_queues += find_child_queues(tm_system_idx, child_node_desc,
						priority,
						&tm_queues[num_queues],
						rem_queues);
		if (num_queues == max_queues)
			break;
	}

	return num_queues;
}
+
/* Create a complete TM system: fill in requirements (per-level limits,
 * marking/wred/shaper capabilities needed) and a pktio-based egress, call
 * odp_tm_create(), then build the full node tree beneath it.  Also
 * exercises odp_tm_capability() and odp_tm_find() on the new system.
 * Appends the system to the global odp_tm_systems[]/root_node_descs[]
 * arrays.  Returns 0 on success, -1 on failure. */
static int create_tm_system(void)
{
	odp_tm_level_requirements_t *per_level;
	odp_tm_requirements_t requirements;
	odp_tm_egress_t egress;
	odp_packet_color_t color;
	tm_node_desc_t *root_node_desc;
	uint32_t level, max_nodes[ODP_TM_MAX_LEVELS];
	odp_tm_t odp_tm, found_odp_tm;
	char tm_name[TM_NAME_LEN];
	int rc;

	odp_tm_requirements_init(&requirements);
	odp_tm_egress_init(&egress);

	requirements.max_tm_queues = NUM_TM_QUEUES + 1;
	requirements.num_levels = NUM_LEVELS;
	requirements.tm_queue_shaper_needed = true;
	requirements.tm_queue_wred_needed = true;
	requirements.tm_queue_dual_slope_needed = true;
	requirements.vlan_marking_needed = false;
	requirements.ecn_marking_needed = true;
	requirements.drop_prec_marking_needed = true;
	for (color = 0; color < ODP_NUM_PACKET_COLORS; color++)
		requirements.marking_colors_needed[color] = true;

	/* Set the max_num_tm_nodes to be double the expected number of nodes
	 * at that level */
	memset(max_nodes, 0, sizeof(max_nodes));
	max_nodes[0] = 2 * NUM_LEVEL0_TM_NODES;
	max_nodes[1] = 2 * NUM_LEVEL1_TM_NODES;
	max_nodes[2] = 2 * NUM_LEVEL2_TM_NODES;
	max_nodes[3] = 2 * NUM_LEVEL2_TM_NODES * FANIN_RATIO;

	for (level = 0; level < NUM_LEVELS; level++) {
		per_level = &requirements.per_level[level];
		per_level->max_priority = NUM_PRIORITIES - 1;
		per_level->max_num_tm_nodes = max_nodes[level];
		per_level->max_fanin_per_node = FANIN_RATIO;
		per_level->tm_node_shaper_needed = true;
		per_level->tm_node_wred_needed = false;
		per_level->tm_node_dual_slope_needed = false;
		per_level->fair_queuing_needed = true;
		per_level->weights_needed = true;
	}

	egress.egress_kind = ODP_TM_EGRESS_PKT_IO;
	egress.pktout = xmt_pktout;

	snprintf(tm_name, sizeof(tm_name), "TM_system_%u", num_odp_tm_systems);
	odp_tm = odp_tm_create(tm_name, &requirements, &egress);
	if (odp_tm == ODP_TM_INVALID) {
		LOG_ERR("odp_tm_create() failed\n");
		return -1;
	}

	odp_tm_systems[num_odp_tm_systems] = odp_tm;

	root_node_desc = create_tm_subtree(odp_tm, 0, NUM_LEVELS, 0, NULL);
	root_node_descs[num_odp_tm_systems] = root_node_desc;
	if (root_node_desc == NULL) {
		LOG_ERR("create_tm_subtree() failed\n");
		return -1;
	}

	num_odp_tm_systems++;

	/* Test odp_tm_capability and odp_tm_find. */
	rc = odp_tm_capability(odp_tm, &tm_capabilities);
	if (rc != 0) {
		LOG_ERR("odp_tm_capability() failed\n");
		return -1;
	}

	found_odp_tm = odp_tm_find(tm_name, &requirements, &egress);
	if ((found_odp_tm == ODP_TM_INVALID) || (found_odp_tm != odp_tm)) {
		LOG_ERR("odp_tm_find() failed\n");
		return -1;
	}

	return 0;
}
+
/* Debug helper: print one node descriptor (indented by tree level) along
 * with its odp_tm_node_info fanin counts, then recurse over the children.
 * NOTE(review): node_info is printed even if odp_tm_node_info() failed —
 * its fields are uninitialized in that case; harmless for debug output
 * but worth confirming. */
static void dump_tm_subtree(tm_node_desc_t *node_desc)
{
	odp_tm_node_info_t node_info;
	uint32_t idx, num_queues, child_idx;
	int rc;

	for (idx = 0; idx < node_desc->level; idx++)
		printf("  ");

	rc = odp_tm_node_info(node_desc->node, &node_info);
	if (rc != 0) {
		LOG_ERR("odp_tm_node_info failed for tm_node=0x%" PRIX64 "\n",
			node_desc->node);
	}

	num_queues = 0;
	if (node_desc->queue_desc != NULL)
		num_queues = node_desc->queue_desc->num_queues;

	printf("node_desc=%p name='%s' tm_node=0x%" PRIX64 " idx=%u level=%u "
	       "parent=0x%" PRIX64 " children=%u queues=%u queue_fanin=%u "
	       "node_fanin=%u\n",
	       node_desc, node_desc->node_name, node_desc->node,
	       node_desc->node_idx, node_desc->level, node_desc->parent_node,
	       node_desc->num_children, num_queues, node_info.tm_queue_fanin,
	       node_info.tm_node_fanin);

	for (child_idx = 0; child_idx < node_desc->num_children; child_idx++)
		dump_tm_subtree(node_desc->children[child_idx]);
}
+
+static void dump_tm_tree(uint32_t tm_idx)
+{
+ tm_node_desc_t *root_node_desc;
+
+ if (!TM_DEBUG)
+ return;
+
+ root_node_desc = root_node_descs[tm_idx];
+ dump_tm_subtree(root_node_desc);
+}
+
/* Detach every profile (shaper, threshold, per-color wred) currently
 * attached to tm_queue by re-configuring each slot to ODP_TM_INVALID.
 * Must be done before the profiles themselves can be destroyed.
 * Returns 0 on success or the first failing ODP error code. */
static int unconfig_tm_queue_profiles(odp_tm_queue_t tm_queue)
{
	odp_tm_queue_info_t queue_info;
	odp_tm_wred_t wred_profile;
	uint32_t color;
	int rc;

	rc = odp_tm_queue_info(tm_queue, &queue_info);
	if (rc != 0) {
		LOG_ERR("odp_tm_queue_info failed code=%d\n", rc);
		return rc;
	}

	if (queue_info.shaper_profile != ODP_TM_INVALID) {
		rc = odp_tm_queue_shaper_config(tm_queue, ODP_TM_INVALID);
		if (rc != 0) {
			LOG_ERR("odp_tm_queue_shaper_config failed code=%d\n",
				rc);
			return rc;
		}
	}

	if (queue_info.threshold_profile != ODP_TM_INVALID) {
		rc = odp_tm_queue_threshold_config(tm_queue, ODP_TM_INVALID);
		if (rc != 0) {
			LOG_ERR("odp_tm_queue_threshold_config failed "
				"code=%d\n", rc);
			return rc;
		}
	}

	for (color = 0; color < ODP_NUM_PACKET_COLORS; color++) {
		wred_profile = queue_info.wred_profile[color];
		if (wred_profile != ODP_TM_INVALID) {
			rc = odp_tm_queue_wred_config(tm_queue, color,
						      ODP_TM_INVALID);
			if (rc != 0) {
				LOG_ERR("odp_tm_queue_wred_config failed "
					"color=%u code=%d\n", color, rc);
				return rc;
			}
		}
	}

	return 0;
}
+
/* Disconnect, unconfigure and destroy every valid tm_queue referenced by
 * queue_desc, then free the descriptor itself.  Stops and returns the
 * error code on the first failure (queue_desc is NOT freed in that case). */
static int destroy_tm_queues(tm_queue_desc_t *queue_desc)
{
	odp_tm_queue_t tm_queue;
	uint32_t num_queues, queue_idx;
	int rc;

	num_queues = queue_desc->num_queues;
	for (queue_idx = 0; queue_idx < num_queues; queue_idx++) {
		tm_queue = queue_desc->tm_queues[queue_idx];
		if (tm_queue != ODP_TM_INVALID) {
			rc = odp_tm_queue_disconnect(tm_queue);
			if (rc != 0) {
				LOG_ERR("odp_tm_queue_disconnect failed "
					"idx=%u code=%d\n", queue_idx, rc);
				return rc;
			}

			/* Profiles must be detached before destroy. */
			rc = unconfig_tm_queue_profiles(tm_queue);
			if (rc != 0) {
				LOG_ERR("unconfig_tm_queue_profiles failed "
					"idx=%u code=%d\n", queue_idx, rc);
				return rc;
			}

			rc = odp_tm_queue_destroy(tm_queue);
			if (rc != 0) {
				LOG_ERR("odp_tm_queue_destroy failed "
					"idx=%u code=%d\n", queue_idx, rc);
				return rc;
			}
		}
	}

	free(queue_desc);
	return 0;
}
+
/* Detach every profile (shaper, threshold, per-color wred) currently
 * attached to tm_node by re-configuring each slot to ODP_TM_INVALID.
 * Mirror of unconfig_tm_queue_profiles() for nodes.
 * Returns 0 on success or the first failing ODP error code. */
static int unconfig_tm_node_profiles(odp_tm_node_t tm_node)
{
	odp_tm_node_info_t node_info;
	odp_tm_wred_t wred_profile;
	uint32_t color;
	int rc;

	rc = odp_tm_node_info(tm_node, &node_info);
	if (rc != 0) {
		LOG_ERR("odp_tm_node_info failed code=%d\n", rc);
		return rc;
	}

	if (node_info.shaper_profile != ODP_TM_INVALID) {
		rc = odp_tm_node_shaper_config(tm_node, ODP_TM_INVALID);
		if (rc != 0) {
			LOG_ERR("odp_tm_node_shaper_config failed code=%d\n",
				rc);
			return rc;
		}
	}

	if (node_info.threshold_profile != ODP_TM_INVALID) {
		rc = odp_tm_node_threshold_config(tm_node, ODP_TM_INVALID);
		if (rc != 0) {
			LOG_ERR("odp_tm_node_threshold_config failed "
				"code=%d\n", rc);
			return rc;
		}
	}

	for (color = 0; color < ODP_NUM_PACKET_COLORS; color++) {
		wred_profile = node_info.wred_profile[color];
		if (wred_profile != ODP_TM_INVALID) {
			rc = odp_tm_node_wred_config(tm_node, color,
						     ODP_TM_INVALID);
			if (rc != 0) {
				LOG_ERR("odp_tm_node_wred_config failed "
					"color=%u code=%d\n", color, rc);
				return rc;
			}
		}
	}

	return 0;
}
+
/* Recursively tear down a tm_node subtree bottom-up: destroy all child
 * subtrees, then this node's queues, then disconnect/unconfigure/destroy
 * the node itself, finally freeing its name and descriptor.
 * Returns 0 on success or the first non-zero error code (in which case the
 * remaining resources are left allocated). */
static int destroy_tm_subtree(tm_node_desc_t *node_desc)
{
	tm_queue_desc_t *queue_desc;
	tm_node_desc_t *child_desc;
	odp_tm_node_t tm_node;
	uint32_t num_children, child_num;
	int rc;

	num_children = node_desc->num_children;
	for (child_num = 0; child_num < num_children; child_num++) {
		child_desc = node_desc->children[child_num];
		if (child_desc != NULL) {
			rc = destroy_tm_subtree(child_desc);
			if (rc != 0) {
				LOG_ERR("destroy_tm_subtree failed "
					"child_num=%u code=%d\n",
					child_num, rc);
				return rc;
			}
		}
	}

	queue_desc = node_desc->queue_desc;
	if (queue_desc != NULL) {
		rc = destroy_tm_queues(queue_desc);
		if (rc != 0) {
			LOG_ERR("destroy_tm_queues failed code=%d\n", rc);
			return rc;
		}
	}

	tm_node = node_desc->node;
	rc = odp_tm_node_disconnect(tm_node);
	if (rc != 0) {
		LOG_ERR("odp_tm_node_disconnect failed code=%d\n", rc);
		return rc;
	}

	/* Profiles must be detached before the node can be destroyed. */
	rc = unconfig_tm_node_profiles(tm_node);
	if (rc != 0) {
		LOG_ERR("unconfig_tm_node_profiles failed code=%d\n", rc);
		return rc;
	}

	rc = odp_tm_node_destroy(tm_node);
	if (rc != 0) {
		LOG_ERR("odp_tm_node_destroy failed code=%d\n", rc);
		return rc;
	}

	if (node_desc->node_name)
		free(node_desc->node_name);

	free(node_desc);
	return 0;
}
+
+static int destroy_all_shaper_profiles(void)
+{
+ odp_tm_shaper_t shaper_profile;
+ uint32_t idx;
+ int rc;
+
+ for (idx = 0; idx < NUM_SHAPER_PROFILES; idx++) {
+ shaper_profile = shaper_profiles[idx];
+ if (shaper_profile != ODP_TM_INVALID) {
+ rc = odp_tm_shaper_destroy(shaper_profile);
+ if (rc != 0) {
+ LOG_ERR("odp_tm_sched_destroy failed "
+ "idx=%u code=%d\n", idx, rc);
+ return rc;
+ }
+ shaper_profiles[idx] = ODP_TM_INVALID;
+ }
+ }
+
+ return 0;
+}
+
/* Destroy every scheduler profile created by the tests and invalidate its
 * slot.  Returns 0 on success or the first non-zero error code. */
static int destroy_all_sched_profiles(void)
{
	odp_tm_sched_t sched_profile;
	uint32_t idx;
	int rc;

	for (idx = 0; idx < NUM_SCHED_PROFILES; idx++) {
		sched_profile = sched_profiles[idx];
		if (sched_profile != ODP_TM_INVALID) {
			rc = odp_tm_sched_destroy(sched_profile);
			if (rc != 0) {
				LOG_ERR("odp_tm_sched_destroy failed "
					"idx=%u code=%d\n", idx, rc);
				return rc;
			}
			sched_profiles[idx] = ODP_TM_INVALID;
		}
	}

	return 0;
}
+
/* Destroy every threshold profile created by the tests and invalidate its
 * slot.  Returns 0 on success or the first non-zero error code. */
static int destroy_all_threshold_profiles(void)
{
	odp_tm_threshold_t threshold_profile;
	uint32_t idx;
	int rc;

	for (idx = 0; idx < NUM_THRESHOLD_PROFILES; idx++) {
		threshold_profile = threshold_profiles[idx];
		if (threshold_profile != ODP_TM_INVALID) {
			rc = odp_tm_threshold_destroy(threshold_profile);
			if (rc != 0) {
				LOG_ERR("odp_tm_threshold_destroy failed "
					"idx=%u code=%d\n", idx, rc);
				return rc;
			}
			threshold_profiles[idx] = ODP_TM_INVALID;
		}
	}

	return 0;
}
+
/* Destroy every per-color wred profile created by the tests and invalidate
 * its slot.  Returns 0 on success or the first non-zero error code.
 * NOTE(review): this loop uses ODP_NUM_PKT_COLORS while sibling functions
 * use ODP_NUM_PACKET_COLORS — presumably aliases of the same value;
 * confirm and unify. */
static int destroy_all_wred_profiles(void)
{
	odp_tm_wred_t wred_profile;
	uint32_t idx, color;
	int rc;

	for (idx = 0; idx < NUM_WRED_PROFILES; idx++) {
		for (color = 0; color < ODP_NUM_PKT_COLORS; color++) {
			wred_profile = wred_profiles[idx][color];
			if (wred_profile != ODP_TM_INVALID) {
				rc = odp_tm_wred_destroy(wred_profile);
				if (rc != 0) {
					LOG_ERR("odp_tm_wred_destroy failed "
						"idx=%u color=%u code=%d\n",
						idx, color, rc);
					return rc;
				}
				wred_profiles[idx][color] = ODP_TM_INVALID;
			}
		}
	}

	return 0;
}
+
/* Destroy all shaper, sched, threshold and wred profiles created by the
 * tests, in that order.  Stops at and returns the first failure code. */
static int destroy_all_profiles(void)
{
	int rc;

	if ((rc = destroy_all_shaper_profiles()) != 0) {
		LOG_ERR("destroy_all_shaper_profiles failed code=%d\n", rc);
		return rc;
	}

	if ((rc = destroy_all_sched_profiles()) != 0) {
		LOG_ERR("destroy_all_sched_profiles failed code=%d\n", rc);
		return rc;
	}

	if ((rc = destroy_all_threshold_profiles()) != 0) {
		LOG_ERR("destroy_all_threshold_profiles failed code=%d\n", rc);
		return rc;
	}

	if ((rc = destroy_all_wred_profiles()) != 0) {
		LOG_ERR("destroy_all_wred_profiles failed code=%d\n", rc);
		return rc;
	}

	return 0;
}
+
/* Tear down every TM system created by the tests (node trees first, then
 * the system handles) and finally destroy all remaining profiles.
 * Returns 0 on success, -1 on the first failure. */
static int destroy_tm_systems(void)
{
	uint32_t idx;

	/* Close/free the TM systems. */
	for (idx = 0; idx < num_odp_tm_systems; idx++) {
		if (destroy_tm_subtree(root_node_descs[idx]) != 0)
			return -1;

		if (odp_tm_destroy(odp_tm_systems[idx]) != 0)
			return -1;
	}

	/* Close/free the TM profiles. */
	if (destroy_all_profiles() != 0)
		return -1;

	return 0;
}
+
/* CUnit suite init: reset global packet bookkeeping, fill the payload
 * pattern buffer, pick the pktio interface(s) from ODP_PKTIO_IF0/IF1 (or
 * fall back to "loop") and open them.  Returns 0 on success, -1 on
 * failure. */
int traffic_mngr_suite_init(void)
{
	uint32_t payload_len, copy_len;

	/* Initialize some global variables. */
	num_pkts_made = 0;
	num_pkts_sent = 0;
	num_rcv_pkts = 0;
	cpu_unique_id = 1;
	cpu_tcp_seq_num = DEFAULT_TCP_SEQ_NUM;
	memset(xmt_pkts, 0, sizeof(xmt_pkts));
	memset(rcv_pkts, 0, sizeof(rcv_pkts));

	/* Fill payload_data by repeating ALPHABET.  NOTE(review):
	 * sizeof(ALPHABET) includes the trailing NUL, so the pattern
	 * contains an embedded '\0' each repetition — presumably
	 * intentional filler; confirm if exact pattern matters. */
	payload_len = 0;
	while (payload_len < MAX_PAYLOAD) {
		copy_len = MIN(MAX_PAYLOAD - payload_len, sizeof(ALPHABET));
		memcpy(&payload_data[payload_len], ALPHABET, copy_len);
		payload_len += copy_len;
	}

	/* Next open a single or pair of interfaces. This should be the same
	 * logic as in the pktio_suite_init() function in the
	 * test/validation/pktio.c file. */
	iface_name[0] = getenv("ODP_PKTIO_IF0");
	iface_name[1] = getenv("ODP_PKTIO_IF1");
	num_ifaces = 1;

	if (!iface_name[0]) {
		printf("No interfaces specified, using default \"loop\".\n");
		iface_name[0] = "loop";
	} else if (!iface_name[1]) {
		printf("Using loopback interface: %s\n", iface_name[0]);
	} else {
		num_ifaces = 2;
		printf("Using paired interfaces: %s %s\n",
		       iface_name[0], iface_name[1]);
	}

	if (open_pktios() != 0)
		return -1;

	return 0;
}
+
/* CUnit suite teardown: free any still-held received packets, then stop
 * and close each pktio and destroy its packet pool.
 * Returns 0 on success, -1 on the first failure. */
int traffic_mngr_suite_term(void)
{
	uint32_t iface;

	/* Close the pktios and associated packet pools. */
	free_rcvd_pkts();
	for (iface = 0; iface < num_ifaces; iface++) {
		if (odp_pktio_stop(pktios[iface]) != 0)
			return -1;

		if (odp_pktio_close(pktios[iface]) != 0)
			return -1;

		if (odp_pool_destroy(pools[iface]) != 0)
			return -1;
	}

	return 0;
}
+
/* Verify that odp_tm_shaper_lookup() on shaper_name returns the handle
 * stored at creation time, and that the profile's parameters still match
 * the per-index scaled values it was created with (shaper_idx is the
 * 1-based creation index). */
static void check_shaper_profile(char *shaper_name, uint32_t shaper_idx)
{
	odp_tm_shaper_params_t shaper_params;
	odp_tm_shaper_t profile;

	profile = odp_tm_shaper_lookup(shaper_name);
	CU_ASSERT(profile != ODP_TM_INVALID);
	CU_ASSERT(profile == shaper_profiles[shaper_idx - 1]);
	/* Skip the parameter checks if the lookup returned a different
	 * (possibly invalid) handle. */
	if (profile != shaper_profiles[shaper_idx - 1])
		return;

	odp_tm_shaper_params_read(profile, &shaper_params);
	CU_ASSERT(approx_eq64(shaper_params.commit_bps,
			      shaper_idx * MIN_COMMIT_BW));
	CU_ASSERT(approx_eq64(shaper_params.peak_bps,
			      shaper_idx * MIN_PEAK_BW));
	CU_ASSERT(approx_eq32(shaper_params.commit_burst,
			      shaper_idx * MIN_COMMIT_BURST));
	CU_ASSERT(approx_eq32(shaper_params.peak_burst,
			      shaper_idx * MIN_PEAK_BURST));

	CU_ASSERT(shaper_params.shaper_len_adjust == SHAPER_LEN_ADJ);
	CU_ASSERT(shaper_params.dual_rate == 0);
}
+
+void traffic_mngr_test_shaper_profile(void)
+{
+ odp_tm_shaper_params_t shaper_params;
+ odp_tm_shaper_t profile;
+ uint32_t idx, shaper_idx, i;
+ char shaper_name[TM_NAME_LEN];
+
+ odp_tm_shaper_params_init(&shaper_params);
+ shaper_params.shaper_len_adjust = SHAPER_LEN_ADJ;
+ shaper_params.dual_rate = 0;
+
+ for (idx = 1; idx <= NUM_SHAPER_TEST_PROFILES; idx++) {
+ snprintf(shaper_name, sizeof(shaper_name),
+ "shaper_profile_%u", idx);
+ shaper_params.commit_bps = idx * MIN_COMMIT_BW;
+ shaper_params.peak_bps = idx * MIN_PEAK_BW;
+ shaper_params.commit_burst = idx * MIN_COMMIT_BURST;
+ shaper_params.peak_burst = idx * MIN_PEAK_BURST;
+
+ profile = odp_tm_shaper_create(shaper_name, &shaper_params);
+ CU_ASSERT_FATAL(profile != ODP_TM_INVALID);
+
+ /* Make sure profile handle is unique */
+ for (i = 1; i < idx - 1; i++)
+ CU_ASSERT(profile != shaper_profiles[i - 1]);
+
+ shaper_profiles[idx - 1] = profile;
+ num_shaper_profiles++;
+ }
+
+ /* Now test odp_tm_shaper_lookup */
+ for (idx = 1; idx <= NUM_SHAPER_TEST_PROFILES; idx++) {
+ /* The following equation is designed is somewhat randomize
+ * the lookup of the profiles to catch any implementations
+ *taking shortcuts. */
+ shaper_idx = ((3 + 7 * idx) % NUM_SHAPER_TEST_PROFILES) + 1;
+ snprintf(shaper_name, sizeof(shaper_name),
+ "shaper_profile_%u", shaper_idx);
+
+ check_shaper_profile(shaper_name, shaper_idx);
+ }
+}
+
/* Verify that odp_tm_sched_lookup() on sched_name returns the handle stored
 * at creation time, and that every priority still has byte-based weights
 * with the expected per-index value (sched_idx is the 1-based creation
 * index). */
static void check_sched_profile(char *sched_name, uint32_t sched_idx)
{
	odp_tm_sched_params_t sched_params;
	odp_tm_sched_t profile;
	uint32_t priority;

	profile = odp_tm_sched_lookup(sched_name);
	CU_ASSERT(profile != ODP_TM_INVALID);
	CU_ASSERT(profile == sched_profiles[sched_idx - 1]);
	/* Skip the parameter checks on a lookup mismatch. */
	if (profile != sched_profiles[sched_idx - 1])
		return;

	odp_tm_sched_params_read(profile, &sched_params);
	for (priority = 0; priority < NUM_PRIORITIES; priority++) {
		CU_ASSERT(sched_params.sched_modes[priority] ==
			  ODP_TM_BYTE_BASED_WEIGHTS);
		CU_ASSERT(approx_eq32(sched_params.sched_weights[priority],
				      8 + sched_idx + priority));
	}
}
+
+void traffic_mngr_test_sched_profile(void)
+{
+ odp_tm_sched_params_t sched_params;
+ odp_tm_sched_t profile;
+ uint32_t idx, priority, sched_idx, i;
+ char sched_name[TM_NAME_LEN];
+
+ odp_tm_sched_params_init(&sched_params);
+
+ for (idx = 1; idx <= NUM_SCHED_TEST_PROFILES; idx++) {
+ snprintf(sched_name, sizeof(sched_name),
+ "sched_profile_%u", idx);
+ for (priority = 0; priority < 16; priority++) {
+ sched_params.sched_modes[priority] =
+ ODP_TM_BYTE_BASED_WEIGHTS;
+ sched_params.sched_weights[priority] = 8 + idx +
+ priority;
+ }
+
+ profile = odp_tm_sched_create(sched_name, &sched_params);
+ CU_ASSERT_FATAL(profile != ODP_TM_INVALID);
+
+ /* Make sure profile handle is unique */
+ for (i = 1; i < idx - 1; i++)
+ CU_ASSERT(profile != sched_profiles[i - 1]);
+
+ sched_profiles[idx - 1] = profile;
+ num_sched_profiles++;
+ }
+
+ /* Now test odp_tm_sched_lookup */
+ for (idx = 1; idx <= NUM_SCHED_TEST_PROFILES; idx++) {
+ /* The following equation is designed is somewhat randomize
+ * the lookup of the profiles to catch any implementations
+ * taking shortcuts. */
+ sched_idx = ((3 + 7 * idx) % NUM_SCHED_TEST_PROFILES) + 1;
+ snprintf(sched_name, sizeof(sched_name), "sched_profile_%u",
+ sched_idx);
+ check_sched_profile(sched_name, sched_idx);
+ }
+}
+
+static void check_threshold_profile(char *threshold_name,
+ uint32_t threshold_idx)
+{
+ odp_tm_threshold_params_t threshold_params;
+ odp_tm_threshold_t profile;
+
+ profile = odp_tm_thresholds_lookup(threshold_name);
+ CU_ASSERT(profile != ODP_TM_INVALID);
+ CU_ASSERT(profile == threshold_profiles[threshold_idx - 1]);
+
+ if (profile == threshold_profiles[threshold_idx - 1])
+ return;
+
+ odp_tm_thresholds_params_read(profile, &threshold_params);
+ CU_ASSERT(threshold_params.max_pkts ==
+ threshold_idx * MIN_PKT_THRESHOLD);
+ CU_ASSERT(threshold_params.max_bytes ==
+ threshold_idx * MIN_BYTE_THRESHOLD);
+ CU_ASSERT(threshold_params.enable_max_pkts == 1);
+ CU_ASSERT(threshold_params.enable_max_bytes == 1);
+}
+
+/* Create NUM_THRESH_TEST_PROFILES threshold profiles whose pkt/byte limits
+ * scale with their 1-based index, verify each new handle is unique, then
+ * exercise odp_tm_thresholds_lookup() over all of them in a scrambled
+ * order via check_threshold_profile(). */
+void traffic_mngr_test_threshold_profile(void)
+{
+ odp_tm_threshold_params_t threshold_params;
+ odp_tm_threshold_t profile;
+ uint32_t idx, threshold_idx, i;
+ char threshold_name[TM_NAME_LEN];
+
+ odp_tm_threshold_params_init(&threshold_params);
+ threshold_params.enable_max_pkts = 1;
+ threshold_params.enable_max_bytes = 1;
+
+ for (idx = 1; idx <= NUM_THRESH_TEST_PROFILES; idx++) {
+ snprintf(threshold_name, sizeof(threshold_name),
+ "threshold_profile_%u", idx);
+ threshold_params.max_pkts = idx * MIN_PKT_THRESHOLD;
+ threshold_params.max_bytes = idx * MIN_BYTE_THRESHOLD;
+
+ profile = odp_tm_threshold_create(threshold_name,
+ &threshold_params);
+ CU_ASSERT_FATAL(profile != ODP_TM_INVALID);
+
+ /* Make sure profile handle is unique. BUG FIX: the loop
+ * bound was "i < idx - 1", which skipped the comparison
+ * against the most recently created profile. */
+ for (i = 1; i < idx; i++)
+ CU_ASSERT(profile != threshold_profiles[i - 1]);
+
+ threshold_profiles[idx - 1] = profile;
+ num_threshold_profiles++;
+ }
+
+ /* Now test odp_tm_threshold_lookup */
+ for (idx = 1; idx <= NUM_THRESH_TEST_PROFILES; idx++) {
+ /* The following equation is designed to somewhat randomize
+ * the lookup of the profiles to catch any implementations
+ * taking shortcuts. */
+ threshold_idx = ((3 + 7 * idx) % NUM_THRESH_TEST_PROFILES) + 1;
+ snprintf(threshold_name, sizeof(threshold_name),
+ "threshold_profile_%u", threshold_idx);
+ check_threshold_profile(threshold_name, threshold_idx);
+ }
+}
+
+/* Verify that the WRED profile found under wred_name matches the handle
+ * stored for (wred_idx, color) and that its parameters are the ones it was
+ * created with. wred_idx is the 1-based creation index. */
+static void check_wred_profile(char *wred_name,
+ uint32_t wred_idx,
+ uint32_t color)
+{
+ odp_tm_wred_params_t params;
+ odp_tm_wred_t handle;
+
+ handle = odp_tm_wred_lookup(wred_name);
+ CU_ASSERT(handle != ODP_TM_INVALID);
+ CU_ASSERT(handle == wred_profiles[wred_idx - 1][color]);
+
+ /* Don't read parameters back through a mismatched handle. */
+ if (handle != wred_profiles[wred_idx - 1][color])
+ return;
+
+ odp_tm_wred_params_read(handle, &params);
+ CU_ASSERT(params.min_threshold == wred_idx * MIN_WRED_THRESH);
+ CU_ASSERT(params.med_threshold == wred_idx * MED_WRED_THRESH);
+ CU_ASSERT(params.med_drop_prob == wred_idx * MED_DROP_PROB);
+ CU_ASSERT(params.max_drop_prob == wred_idx * MAX_DROP_PROB);
+
+ CU_ASSERT(params.enable_wred == 1);
+ CU_ASSERT(params.use_byte_fullness == 0);
+}
+
+/* Create one WRED profile per (index, pkt color) pair with thresholds and
+ * drop probabilities that scale with the 1-based index, verify handle
+ * uniqueness, then exercise odp_tm_wred_lookup() in a scrambled order via
+ * check_wred_profile(). */
+void traffic_mngr_test_wred_profile(void)
+{
+ odp_tm_wred_params_t wred_params;
+ odp_tm_wred_t profile;
+ uint32_t idx, color, wred_idx, i, c;
+ char wred_name[TM_NAME_LEN];
+
+ odp_tm_wred_params_init(&wred_params);
+ wred_params.enable_wred = 1;
+ wred_params.use_byte_fullness = 0;
+
+ for (idx = 1; idx <= NUM_WRED_TEST_PROFILES; idx++) {
+ for (color = 0; color < ODP_NUM_PKT_COLORS; color++) {
+ snprintf(wred_name, sizeof(wred_name),
+ "wred_profile_%u_%u", idx, color);
+ wred_params.min_threshold = idx * MIN_WRED_THRESH;
+ wred_params.med_threshold = idx * MED_WRED_THRESH;
+ wred_params.med_drop_prob = idx * MED_DROP_PROB;
+ wred_params.max_drop_prob = idx * MAX_DROP_PROB;
+
+ profile = odp_tm_wred_create(wred_name, &wred_params);
+ CU_ASSERT_FATAL(profile != ODP_TM_INVALID);
+
+ /* Make sure profile handle is unique. BUG FIX: the
+ * loop bound was "i < idx - 1", which skipped the
+ * comparison against the most recently created row
+ * of profiles. */
+ for (i = 1; i < idx; i++)
+ for (c = 0; c < ODP_NUM_PKT_COLORS; c++)
+ CU_ASSERT(profile !=
+ wred_profiles[i - 1][c]);
+
+ wred_profiles[idx - 1][color] = profile;
+ }
+
+ num_wred_profiles++;
+ }
+
+ /* Now test odp_tm_wred_lookup */
+ for (idx = 1; idx <= NUM_WRED_TEST_PROFILES; idx++) {
+ /* The following equation is designed to somewhat randomize
+ * the lookup of the profiles to catch any implementations
+ * taking shortcuts. */
+ wred_idx = ((3 + 7 * idx) % NUM_WRED_TEST_PROFILES) + 1;
+
+ for (color = 0; color < ODP_NUM_PKT_COLORS; color++) {
+ snprintf(wred_name, sizeof(wred_name),
+ "wred_profile_%u_%u", wred_idx, color);
+ check_wred_profile(wred_name, wred_idx, color);
+ }
+ }
+}
+
+/* Create (or update, if one with this name exists) a single-rate shaper
+ * profile with the given commit rate and burst, peak shaping disabled, and
+ * attach it to the named tm node. A commit_bps of 0 is used by callers to
+ * effectively disable shaping. Returns 0 on success, -1 on failure. */
+static int set_shaper(const char *node_name,
+ const char *shaper_name,
+ const uint64_t commit_bps,
+ const uint64_t commit_burst_in_bits)
+{
+ odp_tm_shaper_params_t shaper_params;
+ odp_tm_shaper_t shaper_profile;
+ odp_tm_node_t tm_node;
+
+ tm_node = find_tm_node(0, node_name);
+ if (tm_node == ODP_TM_INVALID) {
+ LOG_ERR("find_tm_node(%s) failed\n", node_name);
+ CU_ASSERT_FATAL(tm_node != ODP_TM_INVALID);
+ return -1;
+ }
+
+ odp_tm_shaper_params_init(&shaper_params);
+ shaper_params.commit_bps = commit_bps;
+ shaper_params.peak_bps = 0;
+ shaper_params.commit_burst = commit_burst_in_bits;
+ shaper_params.peak_burst = 0;
+ shaper_params.shaper_len_adjust = 0;
+ shaper_params.dual_rate = 0;
+
+ /* First see if a shaper profile already exists with this name, in
+ * which case we use that profile, else create a new one. */
+ shaper_profile = odp_tm_shaper_lookup(shaper_name);
+ if (shaper_profile != ODP_TM_INVALID) {
+ odp_tm_shaper_params_update(shaper_profile, &shaper_params);
+ } else {
+ shaper_profile = odp_tm_shaper_create(shaper_name,
+ &shaper_params);
+ /* BUG FIX: don't record or attach an invalid handle when
+ * profile creation fails. */
+ if (shaper_profile == ODP_TM_INVALID) {
+ LOG_ERR("odp_tm_shaper_create(%s) failed\n",
+ shaper_name);
+ return -1;
+ }
+ shaper_profiles[num_shaper_profiles] = shaper_profile;
+ num_shaper_profiles++;
+ }
+
+ return odp_tm_node_shaper_config(tm_node, shaper_profile);
+}
+
+/* CUnit precondition check for the shaper tests: they need at least two
+ * available cpus, so report the test inactive (skipped) otherwise. */
+int traffic_mngr_check_shaper(void)
+{
+ odp_cpumask_t cpumask;
+ int cpucount = odp_cpumask_all_available(&cpumask);
+
+ if (cpucount < 2) {
+ /* Message typo fixed: "less then" -> "less than". */
+ LOG_DBG("\nSkipping shaper test because cpucount = %d "
+ "is less than min number 2 required\n", cpucount);
+ LOG_DBG("Rerun with more cpu resources\n");
+ return ODP_TEST_INACTIVE;
+ }
+
+ return ODP_TEST_ACTIVE;
+}
+
+/* CUnit precondition check for the scheduler tests: they need at least two
+ * available cpus, so report the test inactive (skipped) otherwise. */
+int traffic_mngr_check_scheduler(void)
+{
+ odp_cpumask_t cpumask;
+ int cpucount = odp_cpumask_all_available(&cpumask);
+
+ if (cpucount < 2) {
+ /* Message typo fixed: "less then" -> "less than". */
+ LOG_DBG("\nSkipping scheduler test because cpucount = %d "
+ "is less than min number 2 required\n", cpucount);
+ LOG_DBG("Rerun with more cpu resources\n");
+ return ODP_TEST_INACTIVE;
+ }
+
+ return ODP_TEST_ACTIVE;
+}
+
+/* Verify that the shaper on node_name limits throughput to commit_bps.
+ * Sends 50 pkts sized for 10,000 bit times each through one tm_queue and
+ * checks that the average inter-arrival gap of the received pkts is within
+ * MIN/MAX_SHAPER_BW_RCV_GAP percent of the expected 10,000/commit_bps gap.
+ * Returns 0 when the measured gap is acceptable, -1 otherwise. */
+static int test_shaper_bw(const char *shaper_name,
+ const char *node_name,
+ uint8_t priority,
+ uint64_t commit_bps)
+{
+ odp_tm_queue_t tm_queue;
+ rcv_stats_t rcv_stats;
+ pkt_info_t pkt_info;
+ uint64_t expected_rcv_gap_us;
+ uint32_t num_pkts, pkt_len, pkts_rcvd_in_order, avg_rcv_gap;
+ uint32_t min_rcv_gap, max_rcv_gap, pkts_sent;
+ int rc, ret_code;
+
+ /* This test can support a commit_bps from 64K to 2 Gbps and possibly
+ * up to a max of 10 Gbps, but no higher. */
+ CU_ASSERT_FATAL(commit_bps <= (10ULL * 1000000000ULL));
+
+ /* Pick a tm_queue and set the parent node's shaper BW to be commit_bps
+ * with a small burst tolerance. Then send the traffic with a pkt_len
+ * such that the pkt start time to next pkt start time is 10,000 bit
+ * times and then measure the average inter-arrival receive "gap" in
+ * microseconds. */
+ tm_queue = find_tm_queue(0, node_name, priority);
+ if (set_shaper(node_name, shaper_name, commit_bps, 10000) != 0)
+ return -1;
+
+ init_xmt_pkts(&pkt_info);
+ num_pkts = 50;
+ pkt_len = (10000 / 8) - (ETHERNET_OVHD_LEN + CRC_LEN);
+ pkt_info.pkt_class = 1;
+ if (make_pkts(num_pkts, pkt_len, &pkt_info) != 0)
+ return -1;
+
+ pkts_sent = send_pkts(tm_queue, num_pkts);
+
+ /* The expected inter arrival receive gap in seconds is equal to
+ * "10,000 bits / commit_bps". To get the gap time in microseconds
+ * we multiply this by one million. */
+ expected_rcv_gap_us = (1000000ULL * 10000ULL) / commit_bps;
+ num_rcv_pkts = receive_pkts(odp_tm_systems[0], rcv_pktin, pkts_sent,
+ commit_bps);
+ pkts_rcvd_in_order = pkts_rcvd_in_send_order();
+ ret_code = -1;
+
+ /* First verify that MOST of the pkts were received in any order. */
+ if (num_rcv_pkts <= (pkts_sent / 2)) {
+ /* This is fairly major failure in that most of the pkts didn't
+ * even get received, regardless of rate or order. Log the error
+ * to assist with debugging */
+ LOG_ERR("Sent %u pkts but only %u came back\n",
+ pkts_sent, num_rcv_pkts);
+ /* BUG FIX: assert the success condition (which is false
+ * here) so CUnit records a failure; the original asserted
+ * the failure condition, which always passed. */
+ CU_ASSERT(num_rcv_pkts > (pkts_sent / 2));
+ } else if (pkts_rcvd_in_order <= 32) {
+ LOG_ERR("Sent %u pkts but only %u came back (%u in order)\n",
+ pkts_sent, num_rcv_pkts, pkts_rcvd_in_order);
+ /* BUG FIX: same inverted-assert fix as above. */
+ CU_ASSERT(pkts_rcvd_in_order > 32);
+ } else {
+ if (pkts_rcvd_in_order < pkts_sent)
+ LOG_DBG("Info: of %u pkts sent %u came back (%u "
+ "in order)\n", pkts_sent,
+ num_rcv_pkts, pkts_rcvd_in_order);
+
+ /* Next determine the inter arrival receive pkt statistics. */
+ rc = rcv_rate_stats(&rcv_stats, pkt_info.pkt_class);
+ CU_ASSERT(rc == 0);
+
+ /* Next verify that the rcvd pkts have an average inter-receive
+ * gap of "expected_rcv_gap_us" microseconds, +/- 25%. */
+ avg_rcv_gap = rcv_stats.avg_rcv_gap;
+ min_rcv_gap = ((MIN_SHAPER_BW_RCV_GAP * expected_rcv_gap_us) /
+ 100) - 2;
+ max_rcv_gap = ((MAX_SHAPER_BW_RCV_GAP * expected_rcv_gap_us) /
+ 100) + 2;
+ if ((avg_rcv_gap < min_rcv_gap) ||
+ (max_rcv_gap < avg_rcv_gap)) {
+ LOG_ERR("min=%u avg_rcv_gap=%u max=%u "
+ "std_dev_gap=%u\n",
+ rcv_stats.min_rcv_gap, avg_rcv_gap,
+ rcv_stats.max_rcv_gap, rcv_stats.std_dev_gap);
+ LOG_ERR(" expected_rcv_gap=%" PRIu64 " acceptable "
+ "rcv_gap range=%u..%u\n",
+ expected_rcv_gap_us, min_rcv_gap, max_rcv_gap);
+ } else if (expected_rcv_gap_us < rcv_stats.std_dev_gap) {
+ /* Gap is in range but jitter is high: log it and
+ * still count the run as a pass (ret_code 0). */
+ LOG_ERR("min=%u avg_rcv_gap=%u max=%u "
+ "std_dev_gap=%u\n",
+ rcv_stats.min_rcv_gap, avg_rcv_gap,
+ rcv_stats.max_rcv_gap, rcv_stats.std_dev_gap);
+ LOG_ERR(" expected_rcv_gap=%" PRIu64 " acceptable "
+ "rcv_gap range=%u..%u\n",
+ expected_rcv_gap_us, min_rcv_gap, max_rcv_gap);
+ ret_code = 0;
+ } else {
+ ret_code = 0;
+ }
+
+ CU_ASSERT((min_rcv_gap <= avg_rcv_gap) &&
+ (avg_rcv_gap <= max_rcv_gap));
+ CU_ASSERT(rcv_stats.std_dev_gap <= expected_rcv_gap_us);
+ }
+
+ /* Disable the shaper, so as to get the pkts out quicker. */
+ set_shaper(node_name, shaper_name, 0, 0);
+ flush_leftover_pkts(odp_tm_systems[0], rcv_pktin);
+ CU_ASSERT(odp_tm_is_idle(odp_tm_systems[0]));
+ return ret_code;
+}
+
+/* Configure the scheduler profiles on the fan-in of the named node: for
+ * each of the first FANIN_RATIO children, create (or update) a profile
+ * named "<sched_base_name>_<fanin>" that uses sched_mode and
+ * sched_weights[fanin] for every priority, and apply it to that child's
+ * fan-in connection. Created profiles are recorded in sched_profiles[]
+ * for later cleanup. Returns 0 on success, -1 on failure. */
+static int set_sched_fanin(const char *node_name,
+ const char *sched_base_name,
+ odp_tm_sched_mode_t sched_mode,
+ uint8_t sched_weights[FANIN_RATIO])
+{
+ odp_tm_sched_params_t sched_params;
+ odp_tm_sched_t sched_profile;
+ tm_node_desc_t *node_desc, *child_desc;
+ odp_tm_node_t tm_node, fanin_node;
+ uint32_t fanin_cnt, fanin, priority;
+ uint8_t sched_weight;
+ char sched_name[TM_NAME_LEN];
+ int rc;
+
+ node_desc = find_node_desc(0, node_name);
+ if (node_desc == NULL)
+ return -1;
+
+ fanin_cnt = MIN(node_desc->num_children, FANIN_RATIO);
+ for (fanin = 0; fanin < fanin_cnt; fanin++) {
+ odp_tm_sched_params_init(&sched_params);
+ sched_weight = sched_weights[fanin];
+
+ /* Set the weights and mode the same for all priorities */
+ for (priority = 0; priority < NUM_PRIORITIES; priority++) {
+ sched_params.sched_modes[priority] = sched_mode;
+ sched_params.sched_weights[priority] = sched_weight;
+ }
+
+ /* Create the scheduler profile name using the sched_base_name
+ * and the fanin index */
+ snprintf(sched_name, sizeof(sched_name), "%s_%u",
+ sched_base_name, fanin);
+
+ /* First see if a sched profile already exists with this name,
+ * in which case we use that profile, else create a new one. */
+ sched_profile = odp_tm_sched_lookup(sched_name);
+ if (sched_profile != ODP_TM_INVALID) {
+ odp_tm_sched_params_update(sched_profile,
+ &sched_params);
+ } else {
+ sched_profile = odp_tm_sched_create(sched_name,
+ &sched_params);
+ sched_profiles[num_sched_profiles] = sched_profile;
+ num_sched_profiles++;
+ }
+
+ /* Apply the weights to the nodes fan-in. */
+ child_desc = node_desc->children[fanin];
+ tm_node = node_desc->node;
+ fanin_node = child_desc->node;
+ rc = odp_tm_node_sched_config(tm_node, fanin_node,
+ sched_profile);
+ if (rc != 0)
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Verify strict-priority scheduling among the tm_queues feeding the named
+ * node: behind a low-bandwidth shaper, send num_pkts per priority level
+ * (lowest priority first) and check that they are received in priority
+ * order. Failures are recorded via CU_ASSERT; returns 0 on completion,
+ * -1 on setup failure. */
+static int test_sched_queue_priority(const char *shaper_name,
+ const char *node_name,
+ uint32_t num_pkts)
+{
+ odp_tm_queue_t tm_queues[NUM_PRIORITIES];
+ pkt_info_t pkt_info;
+ uint32_t pkt_cnt, pkts_in_order, base_idx;
+ uint32_t idx, unique_id, pkt_len, base_pkt_len, pkts_sent;
+ int priority;
+
+ memset(unique_id_list, 0, sizeof(unique_id_list));
+ for (priority = 0; priority < NUM_PRIORITIES; priority++)
+ tm_queues[priority] = find_tm_queue(0, node_name, priority);
+
+ /* Enable the shaper to be low bandwidth. */
+ pkt_len = 1400;
+ set_shaper(node_name, shaper_name, 64 * 1000, 4 * pkt_len);
+
+ /* Make a couple of low priority dummy pkts first. */
+ init_xmt_pkts(&pkt_info);
+ if (make_pkts(4, pkt_len, &pkt_info) != 0)
+ return -1;
+
+ /* Now make "num_pkts" first at the lowest priority, then "num_pkts"
+ * at the second lowest priority, etc until "num_pkts" are made last
+ * at the highest priority (which is always priority 0). */
+ pkt_cnt = NUM_PRIORITIES * num_pkts;
+ base_pkt_len = 256;
+ for (priority = NUM_PRIORITIES - 1; 0 <= priority; priority--) {
+ /* NOTE(review): assumes make_pkts assigns sequential ids
+ * starting at the current cpu_unique_id — confirm. The ids
+ * are recorded so that priority 0 occupies the start of
+ * unique_id_list, i.e. the expected arrival order. */
+ unique_id = cpu_unique_id;
+ pkt_info.pkt_class = priority + 1;
+ pkt_len = base_pkt_len + 64 * priority;
+ if (make_pkts(num_pkts, pkt_len, &pkt_info) != 0)
+ return -1;
+
+ base_idx = priority * num_pkts;
+ for (idx = 0; idx < num_pkts; idx++)
+ unique_id_list[base_idx + idx] = unique_id++;
+ }
+
+ /* Send the low priority dummy pkts first. The arrival order of
+ * these pkts will be ignored. */
+ pkts_sent = send_pkts(tm_queues[NUM_PRIORITIES - 1], 4);
+
+ /* Now send "num_pkts" first at the lowest priority, then "num_pkts"
+ * at the second lowest priority, etc until "num_pkts" are sent last
+ * at the highest priority. */
+ for (priority = NUM_PRIORITIES - 1; 0 <= priority; priority--)
+ pkts_sent += send_pkts(tm_queues[priority], num_pkts);
+
+ busy_wait(1000000); /* wait 1 millisecond */
+
+ /* Disable the shaper, so as to get the pkts out quicker. */
+ set_shaper(node_name, shaper_name, 0, 0);
+
+ num_rcv_pkts = receive_pkts(odp_tm_systems[0], rcv_pktin,
+ pkt_cnt + 4, 64 * 1000);
+
+ /* Check rcvd packet arrivals to make sure that pkts arrived in
+ * priority order, except for perhaps the first few lowest priority
+ * dummy pkts. */
+ pkts_in_order = pkts_rcvd_in_given_order(unique_id_list, pkt_cnt, 0,
+ false, false);
+ if (pkts_in_order != pkt_cnt) {
+ LOG_ERR("pkts_sent=%u pkt_cnt=%u num_rcv_pkts=%u"
+ " rcvd_in_order=%u\n", pkts_sent, pkt_cnt, num_rcv_pkts,
+ pkts_in_order);
+ }
+
+ CU_ASSERT(pkts_in_order == pkt_cnt);
+
+ flush_leftover_pkts(odp_tm_systems[0], rcv_pktin);
+ CU_ASSERT(odp_tm_is_idle(odp_tm_systems[0]));
+ return 0;
+}
+
+/* Verify strict-priority scheduling across ALL tm_queues below the named
+ * node: behind a low-bandwidth shaper, send num_pkts per child queue at
+ * each priority (lowest priority first) and check that arrivals follow
+ * priority order. Returns 0 on completion, -1 on setup failure. */
+static int test_sched_node_priority(const char *shaper_name,
+ const char *node_name,
+ uint32_t num_pkts)
+{
+ odp_tm_queue_t *tm_queues, tm_queue;
+ tm_node_desc_t *node_desc;
+ queue_array_t *queue_array;
+ pkt_info_t pkt_info;
+ uint32_t total_num_queues, max_queues, num_queues, pkt_cnt;
+ uint32_t pkts_in_order, base_idx, queue_idx, idx, unique_id;
+ uint32_t pkt_len, base_pkt_len, total_pkt_cnt, pkts_sent;
+ int priority;
+
+ memset(unique_id_list, 0, sizeof(unique_id_list));
+ node_desc = find_node_desc(0, node_name);
+ if (node_desc == NULL)
+ return -1;
+
+ /* Collect the set of child tm_queues for every priority level. */
+ total_num_queues = 0;
+ for (priority = 0; priority < NUM_PRIORITIES; priority++) {
+ max_queues = NUM_LEVEL2_TM_NODES;
+ queue_array = &queues_set.queue_array[priority];
+ tm_queues = queue_array->tm_queues;
+ num_queues = find_child_queues(0, node_desc, priority,
+ tm_queues, max_queues);
+ queue_array->num_queues = num_queues;
+ queue_array->priority = priority;
+ total_num_queues += num_queues;
+ }
+
+ /* Enable the shaper to be low bandwidth. */
+ pkt_len = 1400;
+ set_shaper(node_name, shaper_name, 64 * 1000, 4 * pkt_len);
+
+ /* Make a couple of low priority large dummy pkts first. */
+ init_xmt_pkts(&pkt_info);
+ if (make_pkts(4, pkt_len, &pkt_info) != 0)
+ return -1;
+
+ /* Now make "num_pkts" for each tm_queue at the lowest priority, then
+ * "num_pkts" for each tm_queue at the second lowest priority, etc.
+ * until "num_pkts" for each tm_queue at the highest priority are made
+ * last. Note that the highest priority is always priority 0. */
+ total_pkt_cnt = total_num_queues * num_pkts;
+ base_pkt_len = 256;
+ base_idx = 0;
+ for (priority = NUM_PRIORITIES - 1; 0 <= priority; priority--) {
+ unique_id = cpu_unique_id;
+ queue_array = &queues_set.queue_array[priority];
+ num_queues = queue_array->num_queues;
+ pkt_cnt = num_queues * num_pkts;
+ pkt_info.pkt_class = priority + 1;
+ pkt_len = base_pkt_len + 64 * priority;
+ if (make_pkts(pkt_cnt, pkt_len, &pkt_info) != 0)
+ return -1;
+
+ /* NOTE(review): base_idx advances by num_pkts per priority
+ * but pkt_cnt (= num_queues * num_pkts) entries are written
+ * below, so the ranges overlap whenever a priority has more
+ * than one queue — confirm whether priority * pkt_cnt was
+ * intended. The base_idx = 0 init above is also dead. */
+ base_idx = priority * num_pkts;
+ for (idx = 0; idx < pkt_cnt; idx++)
+ unique_id_list[base_idx + idx] = unique_id++;
+ }
+
+ /* Send the low priority dummy pkts first. The arrival order of
+ * these pkts will be ignored. */
+ queue_array = &queues_set.queue_array[NUM_PRIORITIES - 1];
+ tm_queue = queue_array->tm_queues[0];
+ pkts_sent = send_pkts(tm_queue, 4);
+
+ /* Now send "num_pkts" for each tm_queue at the lowest priority, then
+ * "num_pkts" for each tm_queue at the second lowest priority, etc.
+ * until "num_pkts" for each tm_queue at the highest priority are sent
+ * last. */
+ for (priority = NUM_PRIORITIES - 1; 0 <= priority; priority--) {
+ queue_array = &queues_set.queue_array[priority];
+ num_queues = queue_array->num_queues;
+ for (queue_idx = 0; queue_idx < num_queues; queue_idx++) {
+ tm_queue = queue_array->tm_queues[queue_idx];
+ pkts_sent += send_pkts(tm_queue, num_pkts);
+ }
+ }
+
+ busy_wait(1000000); /* wait 1 millisecond */
+
+ /* Disable the shaper, so as to get the pkts out quicker. */
+ set_shaper(node_name, shaper_name, 0, 0);
+
+ num_rcv_pkts = receive_pkts(odp_tm_systems[0], rcv_pktin,
+ pkts_sent, 64 * 1000);
+
+ /* Check rcvd packet arrivals to make sure that pkts arrived in
+ * priority order, except for perhaps the first few lowest priority
+ * dummy pkts. */
+ pkts_in_order = pkts_rcvd_in_given_order(unique_id_list, total_pkt_cnt,
+ 0, false, false);
+ CU_ASSERT(pkts_in_order == total_pkt_cnt);
+
+ flush_leftover_pkts(odp_tm_systems[0], rcv_pktin);
+ CU_ASSERT(odp_tm_is_idle(odp_tm_systems[0]));
+ return 0;
+}
+
+/* Exercise weighted-fair queuing on the named node: program the fan-in
+ * schedulers with sched_mode/sched_weights, send 100 pkts per fanin
+ * (round-robin across the inputs) through a low-bandwidth shaper, and
+ * collect per-fanin receive-rate statistics. Only the ability to gather
+ * the stats is asserted here. Returns 0 on completion, -1 on failure. */
+static int test_sched_wfq(const char *sched_base_name,
+ const char *shaper_name,
+ const char *node_name,
+ odp_tm_sched_mode_t sched_mode,
+ uint8_t sched_weights[FANIN_RATIO])
+{
+ odp_tm_queue_t tm_queues[FANIN_RATIO], tm_queue;
+ tm_node_desc_t *node_desc, *child_desc;
+ rcv_stats_t rcv_stats[FANIN_RATIO];
+ pkt_info_t pkt_info;
+ uint32_t fanin_cnt, fanin, num_queues, pkt_cnt;
+ uint32_t pkt_len, pkts_sent, pkt_idx;
+ uint8_t pkt_class;
+ int priority, rc;
+
+ memset(tm_queues, 0, sizeof(tm_queues));
+ node_desc = find_node_desc(0, node_name);
+ if (node_desc == NULL)
+ return -1;
+
+ rc = set_sched_fanin(node_name, sched_base_name, sched_mode,
+ sched_weights);
+ if (rc != 0)
+ return -1;
+
+ /* Now determine at least one tm_queue that feeds into each fanin/
+ * child node. */
+ priority = 0;
+ fanin_cnt = MIN(node_desc->num_children, FANIN_RATIO);
+ for (fanin = 0; fanin < fanin_cnt; fanin++) {
+ child_desc = node_desc->children[fanin];
+ num_queues = find_child_queues(0, child_desc, priority,
+ &tm_queues[fanin], 1);
+ if (num_queues != 1)
+ return -1;
+ }
+
+ /* Enable the shaper to be low bandwidth. */
+ pkt_len = 1400;
+ set_shaper(node_name, shaper_name, 64 * 1000, 8 * pkt_len);
+
+ /* Make a couple of low priority dummy pkts first. */
+ init_xmt_pkts(&pkt_info);
+ if (make_pkts(4, pkt_len, &pkt_info) != 0)
+ return -1;
+
+ /* Make 100 pkts for each fanin of this node, alternating amongst
+ * the inputs. */
+ pkt_cnt = FANIN_RATIO * 100;
+ fanin = 0;
+ for (pkt_idx = 0; pkt_idx < pkt_cnt; pkt_idx++) {
+ pkt_len = 128 + 128 * fanin;
+ pkt_info.pkt_class = 1 + fanin++;
+ if (make_pkts(1, pkt_len, &pkt_info) != 0)
+ return -1;
+
+ if (FANIN_RATIO <= fanin)
+ fanin = 0;
+ }
+
+ /* Send the low priority dummy pkts first. The arrival order of
+ * these pkts will be ignored.
+ * NOTE(review): tm_queues[] is indexed by fanin (size FANIN_RATIO),
+ * yet NUM_PRIORITIES - 1 is used as the index here — this assumes
+ * NUM_PRIORITIES <= FANIN_RATIO; confirm. */
+ pkts_sent = send_pkts(tm_queues[NUM_PRIORITIES - 1], 4);
+
+ /* Now send the test pkts, alternating amongst the input queues. */
+ fanin = 0;
+ for (pkt_idx = 0; pkt_idx < pkt_cnt; pkt_idx++) {
+ tm_queue = tm_queues[fanin++];
+ pkts_sent += send_pkts(tm_queue, 1);
+ if (FANIN_RATIO <= fanin)
+ fanin = 0;
+ }
+
+ busy_wait(1000000); /* wait 1 millisecond */
+
+ /* Disable the shaper, so as to get the pkts out quicker. */
+ set_shaper(node_name, shaper_name, 0, 0);
+
+ num_rcv_pkts = receive_pkts(odp_tm_systems[0], rcv_pktin,
+ pkt_cnt + 4, 64 * 1000);
+
+ /* Check rcvd packet arrivals to make sure that pkts arrived in
+ * an order commensurate with their weights, sched mode and pkt_len. */
+ for (fanin = 0; fanin < fanin_cnt; fanin++) {
+ pkt_class = 1 + fanin;
+ CU_ASSERT(rcv_rate_stats(&rcv_stats[fanin], pkt_class) == 0);
+ }
+
+ flush_leftover_pkts(odp_tm_systems[0], rcv_pktin);
+ CU_ASSERT(odp_tm_is_idle(odp_tm_systems[0]));
+ return 0;
+}
+
+/* Attach a threshold profile (identified by name) to the given tm_queue,
+ * creating the profile on first use or updating it when a profile with
+ * that name already exists. New profiles are recorded in
+ * threshold_profiles[] for later cleanup. Returns the result of
+ * odp_tm_queue_threshold_config(). */
+static int set_queue_thresholds(odp_tm_queue_t tm_queue,
+ const char *threshold_name,
+ odp_tm_threshold_params_t *threshold_params)
+{
+ odp_tm_threshold_t profile;
+
+ /* Reuse an existing profile with this name when there is one,
+ * otherwise create it and remember the handle. */
+ profile = odp_tm_thresholds_lookup(threshold_name);
+ if (profile == ODP_TM_INVALID) {
+ profile = odp_tm_threshold_create(threshold_name,
+ threshold_params);
+ threshold_profiles[num_threshold_profiles] = profile;
+ num_threshold_profiles++;
+ } else {
+ odp_tm_thresholds_params_update(profile, threshold_params);
+ }
+
+ return odp_tm_queue_threshold_config(tm_queue, profile);
+}
+
+/* Verify that a queue threshold causes drops: configure a max-pkt
+ * threshold when max_pkts != 0, otherwise a max-byte threshold when
+ * max_bytes != 0, shape the node to a low rate, send roughly twice the
+ * traffic the threshold allows, and assert that not everything gets
+ * through. Returns 0 on completion, -1 on setup failure or if both
+ * limits are zero. */
+static int test_threshold(const char *threshold_name,
+ const char *shaper_name,
+ const char *node_name,
+ uint8_t priority,
+ uint32_t max_pkts,
+ uint32_t max_bytes)
+{
+ odp_tm_threshold_params_t threshold_params;
+ odp_tm_queue_t tm_queue;
+ pkt_info_t pkt_info;
+ uint32_t num_pkts, pkt_len, pkts_sent;
+
+ odp_tm_threshold_params_init(&threshold_params);
+ if (max_pkts != 0) {
+ /* Clamp so the test pkts fit in the global pkt arrays. */
+ max_pkts = MIN(max_pkts, MAX_PKTS / 3);
+ threshold_params.max_pkts = max_pkts;
+ threshold_params.enable_max_pkts = true;
+ num_pkts = 2 * max_pkts;
+ pkt_len = 256;
+ } else if (max_bytes != 0) {
+ max_bytes = MIN(max_bytes, MAX_PKTS * MAX_PAYLOAD / 3);
+ threshold_params.max_bytes = max_bytes;
+ threshold_params.enable_max_bytes = true;
+ num_pkts = 2 * max_bytes / MAX_PAYLOAD;
+ pkt_len = MAX_PAYLOAD;
+ } else {
+ return -1;
+ }
+
+ /* Pick a tm_queue and set the tm_queue's threshold profile and then
+ * send in twice the amount of traffic as suggested by the thresholds
+ * and make sure at least SOME pkts get dropped. */
+ tm_queue = find_tm_queue(0, node_name, priority);
+ if (set_queue_thresholds(tm_queue, threshold_name,
+ &threshold_params) != 0) {
+ LOG_ERR("set_queue_thresholds failed\n");
+ return -1;
+ }
+
+ /* Enable the shaper to be very low bandwidth. */
+ set_shaper(node_name, shaper_name, 256 * 1000, 8 * pkt_len);
+
+ init_xmt_pkts(&pkt_info);
+ pkt_info.drop_eligible = true;
+ pkt_info.pkt_class = 1;
+ if (make_pkts(num_pkts, pkt_len, &pkt_info) != 0) {
+ LOG_ERR("make_pkts failed\n");
+ return -1;
+ }
+
+ pkts_sent = send_pkts(tm_queue, num_pkts);
+
+ num_rcv_pkts = receive_pkts(odp_tm_systems[0], rcv_pktin, pkts_sent,
+ 1 * GBPS);
+
+ /* Disable the shaper, so as to get the pkts out quicker. */
+ set_shaper(node_name, shaper_name, 0, 0);
+ flush_leftover_pkts(odp_tm_systems[0], rcv_pktin);
+ CU_ASSERT(odp_tm_is_idle(odp_tm_systems[0]));
+
+ /* Fewer received than made means drops happened: pass. */
+ if (num_rcv_pkts < num_pkts)
+ return 0;
+
+ CU_ASSERT(num_rcv_pkts < pkts_sent);
+ return 0;
+}
+
+/* Return the EXPECTED_PKT_RCVD entry whose confidence and drop percentages
+ * match the given values, or NULL when no entry matches. */
+static wred_pkt_cnts_t *search_expected_pkt_rcv_tbl(odp_tm_percent_t confidence,
+ odp_tm_percent_t drop_perc)
+{
+ uint32_t num_entries, i;
+
+ num_entries = sizeof(EXPECTED_PKT_RCVD) / sizeof(wred_pkt_cnts_t);
+ for (i = 0; i < num_entries; i++) {
+ wred_pkt_cnts_t *entry = &EXPECTED_PKT_RCVD[i];
+
+ if ((entry->confidence_percent == confidence) &&
+ (entry->drop_percent == drop_perc))
+ return entry;
+ }
+
+ return NULL;
+}
+
+/* Create or update a WRED profile named wred_name and attach it to the
+ * tm_queue for the given pkt_color. drop_percent sets the drop
+ * probability; use_dual_slope selects dual-slope thresholds, otherwise a
+ * single slope up to 2 * drop_percent is used. Returns the result of
+ * odp_tm_queue_wred_config(). */
+static int set_queue_wred(odp_tm_queue_t tm_queue,
+ const char *wred_name,
+ uint8_t pkt_color,
+ odp_tm_percent_t drop_percent,
+ odp_bool_t use_byte_fullness,
+ odp_bool_t use_dual_slope)
+{
+ odp_tm_wred_params_t wred_params;
+ odp_tm_wred_t wred_profile;
+
+ odp_tm_wred_params_init(&wred_params);
+ if (use_dual_slope) {
+ wred_params.min_threshold = TM_PERCENT(20);
+ wred_params.med_threshold = TM_PERCENT(40);
+ wred_params.med_drop_prob = drop_percent;
+ wred_params.max_drop_prob = drop_percent;
+ } else {
+ wred_params.min_threshold = 0;
+ wred_params.med_threshold = TM_PERCENT(20);
+ wred_params.med_drop_prob = 0;
+ wred_params.max_drop_prob = 2 * drop_percent;
+ }
+
+ wred_params.enable_wred = true;
+ wred_params.use_byte_fullness = use_byte_fullness;
+
+ /* First see if a wred profile already exists with this name, in
+ * which case we use that profile, else create a new one. */
+ wred_profile = odp_tm_wred_lookup(wred_name);
+ if (wred_profile != ODP_TM_INVALID) {
+ odp_tm_wred_params_update(wred_profile, &wred_params);
+ } else {
+ wred_profile = odp_tm_wred_create(wred_name, &wred_params);
+ /* BUG FIX: the original indexed wred_profiles with
+ * num_wred_profiles - 1 without checking for zero, reading
+ * out of bounds when no wred profile existed yet. */
+ if ((num_wred_profiles != 0) &&
+ (wred_profiles[num_wred_profiles - 1][pkt_color] ==
+ ODP_TM_INVALID)) {
+ wred_profiles[num_wred_profiles - 1][pkt_color] =
+ wred_profile;
+ } else {
+ wred_profiles[num_wred_profiles][pkt_color] =
+ wred_profile;
+ num_wred_profiles++;
+ }
+ }
+
+ return odp_tm_queue_wred_config(tm_queue, pkt_color, wred_profile);
+}
+
+/* Verify byte-based WRED behavior: fill the queue to roughly 60% byte
+ * fullness with non-drop-eligible pkts, then send 100 small drop-eligible
+ * test pkts and check that the number accepted falls inside the
+ * EXPECTED_PKT_RCVD range for drop_percent at 99.9% confidence.
+ * Returns 0 on completion, -1 on setup failure. */
+static int test_byte_wred(const char *wred_name,
+ const char *shaper_name,
+ const char *threshold_name,
+ const char *node_name,
+ uint8_t priority,
+ uint8_t pkt_color,
+ odp_tm_percent_t drop_percent,
+ odp_bool_t use_dual_slope)
+{
+ odp_tm_threshold_params_t threshold_params;
+ wred_pkt_cnts_t *wred_pkt_cnts;
+ odp_tm_queue_t tm_queue;
+ pkt_info_t pkt_info;
+ uint32_t num_fill_pkts, num_test_pkts, pkts_sent;
+
+ /* Pick the tm_queue and set the tm_queue's wred profile to drop the
+ * given percentage of traffic, then send 100 pkts and see how many
+ * pkts are received. */
+ tm_queue = find_tm_queue(0, node_name, priority);
+ set_queue_wred(tm_queue, wred_name, pkt_color, drop_percent,
+ true, use_dual_slope);
+
+ /* Enable the shaper to be very low bandwidth. */
+ set_shaper(node_name, shaper_name, 64 * 1000, 8 * PKT_BUF_SIZE);
+
+ /* Set the threshold to be byte based and to handle 200 pkts of
+ * size PKT_BUF_SIZE. This way the byte-fullness for the wred test
+ * pkts will be around 60%. */
+ odp_tm_threshold_params_init(&threshold_params);
+ threshold_params.max_bytes = 200 * PKT_BUF_SIZE;
+ threshold_params.enable_max_bytes = true;
+ if (set_queue_thresholds(tm_queue, threshold_name,
+ &threshold_params) != 0) {
+ LOG_ERR("set_queue_thresholds failed\n");
+ return -1;
+ }
+
+ /* Make and send the first batch of pkts whose job is to set the
+ * queue byte fullness to around 60% for the subsequent test packets.
+ * These packets MUST have drop_eligible false. */
+ init_xmt_pkts(&pkt_info);
+ num_fill_pkts = 120;
+ pkt_info.pkt_color = pkt_color;
+ pkt_info.pkt_class = 0;
+ pkt_info.drop_eligible = false;
+ if (make_pkts(num_fill_pkts, PKT_BUF_SIZE, &pkt_info) != 0)
+ return -1;
+
+ send_pkts(tm_queue, num_fill_pkts);
+
+ /* Now send the real test pkts, which are all small so as to try to
+ * keep the byte fullness still close to the 60% point. These pkts
+ * MUST have drop_eligible true. */
+ num_test_pkts = 100;
+ pkt_info.pkt_class = 1;
+ pkt_info.drop_eligible = true;
+ if (make_pkts(num_test_pkts, 128, &pkt_info) != 0)
+ return -1;
+
+ pkts_sent = send_pkts(tm_queue, num_test_pkts);
+
+ /* Disable the shaper, so as to get the pkts out quicker. */
+ set_shaper(node_name, shaper_name, 0, 0);
+ num_rcv_pkts = receive_pkts(odp_tm_systems[0], rcv_pktin,
+ num_fill_pkts + pkts_sent, 64 * 1000);
+
+ /* Search the EXPECTED_PKT_RCVD table to find a matching entry */
+ wred_pkt_cnts = search_expected_pkt_rcv_tbl(TM_PERCENT(99.9),
+ drop_percent);
+ if (wred_pkt_cnts == NULL)
+ return -1;
+
+ flush_leftover_pkts(odp_tm_systems[0], rcv_pktin);
+ CU_ASSERT(odp_tm_is_idle(odp_tm_systems[0]));
+
+ /* WRED drops occur at enqueue, so pkts_sent (the accepted count) is
+ * what gets compared against the expected-survivor range. */
+ if ((wred_pkt_cnts->min_cnt <= pkts_sent) &&
+ (pkts_sent <= wred_pkt_cnts->max_cnt))
+ return 0;
+
+ CU_ASSERT((wred_pkt_cnts->min_cnt <= pkts_sent) &&
+ (pkts_sent <= wred_pkt_cnts->max_cnt));
+ return 0;
+}
+
+/* Verify pkt-count-based WRED behavior: fill the queue to roughly 60% pkt
+ * fullness with non-drop-eligible pkts, then send 100 drop-eligible test
+ * pkts and check that the number accepted falls inside the
+ * EXPECTED_PKT_RCVD range for drop_percent at 99.9% confidence.
+ * Returns 0 on completion, -1 on setup failure. */
+static int test_pkt_wred(const char *wred_name,
+ const char *shaper_name,
+ const char *threshold_name,
+ const char *node_name,
+ uint8_t priority,
+ uint8_t pkt_color,
+ odp_tm_percent_t drop_percent,
+ odp_bool_t use_dual_slope)
+{
+ odp_tm_threshold_params_t threshold_params;
+ wred_pkt_cnts_t *wred_pkt_cnts;
+ odp_tm_queue_t tm_queue;
+ pkt_info_t pkt_info;
+ uint32_t num_fill_pkts, num_test_pkts, pkts_sent;
+
+ /* Pick the tm_queue and set the tm_queue's wred profile to drop the
+ * given percentage of traffic, then send 100 pkts and see how many
+ * pkts are received. */
+ tm_queue = find_tm_queue(0, node_name, priority);
+ set_queue_wred(tm_queue, wred_name, pkt_color, drop_percent,
+ false, use_dual_slope);
+
+ /* Enable the shaper to be very low bandwidth. */
+ set_shaper(node_name, shaper_name, 64 * 1000, 1000);
+
+ /* Set the threshold to be pkt based and to handle 1000 pkts. This
+ * way the pkt-fullness for the wred test pkts will be around 60%. */
+ odp_tm_threshold_params_init(&threshold_params);
+ threshold_params.max_pkts = 1000;
+ threshold_params.enable_max_pkts = true;
+ if (set_queue_thresholds(tm_queue, threshold_name,
+ &threshold_params) != 0) {
+ LOG_ERR("set_queue_thresholds failed\n");
+ return -1;
+ }
+
+ /* Make and send the first batch of pkts whose job is to set the
+ * queue pkt fullness to around 60% for the subsequent test packets.
+ * These packets MUST have drop_eligible false. */
+ init_xmt_pkts(&pkt_info);
+ num_fill_pkts = 600;
+ pkt_info.pkt_color = pkt_color;
+ pkt_info.pkt_class = 0;
+ pkt_info.drop_eligible = false;
+ if (make_pkts(num_fill_pkts, 80, &pkt_info) != 0)
+ return -1;
+
+ send_pkts(tm_queue, num_fill_pkts);
+
+ /* Now send the real test pkts. These pkts MUST have drop_eligible
+ * true. */
+ num_test_pkts = 100;
+ pkt_info.pkt_class = 1;
+ pkt_info.drop_eligible = true;
+ if (make_pkts(num_test_pkts, 80, &pkt_info) != 0)
+ return -1;
+
+ pkts_sent = send_pkts(tm_queue, num_test_pkts);
+
+ /* Disable the shaper, so as to get the pkts out quicker. */
+ set_shaper(node_name, shaper_name, 0, 0);
+ num_rcv_pkts = receive_pkts(odp_tm_systems[0], rcv_pktin,
+ num_fill_pkts + pkts_sent, 64 * 1000);
+
+ /* Search the EXPECTED_PKT_RCVD table to find a matching entry */
+ wred_pkt_cnts = search_expected_pkt_rcv_tbl(TM_PERCENT(99.9),
+ drop_percent);
+ if (wred_pkt_cnts == NULL)
+ return -1;
+
+ flush_leftover_pkts(odp_tm_systems[0], rcv_pktin);
+ CU_ASSERT(odp_tm_is_idle(odp_tm_systems[0]));
+
+ /* WRED drops occur at enqueue, so pkts_sent (the accepted count) is
+ * what gets compared against the expected-survivor range. */
+ if ((wred_pkt_cnts->min_cnt <= pkts_sent) &&
+ (pkts_sent <= wred_pkt_cnts->max_cnt))
+ return 0;
+
+ CU_ASSERT((wred_pkt_cnts->min_cnt <= pkts_sent) &&
+ (pkts_sent <= wred_pkt_cnts->max_cnt));
+ return 0;
+}
+
+/* Exercise the three TM query APIs (odp_tm_queue_query,
+ * odp_tm_priority_query, odp_tm_total_query) while num_pkts pkts are
+ * backed up behind a 64 Kbps shaper, checking that the reported pkt and
+ * byte counts are valid and plausible. Returns 0 on completion, -1 on
+ * setup failure. */
+static int test_query_functions(const char *shaper_name,
+ const char *node_name,
+ uint8_t priority,
+ uint32_t num_pkts)
+{
+ odp_tm_query_info_t query_info;
+ odp_tm_queue_t tm_queue;
+ pkt_info_t pkt_info;
+ uint64_t commit_bps, expected_pkt_cnt, expected_byte_cnt;
+ int rc;
+
+ /* Pick a tm_queue and set the egress node's shaper BW to be 64K bps
+ * with a small burst tolerance. Then send the traffic. */
+ tm_queue = find_tm_queue(0, node_name, priority);
+ commit_bps = 64 * 1000;
+ if (set_shaper(node_name, shaper_name, commit_bps, 1000) != 0)
+ return -1;
+
+ init_xmt_pkts(&pkt_info);
+ pkt_info.pkt_class = 1;
+ if (make_pkts(num_pkts, PKT_BUF_SIZE, &pkt_info) != 0)
+ return -1;
+
+ send_pkts(tm_queue, num_pkts);
+
+ /* Assume all but 2 of the pkts are still in the queue.
+ * NOTE(review): the asserts below use strict '<' against these
+ * values, so the queries must report MORE than num_pkts - 2 pkts —
+ * confirm that this strictness (rather than >=) is intended. */
+ expected_pkt_cnt = num_pkts - 2;
+ expected_byte_cnt = expected_pkt_cnt * PKT_BUF_SIZE;
+
+ rc = odp_tm_queue_query(tm_queue,
+ ODP_TM_QUERY_PKT_CNT | ODP_TM_QUERY_BYTE_CNT,
+ &query_info);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(query_info.total_pkt_cnt_valid);
+ CU_ASSERT(expected_pkt_cnt < query_info.total_pkt_cnt);
+ CU_ASSERT(query_info.total_byte_cnt_valid);
+ CU_ASSERT(expected_byte_cnt < query_info.total_byte_cnt);
+
+ rc = odp_tm_priority_query(odp_tm_systems[0], priority,
+ ODP_TM_QUERY_PKT_CNT | ODP_TM_QUERY_BYTE_CNT,
+ &query_info);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(query_info.total_pkt_cnt_valid);
+ CU_ASSERT(expected_pkt_cnt < query_info.total_pkt_cnt);
+ CU_ASSERT(query_info.total_byte_cnt_valid);
+ CU_ASSERT(expected_byte_cnt < query_info.total_byte_cnt);
+
+ rc = odp_tm_total_query(odp_tm_systems[0],
+ ODP_TM_QUERY_PKT_CNT | ODP_TM_QUERY_BYTE_CNT,
+ &query_info);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(query_info.total_pkt_cnt_valid);
+ CU_ASSERT(expected_pkt_cnt < query_info.total_pkt_cnt);
+ CU_ASSERT(query_info.total_byte_cnt_valid);
+ CU_ASSERT(expected_byte_cnt < query_info.total_byte_cnt);
+
+ /* Disable the shaper, so as to get the pkts out quicker. */
+ set_shaper(node_name, shaper_name, 0, 0);
+ num_rcv_pkts = receive_pkts(odp_tm_systems[0], rcv_pktin, num_pkts,
+ commit_bps);
+
+ flush_leftover_pkts(odp_tm_systems[0], rcv_pktin);
+ CU_ASSERT(odp_tm_is_idle(odp_tm_systems[0]));
+ return 0;
+}
+
+static int check_vlan_marking_pkts(void)
+{
+ odp_packet_t rcv_pkt;
+ uint32_t rcv_pkt_idx, err_cnt;
+ uint16_t tci;
+ uint8_t pkt_class, dei, expected_dei;
+
+ /* Check rcvd packets to make sure that pkt_class 1 pkts continue to
+ * not have a VLAN header, pkt class 2 pkts have a VLAN header with the
+ * drop precedence not set and pkt class 3 pkts have a VLAN header with
+ * the DEI bit set. */
+ err_cnt = 0;
+ for (rcv_pkt_idx = 0; rcv_pkt_idx < num_rcv_pkts; rcv_pkt_idx++) {
+ rcv_pkt = rcv_pkts[rcv_pkt_idx];
+ pkt_class = rcv_pkt_descs[rcv_pkt_idx].pkt_class;
+
+ switch (pkt_class) {
+ case 1:
+ /* Make sure no VLAN header. */
+ if (odp_packet_has_vlan(rcv_pkt)) {
+ err_cnt++;
+ LOG_ERR("VLAN incorrectly added\n");
+ CU_ASSERT(odp_packet_has_vlan(rcv_pkt));
+ }
+ break;
+
+ case 2:
+ case 3:
+ /* Make sure it does have a VLAN header */
+ if (!odp_packet_has_vlan(rcv_pkt)) {
+ err_cnt++;
+ LOG_ERR("VLAN header missing\n");
+ CU_ASSERT(!odp_packet_has_vlan(rcv_pkt));
+ break;
+ }
+
+ /* Make sure DEI bit is 0 if pkt_class == 2, and 1 if
+ * pkt_class == 3. */
+ if (get_vlan_tci(rcv_pkt, &tci) != 0) {
+ err_cnt++;
+ LOG_ERR("VLAN header missing\n");
+ CU_ASSERT(!odp_packet_has_vlan(rcv_pkt));
+ break;
+ }
+
+ dei = (tci >> ODPH_VLANHDR_DEI_SHIFT) & 1;
+ expected_dei = (pkt_class == 2) ? 0 : 1;
+ if (dei != expected_dei) {
+ LOG_ERR("expected_dei=%u rcvd dei=%u\n",
+ expected_dei, dei);
+ err_cnt++;
+ CU_ASSERT(dei == expected_dei);
+ }
+ break;
+
+ default:
+ /* Log error but otherwise ignore, since it is
+ * probably a stray pkt from a previous test. */
+ LOG_ERR("Pkt rcvd with invalid pkt class\n");
+ }
+ }
+
+ return (err_cnt == 0) ? 0 : -1;
+}
+
+static int test_vlan_marking(const char *node_name,
+ odp_packet_color_t pkt_color)
+{
+ odp_packet_color_t color;
+ odp_tm_queue_t tm_queue;
+ pkt_info_t pkt_info;
+ odp_tm_t odp_tm;
+ uint32_t pkt_cnt, num_pkts, pkt_len, pkts_sent;
+ int rc;
+
+ /* First disable vlan marking for all colors. These "disable" calls
+ * should NEVER fail. */
+ odp_tm = odp_tm_systems[0];
+ for (color = 0; color < ODP_NUM_PKT_COLORS; color++) {
+ rc = odp_tm_vlan_marking(odp_tm, color, false);
+ if (rc != 0) {
+ LOG_ERR("disabling odp_tm_vlan_marking() failed\n");
+ return -1;
+ }
+ }
+
+ /* Next enable vlan marking for just the given color parameter */
+ rc = odp_tm_vlan_marking(odp_tm, pkt_color, true);
+
+ tm_queue = find_tm_queue(0, node_name, 0);
+ if (tm_queue == ODP_TM_INVALID) {
+ LOG_ERR("No tm_queue found for node_name='%s'\n", node_name);
+ return -1;
+ }
+
+ /* Next make 2*X pkts of each color, half with vlan headers -
+ * half without. */
+ init_xmt_pkts(&pkt_info);
+
+ pkt_cnt = 5;
+ num_pkts = 0;
+ pkt_len = 600;
+ pkt_info.pkt_class = 1;
+ for (color = 0; color < ODP_NUM_PKT_COLORS; color++) {
+ num_pkts += pkt_cnt;
+ pkt_info.pkt_color = color;
+ if (make_pkts(pkt_cnt, pkt_len, &pkt_info) != 0)
+ return -1;
+ }
+
+ for (color = 0; color < ODP_NUM_PKT_COLORS; color++) {
+ num_pkts += pkt_cnt;
+ pkt_info.pkt_color = color;
+ pkt_info.pkt_class = (color == pkt_color) ? 3 : 2;
+ pkt_info.use_vlan = true;
+ pkt_info.vlan_tci = VLAN_NO_DEI;
+ if (make_pkts(pkt_cnt, pkt_len, &pkt_info) != 0)
+ return -1;
+ }
+
+ pkts_sent = send_pkts(tm_queue, num_pkts);
+ num_rcv_pkts = receive_pkts(odp_tm_systems[0], rcv_pktin, pkts_sent,
+ 1000 * 1000);
+ if (num_rcv_pkts == 0) {
+ LOG_ERR("No pkts received\n");
+ rc = -1;
+ } else if (num_rcv_pkts != pkts_sent) {
+ LOG_ERR("pkts_sent=%u but num_rcv_pkts=%u\n",
+ pkts_sent, num_rcv_pkts);
+ dump_rcvd_pkts(0, num_rcv_pkts - 1);
+ CU_ASSERT(num_rcv_pkts == pkts_sent);
+ } else {
+ rc = check_vlan_marking_pkts();
+ }
+
+ flush_leftover_pkts(odp_tm_systems[0], rcv_pktin);
+ CU_ASSERT(odp_tm_is_idle(odp_tm_systems[0]));
+ return rc;
+}
+
/**
 * Verify the IP TOS/TC byte of each received pkt after an IP-marking run.
 *
 * Walks the global rcv_pkts[]/rcv_pkt_descs[] arrays. Pkt class 2 means
 * marking was NOT enabled for that pkt's color, so the TOS byte must be
 * unchanged; pkt class 3 means marking WAS enabled, so the TOS byte must
 * equal the expected value computed below.
 *
 * @param use_ipv6       Expect IPv6 (vs IPv4) pkts.
 * @param use_tcp        Expect TCP (vs UDP) pkts.
 * @param test_ecn       ECN marking was enabled (meaningful for TCP only).
 * @param test_drop_prec Drop-precedence (DSCP) marking was enabled.
 * @param unmarked_tos   TOS byte the pkts were originally sent with.
 * @param new_dscp       DSCP value marking should write (before masking).
 * @param dscp_mask      DSCP bits marking is allowed to change.
 * @return 0 on success (soft TOS mismatches only trip CU_ASSERT), -1 on
 *         a pkt of the wrong IP version/L4 protocol, a failed TOS read,
 *         or a changed TOS on a class-2 pkt.
 */
static int check_tos_marking_pkts(odp_bool_t use_ipv6,
				  odp_bool_t use_tcp,
				  odp_bool_t test_ecn,
				  odp_bool_t test_drop_prec,
				  uint8_t unmarked_tos,
				  uint8_t new_dscp,
				  uint8_t dscp_mask)
{
	odp_packet_t rcv_pkt;
	uint32_t rcv_pkt_idx;
	uint8_t unmarked_ecn, unmarked_dscp, shifted_dscp, pkt_class;
	uint8_t tos, expected_tos;
	int rc;

	/* Turn off test_ecn for UDP pkts, since ECN marking should
	 * only happen for TCP pkts. */
	if (!use_tcp)
		test_ecn = false;

	/* The expected_tos value is only the expected TOS/TC field for pkts
	 * that have been enabled for modification, as indicated by the
	 * pkt_class associated with this pkt. */
	unmarked_ecn = (unmarked_tos & ODPH_IP_TOS_ECN_MASK)
				>> ODPH_IP_TOS_ECN_SHIFT;
	unmarked_dscp = (unmarked_tos & ODPH_IP_TOS_DSCP_MASK)
				>> ODPH_IP_TOS_DSCP_SHIFT;
	/* Only the bits selected by dscp_mask may change; the rest of the
	 * DSCP field keeps its original (unmarked) value. */
	new_dscp = (new_dscp & dscp_mask) | (unmarked_dscp & ~dscp_mask);
	shifted_dscp = new_dscp << ODPH_IP_TOS_DSCP_SHIFT;

	/* Combine the (possibly remapped) DSCP field with the ECN field:
	 * ECN marking sets Congestion Experienced, otherwise the original
	 * ECN bits are preserved. */
	if (test_ecn && test_drop_prec)
		expected_tos = shifted_dscp | ODPH_IP_ECN_CE;
	else if (test_ecn)
		expected_tos = unmarked_tos | ODPH_IP_ECN_CE;
	else if (test_drop_prec)
		expected_tos = shifted_dscp | unmarked_ecn;
	else
		expected_tos = unmarked_tos;

	for (rcv_pkt_idx = 0; rcv_pkt_idx < num_rcv_pkts; rcv_pkt_idx++) {
		rcv_pkt = rcv_pkts[rcv_pkt_idx];
		pkt_class = rcv_pkt_descs[rcv_pkt_idx].pkt_class;

		/* Check that the pkts match the use_ipv6 setting */
		if (use_ipv6)
			rc = odp_packet_has_ipv6(rcv_pkt);
		else
			rc = odp_packet_has_ipv4(rcv_pkt);

		if (rc != 1) {
			if (use_ipv6)
				LOG_ERR("Expected IPv6 pkt but got IPv4");
			else
				LOG_ERR("Expected IPv4 pkt but got IPv6");

			return -1;
		}

		/* Check that the pkts match the use_tcp setting */
		if (use_tcp)
			rc = odp_packet_has_tcp(rcv_pkt);
		else
			rc = odp_packet_has_udp(rcv_pkt);

		if (rc != 1) {
			if (use_tcp)
				LOG_ERR("Expected TCP pkt but got UDP");
			else
				LOG_ERR("Expected UDP pkt but got TCP");

			return -1;
		}

		/* Now get the tos field to see if it was changed */
		rc = get_ip_tos(rcv_pkt, &tos);
		if (rc != 0) {
			LOG_ERR("get_ip_tos failed\n");
			return -1;
		}

		switch (pkt_class) {
		case 2:
			/* Tos field must be unchanged. */
			if (unmarked_tos != tos) {
				LOG_ERR("Tos was changed from 0x%X to 0x%X\n",
					unmarked_tos, tos);
				return -1;
			}
			break;

		case 3:
			/* Tos field must be changed. A mismatch is reported
			 * via CU_ASSERT but does not abort the scan. */
			if (tos != expected_tos) {
				LOG_ERR("tos=0x%X instead of expected 0x%X\n",
					tos, expected_tos);
				CU_ASSERT(tos == expected_tos);
			}
			break;

		default:
			/* Log error but otherwise ignore, since it is
			 * probably a stray pkt from a previous test. */
			LOG_ERR("Pkt rcvd with invalid pkt class=%u\n",
				pkt_class);
		}
	}

	return 0;
}
+
/**
 * Test IP TOS marking (ECN and/or drop precedence) for one pkt color.
 *
 * Disables ECN and drop-precedence marking for all colors, enables the
 * requested kinds only for pkt_color, sends a batch of pkts of every
 * color, then checks the received TOS bytes via check_tos_marking_pkts().
 *
 * @param node_name      TM node whose priority-0 queue carries the traffic.
 * @param pkt_color      The single color for which marking is enabled.
 * @param use_ipv6       Send IPv6 (vs IPv4) pkts.
 * @param use_tcp        Send TCP (vs UDP) pkts.
 * @param test_ecn       Enable ECN marking.
 * @param test_drop_prec Enable drop-precedence (DSCP) marking.
 * @param new_dscp       DSCP value expected after marking (before masking).
 * @param dscp_mask      DSCP bits marking is allowed to change.
 * @return 0 on success, -1 on any failure.
 */
static int test_ip_marking(const char *node_name,
			   odp_packet_color_t pkt_color,
			   odp_bool_t use_ipv6,
			   odp_bool_t use_tcp,
			   odp_bool_t test_ecn,
			   odp_bool_t test_drop_prec,
			   uint8_t new_dscp,
			   uint8_t dscp_mask)
{
	odp_packet_color_t color;
	odp_tm_queue_t tm_queue;
	pkt_info_t pkt_info;
	odp_tm_t odp_tm;
	uint32_t pkt_cnt, num_pkts, pkt_len, pkts_sent;
	int rc, ret_code;

	/* First disable IP TOS marking for all colors. These "disable" calls
	 * should NEVER fail. */
	odp_tm = odp_tm_systems[0];
	for (color = 0; color < ODP_NUM_PKT_COLORS; color++) {
		rc = odp_tm_ecn_marking(odp_tm, color, false);
		if (rc != 0) {
			LOG_ERR("disabling odp_tm_ecn_marking() failed\n");
			return -1;
		}

		rc = odp_tm_drop_prec_marking(odp_tm, color, false);
		if (rc != 0) {
			LOG_ERR("disabling odp_tm_drop_prec_marking failed\n");
			return -1;
		}
	}

	/* Next enable IP TOS marking for just the given color parameter.
	 * Nothing left to test if neither marking kind is requested. */
	if ((!test_ecn) && (!test_drop_prec))
		return 0;

	if (test_ecn) {
		rc = odp_tm_ecn_marking(odp_tm, pkt_color, true);
		if (rc != 0) {
			LOG_ERR("odp_tm_ecn_marking() call failed\n");
			return -1;
		}
	}

	if (test_drop_prec) {
		rc = odp_tm_drop_prec_marking(odp_tm, pkt_color, true);
		if (rc != 0) {
			LOG_ERR("odp_tm_drop_prec_marking() call failed\n");
			return -1;
		}
	}

	tm_queue = find_tm_queue(0, node_name, 0);
	if (tm_queue == ODP_TM_INVALID) {
		LOG_ERR("No tm_queue found for node_name='%s'\n", node_name);
		return -1;
	}

	init_xmt_pkts(&pkt_info);
	pkt_info.use_ipv6 = use_ipv6;
	pkt_info.use_tcp = use_tcp;
	pkt_info.ip_tos = DEFAULT_TOS;

	pkt_cnt = 5;
	num_pkts = 0;
	pkt_len = 1340;
	for (color = 0; color < ODP_NUM_PKT_COLORS; color++) {
		num_pkts += pkt_cnt;
		pkt_info.pkt_color = color;
		/* Class 3 = "TOS must be marked", class 2 = "TOS must be
		 * untouched". ECN marking only applies to TCP pkts, so UDP
		 * ECN-only runs expect no change for any color. */
		if (test_drop_prec || (test_ecn && use_tcp))
			pkt_info.pkt_class = (color == pkt_color) ? 3 : 2;
		else
			pkt_info.pkt_class = 2;

		if (make_pkts(pkt_cnt, pkt_len, &pkt_info) != 0) {
			LOG_ERR("make_pkts failed\n");
			return -1;
		}
	}

	pkts_sent = send_pkts(tm_queue, num_pkts);
	num_rcv_pkts = receive_pkts(odp_tm_systems[0], rcv_pktin, pkts_sent,
				    1000 * 1000);
	/* Default to failure; every branch below overwrites this. */
	ret_code = -1;

	if (num_rcv_pkts == 0) {
		LOG_ERR("No pkts received\n");
		CU_ASSERT(num_rcv_pkts != 0);
		ret_code = -1;
	} else if (num_rcv_pkts != pkts_sent) {
		LOG_ERR("pkts_sent=%u but num_rcv_pkts=%u\n",
			pkts_sent, num_rcv_pkts);
		dump_rcvd_pkts(0, num_rcv_pkts - 1);
		CU_ASSERT(num_rcv_pkts == pkts_sent);
		ret_code = -1;
	} else {
		rc = check_tos_marking_pkts(use_ipv6, use_tcp, test_ecn,
					    test_drop_prec, DEFAULT_TOS,
					    new_dscp, dscp_mask);
		CU_ASSERT(rc == 0);
		ret_code = (rc == 0) ? 0 : -1;
	}

	flush_leftover_pkts(odp_tm_systems[0], rcv_pktin);
	CU_ASSERT(odp_tm_is_idle(odp_tm_systems[0]));
	return ret_code;
}
+
+static int test_protocol_marking(const char *node_name,
+ odp_packet_color_t pkt_color,
+ odp_bool_t test_ecn,
+ odp_bool_t test_drop_prec,
+ uint8_t new_dscp,
+ uint8_t dscp_mask)
+{
+ uint32_t errs = 0;
+ int rc;
+
+ /* Now call test_ip_marking once for all combinations of IPv4 or IPv6
+ * pkts AND for UDP or TCP. */
+ rc = test_ip_marking(node_name, pkt_color, USE_IPV4, USE_UDP,
+ test_ecn, test_drop_prec, new_dscp, dscp_mask);
+ CU_ASSERT(rc == 0);
+ if (rc != 0) {
+ LOG_ERR("test_ip_marking failed using IPV4/UDP pkts color=%u "
+ "test_ecn=%u test_drop_prec=%u\n",
+ pkt_color, test_ecn, test_drop_prec);
+ errs++;
+ }
+
+ rc = test_ip_marking(node_name, pkt_color, USE_IPV6, USE_UDP,
+ test_ecn, test_drop_prec, new_dscp, dscp_mask);
+ CU_ASSERT(rc == 0);
+ if (rc != 0) {
+ LOG_ERR("test_ip_marking failed using IPV6/UDP pkts color=%u "
+ "test_ecn=%u test_drop_prec=%u\n",
+ pkt_color, test_ecn, test_drop_prec);
+ errs++;
+ }
+
+ rc = test_ip_marking(node_name, pkt_color, USE_IPV4, USE_TCP,
+ test_ecn, test_drop_prec, new_dscp, dscp_mask);
+ CU_ASSERT(rc == 0);
+ if (rc != 0) {
+ LOG_ERR("test_ip_marking failed using IPV4/TCP pkts color=%u "
+ "test_ecn=%u test_drop_prec=%u\n",
+ pkt_color, test_ecn, test_drop_prec);
+ errs++;
+ }
+
+ rc = test_ip_marking(node_name, pkt_color, USE_IPV6, USE_TCP,
+ test_ecn, test_drop_prec, new_dscp, dscp_mask);
+ CU_ASSERT(rc == 0);
+ if (rc != 0) {
+ LOG_ERR("test_ip_marking failed using IPV6/TCP pkts color=%u "
+ "test_ecn=%u test_drop_prec=%u\n",
+ pkt_color, test_ecn, test_drop_prec);
+ errs++;
+ }
+
+ return (errs == 0) ? 0 : -1;
+}
+
+static int ip_marking_tests(const char *node_name,
+ odp_bool_t test_ecn,
+ odp_bool_t test_drop_prec)
+{
+ odp_packet_color_t color;
+ uint32_t errs = 0;
+ uint8_t new_dscp, dscp_mask;
+ int rc;
+
+ dscp_mask = DROP_PRECEDENCE_MASK;
+ for (color = 0; color < ODP_NUM_PKT_COLORS; color++) {
+ if (tm_capabilities.marking_colors_supported[color]) {
+ if (color == PKT_YELLOW)
+ new_dscp = MEDIUM_DROP_PRECEDENCE;
+ else if (color == PKT_RED)
+ new_dscp = HIGH_DROP_PRECEDENCE;
+ else
+ new_dscp = LOW_DROP_PRECEDENCE;
+
+ rc = test_protocol_marking(node_name, color, test_ecn,
+ test_drop_prec, new_dscp,
+ dscp_mask);
+ CU_ASSERT(rc == 0);
+ if (rc != 0)
+ errs++;
+ }
+ }
+
+ return (errs == 0) ? 0 : -1;
+}
+
+static int walk_tree_backwards(odp_tm_node_t tm_node)
+{
+ odp_tm_node_fanin_info_t fanin_info;
+ odp_tm_node_info_t node_info;
+ odp_tm_queue_t first_tm_queue;
+ odp_tm_node_t first_tm_node;
+ uint32_t tm_queue_fanin, tm_node_fanin;
+ int rc;
+
+ /* Start from the given tm_node and try to go backwards until a valid
+ * and active tm_queue is reached. */
+ rc = odp_tm_node_info(tm_node, &node_info);
+ if (rc != 0) {
+ LOG_ERR("odp_tm_node_info failed for tm_node=0x%" PRIX64 "\n",
+ tm_node);
+ return rc;
+ }
+
+ if ((node_info.tm_queue_fanin == 0) &&
+ (node_info.tm_node_fanin == 0)) {
+ LOG_ERR("odp_tm_node_info showed no fanin for this node\n");
+ return -1;
+ }
+
+ fanin_info.tm_queue = ODP_TM_INVALID;
+ fanin_info.tm_node = ODP_TM_INVALID;
+ fanin_info.is_last = false;
+
+ /* TBD* Loop over the entire fanin list verifying the fanin counts.
+ * Also remember the first tm_queue and tm_node seen. */
+ tm_queue_fanin = 0;
+ tm_node_fanin = 0;
+ first_tm_queue = ODP_TM_INVALID;
+ first_tm_node = ODP_TM_INVALID;
+
+ while (!fanin_info.is_last) {
+ rc = odp_tm_node_fanin_info(tm_node, &fanin_info);
+ if (rc != 0)
+ return rc;
+
+ if ((fanin_info.tm_queue != ODP_TM_INVALID) &&
+ (fanin_info.tm_node != ODP_TM_INVALID)) {
+ LOG_ERR("Both tm_queue and tm_node are set\n");
+ return -1;
+ } else if (fanin_info.tm_queue != ODP_TM_INVALID) {
+ tm_queue_fanin++;
+ if (first_tm_queue == ODP_TM_INVALID)
+ first_tm_queue = fanin_info.tm_queue;
+ } else if (fanin_info.tm_node != ODP_TM_INVALID) {
+ tm_node_fanin++;
+ if (first_tm_node == ODP_TM_INVALID)
+ first_tm_node = fanin_info.tm_node;
+ } else {
+ LOG_ERR("both tm_queue and tm_node are INVALID\n");
+ return -1;
+ }
+ }
+
+ if (tm_queue_fanin != node_info.tm_queue_fanin)
+ LOG_ERR("tm_queue_fanin count error\n");
+ else if (tm_node_fanin != node_info.tm_node_fanin)
+ LOG_ERR("tm_node_fanin count error\n");
+
+ /* If we have found a tm_queue then we are successfully done. */
+ if (first_tm_queue != ODP_TM_INVALID)
+ return 0;
+
+ /* Now recurse up a level */
+ return walk_tree_backwards(first_tm_node);
+}
+
+static int test_fanin_info(const char *node_name)
+{
+ tm_node_desc_t *node_desc;
+ odp_tm_node_t tm_node;
+
+ node_desc = find_node_desc(0, node_name);
+ if (node_desc == NULL) {
+ LOG_ERR("node_name %s not found\n", node_name);
+ return -1;
+ }
+
+ tm_node = node_desc->node;
+ if (tm_node == ODP_TM_INVALID) {
+ LOG_ERR("tm_node is ODP_TM_INVALID\n");
+ return -1;
+ }
+
+ return walk_tree_backwards(node_desc->node);
+}
+
/* Verify the reported TM capabilities are sane. */
void traffic_mngr_test_capabilities(void)
{
	int rc = test_overall_capabilities();

	CU_ASSERT(rc == 0);
}
+
/* Create the first/primary TM system; later tests depend on it, so a
 * failure here is fatal to the suite. */
void traffic_mngr_test_tm_create(void)
{
	int rc = create_tm_system();

	CU_ASSERT_FATAL(rc == 0);
	dump_tm_tree(0);
}
+
+void traffic_mngr_test_shaper(void)
+{
+ CU_ASSERT(test_shaper_bw("bw1", "node_1_1_1", 0, 1 * MBPS) == 0);
+ CU_ASSERT(test_shaper_bw("bw4", "node_1_1_1", 1, 4 * MBPS) == 0);
+ CU_ASSERT(test_shaper_bw("bw10", "node_1_1_1", 2, 10 * MBPS) == 0);
+ CU_ASSERT(test_shaper_bw("bw40", "node_1_1_1", 3, 40 * MBPS) == 0);
+ CU_ASSERT(test_shaper_bw("bw100", "node_1_1_2", 0, 100 * MBPS) == 0);
+}
+
/* Scheduler tests. Only the queue-priority test currently runs; the
 * unconditional return below deliberately disables the remaining tests
 * (node priority and the WFQ/WRR variants) until they are production
 * ready — do not remove it without re-validating them. */
void traffic_mngr_test_scheduler(void)
{
	CU_ASSERT(test_sched_queue_priority("que_prio", "node_1_1_3", 10) == 0);
	return;

	/* The following tests are not quite ready for production use. */
	CU_ASSERT(test_sched_node_priority("node_prio", "node_1_3", 4) == 0);

	CU_ASSERT(test_sched_wfq("sched_rr", "shaper_rr", "node_1_3",
				 ODP_TM_FRAME_BASED_WEIGHTS,
				 EQUAL_WEIGHTS) == 0);
	CU_ASSERT(test_sched_wfq("sched_wrr", "shaper_wrr", "node_1_3",
				 ODP_TM_FRAME_BASED_WEIGHTS,
				 INCREASING_WEIGHTS) == 0);
	CU_ASSERT(test_sched_wfq("sched_wfq", "shaper_wfq", "node_1_3",
				 ODP_TM_BYTE_BASED_WEIGHTS,
				 INCREASING_WEIGHTS) == 0);
}
+
/* Threshold tests: thresh_A caps max pkts (16), thresh_B caps max
 * bytes (6400). */
void traffic_mngr_test_thresholds(void)
{
	int rc;

	rc = test_threshold("thresh_A", "shaper_A", "node_1_2_1", 0, 16, 0);
	CU_ASSERT(rc == 0);

	rc = test_threshold("thresh_B", "shaper_B", "node_1_2_1", 1, 0, 6400);
	CU_ASSERT(rc == 0);
}
+
/* Byte-mode WRED tests at increasing drop percentages per color.
 * Skipped entirely when the platform reports no tm_queue WRED support. */
void traffic_mngr_test_byte_wred(void)
{
	if (!tm_capabilities.tm_queue_wred_supported) {
		LOG_DBG("\nwas not run because tm_capabilities indicates"
			" no WRED support\n");
		return;
	}

	/* Dual-slope runs: 30% green, 50% yellow, 70% red. */
	CU_ASSERT(test_byte_wred("byte_wred_30G", "byte_bw_30G",
				 "byte_thresh_30G", "node_1_3_1", 1,
				 ODP_PACKET_GREEN, TM_PERCENT(30), true) == 0);
	CU_ASSERT(test_byte_wred("byte_wred_50Y", "byte_bw_50Y",
				 "byte_thresh_50Y", "node_1_3_1", 2,
				 ODP_PACKET_YELLOW, TM_PERCENT(50), true) == 0);
	CU_ASSERT(test_byte_wred("byte_wred_70R", "byte_bw_70R",
				 "byte_thresh_70R", "node_1_3_1", 3,
				 ODP_PACKET_RED, TM_PERCENT(70), true) == 0);

	/* Single-slope (dual_slope=false) run. NOTE(review): the profile
	 * names say "40G" but the drop percent passed is 30 — confirm
	 * whether 30 is intentional here or a copy/paste slip. */
	CU_ASSERT(test_byte_wred("byte_wred_40G", "byte_bw_40G",
				 "byte_thresh_40G", "node_1_3_1", 1,
				 ODP_PACKET_GREEN, TM_PERCENT(30), false) == 0);
}
+
+void traffic_mngr_test_pkt_wred(void)
+{
+ int rc;
+
+ if (!tm_capabilities.tm_queue_wred_supported) {
+ LOG_DBG("\ntest_pkt_wred was not run because tm_capabilities "
+ "indicates no WRED support\n");
+ return;
+ }
+
+ CU_ASSERT(test_pkt_wred("pkt_wred_40G", "pkt_bw_40G",
+ "pkt_thresh_40G", "node_1_3_2", 1,
+ ODP_PACKET_GREEN, TM_PERCENT(30), false) == 0);
+
+ if (!tm_capabilities.tm_queue_dual_slope_supported) {
+ LOG_DBG("since tm_capabilities indicates no dual slope "
+ "WRED support these tests are skipped.\n");
+ return;
+ }
+
+ rc = test_pkt_wred("pkt_wred_30G", "pkt_bw_30G",
+ "pkt_thresh_30G", "node_1_3_2", 1,
+ ODP_PACKET_GREEN, TM_PERCENT(30), true);
+ CU_ASSERT(rc == 0);
+
+ CU_ASSERT(test_pkt_wred("pkt_wred_50Y", "pkt_bw_50Y",
+ "pkt_thresh_50Y", "node_1_3_2", 2,
+ ODP_PACKET_YELLOW, TM_PERCENT(50), true) == 0);
+ CU_ASSERT(test_pkt_wred("pkt_wred_70R", "pkt_bw_70R",
+ "pkt_thresh_70R", "node_1_3_2", 3,
+ ODP_PACKET_RED, TM_PERCENT(70), true) == 0);
+}
+
/* Run the TM query-API tests with 10 pkts at priority 3. */
void traffic_mngr_test_query(void)
{
	int rc = test_query_functions("query_shaper", "node_1_3_3", 3, 10);

	CU_ASSERT(rc == 0);
}
+
+void traffic_mngr_test_marking(void)
+{
+ odp_packet_color_t color;
+ odp_bool_t test_ecn, test_drop_prec;
+ int rc;
+
+ if (tm_capabilities.vlan_marking_supported) {
+ for (color = 0; color < ODP_NUM_PKT_COLORS; color++) {
+ rc = test_vlan_marking("node_1_3_1", color);
+ CU_ASSERT(rc == 0);
+ }
+ } else {
+ LOG_DBG("\ntest_vlan_marking was not run because "
+ "tm_capabilities indicates no vlan marking support\n");
+ }
+
+ if (tm_capabilities.ecn_marking_supported) {
+ test_ecn = true;
+ test_drop_prec = false;
+
+ rc = ip_marking_tests("node_1_3_2", test_ecn, test_drop_prec);
+ CU_ASSERT(rc == 0);
+ } else {
+ LOG_DBG("\necn_marking tests were not run because "
+ "tm_capabilities indicates no ecn marking support\n");
+ }
+
+ if (tm_capabilities.drop_prec_marking_supported) {
+ test_ecn = false;
+ test_drop_prec = true;
+
+ rc = ip_marking_tests("node_1_4_2", test_ecn, test_drop_prec);
+ CU_ASSERT(rc == 0);
+ } else {
+ LOG_DBG("\ndrop_prec marking tests were not run because "
+ "tm_capabilities indicates no drop precedence "
+ "marking support\n");
+ }
+
+ if (tm_capabilities.ecn_marking_supported &&
+ tm_capabilities.drop_prec_marking_supported) {
+ test_ecn = true;
+ test_drop_prec = true;
+
+ rc = ip_marking_tests("node_1_4_2", test_ecn, test_drop_prec);
+ CU_ASSERT(rc == 0);
+ }
+}
+
+void traffic_mngr_test_fanin_info(void)
+{
+ CU_ASSERT(test_fanin_info("node_1") == 0);
+ CU_ASSERT(test_fanin_info("node_1_2") == 0);
+ CU_ASSERT(test_fanin_info("node_1_3_7") == 0);
+}
+
/* Tear down every TM system created by this suite. */
void traffic_mngr_test_destroy(void)
{
	int rc = destroy_tm_systems();

	CU_ASSERT(rc == 0);
}
+
/* Test registry for the traffic_mngr suite. Order matters: tm_create must
 * run before the tests that use the TM system, and test_destroy must run
 * last. Shaper and scheduler tests only run when the corresponding
 * check_* predicate reports support. */
odp_testinfo_t traffic_mngr_suite[] = {
	ODP_TEST_INFO(traffic_mngr_test_capabilities),
	ODP_TEST_INFO(traffic_mngr_test_tm_create),
	ODP_TEST_INFO(traffic_mngr_test_shaper_profile),
	ODP_TEST_INFO(traffic_mngr_test_sched_profile),
	ODP_TEST_INFO(traffic_mngr_test_threshold_profile),
	ODP_TEST_INFO(traffic_mngr_test_wred_profile),
	ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_shaper,
				  traffic_mngr_check_shaper),
	ODP_TEST_INFO_CONDITIONAL(traffic_mngr_test_scheduler,
				  traffic_mngr_check_scheduler),
	ODP_TEST_INFO(traffic_mngr_test_thresholds),
	ODP_TEST_INFO(traffic_mngr_test_byte_wred),
	ODP_TEST_INFO(traffic_mngr_test_pkt_wred),
	ODP_TEST_INFO(traffic_mngr_test_query),
	ODP_TEST_INFO(traffic_mngr_test_marking),
	ODP_TEST_INFO(traffic_mngr_test_fanin_info),
	ODP_TEST_INFO(traffic_mngr_test_destroy),
	ODP_TEST_INFO_NULL,
};
+
/* Single-suite registry handed to odp_cunit_register(). */
odp_suiteinfo_t traffic_mngr_suites[] = {
	{ "traffic_mngr tests", traffic_mngr_suite_init,
	  traffic_mngr_suite_term, traffic_mngr_suite },
	ODP_SUITE_INFO_NULL
};
+
+int traffic_mngr_main(int argc, char *argv[])
+{
+ /* parse common options: */
+ if (odp_cunit_parse_options(argc, argv))
+ return -1;
+
+ int ret = odp_cunit_register(traffic_mngr_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/common_plat/validation/api/traffic_mngr/traffic_mngr.h b/test/common_plat/validation/api/traffic_mngr/traffic_mngr.h
new file mode 100644
index 000000000..af115fef7
--- /dev/null
+++ b/test/common_plat/validation/api/traffic_mngr/traffic_mngr.h
@@ -0,0 +1,45 @@
/* Copyright (c) 2015, Linaro Limited
 * All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/* Public interface of the traffic manager (TM) validation suite: test
 * functions, conditional-run predicates, suite init/term hooks and the
 * standalone entry point. */

#ifndef _ODP_TEST_TRAFFIC_MNGR_H_
#define _ODP_TEST_TRAFFIC_MNGR_H_

#include <odp_cunit_common.h>

/* predicates for ODP_TEST_INFO_CONDITIONAL (non-zero = run the test): */
int traffic_mngr_check_shaper(void);
int traffic_mngr_check_scheduler(void);

/* test functions: */
void traffic_mngr_test_capabilities(void);
void traffic_mngr_test_tm_create(void);
void traffic_mngr_test_shaper_profile(void);
void traffic_mngr_test_sched_profile(void);
void traffic_mngr_test_threshold_profile(void);
void traffic_mngr_test_wred_profile(void);
void traffic_mngr_test_shaper(void);
void traffic_mngr_test_scheduler(void);
void traffic_mngr_test_thresholds(void);
void traffic_mngr_test_byte_wred(void);
void traffic_mngr_test_pkt_wred(void);
void traffic_mngr_test_query(void);
void traffic_mngr_test_marking(void);
void traffic_mngr_test_fanin_info(void);
void traffic_mngr_test_destroy(void);

/* test arrays: */
extern odp_testinfo_t traffic_mngr_suite[];

/* test suite init/term functions: */
int traffic_mngr_suite_init(void);
int traffic_mngr_suite_term(void);

/* test registry: */
extern odp_suiteinfo_t traffic_mngr_suites[];

/* main test program: */
int traffic_mngr_main(int argc, char *argv[]);

#endif
diff --git a/test/common_plat/validation/api/traffic_mngr/traffic_mngr_main.c b/test/common_plat/validation/api/traffic_mngr/traffic_mngr_main.c
new file mode 100644
index 000000000..1fc1f78d7
--- /dev/null
+++ b/test/common_plat/validation/api/traffic_mngr/traffic_mngr_main.c
@@ -0,0 +1,12 @@
/* Copyright (c) 2015, Linaro Limited
 * All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include "traffic_mngr.h"

/* Thin standalone wrapper: forwards argv straight to the suite runner. */
int main(int argc, char *argv[])
{
	return traffic_mngr_main(argc, argv);
}