author    Matias Elo <matias.elo@nokia.com>  2022-09-08 13:46:05 +0300
committer Matias Elo <matias.elo@nokia.com>  2022-09-08 13:46:05 +0300
commit    ac602aa3978f5ac88e5247e64a6c437bce2002bf (patch)
tree      3f55c04e4d70ba8c72f0939edb42ceb813d7c077 /test
parent    3b9d1514e55955953786211f1fdec8e97f3688c7 (diff)
parent    aca79b03168b462c7197a02dae08f815f2f975a4 (diff)
Merge tag 'v1.37.2.0' of https://github.com/OpenDataPlane/odp into odp-dpdk
Signed-off-by: Matias Elo <matias.elo@nokia.com>
Diffstat (limited to 'test')
-rw-r--r--  test/common/odp_cunit_common.c | 202
-rw-r--r--  test/common/odp_cunit_common.h | 58
-rw-r--r--  test/performance/odp_l2fwd.c | 37
-rw-r--r--  test/performance/odp_packet_gen.c | 18
-rw-r--r--  test/performance/odp_pktio_perf.c | 38
-rw-r--r--  test/performance/odp_sched_latency.c | 4
-rw-r--r--  test/performance/odp_scheduling.c | 3
-rw-r--r--  test/validation/api/Makefile.am | 1
-rw-r--r--  test/validation/api/atomic/atomic.c | 12
-rw-r--r--  test/validation/api/barrier/barrier.c | 79
-rw-r--r--  test/validation/api/cpumask/cpumask.c | 168
-rw-r--r--  test/validation/api/init/.gitignore | 1
-rw-r--r--  test/validation/api/init/Makefile.am | 5
-rw-r--r--  test/validation/api/init/init_main.c | 2
-rw-r--r--  test/validation/api/lock/lock.c | 82
-rw-r--r--  test/validation/api/pktio/pktio.c | 8
-rw-r--r--  test/validation/api/pool/pool.c | 65
-rw-r--r--  test/validation/api/queue/queue.c | 21
-rw-r--r--  test/validation/api/scheduler/scheduler.c | 377
-rw-r--r--  test/validation/api/shmem/shmem.c | 92
-rw-r--r--  test/validation/api/stash/stash.c | 74
-rw-r--r--  test/validation/api/thread/thread.c | 38
-rw-r--r--  test/validation/api/time/time.c | 130
-rw-r--r--  test/validation/api/timer/timer.c | 38
24 files changed, 1133 insertions, 420 deletions
diff --git a/test/common/odp_cunit_common.c b/test/common/odp_cunit_common.c
index c6e03dab2..dbc6cf3d8 100644
--- a/test/common/odp_cunit_common.c
+++ b/test/common/odp_cunit_common.c
@@ -1,14 +1,17 @@
/* Copyright (c) 2014-2018, Linaro Limited
- * Copyright (c) 2019, Nokia
+ * Copyright (c) 2019-2022, Nokia
* Copyright (c) 2021, Marvell
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
+#define _GNU_SOURCE
+
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
+#include <sys/mman.h>
#include <odp_api.h>
#include "odp_cunit_common.h"
#include <odp/helper/odph_api.h>
@@ -28,9 +31,11 @@
/* Globals */
static int allow_skip_result;
-static odph_thread_t thread_tbl[MAX_WORKERS];
+static odph_thread_t thread_tbl[ODP_THREAD_COUNT_MAX];
+static int threads_running;
static odp_instance_t instance;
static char *progname;
+static int (*thread_func)(void *);
/*
* global init/term functions which may be registered
@@ -45,47 +50,205 @@ static struct {
static odp_suiteinfo_t *global_testsuites;
-/** create test thread */
-int odp_cunit_thread_create(int func_ptr(void *), pthrd_arg *arg)
+#define MAX_STR_LEN 256
+#define MAX_FAILURES 10
+
+/* Recorded assertion failure for later CUnit call in the initial thread */
+typedef struct assertion_failure_t {
+ char cond[MAX_STR_LEN];
+ char file[MAX_STR_LEN];
+ unsigned int line;
+ int fatal;
+} assertion_failure_t;
+
+typedef struct thr_global_t {
+ assertion_failure_t failure[MAX_FAILURES];
+ unsigned long num_failures;
+} thr_global_t;
+
+static thr_global_t *thr_global;
+
+static __thread int initial_thread = 1; /* Are we the initial thread? */
+static __thread jmp_buf longjmp_env;
+
+void odp_cu_assert(CU_BOOL value, unsigned int line,
+ const char *condition, const char *file, CU_BOOL fatal)
+{
+ unsigned long idx;
+
+ if (initial_thread) {
+ CU_assertImplementation(value, line, condition, file, "", fatal);
+ return;
+ }
+
+ /* Assertion ok, just return */
+ if (value)
+ return;
+
+ /*
+ * Non-initial thread/process cannot call CUnit assert because:
+ *
+ * - CU_assertImplementation() is not thread-safe
+ * - In process mode an assertion failure would be lost because it
+ * would not be recorded in the memory of the initial process.
+ * - Fatal asserts in CUnit perform longjmp, which cannot be done in
+ * a thread or process other than the one that did the setjmp.
+ *
+ * --> Record the assertion failure in shared memory so that it can be
+ * processed later in the context of the initial thread/process.
+ * --> In fatal assert, longjmp within the current thread.
+ */
+
+ idx = __atomic_fetch_add(&thr_global->num_failures, 1, __ATOMIC_RELAXED);
+
+ if (idx < MAX_FAILURES) {
+ assertion_failure_t *a = &thr_global->failure[idx];
+
+ strncpy(a->cond, condition, sizeof(a->cond));
+ strncpy(a->file, file, sizeof(a->file));
+ a->cond[sizeof(a->cond) - 1] = 0;
+ a->file[sizeof(a->file) - 1] = 0;
+ a->line = line;
+ a->fatal = fatal;
+ }
+
+ if (fatal)
+ longjmp(longjmp_env, 1);
+}
+
+static void handle_postponed_asserts(void)
+{
+ unsigned long num = thr_global->num_failures;
+
+ if (num > MAX_FAILURES)
+ num = MAX_FAILURES;
+
+ for (unsigned long n = 0; n < num; n++) {
+ assertion_failure_t *a = &thr_global->failure[n];
+
+ /*
+ * Turn fatal failures into non-fatal failures as we are just
+ * reporting them. Threads that saw fatal failures which
+ * prevented them from continuing have already been stopped.
+ */
+ CU_assertImplementation(0, a->line, a->cond, a->file, "", CU_FALSE);
+ }
+ thr_global->num_failures = 0;
+}
+
+static int threads_init(void)
+{
+ static int initialized;
+
+ if (initialized)
+ return 0;
+
+ /*
+ * Use shared memory mapping for the global structure to make it
+ * visible in the child processes if running in process mode.
+ */
+ thr_global = mmap(NULL, sizeof(thr_global_t),
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS,
+ -1, 0);
+ if (thr_global == MAP_FAILED)
+ return -1;
+
+ initialized = 1;
+ return 0;
+}
+
+static int run_thread(void *arg)
{
+ int rc;
+
+ /* Make sure this is zero also in process mode "threads" */
+ initial_thread = 0;
+
+ if (setjmp(longjmp_env) == 0) {
+ /* Normal return, proceed to the thread function. */
+ rc = (*thread_func)(arg);
+ } else {
+ /*
+ * Return from longjmp done by the thread function.
+ * We return 0 here since odph_thread_join() does not like
+ * nonzero exit statuses.
+ */
+ rc = 0;
+ }
+
+ return rc;
+}
+
+int odp_cunit_thread_create(int num, int func_ptr(void *), void *const arg[], int priv)
+{
+ int i, ret;
odp_cpumask_t cpumask;
odph_thread_common_param_t thr_common;
- int ret;
- int num = arg->numthrds;
- odph_thread_param_t thr_param;
+ odph_thread_param_t thr_param[num];
+
+ if (num > ODP_THREAD_COUNT_MAX) {
+ fprintf(stderr, "error: %s: too many threads: num=%d max=%d\n", __func__,
+ num, ODP_THREAD_COUNT_MAX);
+ return -1;
+ }
+
+ if (threads_running) {
+ /* thread_tbl is already in use */
+ fprintf(stderr, "error: %s: threads already running\n", __func__);
+ return -1;
+ }
+
+ thread_func = func_ptr;
odph_thread_common_param_init(&thr_common);
- odph_thread_param_init(&thr_param);
- thr_param.start = func_ptr;
- thr_param.arg = arg;
- thr_param.thr_type = ODP_THREAD_WORKER;
+ if (arg == NULL)
+ priv = 0;
+
+ for (i = 0; i < num; i++) {
+ odph_thread_param_init(&thr_param[i]);
+
+ thr_param[i].start = run_thread;
+ thr_param[i].thr_type = ODP_THREAD_WORKER;
+
+ if (arg)
+ thr_param[i].arg = arg[i];
+ else
+ thr_param[i].arg = NULL;
+
+ if (priv == 0)
+ break;
+ }
odp_cpumask_default_worker(&cpumask, num);
thr_common.instance = instance;
thr_common.cpumask = &cpumask;
- thr_common.share_param = 1;
+ thr_common.share_param = !priv;
/* Create and start additional threads */
- ret = odph_thread_create(thread_tbl, &thr_common, &thr_param, num);
+ ret = odph_thread_create(thread_tbl, &thr_common, thr_param, num);
if (ret != num)
fprintf(stderr, "error: odph_thread_create() failed.\n");
+ threads_running = (ret > 0);
+
return ret;
}
-/** exit from test thread */
-int odp_cunit_thread_exit(pthrd_arg *arg)
+int odp_cunit_thread_join(int num)
{
- int num = arg->numthrds;
-
- /* Wait for other threads to exit */
+ /* Wait for threads to exit */
if (odph_thread_join(thread_tbl, num) != num) {
fprintf(stderr, "error: odph_thread_join() failed.\n");
return -1;
}
+ threads_running = 0;
+ thread_func = 0;
+
+ handle_postponed_asserts();
return 0;
}
@@ -500,6 +663,9 @@ int odp_cunit_update(odp_suiteinfo_t testsuites[])
*/
int odp_cunit_register(odp_suiteinfo_t testsuites[])
{
+ if (threads_init())
+ return -1;
+
/* call test executable init hook, if any */
if (global_init_term.global_init_ptr) {
if ((*global_init_term.global_init_ptr)(&instance) == 0) {
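
For reference, a minimal sketch (not part of this patch) of how a validation test uses the reworked API above: odp_cunit_thread_create()/odp_cunit_thread_join() and the CU_ASSERT macros come from this change, while the worker function, test case and counter names are hypothetical.

#include <odp_api.h>
#include <odp_cunit_common.h>

#define NUM_THR 4

static odp_atomic_u32_t counter;

/* Hypothetical worker: a CU_ASSERT failure here is recorded in the shared
 * failure table and reported later by odp_cunit_thread_join() in the
 * initial thread. */
static int worker(void *arg)
{
	(void)arg;
	odp_atomic_inc_u32(&counter);
	CU_ASSERT(odp_thread_id() >= 0);
	return 0;
}

static void example_test(void)
{
	odp_atomic_init_u32(&counter, 0);

	/* arg table NULL, priv 0: every thread gets a NULL argument */
	CU_ASSERT_FATAL(odp_cunit_thread_create(NUM_THR, worker, NULL, 0) == NUM_THR);

	/* Join also reports assertion failures postponed by the workers */
	CU_ASSERT(odp_cunit_thread_join(NUM_THR) == 0);
	CU_ASSERT(odp_atomic_load_u32(&counter) == NUM_THR);
}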
diff --git a/test/common/odp_cunit_common.h b/test/common/odp_cunit_common.h
index f290fd670..5959163d3 100644
--- a/test/common/odp_cunit_common.h
+++ b/test/common/odp_cunit_common.h
@@ -20,8 +20,6 @@
#include <CUnit/Basic.h>
#include <odp_api.h>
-#define MAX_WORKERS 32 /**< Maximum number of work threads */
-
typedef int (*cunit_test_check_active)(void);
typedef struct {
@@ -67,14 +65,6 @@ typedef struct {
uint32_t bar;
} test_shared_data_t;
-/**
- * Thread argument
- */
-typedef struct {
- int testcase; /**< specifies which set of API's to exercise */
- int numthrds; /**< no of pthreads to create */
-} pthrd_arg;
-
/* parse parameters that affect the behaviour of odp_cunit_common */
int odp_cunit_parse_options(int argc, char *argv[]);
/* register suites to be run via odp_cunit_run() */
@@ -84,9 +74,16 @@ int odp_cunit_update(odp_suiteinfo_t testsuites[]);
/* the function, called by module main(), to run the testsuites: */
int odp_cunit_run(void);
-/** create thread for start_routine function (which returns 0 on success) */
-int odp_cunit_thread_create(int func_ptr(void *), pthrd_arg *arg);
-int odp_cunit_thread_exit(pthrd_arg *);
+/* Create threads for a validation test
+ *
+ * Thread argument table (arg[]) can be set to NULL when there are no arguments.
+ * When 'priv' is 0, the same argument pointer (arg[0]) is passed to all threads. Otherwise,
+ * each thread is passed its own pointer from arg[]. Returns the number of threads created.
+ */
+int odp_cunit_thread_create(int num, int func_ptr(void *arg), void *const arg[], int priv);
+
+/* Wait for previously created threads to exit */
+int odp_cunit_thread_join(int num);
/**
* Global tests initialization/termination.
@@ -111,6 +108,9 @@ int odp_cunit_set_inactive(void);
/* Check from CI_SKIP environment variable if the test case should be skipped by CI */
int odp_cunit_ci_skip(const char *test_name);
+void odp_cu_assert(CU_BOOL value, unsigned int line,
+ const char *condition, const char *file, CU_BOOL fatal);
+
/*
* Wrapper for CU_assertImplementation for the fatal asserts to show the
* compiler and static analyzers that the function does not return if the
@@ -120,7 +120,7 @@ int odp_cunit_ci_skip(const char *test_name);
static inline void odp_cu_assert_fatal(CU_BOOL value, unsigned int line,
const char *condition, const char *file)
{
- CU_assertImplementation(value, line, condition, file, "", CU_TRUE);
+ odp_cu_assert(value, line, condition, file, CU_TRUE);
if (!value) {
/* not reached */
@@ -135,31 +135,61 @@ static inline void odp_cu_assert_fatal(CU_BOOL value, unsigned int line,
* compatibility with CU and existing code that assumes this kind of macros.
*/
+#undef CU_ASSERT
+#define CU_ASSERT(value) \
+ { odp_cu_assert((value), __LINE__, #value, __FILE__, CU_FALSE); }
+
#undef CU_ASSERT_FATAL
#define CU_ASSERT_FATAL(value) \
{ odp_cu_assert_fatal((value), __LINE__, #value, __FILE__); }
+#undef CU_FAIL
+#define CU_FAIL(msg) \
+ { odp_cu_assert(CU_FALSE, __LINE__, ("CU_FAIL(" #msg ")"), __FILE__, CU_FALSE); }
+
#undef CU_FAIL_FATAL
#define CU_FAIL_FATAL(msg) \
{ odp_cu_assert_fatal(CU_FALSE, __LINE__, ("CU_FAIL_FATAL(" #msg ")"), __FILE__); }
+#undef CU_ASSERT_EQUAL
+#define CU_ASSERT_EQUAL(actual, expected) \
+ { odp_cu_assert(((actual) == (expected)), __LINE__, \
+ ("CU_ASSERT_EQUAL(" #actual "," #expected ")"), \
+ __FILE__, CU_FALSE); }
+
#undef CU_ASSERT_EQUAL_FATAL
#define CU_ASSERT_EQUAL_FATAL(actual, expected) \
{ odp_cu_assert_fatal(((actual) == (expected)), __LINE__, \
("CU_ASSERT_EQUAL_FATAL(" #actual "," #expected ")"), \
__FILE__); }
+#undef CU_ASSERT_NOT_EQUAL
+#define CU_ASSERT_NOT_EQUAL(actual, expected) \
+ { odp_cu_assert(((actual) != (expected)), __LINE__, \
+ ("CU_ASSERT_NOT_EQUAL(" #actual "," #expected ")"), \
+ __FILE__, CU_FALSE); }
+
#undef CU_ASSERT_NOT_EQUAL_FATAL
#define CU_ASSERT_NOT_EQUAL_FATAL(actual, expected) \
{ odp_cu_assert_fatal(((actual) != (expected)), __LINE__, \
("CU_ASSERT_NOT_EQUAL_FATAL(" #actual "," #expected ")"), \
__FILE__); }
+#undef CU_ASSERT_PTR_NULL
+#define CU_ASSERT_PTR_NULL(value) \
+ { odp_cu_assert((NULL == (const void *)(value)), __LINE__, \
+ ("CU_ASSERT_PTR_NULL(" #value ")"), __FILE__, CU_FALSE); }
+
#undef CU_ASSERT_PTR_NULL_FATAL
#define CU_ASSERT_PTR_NULL_FATAL(value) \
{ odp_cu_assert_fatal((NULL == (const void *)(value)), __LINE__, \
("CU_ASSERT_PTR_NULL_FATAL(" #value ")"), __FILE__); }
+#undef CU_ASSERT_PTR_NOT_NULL
+#define CU_ASSERT_PTR_NOT_NULL(value) \
+ { odp_cu_assert((NULL != (const void *)(value)), __LINE__, \
+ ("CU_ASSERT_PTR_NOT_NULL_FATAL(" #value ")"), __FILE__, CU_FALSE); }
+
#undef CU_ASSERT_PTR_NOT_NULL_FATAL
#define CU_ASSERT_PTR_NOT_NULL_FATAL(value) \
{ odp_cu_assert_fatal((NULL != (const void *)(value)), __LINE__, \
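
To illustrate the 'priv' flag documented in this header change, a hedged sketch (not from the patch) that hands each thread a private argument; the per-thread struct, array and test case names are made up, the API calls are the ones declared above.

#include <odp_cunit_common.h>

#define NUM_THR 3

typedef struct {
	int thread_idx;   /* hypothetical per-thread data */
} thr_arg_t;

static thr_arg_t thr_arg[NUM_THR];

static int worker(void *arg)
{
	thr_arg_t *my_arg = arg;

	CU_ASSERT(my_arg->thread_idx >= 0 && my_arg->thread_idx < NUM_THR);
	return 0;
}

static void per_thread_arg_test(void)
{
	void *arg_tbl[NUM_THR];

	for (int i = 0; i < NUM_THR; i++) {
		thr_arg[i].thread_idx = i;
		arg_tbl[i] = &thr_arg[i];
	}

	/* priv = 1: thread i receives arg_tbl[i] instead of a shared arg_tbl[0] */
	CU_ASSERT_FATAL(odp_cunit_thread_create(NUM_THR, worker, arg_tbl, 1) == NUM_THR);
	CU_ASSERT(odp_cunit_thread_join(NUM_THR) == 0);
}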
diff --git a/test/performance/odp_l2fwd.c b/test/performance/odp_l2fwd.c
index 1833dcf78..9ba0d775a 100644
--- a/test/performance/odp_l2fwd.c
+++ b/test/performance/odp_l2fwd.c
@@ -105,6 +105,7 @@ typedef struct {
int num_groups; /* Number of scheduling groups */
int group_mode; /* How threads join groups */
int burst_rx; /* Receive burst size */
+ int rx_queues; /* RX queues per interface */
int pool_per_if; /* Create pool per interface */
uint32_t num_pkt; /* Number of packets per pool */
bool vector_mode; /* Vector mode enabled */
@@ -345,7 +346,8 @@ static inline int copy_packets(odp_packet_t *pkt_tbl, int pkts)
/*
* Return number of packets remaining in the pkt_tbl
*/
-static int process_extra_features(odp_packet_t *pkt_tbl, int pkts, stats_t *stats)
+static inline int process_extra_features(odp_packet_t *pkt_tbl, int pkts,
+ stats_t *stats)
{
if (odp_unlikely(gbl_args->appl.extra_feat)) {
if (gbl_args->appl.packet_copy) {
@@ -376,12 +378,12 @@ static int process_extra_features(odp_packet_t *pkt_tbl, int pkts, stats_t *stat
return pkts;
}
-static void send_packets(odp_packet_t *pkt_tbl,
- int pkts,
- int use_event_queue,
- odp_queue_t tx_queue,
- odp_pktout_queue_t pktout_queue,
- stats_t *stats)
+static inline void send_packets(odp_packet_t *pkt_tbl,
+ int pkts,
+ int use_event_queue,
+ odp_queue_t tx_queue,
+ odp_pktout_queue_t pktout_queue,
+ stats_t *stats)
{
int sent;
unsigned int tx_drops;
@@ -1056,12 +1058,15 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx, odp_po
}
if (num_rx > (int)pktio_capa.max_input_queues) {
- printf("Sharing %i input queues between %i workers\n",
- pktio_capa.max_input_queues, num_rx);
num_rx = pktio_capa.max_input_queues;
mode_rx = ODP_PKTIO_OP_MT;
+ printf("Maximum number of input queues: %i\n", num_rx);
}
+ if (num_rx < gbl_args->appl.num_workers)
+ printf("Sharing %i input queues between %i workers\n",
+ num_rx, gbl_args->appl.num_workers);
+
if (num_tx > (int)pktio_capa.max_output_queues) {
printf("Sharing %i output queues between %i workers\n",
pktio_capa.max_output_queues, num_tx);
@@ -1069,7 +1074,7 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx, odp_po
mode_tx = ODP_PKTIO_OP_MT;
}
- pktin_param.hash_enable = (num_rx > 1) ? 1 : 0;
+ pktin_param.hash_enable = (num_rx > 1 || gbl_args->appl.flow_aware) ? 1 : 0;
pktin_param.hash_proto.proto.ipv4_udp = 1;
pktin_param.num_queues = num_rx;
pktin_param.op_mode = mode_rx;
@@ -1481,6 +1486,8 @@ static void usage(char *progname)
" used by default.\n"
" -b, --burst_rx <num> 0: Use max burst size (default)\n"
" num: Max number of packets per receive call\n"
+ " -q, --rx_queues <num> Number of RX queues per interface in scheduler mode\n"
+ " 0: RX queue per worker CPU (default)\n"
" -p, --packet_copy 0: Don't copy packet (default)\n"
" 1: Create and send copy of the received packet.\n"
" Free the original packet.\n"
@@ -1537,6 +1544,7 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
{"group_mode", required_argument, NULL, 'G'},
{"prio", required_argument, NULL, 'I'},
{"burst_rx", required_argument, NULL, 'b'},
+ {"rx_queues", required_argument, NULL, 'q'},
{"packet_copy", required_argument, NULL, 'p'},
{"pool_per_if", required_argument, NULL, 'y'},
{"num_pkt", required_argument, NULL, 'n'},
@@ -1554,7 +1562,7 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
{NULL, 0, NULL, 0}
};
- static const char *shortopts = "+c:t:a:i:m:o:r:d:s:e:k:g:G:I:b:p:y:n:l:L:w:x:z:M:uPfvh";
+ static const char *shortopts = "+c:t:a:i:m:o:r:d:s:e:k:g:G:I:b:q:p:y:n:l:L:w:x:z:M:uPfvh";
appl_args->time = 0; /* loop forever if time to run is 0 */
appl_args->accuracy = 1; /* get and print pps stats second */
@@ -1566,6 +1574,7 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
appl_args->error_check = 0; /* don't check packet errors by default */
appl_args->packet_copy = 0;
appl_args->burst_rx = 0;
+ appl_args->rx_queues = 0;
appl_args->verbose = 0;
appl_args->chksum = 0; /* don't use checksum offload by default */
appl_args->pool_per_if = 0;
@@ -1745,6 +1754,9 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
case 'b':
appl_args->burst_rx = atoi(optarg);
break;
+ case 'q':
+ appl_args->rx_queues = atoi(optarg);
+ break;
case 'p':
appl_args->packet_copy = atoi(optarg);
break;
@@ -1869,6 +1881,7 @@ static void print_info(void)
printf("Flow aware: %s\n", appl_args->flow_aware ?
"yes" : "no");
printf("Burst size: %i\n", appl_args->burst_rx);
+ printf("RX queues per IF: %i\n", appl_args->rx_queues);
printf("Number of pools: %i\n", appl_args->pool_per_if ?
appl_args->if_count : 1);
@@ -2258,7 +2271,7 @@ int main(int argc, char *argv[])
odp_schedule_group_t grp;
/* A queue per worker in scheduled mode */
- num_rx = num_workers;
+ num_rx = gbl_args->appl.rx_queues > 0 ? gbl_args->appl.rx_queues : num_workers;
num_tx = num_workers;
if (!gbl_args->appl.sched_mode) {
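
As background for the hash_enable and queue-sharing changes above, a minimal sketch of configuring hashed RX queues the way odp_l2fwd does it. It assumes an already opened pktio handle; the helper name and the rxq/wrkr_cnt parameters are illustrative, not part of the patch.

#include <odp_api.h>

/* Hypothetical helper: configure 'rxq' input queues on an opened pktio.
 * When fewer queues than workers are available, queues are shared in
 * MT-safe mode. */
static int config_pktin(odp_pktio_t pktio, int rxq, int wrkr_cnt)
{
	odp_pktio_capability_t capa;
	odp_pktin_queue_param_t pktin_param;
	odp_pktio_op_mode_t mode_rx = ODP_PKTIO_OP_MT_UNSAFE;

	if (odp_pktio_capability(pktio, &capa))
		return -1;

	if (rxq > (int)capa.max_input_queues)
		rxq = capa.max_input_queues;

	if (rxq < wrkr_cnt)
		mode_rx = ODP_PKTIO_OP_MT; /* queues shared between workers */

	odp_pktin_queue_param_init(&pktin_param);
	pktin_param.hash_enable = (rxq > 1) ? 1 : 0;
	pktin_param.hash_proto.proto.ipv4_udp = 1;
	pktin_param.num_queues = rxq;
	pktin_param.op_mode = mode_rx;

	return odp_pktin_queue_config(pktio, &pktin_param);
}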
diff --git a/test/performance/odp_packet_gen.c b/test/performance/odp_packet_gen.c
index 1407887e4..28a3789a0 100644
--- a/test/performance/odp_packet_gen.c
+++ b/test/performance/odp_packet_gen.c
@@ -61,6 +61,7 @@ typedef struct test_options_t {
uint16_t udp_src;
uint16_t udp_dst;
uint32_t wait_sec;
+ uint32_t wait_start_sec;
uint32_t mtu;
odp_bool_t promisc_mode;
@@ -194,6 +195,7 @@ static void print_usage(void)
" -h, --help This help\n"
" -w, --wait <sec> Wait up to <sec> seconds for network links to be up.\n"
" Default: 0 (don't check link status)\n"
+ " -W, --wait_start <sec> Wait <sec> seconds before starting traffic. Default: 0\n"
"\n");
}
@@ -268,12 +270,13 @@ static int parse_options(int argc, char *argv[], test_global_t *global)
{"mtu", required_argument, NULL, 'M'},
{"quit", required_argument, NULL, 'q'},
{"wait", required_argument, NULL, 'w'},
+ {"wait_start", required_argument, NULL, 'W'},
{"update_stat", required_argument, NULL, 'u'},
{"help", no_argument, NULL, 'h'},
{NULL, 0, NULL, 0}
};
- static const char *shortopts = "+i:e:r:t:n:l:L:M:b:x:g:v:s:d:o:p:c:q:u:w:Ph";
+ static const char *shortopts = "+i:e:r:t:n:l:L:M:b:x:g:v:s:d:o:p:c:q:u:w:W:Ph";
test_options->num_pktio = 0;
test_options->num_rx = 1;
@@ -305,6 +308,7 @@ static int parse_options(int argc, char *argv[], test_global_t *global)
test_options->quit = 0;
test_options->update_msec = 0;
test_options->wait_sec = 0;
+ test_options->wait_start_sec = 0;
test_options->mtu = 0;
for (i = 0; i < MAX_PKTIOS; i++) {
@@ -478,6 +482,9 @@ static int parse_options(int argc, char *argv[], test_global_t *global)
case 'w':
test_options->wait_sec = atoi(optarg);
break;
+ case 'W':
+ test_options->wait_start_sec = atoi(optarg);
+ break;
case 'h':
/* fall through */
default:
@@ -931,11 +938,8 @@ static int start_pktios(test_global_t *global)
global->pktio[i].started = 1;
}
- if (!test_options->wait_sec)
- return 0;
-
/* Wait until all links are up */
- for (i = 0; i < num_pktio; i++) {
+ for (i = 0; test_options->wait_sec && i < num_pktio; i++) {
while (1) {
odp_pktio_t pktio = global->pktio[i].pktio;
@@ -957,6 +961,10 @@ static int start_pktios(test_global_t *global)
odp_time_wait_ns(ODP_TIME_SEC_IN_NS);
}
}
+
+ if (test_options->wait_start_sec)
+ odp_time_wait_ns(test_options->wait_start_sec * ODP_TIME_SEC_IN_NS);
+
return 0;
}
diff --git a/test/performance/odp_pktio_perf.c b/test/performance/odp_pktio_perf.c
index 2638d7e6b..06620fd27 100644
--- a/test/performance/odp_pktio_perf.c
+++ b/test/performance/odp_pktio_perf.c
@@ -91,26 +91,16 @@ typedef struct {
int num_ifaces;
} test_args_t;
-struct rx_stats_s {
+typedef struct ODP_ALIGNED_CACHE {
uint64_t rx_cnt; /* Valid packets received */
uint64_t rx_ignore; /* Ignored packets */
-};
-
-typedef union rx_stats_u {
- struct rx_stats_s s;
- uint8_t pad[CACHE_ALIGN_ROUNDUP(sizeof(struct rx_stats_s))];
} pkt_rx_stats_t;
-struct tx_stats_s {
+typedef struct ODP_ALIGNED_CACHE {
uint64_t tx_cnt; /* Packets transmitted */
uint64_t alloc_failures;/* Packet allocation failures */
uint64_t enq_failures; /* Enqueue failures */
odp_time_t idle_ticks; /* Idle ticks count in TX loop */
-};
-
-typedef union tx_stats_u {
- struct tx_stats_s s;
- uint8_t pad[CACHE_ALIGN_ROUNDUP(sizeof(struct tx_stats_s))];
} pkt_tx_stats_t;
/* Test global variables */
@@ -348,8 +338,8 @@ static int run_thread_tx(void *arg)
if (odp_time_cmp(idle_start, ODP_TIME_NULL) > 0) {
odp_time_t diff = odp_time_diff(cur_time, idle_start);
- stats->s.idle_ticks =
- odp_time_sum(diff, stats->s.idle_ticks);
+ stats->idle_ticks =
+ odp_time_sum(diff, stats->idle_ticks);
idle_start = ODP_TIME_NULL;
}
@@ -358,12 +348,12 @@ static int run_thread_tx(void *arg)
alloc_cnt = alloc_packets(tx_packet, batch_len - unsent_pkts);
if (alloc_cnt != batch_len)
- stats->s.alloc_failures++;
+ stats->alloc_failures++;
tx_cnt = send_packets(pktout, tx_packet, alloc_cnt);
unsent_pkts = alloc_cnt - tx_cnt;
- stats->s.enq_failures += unsent_pkts;
- stats->s.tx_cnt += tx_cnt;
+ stats->enq_failures += unsent_pkts;
+ stats->tx_cnt += tx_cnt;
cur_time = odp_time_local();
}
@@ -371,9 +361,9 @@ static int run_thread_tx(void *arg)
if (gbl_args->args.verbose)
printf(" %02d: TxPkts %-8" PRIu64 " EnqFail %-6" PRIu64
" AllocFail %-6" PRIu64 " Idle %" PRIu64 "ms\n",
- thr_id, stats->s.tx_cnt, stats->s.enq_failures,
- stats->s.alloc_failures,
- odp_time_to_ns(stats->s.idle_ticks) /
+ thr_id, stats->tx_cnt, stats->enq_failures,
+ stats->alloc_failures,
+ odp_time_to_ns(stats->idle_ticks) /
(uint64_t)ODP_TIME_MSEC_IN_NS);
return 0;
@@ -442,9 +432,9 @@ static int run_thread_rx(void *arg)
if (odp_event_type(ev[i]) == ODP_EVENT_PACKET) {
pkt = odp_packet_from_event(ev[i]);
if (pktio_pkt_has_magic(pkt))
- stats->s.rx_cnt++;
+ stats->rx_cnt++;
else
- stats->s.rx_ignore++;
+ stats->rx_ignore++;
}
odp_event_free(ev[i]);
}
@@ -473,8 +463,8 @@ static int process_results(uint64_t expected_tx_cnt,
int len = 0;
for (i = 0; i < odp_thread_count_max(); ++i) {
- rx_pkts += gbl_args->rx_stats[i].s.rx_cnt;
- tx_pkts += gbl_args->tx_stats[i].s.tx_cnt;
+ rx_pkts += gbl_args->rx_stats[i].rx_cnt;
+ tx_pkts += gbl_args->tx_stats[i].tx_cnt;
}
if (rx_pkts == 0) {
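
The removal of the explicit pad unions above relies on ODP_ALIGNED_CACHE providing both the alignment and (through sizeof rounding) the cache-line footprint. A small hedged sketch of the pattern; the stats struct, table and helper are illustrative, not from the patch.

#include <odp_api.h>

/* Cache-line aligned per-thread counters: each array element occupies at
 * least one cache line, so writers on different CPUs do not false-share. */
typedef struct ODP_ALIGNED_CACHE {
	uint64_t rx_cnt;
	uint64_t rx_ignore;
} per_thr_stats_t;

static per_thr_stats_t stats_tbl[ODP_THREAD_COUNT_MAX];

static void count_rx(int valid)
{
	per_thr_stats_t *stats = &stats_tbl[odp_thread_id()];

	if (valid)
		stats->rx_cnt++;
	else
		stats->rx_ignore++;
}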
diff --git a/test/performance/odp_sched_latency.c b/test/performance/odp_sched_latency.c
index 0894a403d..d4cbfda19 100644
--- a/test/performance/odp_sched_latency.c
+++ b/test/performance/odp_sched_latency.c
@@ -106,10 +106,8 @@ typedef struct {
} test_stat_t;
/** Performance test statistics (per core) */
-typedef union ODP_ALIGNED_CACHE {
+typedef struct ODP_ALIGNED_CACHE {
test_stat_t prio[NUM_PRIOS]; /**< Test statistics per priority */
-
- uint8_t pad[CACHE_ALIGN_ROUNDUP(NUM_PRIOS * sizeof(test_stat_t))];
} core_stat_t;
/** Test global variables */
diff --git a/test/performance/odp_scheduling.c b/test/performance/odp_scheduling.c
index 0e45b5e4b..520c0fb70 100644
--- a/test/performance/odp_scheduling.c
+++ b/test/performance/odp_scheduling.c
@@ -53,9 +53,6 @@ typedef struct {
typedef struct ODP_ALIGNED_CACHE {
uint64_t num_ev;
-
- /* Round up the struct size to cache line size */
- uint8_t pad[ODP_CACHE_LINE_SIZE - sizeof(uint64_t)];
} queue_context_t;
/** Test global variables */
diff --git a/test/validation/api/Makefile.am b/test/validation/api/Makefile.am
index c1a2539bc..91e600577 100644
--- a/test/validation/api/Makefile.am
+++ b/test/validation/api/Makefile.am
@@ -53,6 +53,7 @@ TESTS = \
init/init_num_thr$(EXEEXT) \
init/init_feature_enabled$(EXEEXT) \
init/init_feature_disabled$(EXEEXT) \
+ init/init_test_param_init$(EXEEXT) \
ipsec/ipsec_sync$(EXEEXT) \
ipsec/ipsec_async$(EXEEXT) \
ipsec/ipsec_inline_in$(EXEEXT) \
diff --git a/test/validation/api/atomic/atomic.c b/test/validation/api/atomic/atomic.c
index d4b15cf32..5a1fdf11b 100644
--- a/test/validation/api/atomic/atomic.c
+++ b/test/validation/api/atomic/atomic.c
@@ -12,6 +12,8 @@
#include <odp_cunit_common.h>
#include <unistd.h>
+#define MAX_WORKERS 32
+
#define ADD_SUB_CNT 5
#define CNT 100000ULL
@@ -969,7 +971,6 @@ static int atomic_init(odp_instance_t *inst)
{
uint32_t workers_count, max_threads;
int ret = 0;
- odp_cpumask_t mask;
odp_init_t init_param;
odph_helper_options_t helper_options;
@@ -1002,7 +1003,7 @@ static int atomic_init(odp_instance_t *inst)
global_mem->g_num_threads = MAX_WORKERS;
- workers_count = odp_cpumask_default_worker(&mask, 0);
+ workers_count = odp_cpumask_default_worker(NULL, 0);
max_threads = (workers_count >= MAX_WORKERS) ?
MAX_WORKERS : workers_count;
@@ -1271,13 +1272,12 @@ static int test_atomic_non_relaxed_thread(void *arg UNUSED)
static void test_atomic_functional(int test_fn(void *), void validate_fn(void))
{
- pthrd_arg arg;
+ int num = global_mem->g_num_threads;
- arg.numthrds = global_mem->g_num_threads;
test_atomic_init();
test_atomic_store();
- odp_cunit_thread_create(test_fn, &arg);
- odp_cunit_thread_exit(&arg);
+ odp_cunit_thread_create(num, test_fn, NULL, 0);
+ odp_cunit_thread_join(num);
validate_fn();
}
diff --git a/test/validation/api/barrier/barrier.c b/test/validation/api/barrier/barrier.c
index aa0ca90b3..710947997 100644
--- a/test/validation/api/barrier/barrier.c
+++ b/test/validation/api/barrier/barrier.c
@@ -1,4 +1,5 @@
/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2022, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -12,6 +13,7 @@
#include <unistd.h>
#define VERBOSE 0
+#define MAX_WORKERS 32
#define MAX_ITERATIONS 1000
#define BARRIER_ITERATIONS 64
@@ -296,38 +298,10 @@ static void barrier_test_memory_barrier(void)
temp_result = a + b + c + d;
}
-static void barrier_test_no_barrier_functional(void)
-{
- pthrd_arg arg;
-
- arg.numthrds = global_mem->g_num_threads;
- barrier_test_init();
- odp_cunit_thread_create(no_barrier_functional_test, &arg);
- odp_cunit_thread_exit(&arg);
-}
-
-static void barrier_test_barrier_functional(void)
-{
- pthrd_arg arg;
-
- arg.numthrds = global_mem->g_num_threads;
- barrier_test_init();
- odp_cunit_thread_create(barrier_functional_test, &arg);
- odp_cunit_thread_exit(&arg);
-}
-
-odp_testinfo_t barrier_suite_barrier[] = {
- ODP_TEST_INFO(barrier_test_memory_barrier),
- ODP_TEST_INFO(barrier_test_no_barrier_functional),
- ODP_TEST_INFO(barrier_test_barrier_functional),
- ODP_TEST_INFO_NULL
-};
-
static int barrier_init(odp_instance_t *inst)
{
uint32_t workers_count, max_threads;
int ret = 0;
- odp_cpumask_t mask;
odp_init_t init_param;
odph_helper_options_t helper_options;
@@ -362,7 +336,7 @@ static int barrier_init(odp_instance_t *inst)
global_mem->g_iterations = MAX_ITERATIONS;
global_mem->g_verbose = VERBOSE;
- workers_count = odp_cpumask_default_worker(&mask, 0);
+ workers_count = odp_cpumask_default_worker(NULL, 0);
max_threads = (workers_count >= MAX_WORKERS) ?
MAX_WORKERS : workers_count;
@@ -404,6 +378,53 @@ static int barrier_term(odp_instance_t inst)
return 0;
}
+static void barrier_single_thread(void)
+{
+ odp_barrier_t barrier;
+
+ odp_barrier_init(&barrier, 1);
+
+ printf(" Calling wait...");
+
+ odp_barrier_wait(&barrier);
+
+ printf(" 1");
+
+ odp_barrier_wait(&barrier);
+
+ printf(" 2");
+
+ odp_barrier_wait(&barrier);
+
+ printf(" 3. ");
+}
+
+static void barrier_test_no_barrier_functional(void)
+{
+ int num = global_mem->g_num_threads;
+
+ barrier_test_init();
+ odp_cunit_thread_create(num, no_barrier_functional_test, NULL, 0);
+ odp_cunit_thread_join(num);
+}
+
+static void barrier_test_barrier_functional(void)
+{
+ int num = global_mem->g_num_threads;
+
+ barrier_test_init();
+ odp_cunit_thread_create(num, barrier_functional_test, NULL, 0);
+ odp_cunit_thread_join(num);
+}
+
+odp_testinfo_t barrier_suite_barrier[] = {
+ ODP_TEST_INFO(barrier_test_memory_barrier),
+ ODP_TEST_INFO(barrier_single_thread),
+ ODP_TEST_INFO(barrier_test_no_barrier_functional),
+ ODP_TEST_INFO(barrier_test_barrier_functional),
+ ODP_TEST_INFO_NULL
+};
+
odp_suiteinfo_t barrier_suites[] = {
{"barrier", NULL, NULL,
barrier_suite_barrier},
diff --git a/test/validation/api/cpumask/cpumask.c b/test/validation/api/cpumask/cpumask.c
index 0983bb4b5..b358a5be6 100644
--- a/test/validation/api/cpumask/cpumask.c
+++ b/test/validation/api/cpumask/cpumask.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2015-2018, Linaro Limited
- * Copyright (c) 2021, Nokia
+ * Copyright (c) 2021-2022, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -10,82 +10,150 @@
#include "odp_cunit_common.h"
#include "mask_common.h"
-/* default worker parameter to get all that may be available */
-#define ALL_AVAILABLE 0
+static int cpumask_max_count(void)
+{
+ odp_cpumask_t mask;
+
+ odp_cpumask_setall(&mask);
+
+ return odp_cpumask_count(&mask);
+}
static void cpumask_test_odp_cpumask_def_control(void)
{
- unsigned int num, max_num;
- unsigned int mask_count;
- unsigned int max_cpus = mask_capacity();
odp_cpumask_t mask;
+ int num, count, all;
+ int max = cpumask_max_count();
+ int request = 7;
+
+ CU_ASSERT_FATAL(max > 1);
+
+ if (request > max)
+ request = max - 1;
- num = odp_cpumask_default_control(&mask, ALL_AVAILABLE);
- mask_count = odp_cpumask_count(&mask);
+ all = odp_cpumask_default_control(&mask, 0);
+ num = all;
+ count = odp_cpumask_count(&mask);
- CU_ASSERT(mask_count == num);
CU_ASSERT(num > 0);
- CU_ASSERT(num <= max_cpus);
+ CU_ASSERT(num == count);
+ CU_ASSERT(num <= max);
- max_num = odp_cpumask_default_control(&mask, max_cpus);
- mask_count = odp_cpumask_count(&mask);
+ num = odp_cpumask_default_control(&mask, max);
+ count = odp_cpumask_count(&mask);
+
+ CU_ASSERT(num > 0);
+ CU_ASSERT(num == count);
+ CU_ASSERT(num <= max);
+ CU_ASSERT(num == all);
+
+ num = odp_cpumask_default_control(&mask, 1);
+ count = odp_cpumask_count(&mask);
+
+ CU_ASSERT(num == 1);
+ CU_ASSERT(num == count);
+
+ num = odp_cpumask_default_control(&mask, request);
+ count = odp_cpumask_count(&mask);
+
+ CU_ASSERT(num > 0);
+ CU_ASSERT(num <= request);
+ CU_ASSERT(num == count);
- CU_ASSERT(max_num > 0);
- CU_ASSERT(max_num == mask_count);
- CU_ASSERT(max_num <= max_cpus);
- CU_ASSERT(max_num <= num);
+ CU_ASSERT(odp_cpumask_default_control(NULL, request) == num);
+ CU_ASSERT(odp_cpumask_default_control(NULL, 0) == all);
+ CU_ASSERT(odp_cpumask_default_control(NULL, 1) == 1);
}
static void cpumask_test_odp_cpumask_def_worker(void)
{
- unsigned int num, max_num;
- unsigned int mask_count;
- unsigned int max_cpus = mask_capacity();
odp_cpumask_t mask;
+ int num, count, all;
+ int max = cpumask_max_count();
+ int request = 7;
- num = odp_cpumask_default_worker(&mask, ALL_AVAILABLE);
- mask_count = odp_cpumask_count(&mask);
+ CU_ASSERT_FATAL(max > 1);
+
+ if (request > max)
+ request = max - 1;
+
+ all = odp_cpumask_default_worker(&mask, 0);
+ num = all;
+ count = odp_cpumask_count(&mask);
- CU_ASSERT(mask_count == num);
CU_ASSERT(num > 0);
- CU_ASSERT(num <= max_cpus);
+ CU_ASSERT(num == count);
+ CU_ASSERT(num <= max);
+
+ num = odp_cpumask_default_worker(&mask, max);
+ count = odp_cpumask_count(&mask);
+
+ CU_ASSERT(num > 0);
+ CU_ASSERT(num == count);
+ CU_ASSERT(num <= max);
+ CU_ASSERT(num == all);
+
+ num = odp_cpumask_default_worker(&mask, 1);
+ count = odp_cpumask_count(&mask);
+
+ CU_ASSERT(num == 1);
+ CU_ASSERT(num == count);
- max_num = odp_cpumask_default_worker(&mask, max_cpus);
- mask_count = odp_cpumask_count(&mask);
+ num = odp_cpumask_default_worker(&mask, request);
+ count = odp_cpumask_count(&mask);
- CU_ASSERT(max_num > 0);
- CU_ASSERT(max_num == mask_count);
- CU_ASSERT(max_num <= max_cpus);
- CU_ASSERT(max_num <= num);
+ CU_ASSERT(num > 0);
+ CU_ASSERT(num <= request);
+ CU_ASSERT(num == count);
+
+ CU_ASSERT(odp_cpumask_default_worker(NULL, request) == num);
+ CU_ASSERT(odp_cpumask_default_worker(NULL, 0) == all);
+ CU_ASSERT(odp_cpumask_default_worker(NULL, 1) == 1);
}
static void cpumask_test_odp_cpumask_def(void)
{
- unsigned mask_count;
- unsigned num_worker;
- unsigned num_control;
- unsigned max_cpus = mask_capacity();
- unsigned available_cpus = odp_cpu_count();
- unsigned requested_cpus;
- odp_cpumask_t mask;
-
- CU_ASSERT(available_cpus <= max_cpus);
+ odp_cpumask_t mask, all_mask, overlap;
+ int count, all, num_worker, num_control, request;
+ int max = cpumask_max_count();
+ int cpu_count = odp_cpu_count();
+
+ all = odp_cpumask_all_available(&all_mask);
+ count = odp_cpumask_count(&all_mask);
+
+ CU_ASSERT_FATAL(cpu_count > 0);
+ CU_ASSERT_FATAL(all > 0);
+ CU_ASSERT(all == cpu_count);
+ CU_ASSERT(all <= max);
+ CU_ASSERT(all == count);
+
+ request = all - 1;
+ if (request == 0)
+ request = 1;
+
+ num_worker = odp_cpumask_default_worker(&mask, request);
+ count = odp_cpumask_count(&mask);
+ CU_ASSERT(num_worker > 0);
+ CU_ASSERT(num_worker <= request);
+ CU_ASSERT(num_worker == count);
- if (available_cpus > 1)
- requested_cpus = available_cpus - 1;
- else
- requested_cpus = available_cpus;
- num_worker = odp_cpumask_default_worker(&mask, requested_cpus);
- mask_count = odp_cpumask_count(&mask);
- CU_ASSERT(mask_count == num_worker);
+ /* Check that CPUs are in the all CPUs mask */
+ odp_cpumask_zero(&overlap);
+ odp_cpumask_and(&overlap, &mask, &all_mask);
+ CU_ASSERT(odp_cpumask_count(&overlap) == num_worker);
num_control = odp_cpumask_default_control(&mask, 1);
- mask_count = odp_cpumask_count(&mask);
- CU_ASSERT(mask_count == num_control);
+ count = odp_cpumask_count(&mask);
+ CU_ASSERT(num_control == 1);
+ CU_ASSERT(num_control == count);
- CU_ASSERT(num_control >= 1);
- CU_ASSERT(num_worker <= available_cpus);
- CU_ASSERT(num_worker > 0);
+ odp_cpumask_zero(&overlap);
+ odp_cpumask_and(&overlap, &mask, &all_mask);
+ CU_ASSERT(odp_cpumask_count(&overlap) == num_control);
+
+ CU_ASSERT(odp_cpumask_default_worker(NULL, request) == num_worker);
+ CU_ASSERT(odp_cpumask_default_worker(NULL, 0) <= all);
+ CU_ASSERT(odp_cpumask_default_control(NULL, 0) <= all);
}
odp_testinfo_t cpumask_suite[] = {
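
The NULL-mask calls exercised by the tests above let an application size its worker set before committing to a mask. A minimal hedged sketch of that usage pattern; the function and variable names are illustrative.

#include <odp_api.h>
#include <stdio.h>

static void pick_workers(void)
{
	odp_cpumask_t mask;
	int avail, num;

	/* NULL mask: only query how many worker CPUs would be available */
	avail = odp_cpumask_default_worker(NULL, 0);

	num = avail > 4 ? 4 : avail;   /* cap to an application limit */

	/* Now actually populate the mask for 'num' workers */
	num = odp_cpumask_default_worker(&mask, num);

	printf("using %d of %d available worker CPUs\n", num, avail);
}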
diff --git a/test/validation/api/init/.gitignore b/test/validation/api/init/.gitignore
index 5001771bf..e312d2cf6 100644
--- a/test/validation/api/init/.gitignore
+++ b/test/validation/api/init/.gitignore
@@ -5,3 +5,4 @@ init_log_thread
init_num_thr
init_feature_enabled
init_feature_disabled
+init_test_param_init
diff --git a/test/validation/api/init/Makefile.am b/test/validation/api/init/Makefile.am
index 479e3a71b..1ddf1dd0d 100644
--- a/test/validation/api/init/Makefile.am
+++ b/test/validation/api/init/Makefile.am
@@ -3,7 +3,8 @@ include ../Makefile.inc
# Keep init test cases in separate binaries. Some implementations may not allow
# the same application process to call odp_init_global() multiple times.
test_PROGRAMS = init_defaults init_abort init_log init_num_thr \
- init_feature_enabled init_feature_disabled init_log_thread
+ init_feature_enabled init_feature_disabled init_log_thread \
+ init_test_param_init
init_defaults_CPPFLAGS = -DINIT_TEST=0 $(AM_CPPFLAGS)
init_abort_CPPFLAGS = -DINIT_TEST=1 $(AM_CPPFLAGS)
@@ -12,6 +13,7 @@ init_num_thr_CPPFLAGS = -DINIT_TEST=3 $(AM_CPPFLAGS)
init_feature_enabled_CPPFLAGS = -DINIT_TEST=4 $(AM_CPPFLAGS)
init_feature_disabled_CPPFLAGS = -DINIT_TEST=5 $(AM_CPPFLAGS)
init_log_thread_CPPFLAGS = -DINIT_TEST=6 $(AM_CPPFLAGS)
+init_test_param_init_CPPFLAGS = -DINIT_TEST=7 $(AM_CPPFLAGS)
init_defaults_SOURCES = init_main.c
init_abort_SOURCES = init_main.c
@@ -20,3 +22,4 @@ init_num_thr_SOURCES = init_main.c
init_feature_enabled_SOURCES = init_main.c
init_feature_disabled_SOURCES = init_main.c
init_log_thread_SOURCES = init_main.c
+init_test_param_init_SOURCES = init_main.c
diff --git a/test/validation/api/init/init_main.c b/test/validation/api/init/init_main.c
index f8fd96aeb..5cbaf72eb 100644
--- a/test/validation/api/init/init_main.c
+++ b/test/validation/api/init/init_main.c
@@ -242,7 +242,6 @@ static void init_test_feature_disabled(void)
}
odp_testinfo_t testinfo[] = {
- ODP_TEST_INFO(init_test_param_init),
ODP_TEST_INFO(init_test_defaults),
ODP_TEST_INFO(init_test_abort),
ODP_TEST_INFO(init_test_log),
@@ -250,6 +249,7 @@ odp_testinfo_t testinfo[] = {
ODP_TEST_INFO(init_test_feature_enabled),
ODP_TEST_INFO(init_test_feature_disabled),
ODP_TEST_INFO(init_test_log_thread),
+ ODP_TEST_INFO(init_test_param_init),
};
odp_testinfo_t init_suite[] = {
diff --git a/test/validation/api/lock/lock.c b/test/validation/api/lock/lock.c
index 394cd820a..c5e07c776 100644
--- a/test/validation/api/lock/lock.c
+++ b/test/validation/api/lock/lock.c
@@ -13,6 +13,7 @@
#define VERBOSE 0
+#define MAX_WORKERS 32
#define MIN_ITERATIONS 1000
#define MAX_ITERATIONS 30000
#define ITER_MPLY_FACTOR 3
@@ -1004,11 +1005,10 @@ static int rwlock_recursive_functional_test(void *arg UNUSED)
/* Thread-unsafe tests */
static void lock_test_no_lock_functional(void)
{
- pthrd_arg arg;
+ int num = global_mem->g_num_threads;
- arg.numthrds = global_mem->g_num_threads;
- odp_cunit_thread_create(no_lock_functional_test, &arg);
- odp_cunit_thread_exit(&arg);
+ odp_cunit_thread_create(num, no_lock_functional_test, NULL, 0);
+ odp_cunit_thread_join(num);
}
odp_testinfo_t lock_suite_no_locking[] = {
@@ -1019,40 +1019,36 @@ odp_testinfo_t lock_suite_no_locking[] = {
/* Spin lock tests */
static void lock_test_spinlock_api(void)
{
- pthrd_arg arg;
+ int num = global_mem->g_num_threads;
- arg.numthrds = global_mem->g_num_threads;
- odp_cunit_thread_create(spinlock_api_tests, &arg);
- odp_cunit_thread_exit(&arg);
+ odp_cunit_thread_create(num, spinlock_api_tests, NULL, 0);
+ odp_cunit_thread_join(num);
}
static void lock_test_spinlock_functional(void)
{
- pthrd_arg arg;
+ int num = global_mem->g_num_threads;
- arg.numthrds = global_mem->g_num_threads;
odp_spinlock_init(&global_mem->global_spinlock);
- odp_cunit_thread_create(spinlock_functional_test, &arg);
- odp_cunit_thread_exit(&arg);
+ odp_cunit_thread_create(num, spinlock_functional_test, NULL, 0);
+ odp_cunit_thread_join(num);
}
static void lock_test_spinlock_recursive_api(void)
{
- pthrd_arg arg;
+ int num = global_mem->g_num_threads;
- arg.numthrds = global_mem->g_num_threads;
- odp_cunit_thread_create(spinlock_recursive_api_tests, &arg);
- odp_cunit_thread_exit(&arg);
+ odp_cunit_thread_create(num, spinlock_recursive_api_tests, NULL, 0);
+ odp_cunit_thread_join(num);
}
static void lock_test_spinlock_recursive_functional(void)
{
- pthrd_arg arg;
+ int num = global_mem->g_num_threads;
- arg.numthrds = global_mem->g_num_threads;
odp_spinlock_recursive_init(&global_mem->global_recursive_spinlock);
- odp_cunit_thread_create(spinlock_recursive_functional_test, &arg);
- odp_cunit_thread_exit(&arg);
+ odp_cunit_thread_create(num, spinlock_recursive_functional_test, NULL, 0);
+ odp_cunit_thread_join(num);
}
odp_testinfo_t lock_suite_spinlock[] = {
@@ -1070,22 +1066,19 @@ odp_testinfo_t lock_suite_spinlock_recursive[] = {
/* Ticket lock tests */
static void lock_test_ticketlock_api(void)
{
- pthrd_arg arg;
+ int num = global_mem->g_num_threads;
- arg.numthrds = global_mem->g_num_threads;
- odp_cunit_thread_create(ticketlock_api_tests, &arg);
- odp_cunit_thread_exit(&arg);
+ odp_cunit_thread_create(num, ticketlock_api_tests, NULL, 0);
+ odp_cunit_thread_join(num);
}
static void lock_test_ticketlock_functional(void)
{
- pthrd_arg arg;
+ int num = global_mem->g_num_threads;
- arg.numthrds = global_mem->g_num_threads;
odp_ticketlock_init(&global_mem->global_ticketlock);
-
- odp_cunit_thread_create(ticketlock_functional_test, &arg);
- odp_cunit_thread_exit(&arg);
+ odp_cunit_thread_create(num, ticketlock_functional_test, NULL, 0);
+ odp_cunit_thread_join(num);
}
odp_testinfo_t lock_suite_ticketlock[] = {
@@ -1097,21 +1090,19 @@ odp_testinfo_t lock_suite_ticketlock[] = {
/* RW lock tests */
static void lock_test_rwlock_api(void)
{
- pthrd_arg arg;
+ int num = global_mem->g_num_threads;
- arg.numthrds = global_mem->g_num_threads;
- odp_cunit_thread_create(rwlock_api_tests, &arg);
- odp_cunit_thread_exit(&arg);
+ odp_cunit_thread_create(num, rwlock_api_tests, NULL, 0);
+ odp_cunit_thread_join(num);
}
static void lock_test_rwlock_functional(void)
{
- pthrd_arg arg;
+ int num = global_mem->g_num_threads;
- arg.numthrds = global_mem->g_num_threads;
odp_rwlock_init(&global_mem->global_rwlock);
- odp_cunit_thread_create(rwlock_functional_test, &arg);
- odp_cunit_thread_exit(&arg);
+ odp_cunit_thread_create(num, rwlock_functional_test, NULL, 0);
+ odp_cunit_thread_join(num);
}
odp_testinfo_t lock_suite_rwlock[] = {
@@ -1122,21 +1113,19 @@ odp_testinfo_t lock_suite_rwlock[] = {
static void lock_test_rwlock_recursive_api(void)
{
- pthrd_arg arg;
+ int num = global_mem->g_num_threads;
- arg.numthrds = global_mem->g_num_threads;
- odp_cunit_thread_create(rwlock_recursive_api_tests, &arg);
- odp_cunit_thread_exit(&arg);
+ odp_cunit_thread_create(num, rwlock_recursive_api_tests, NULL, 0);
+ odp_cunit_thread_join(num);
}
static void lock_test_rwlock_recursive_functional(void)
{
- pthrd_arg arg;
+ int num = global_mem->g_num_threads;
- arg.numthrds = global_mem->g_num_threads;
odp_rwlock_recursive_init(&global_mem->global_recursive_rwlock);
- odp_cunit_thread_create(rwlock_recursive_functional_test, &arg);
- odp_cunit_thread_exit(&arg);
+ odp_cunit_thread_create(num, rwlock_recursive_functional_test, NULL, 0);
+ odp_cunit_thread_join(num);
}
odp_testinfo_t lock_suite_rwlock_recursive[] = {
@@ -1161,7 +1150,6 @@ static int lock_init(odp_instance_t *inst)
{
uint32_t workers_count, max_threads;
int ret = 0;
- odp_cpumask_t mask;
odp_init_t init_param;
odph_helper_options_t helper_options;
@@ -1196,7 +1184,7 @@ static int lock_init(odp_instance_t *inst)
global_mem->g_iterations = 0; /* tuned by first test */
global_mem->g_verbose = VERBOSE;
- workers_count = odp_cpumask_default_worker(&mask, 0);
+ workers_count = odp_cpumask_default_worker(NULL, 0);
max_threads = (workers_count >= MAX_WORKERS) ?
MAX_WORKERS : workers_count;
diff --git a/test/validation/api/pktio/pktio.c b/test/validation/api/pktio/pktio.c
index 9a33fd983..84fad32b6 100644
--- a/test/validation/api/pktio/pktio.c
+++ b/test/validation/api/pktio/pktio.c
@@ -1495,11 +1495,17 @@ static void pktio_test_promisc(void)
odp_pktio_t pktio = create_pktio(0, ODP_PKTIN_MODE_SCHED,
ODP_PKTOUT_MODE_DIRECT);
CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio, &capa) == 0);
ret = odp_pktio_promisc_mode(pktio);
CU_ASSERT(ret >= 0);
+ CU_ASSERT(ret == 0 || ret == 1);
+
+ if (capa.set_op.op.promisc_mode) {
+ /* Disabled by default */
+ CU_ASSERT(ret == 0);
+ }
- CU_ASSERT_FATAL(odp_pktio_capability(pktio, &capa) == 0);
if (!capa.set_op.op.promisc_mode) {
printf("promiscuous mode not supported\n");
ret = odp_pktio_close(pktio);
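
A hedged sketch of the capability-gated promiscuous mode check the test above now performs; the pktio handle is assumed to be already opened and the helper name is illustrative.

#include <odp_api.h>

/* Returns the current promiscuous mode (0/1) or -1 on error. */
static int check_promisc(odp_pktio_t pktio)
{
	odp_pktio_capability_t capa;
	int ret;

	if (odp_pktio_capability(pktio, &capa))
		return -1;

	ret = odp_pktio_promisc_mode(pktio);
	if (ret < 0)
		return -1;

	/* When the mode is settable, it starts out disabled */
	if (capa.set_op.op.promisc_mode && ret != 0)
		return -1;

	return ret;
}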
diff --git a/test/validation/api/pool/pool.c b/test/validation/api/pool/pool.c
index 3befe4939..c5f57d17f 100644
--- a/test/validation/api/pool/pool.c
+++ b/test/validation/api/pool/pool.c
@@ -11,6 +11,8 @@
#include "test_common_macros.h"
#include <odp/helper/odph_api.h>
+#define MAX_WORKERS 32
+
#define BUF_SIZE 1500
#define BUF_NUM 1000
#define TMO_NUM 1000
@@ -895,8 +897,7 @@ static int run_pool_test_create_after_fork(void *arg ODP_UNUSED)
static void pool_test_create_after_fork(void)
{
odp_shm_t shm;
- odp_cpumask_t unused;
- pthrd_arg thrdarg;
+ int num;
/* No single VA required since reserve is done before fork */
shm = odp_shm_reserve(NULL, sizeof(global_shared_mem_t), 0, 0);
@@ -904,17 +905,17 @@ static void pool_test_create_after_fork(void)
global_mem = odp_shm_addr(shm);
CU_ASSERT_PTR_NOT_NULL_FATAL(global_mem);
- thrdarg.numthrds = odp_cpumask_default_worker(&unused, 0);
- if (thrdarg.numthrds > MAX_WORKERS)
- thrdarg.numthrds = MAX_WORKERS;
+ num = odp_cpumask_default_worker(NULL, 0);
+ if (num > MAX_WORKERS)
+ num = MAX_WORKERS;
- global_mem->nb_threads = thrdarg.numthrds;
+ global_mem->nb_threads = num;
global_mem->pool = ODP_POOL_INVALID;
- odp_barrier_init(&global_mem->init_barrier, thrdarg.numthrds + 1);
+ odp_barrier_init(&global_mem->init_barrier, num + 1);
odp_atomic_init_u32(&global_mem->index, 0);
/* Fork here */
- odp_cunit_thread_create(run_pool_test_create_after_fork, &thrdarg);
+ odp_cunit_thread_create(num, run_pool_test_create_after_fork, NULL, 0);
/* Wait until thread 0 has created the test pool */
odp_barrier_wait(&global_mem->init_barrier);
@@ -922,7 +923,7 @@ static void pool_test_create_after_fork(void)
buffer_alloc_loop(global_mem->pool, BUF_NUM, BUF_SIZE);
/* Wait for all thread endings */
- CU_ASSERT(odp_cunit_thread_exit(&thrdarg) >= 0);
+ CU_ASSERT(odp_cunit_thread_join(num) >= 0);
CU_ASSERT(!odp_pool_destroy(global_mem->pool));
@@ -1091,6 +1092,11 @@ static void pool_test_pool_statistics(odp_pool_type_t pool_type)
odp_pool_stats_opt_t supported;
uint32_t i, j, num_pool, num_obj, cache_size;
uint32_t max_pools = 2;
+ uint16_t first = 0;
+ uint16_t last = ODP_POOL_MAX_THREAD_STATS - 1;
+
+ if (last > odp_thread_count_max() - 1)
+ last = odp_thread_count_max() - 1;
odp_pool_param_init(&param);
@@ -1168,6 +1174,8 @@ static void pool_test_pool_statistics(odp_pool_type_t pool_type)
uint32_t num_fails = 0;
CU_ASSERT_FATAL(odp_pool_stats_reset(pool[i]) == 0);
+ stats.thread.first = first;
+ stats.thread.last = last;
CU_ASSERT_FATAL(odp_pool_stats(pool[i], &stats) == 0);
CU_ASSERT(stats.available <= num_obj);
@@ -1178,10 +1186,17 @@ static void pool_test_pool_statistics(odp_pool_type_t pool_type)
CU_ASSERT(stats.cache_available <= num_obj);
CU_ASSERT(stats.cache_alloc_ops == 0);
CU_ASSERT(stats.cache_free_ops == 0);
+ CU_ASSERT(stats.thread.first == first);
+ CU_ASSERT(stats.thread.last == last);
+ for (j = 0; j < ODP_POOL_MAX_THREAD_STATS; j++)
+ CU_ASSERT(stats.thread.cache_available[j] <= stats.cache_available);
/* Allocate the events */
for (j = 0; j < num_alloc_rounds; j++) {
odp_event_t new_event = ODP_EVENT_INVALID;
+ uint64_t total_cached = 0;
+ uint16_t first_id = 0;
+ uint16_t last_id = last;
if (pool_type == ODP_POOL_BUFFER) {
odp_buffer_t buf = odp_buffer_alloc(pool[i]);
@@ -1213,16 +1228,42 @@ static void pool_test_pool_statistics(odp_pool_type_t pool_type)
CU_ASSERT_FATAL(odp_pool_stats(pool[i], &stats) == 0);
CU_ASSERT(stats.available <= num_obj - num_events);
CU_ASSERT(stats.cache_available <= num_obj - num_events);
+
+ while (first_id < odp_thread_count_max()) {
+ stats.thread.first = first_id;
+ stats.thread.last = last_id;
+ CU_ASSERT_FATAL(odp_pool_stats(pool[i], &stats) == 0);
+
+ for (int i = 0; i < ODP_POOL_MAX_THREAD_STATS; i++) {
+ uint64_t cached = stats.thread.cache_available[i];
+
+ CU_ASSERT(cached <= num_obj - num_events);
+ total_cached += cached;
+ }
+ first_id = last_id + 1;
+ last_id += ODP_POOL_MAX_THREAD_STATS;
+ if (last_id >= odp_thread_count_max())
+ last_id = odp_thread_count_max() - 1;
+ };
+
+ if (supported.bit.cache_available && supported.bit.thread_cache_available &&
+ ODP_POOL_MAX_THREAD_STATS >= odp_thread_count_max())
+ CU_ASSERT(stats.cache_available == total_cached);
}
CU_ASSERT(num_events == num_obj);
num_event[i] = num_events;
+ stats.thread.first = first;
+ stats.thread.last = last;
+ CU_ASSERT_FATAL(odp_pool_stats(pool[i], &stats) == 0);
+
/* All events are allocated, available count in pool and pool
* local caches should be zero. */
- CU_ASSERT_FATAL(odp_pool_stats(pool[i], &stats) == 0);
CU_ASSERT(stats.available == 0);
CU_ASSERT(stats.cache_available == 0);
+ for (j = 0; j < ODP_POOL_MAX_THREAD_STATS; j++)
+ CU_ASSERT(stats.thread.cache_available[j] == 0);
if (supported.bit.alloc_ops)
CU_ASSERT(stats.alloc_ops > 0 && stats.alloc_ops <= num_obj + 1);
if (supported.bit.alloc_fails)
@@ -1236,6 +1277,8 @@ static void pool_test_pool_statistics(odp_pool_type_t pool_type)
for (i = 0; i < num_pool; i++) {
odp_event_free_multi(event[i], num_event[i]);
+ stats.thread.first = odp_thread_id();
+ stats.thread.last = odp_thread_id();
CU_ASSERT_FATAL(odp_pool_stats(pool[i], &stats) == 0);
if (supported.bit.available && supported.bit.cache_available)
@@ -1255,6 +1298,8 @@ static void pool_test_pool_statistics(odp_pool_type_t pool_type)
printf(" cache_available: %" PRIu64 "\n", stats.cache_available);
printf(" cache_alloc_ops: %" PRIu64 "\n", stats.cache_alloc_ops);
printf(" cache_free_ops: %" PRIu64 "\n", stats.cache_free_ops);
+ printf(" thread.cache_available[0]: %" PRIu64 "\n",
+ stats.thread.cache_available[0]);
}
CU_ASSERT_FATAL(odp_pool_stats_reset(pool[i]) == 0);
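
To make the thread.first/last windowing above easier to follow, a hedged sketch that sums the cached events of all threads, ODP_POOL_MAX_THREAD_STATS threads per odp_pool_stats() call. The pool handle is assumed valid; the helper name is illustrative.

#include <odp_api.h>

/* Sum per-thread cached events over all threads */
static uint64_t total_thread_cache(odp_pool_t pool)
{
	odp_pool_stats_t stats;
	uint64_t total = 0;
	int max_thr = odp_thread_count_max();
	int first = 0;

	while (first < max_thr) {
		int last = first + ODP_POOL_MAX_THREAD_STATS - 1;

		if (last >= max_thr)
			last = max_thr - 1;

		/* Select the thread window to read */
		stats.thread.first = first;
		stats.thread.last = last;

		if (odp_pool_stats(pool, &stats))
			return 0;

		for (int i = 0; i <= last - first; i++)
			total += stats.thread.cache_available[i];

		first = last + 1;
	}

	return total;
}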
diff --git a/test/validation/api/queue/queue.c b/test/validation/api/queue/queue.c
index cd5e030d3..8f1278f61 100644
--- a/test/validation/api/queue/queue.c
+++ b/test/validation/api/queue/queue.c
@@ -8,6 +8,7 @@
#include <odp_api.h>
#include <odp_cunit_common.h>
+#define MAX_WORKERS 32
#define BURST_SIZE (8)
#define MAX_NUM_EVENT (1 * 1024)
#define MAX_ITERATION (100)
@@ -17,7 +18,6 @@
#define ENQ_RETRIES 100
typedef struct {
- pthrd_arg cu_thr;
int num_workers;
odp_barrier_t barrier;
odp_queue_t queue;
@@ -60,7 +60,6 @@ static int queue_suite_init(void)
test_globals_t *globals;
odp_pool_param_t params;
int num_workers;
- odp_cpumask_t mask;
shm = odp_shm_reserve(GLOBALS_NAME, sizeof(test_globals_t),
ODP_CACHE_LINE_SIZE, 0);
@@ -73,7 +72,7 @@ static int queue_suite_init(void)
globals = odp_shm_addr(shm);
memset(globals, 0, sizeof(test_globals_t));
- num_workers = odp_cpumask_default_worker(&mask, 0);
+ num_workers = odp_cpumask_default_worker(NULL, 0);
if (num_workers > MAX_WORKERS)
num_workers = MAX_WORKERS;
@@ -494,6 +493,7 @@ static void test_pair(odp_nonblocking_t nonblocking,
odp_event_t ev;
odp_shm_t shm;
test_globals_t *globals;
+ void *arg;
shm = odp_shm_lookup(GLOBALS_NAME);
CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
@@ -546,14 +546,14 @@ static void test_pair(odp_nonblocking_t nonblocking,
odp_atomic_init_u32(&globals->pair.counter, 0);
/* Create one worker thread */
- globals->cu_thr.numthrds = 1;
- odp_cunit_thread_create(queue_pair_work_loop, (pthrd_arg *)globals);
+ arg = globals;
+ odp_cunit_thread_create(1, queue_pair_work_loop, &arg, 0);
/* Run this thread as the second thread */
CU_ASSERT(queue_pair_work_loop(globals) == 0);
/* Wait worker to terminate */
- odp_cunit_thread_exit((pthrd_arg *)globals);
+ odp_cunit_thread_join(1);
CU_ASSERT(globals->pair.passed_a);
CU_ASSERT(globals->pair.passed_b);
@@ -972,6 +972,8 @@ static void multithread_test(odp_nonblocking_t nonblocking)
odp_queue_capability_t capa;
uint32_t queue_size, max_size;
uint32_t num, sum, num_free, i;
+ int num_workers;
+ void *arg;
CU_ASSERT(odp_queue_capability(&capa) == 0);
@@ -1000,7 +1002,7 @@ static void multithread_test(odp_nonblocking_t nonblocking)
CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
globals = odp_shm_addr(shm);
- globals->cu_thr.numthrds = globals->num_workers;
+ num_workers = globals->num_workers;
odp_queue_param_init(&qparams);
qparams.type = ODP_QUEUE_TYPE_PLAIN;
@@ -1015,10 +1017,11 @@ static void multithread_test(odp_nonblocking_t nonblocking)
CU_ASSERT(alloc_and_enqueue(queue, pool, num) == num);
- odp_cunit_thread_create(queue_test_worker, (pthrd_arg *)globals);
+ arg = globals;
+ odp_cunit_thread_create(num_workers, queue_test_worker, &arg, 0);
/* Wait for worker threads to terminate */
- odp_cunit_thread_exit((pthrd_arg *)globals);
+ odp_cunit_thread_join(num_workers);
sum = 0;
for (i = 0; i < ODP_THREAD_COUNT_MAX; i++)
diff --git a/test/validation/api/scheduler/scheduler.c b/test/validation/api/scheduler/scheduler.c
index a9c94f5a8..878e99e07 100644
--- a/test/validation/api/scheduler/scheduler.c
+++ b/test/validation/api/scheduler/scheduler.c
@@ -9,6 +9,7 @@
#include "odp_cunit_common.h"
#include <odp/helper/odph_api.h>
+#define MAX_WORKERS 32
#define MAX_ORDERED_LOCKS 2
#define MAX_POOL_SIZE (1024 * 1024)
#define MSG_POOL_SIZE (64 * 1024)
@@ -61,6 +62,8 @@
#define SCHED_AND_PLAIN_ROUNDS 10000
#define ATOMICITY_ROUNDS 100
+#define FIFO_MAX_EVENTS 151
+
/* Test global variables */
typedef struct {
int num_workers;
@@ -93,10 +96,22 @@ typedef struct {
odp_atomic_u32_t helper_ready;
odp_atomic_u32_t helper_active;
} order_wait;
+ struct {
+ odp_barrier_t barrier;
+ int multi;
+ odp_queue_t queue;
+ odp_pool_t pool;
+ uint32_t num_events;
+ uint32_t num_enq;
+ uint32_t burst;
+ odp_atomic_u32_t cur_thr;
+ uint16_t num_thr;
+ odp_event_t event[FIFO_MAX_EVENTS];
+ } fifo;
+
} test_globals_t;
typedef struct {
- pthrd_arg cu_thr;
test_globals_t *globals;
odp_schedule_sync_t sync;
int num_queues;
@@ -1340,7 +1355,8 @@ static void chaos_run(unsigned int qtype)
test_globals_t *globals;
thread_args_t *args;
odp_shm_t shm;
- int i, rc;
+ int i, rc, num_thr;
+ void *arg_ptr;
odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_PARALLEL,
ODP_SCHED_SYNC_ATOMIC,
ODP_SCHED_SYNC_ORDERED};
@@ -1403,14 +1419,15 @@ static void chaos_run(unsigned int qtype)
}
/* Test runs also on the main thread */
- args->cu_thr.numthrds = globals->num_workers - 1;
- if (args->cu_thr.numthrds > 0)
- odp_cunit_thread_create(chaos_thread, &args->cu_thr);
+ num_thr = globals->num_workers - 1;
+ arg_ptr = args;
+ if (num_thr > 0)
+ odp_cunit_thread_create(num_thr, chaos_thread, &arg_ptr, 0);
chaos_thread(args);
- if (args->cu_thr.numthrds > 0)
- odp_cunit_thread_exit(&args->cu_thr);
+ if (num_thr > 0)
+ odp_cunit_thread_join(num_thr);
if (CHAOS_DEBUG)
printf("Thread %d returning from chaos threads..cleaning up\n",
@@ -1812,6 +1829,8 @@ static void parallel_execute(odp_schedule_sync_t sync, int num_queues,
odp_shm_t shm;
test_globals_t *globals;
thread_args_t *args;
+ void *arg_ptr;
+ int num;
shm = odp_shm_lookup(GLOBALS_SHM_NAME);
CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
@@ -1843,15 +1862,16 @@ static void parallel_execute(odp_schedule_sync_t sync, int num_queues,
/* Create and launch worker threads */
/* Test runs also on the main thread */
- args->cu_thr.numthrds = globals->num_workers - 1;
- if (args->cu_thr.numthrds > 0)
- odp_cunit_thread_create(schedule_common_, &args->cu_thr);
+ num = globals->num_workers - 1;
+ arg_ptr = args;
+ if (num > 0)
+ odp_cunit_thread_create(num, schedule_common_, &arg_ptr, 0);
schedule_common_(args);
/* Wait for worker threads to terminate */
- if (args->cu_thr.numthrds > 0)
- odp_cunit_thread_exit(&args->cu_thr);
+ if (num > 0)
+ odp_cunit_thread_join(num);
/* Cleanup ordered queues for next pass */
if (sync == ODP_SCHED_SYNC_ORDERED)
@@ -2389,10 +2409,10 @@ static void scheduler_test_order_wait_2_threads(void)
odp_schedule_capability_t sched_capa;
odp_queue_param_t queue_param;
odp_queue_t queue;
- pthrd_arg thr_arg = {.numthrds = 1};
int ret;
odp_time_t start;
odp_event_t ev;
+ int num = 1;
CU_ASSERT(!odp_schedule_capability(&sched_capa));
@@ -2406,8 +2426,8 @@ static void scheduler_test_order_wait_2_threads(void)
odp_atomic_init_u32(&globals->order_wait.helper_ready, 0);
odp_atomic_init_u32(&globals->order_wait.helper_active, 0);
- ret = odp_cunit_thread_create(order_wait_helper, &thr_arg);
- CU_ASSERT_FATAL(ret == thr_arg.numthrds);
+ ret = odp_cunit_thread_create(num, order_wait_helper, NULL, 0);
+ CU_ASSERT_FATAL(ret == num);
/* Send an event to the helper thread */
enqueue_event(queue);
@@ -2471,7 +2491,7 @@ static void scheduler_test_order_wait_2_threads(void)
CU_ASSERT(ev == ODP_EVENT_INVALID);
out:
- CU_ASSERT(odp_cunit_thread_exit(&thr_arg) == 0);
+ CU_ASSERT(odp_cunit_thread_join(num) == 0);
CU_ASSERT(odp_queue_destroy(queue) == 0);
}
@@ -2570,7 +2590,8 @@ static void scheduler_test_sched_and_plain(odp_schedule_sync_t sync)
uint64_t wait = odp_schedule_wait_time(100 * ODP_TIME_MSEC_IN_NS);
uint32_t events_per_queue = BUFS_PER_QUEUE / 2;
uint32_t prev_seq;
- int first;
+ int first, num;
+ void *arg_ptr;
CU_ASSERT_FATAL(!odp_schedule_capability(&sched_capa));
CU_ASSERT_FATAL(!odp_queue_capability(&queue_capa))
@@ -2653,14 +2674,15 @@ static void scheduler_test_sched_and_plain(odp_schedule_sync_t sync)
CU_ASSERT_FATAL(seq > 2);
/* Test runs also on the main thread */
- args->cu_thr.numthrds = globals->num_workers - 1;
- if (args->cu_thr.numthrds > 0)
- odp_cunit_thread_create(sched_and_plain_thread, &args->cu_thr);
+ num = globals->num_workers - 1;
+ arg_ptr = args;
+ if (num > 0)
+ odp_cunit_thread_create(num, sched_and_plain_thread, &arg_ptr, 0);
sched_and_plain_thread(args);
- if (args->cu_thr.numthrds > 0)
- odp_cunit_thread_exit(&args->cu_thr);
+ if (num > 0)
+ odp_cunit_thread_join(num);
/* Check plain queue sequence numbers and free events */
first = 1;
@@ -2719,6 +2741,285 @@ static void scheduler_test_ordered_and_plain(void)
scheduler_test_sched_and_plain(ODP_SCHED_SYNC_ORDERED);
}
+static void scheduler_fifo_init(odp_schedule_sync_t sync, int multi, uint32_t num_thr)
+{
+ odp_queue_t queue;
+ odp_pool_t pool;
+ odp_buffer_t buf;
+ uint32_t *seq;
+ uint32_t i;
+ odp_queue_param_t queue_param;
+ odp_pool_param_t pool_param;
+ odp_schedule_capability_t sched_capa;
+ uint32_t num_events = FIFO_MAX_EVENTS;
+
+ CU_ASSERT_FATAL(!odp_schedule_capability(&sched_capa));
+
+ /* Make sure events fit into the queue */
+ if (sched_capa.max_queue_size && num_events > sched_capa.max_queue_size)
+ num_events = sched_capa.max_queue_size;
+
+ sched_queue_param_init(&queue_param);
+ queue_param.sched.sync = sync;
+ queue_param.size = num_events;
+
+ queue = odp_queue_create("sched_fifo", &queue_param);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ odp_pool_param_init(&pool_param);
+ pool_param.type = ODP_POOL_BUFFER;
+ pool_param.buf.size = 32;
+ pool_param.buf.num = num_events;
+
+ pool = odp_pool_create("sched_fifo", &pool_param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ for (i = 0; i < num_events; i++) {
+ buf = odp_buffer_alloc(pool);
+ if (buf == ODP_BUFFER_INVALID)
+ break;
+
+ seq = odp_buffer_addr(buf);
+ *seq = i;
+ globals->fifo.event[i] = odp_buffer_to_event(buf);
+ }
+
+ CU_ASSERT_FATAL(i == num_events);
+
+ odp_barrier_init(&globals->fifo.barrier, num_thr);
+
+ globals->fifo.multi = multi;
+ globals->fifo.queue = queue;
+ globals->fifo.pool = pool;
+ globals->fifo.num_events = num_events;
+ globals->fifo.num_enq = 0;
+ globals->fifo.burst = 0;
+ globals->fifo.num_thr = num_thr;
+ odp_atomic_init_u32(&globals->fifo.cur_thr, 0);
+}
+
+static int scheduler_fifo_test(void *arg)
+{
+ odp_queue_t from;
+ odp_buffer_t buf;
+ int ret;
+ uint32_t *seq;
+ uint32_t i, num, cur_thr;
+ uint32_t num_enq = 0;
+ uint32_t thr;
+ uint16_t num_thr = globals->fifo.num_thr;
+ uint64_t wait = odp_schedule_wait_time(100 * ODP_TIME_MSEC_IN_NS);
+ int multi = globals->fifo.multi;
+ odp_queue_t queue = globals->fifo.queue;
+ odp_pool_t pool = globals->fifo.pool;
+ uint32_t num_events = globals->fifo.num_events;
+ odp_event_t events[num_events];
+
+ /* Thread index as argument */
+ thr = (uintptr_t)arg;
+
+ odp_barrier_wait(&globals->fifo.barrier);
+
+ /* Threads enqueue events in round robin */
+ while (1) {
+ cur_thr = odp_atomic_load_acq_u32(&globals->fifo.cur_thr);
+ if (cur_thr != thr)
+ continue;
+
+ num_enq = globals->fifo.num_enq;
+
+ if (num_enq >= num_events) {
+ odp_atomic_store_u32(&globals->fifo.cur_thr, (cur_thr + 1) % num_thr);
+ break;
+ }
+
+ if (multi) {
+ num = globals->fifo.burst + 1;
+ globals->fifo.burst = num % 10;
+
+ if (num > (num_events - num_enq))
+ num = num_events - num_enq;
+
+ ret = odp_queue_enq_multi(queue, &globals->fifo.event[num_enq], num);
+ CU_ASSERT(ret > 0);
+ CU_ASSERT_FATAL(ret <= (int)num);
+ } else {
+ ret = odp_queue_enq(queue, globals->fifo.event[num_enq]);
+ CU_ASSERT(ret == 0);
+ if (ret == 0)
+ ret = 1;
+ }
+
+ if (ret > 0)
+ globals->fifo.num_enq += ret;
+
+ odp_atomic_store_rel_u32(&globals->fifo.cur_thr, (cur_thr + 1) % num_thr);
+ }
+
+ odp_barrier_wait(&globals->fifo.barrier);
+
+ if (thr != 0)
+ return 0;
+
+ /* Thread 0 checks event order and destroys queue/pool */
+ CU_ASSERT(globals->fifo.num_enq == num_events);
+ if (globals->fifo.num_enq > num_events)
+ return -1;
+
+ num_events = globals->fifo.num_enq;
+
+ for (i = 0; i < num_events; i++)
+ events[i] = ODP_EVENT_INVALID;
+
+ num = 0;
+
+ while (1) {
+ uint32_t num_recv;
+ int max_num = 3;
+ odp_event_t ev[max_num];
+
+ from = ODP_QUEUE_INVALID;
+
+ if (multi) {
+ ret = odp_schedule_multi(&from, wait, ev, max_num);
+ CU_ASSERT_FATAL(ret >= 0 && ret <= max_num);
+
+ if (ret == 0)
+ break;
+ } else {
+ ev[0] = odp_schedule(&from, wait);
+ if (ev[0] == ODP_EVENT_INVALID)
+ break;
+
+ ret = 1;
+ }
+
+ num_recv = ret;
+ CU_ASSERT(num < num_events);
+
+ if (num >= num_events) {
+ /* Drop extra events */
+ odp_event_free_multi(ev, num_recv);
+ continue;
+ }
+
+ for (i = 0; i < num_recv; i++) {
+ CU_ASSERT(odp_event_type(ev[i]) == ODP_EVENT_BUFFER);
+ events[num] = ev[i];
+ num++;
+ }
+
+ CU_ASSERT(from == queue);
+ }
+
+ CU_ASSERT(num == num_events);
+
+ for (i = 0; i < num; i++) {
+ buf = odp_buffer_from_event(events[i]);
+ seq = odp_buffer_addr(buf);
+
+ CU_ASSERT(*seq == i);
+
+ if (*seq != i)
+ ODPH_ERR("Bad sequence number %u, expected %u\n", *seq, i);
+
+ odp_buffer_free(buf);
+ }
+
+ CU_ASSERT_FATAL(!odp_queue_destroy(queue));
+ CU_ASSERT_FATAL(!odp_pool_destroy(pool));
+
+ return 0;
+}
+
+static void scheduler_fifo_parallel_single(void)
+{
+ scheduler_fifo_init(ODP_SCHED_SYNC_PARALLEL, 0, 1);
+ scheduler_fifo_test(0);
+}
+
+static void scheduler_fifo_parallel_multi(void)
+{
+ scheduler_fifo_init(ODP_SCHED_SYNC_PARALLEL, 1, 1);
+ scheduler_fifo_test(0);
+}
+
+static void scheduler_fifo_atomic_single(void)
+{
+ scheduler_fifo_init(ODP_SCHED_SYNC_ATOMIC, 0, 1);
+ scheduler_fifo_test(0);
+}
+
+static void scheduler_fifo_atomic_multi(void)
+{
+ scheduler_fifo_init(ODP_SCHED_SYNC_ATOMIC, 1, 1);
+ scheduler_fifo_test(0);
+}
+
+static void scheduler_fifo_ordered_single(void)
+{
+ scheduler_fifo_init(ODP_SCHED_SYNC_ORDERED, 0, 1);
+ scheduler_fifo_test(0);
+}
+
+static void scheduler_fifo_ordered_multi(void)
+{
+ scheduler_fifo_init(ODP_SCHED_SYNC_ORDERED, 1, 1);
+ scheduler_fifo_test(0);
+}
+
+static void scheduler_fifo_mt(odp_schedule_sync_t sync, int multi)
+{
+ uint32_t i;
+ uint32_t num_thr = globals->num_workers;
+ uintptr_t arg[num_thr];
+
+ scheduler_fifo_init(sync, multi, num_thr);
+
+ for (i = 0; i < num_thr; i++)
+ arg[i] = i;
+
+ if (num_thr > 1)
+ odp_cunit_thread_create(num_thr - 1, scheduler_fifo_test, (void **)&arg[1], 1);
+
+ /* Main thread runs as thread 0 */
+ scheduler_fifo_test(0);
+
+ /* Wait for worker threads to terminate */
+ if (num_thr > 1)
+ odp_cunit_thread_join(num_thr - 1);
+}
+
+static void scheduler_fifo_mt_parallel_single(void)
+{
+ scheduler_fifo_mt(ODP_SCHED_SYNC_PARALLEL, 0);
+}
+
+static void scheduler_fifo_mt_parallel_multi(void)
+{
+ scheduler_fifo_mt(ODP_SCHED_SYNC_PARALLEL, 1);
+}
+
+static void scheduler_fifo_mt_atomic_single(void)
+{
+ scheduler_fifo_mt(ODP_SCHED_SYNC_ATOMIC, 0);
+}
+
+static void scheduler_fifo_mt_atomic_multi(void)
+{
+ scheduler_fifo_mt(ODP_SCHED_SYNC_ATOMIC, 1);
+}
+
+static void scheduler_fifo_mt_ordered_single(void)
+{
+ scheduler_fifo_mt(ODP_SCHED_SYNC_ORDERED, 0);
+}
+
+static void scheduler_fifo_mt_ordered_multi(void)
+{
+ scheduler_fifo_mt(ODP_SCHED_SYNC_ORDERED, 1);
+}
+
static int atomicity_test_run(void *arg)
{
thread_args_t *args = (thread_args_t *)arg;
@@ -2782,7 +3083,8 @@ static void scheduler_test_atomicity(void)
odp_pool_t pool;
odp_queue_t queue;
odp_queue_param_t queue_param;
- int i;
+ int i, num;
+ void *arg_ptr;
shm = odp_shm_lookup(GLOBALS_SHM_NAME);
CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
@@ -2821,15 +3123,16 @@ static void scheduler_test_atomicity(void)
/* Create and launch worker threads */
/* Test runs also on the main thread */
args->num_workers = globals->num_workers;
- args->cu_thr.numthrds = globals->num_workers - 1;
- if (args->cu_thr.numthrds > 0)
- odp_cunit_thread_create(atomicity_test_run, &args->cu_thr);
+ num = globals->num_workers - 1;
+ arg_ptr = args;
+ if (num > 0)
+ odp_cunit_thread_create(num, atomicity_test_run, &arg_ptr, 0);
atomicity_test_run(args);
/* Wait for worker threads to terminate */
- if (args->cu_thr.numthrds > 0)
- odp_cunit_thread_exit(&args->cu_thr);
+ if (num > 0)
+ odp_cunit_thread_join(num);
odp_queue_destroy(globals->atomicity_q.handle);
}
@@ -3207,7 +3510,6 @@ static void scheduler_test_mq_mt_prio_a_print(void)
static int scheduler_test_global_init(void)
{
- odp_cpumask_t mask;
odp_shm_t shm;
thread_args_t *args;
odp_pool_t pool;
@@ -3234,7 +3536,7 @@ static int scheduler_test_global_init(void)
memset(globals, 0, sizeof(test_globals_t));
globals->shm_glb = shm;
- globals->num_workers = odp_cpumask_default_worker(&mask, 0);
+ globals->num_workers = odp_cpumask_default_worker(NULL, 0);
if (globals->num_workers > MAX_WORKERS)
globals->num_workers = MAX_WORKERS;
@@ -3306,6 +3608,9 @@ static int scheduler_test_global_init(void)
static int scheduler_multi_suite_init(void)
{
+ /* Line feeds to separate output from basic suite prints */
+ printf("\n\n");
+
if (create_queues(globals) != 0)
return -1;
@@ -3423,6 +3728,18 @@ odp_testinfo_t scheduler_basic_suite[] = {
ODP_TEST_INFO(scheduler_test_ordered),
ODP_TEST_INFO(scheduler_test_atomic_and_plain),
ODP_TEST_INFO(scheduler_test_ordered_and_plain),
+ ODP_TEST_INFO(scheduler_fifo_parallel_single),
+ ODP_TEST_INFO(scheduler_fifo_parallel_multi),
+ ODP_TEST_INFO(scheduler_fifo_atomic_single),
+ ODP_TEST_INFO(scheduler_fifo_atomic_multi),
+ ODP_TEST_INFO(scheduler_fifo_ordered_single),
+ ODP_TEST_INFO(scheduler_fifo_ordered_multi),
+ ODP_TEST_INFO(scheduler_fifo_mt_parallel_single),
+ ODP_TEST_INFO(scheduler_fifo_mt_parallel_multi),
+ ODP_TEST_INFO(scheduler_fifo_mt_atomic_single),
+ ODP_TEST_INFO(scheduler_fifo_mt_atomic_multi),
+ ODP_TEST_INFO(scheduler_fifo_mt_ordered_single),
+ ODP_TEST_INFO(scheduler_fifo_mt_ordered_multi),
ODP_TEST_INFO(scheduler_test_atomicity),
ODP_TEST_INFO_NULL
};
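
For reference, a minimal sketch (not part of the patch itself) of the updated test-thread helper usage that the scheduler changes above adopt. Judging from these call sites, odp_cunit_thread_create() now takes an explicit thread count, an array of argument pointers and a final flag, and threads are reclaimed with odp_cunit_thread_join(); the wrapper name below is purely illustrative.

static int worker(void *arg)
{
	(void)arg;
	return 0;
}

/* Illustrative only: start num_workers - 1 helper threads, run the same
 * function on the main thread as well, then join the helpers. */
static void run_on_workers(int num_workers, void *shared_arg)
{
	void *arg_ptr = shared_arg;	/* one shared argument, flag 0 */
	int num = num_workers - 1;

	if (num > 0)
		odp_cunit_thread_create(num, worker, &arg_ptr, 0);

	worker(shared_arg);

	if (num > 0)
		odp_cunit_thread_join(num);
}

When each thread needs its own argument, scheduler_fifo_mt() above instead passes an array of per-thread pointers and sets the flag to 1: odp_cunit_thread_create(num_thr - 1, scheduler_fifo_test, (void **)&arg[1], 1).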
diff --git a/test/validation/api/shmem/shmem.c b/test/validation/api/shmem/shmem.c
index 3bd164350..f68021310 100644
--- a/test/validation/api/shmem/shmem.c
+++ b/test/validation/api/shmem/shmem.c
@@ -10,6 +10,7 @@
#include <odp/helper/odph_api.h>
#include <stdlib.h>
+#define MAX_WORKERS 32
#define ALIGN_SIZE (128)
#define MEM_NAME "test_shmem"
#define NAME_LEN (sizeof(MEM_NAME) + 20)
@@ -135,12 +136,10 @@ static int run_test_basic_thread(void *arg ODP_UNUSED)
*/
static void shmem_test_multi_thread(void)
{
- pthrd_arg thrdarg;
odp_shm_t shm;
odp_shm_t shm2;
shared_test_data_t *shared_test_data;
- odp_cpumask_t unused;
- int i;
+ int i, num;
char max_name[ODP_SHM_NAME_LEN];
for (i = 0; i < ODP_SHM_NAME_LEN; i++)
@@ -202,14 +201,14 @@ static void shmem_test_multi_thread(void)
shared_test_data->foo = TEST_SHARE_FOO;
shared_test_data->bar = TEST_SHARE_BAR;
- thrdarg.numthrds = odp_cpumask_default_worker(&unused, 0);
+ num = odp_cpumask_default_worker(NULL, 0);
- if (thrdarg.numthrds > MAX_WORKERS)
- thrdarg.numthrds = MAX_WORKERS;
+ if (num > MAX_WORKERS)
+ num = MAX_WORKERS;
- odp_barrier_init(&shared_test_data->test_barrier1, thrdarg.numthrds);
- odp_cunit_thread_create(run_test_basic_thread, &thrdarg);
- CU_ASSERT(odp_cunit_thread_exit(&thrdarg) >= 0);
+ odp_barrier_init(&shared_test_data->test_barrier1, num);
+ odp_cunit_thread_create(num, run_test_basic_thread, NULL, 0);
+ CU_ASSERT(odp_cunit_thread_join(num) >= 0);
odp_shm_print(shm);
@@ -541,13 +540,10 @@ static int run_test_reserve_after_fork(void *arg ODP_UNUSED)
*/
static void shmem_test_reserve_after_fork(void)
{
- pthrd_arg thrdarg;
odp_shm_t shm;
odp_shm_t thr_shm;
shared_test_data_t *glob_data;
- odp_cpumask_t unused;
- int thr_index;
- int i;
+ int thr_index, i, num;
shared_test_data_small_t *pattern_small;
shared_test_data_medium_t *pattern_medium;
shared_test_data_big_t *pattern_big;
@@ -557,27 +553,27 @@ static void shmem_test_reserve_after_fork(void)
glob_data = odp_shm_addr(shm);
CU_ASSERT_PTR_NOT_NULL(glob_data);
- thrdarg.numthrds = odp_cpumask_default_worker(&unused, 0);
- if (thrdarg.numthrds > MAX_WORKERS)
- thrdarg.numthrds = MAX_WORKERS;
+ num = odp_cpumask_default_worker(NULL, 0);
+ if (num > MAX_WORKERS)
+ num = MAX_WORKERS;
- odp_barrier_init(&glob_data->test_barrier1, thrdarg.numthrds + 1);
- odp_barrier_init(&glob_data->test_barrier2, thrdarg.numthrds + 1);
+ odp_barrier_init(&glob_data->test_barrier1, num + 1);
+ odp_barrier_init(&glob_data->test_barrier2, num + 1);
odp_atomic_store_u32(&glob_data->index, 0);
- odp_cunit_thread_create(run_test_reserve_after_fork, &thrdarg);
+ odp_cunit_thread_create(num, run_test_reserve_after_fork, NULL, 0);
/* wait until all threads have made their shm_reserve: */
odp_barrier_wait(&glob_data->test_barrier1);
/* perform a lookup of all memories: */
- for (thr_index = 0; thr_index < thrdarg.numthrds; thr_index++) {
+ for (thr_index = 0; thr_index < num; thr_index++) {
thr_shm = odp_shm_lookup(glob_data->name[thr_index]);
CU_ASSERT(thr_shm == glob_data->shm[thr_index]);
}
/* check that the patterns are correct: */
- for (thr_index = 0; thr_index < thrdarg.numthrds; thr_index++) {
+ for (thr_index = 0; thr_index < num; thr_index++) {
switch (thr_index % 3) {
case 0:
pattern_small =
@@ -606,7 +602,7 @@ static void shmem_test_reserve_after_fork(void)
/*
* print the mapping address of the blocks
*/
- for (thr_index = 0; thr_index < thrdarg.numthrds; thr_index++)
+ for (thr_index = 0; thr_index < num; thr_index++)
printf("In main Block index: %d mapped at %p\n",
thr_index, odp_shm_addr(glob_data->shm[thr_index]));
@@ -614,13 +610,13 @@ static void shmem_test_reserve_after_fork(void)
odp_barrier_wait(&glob_data->test_barrier2);
/* at the same time (race), free all of the memories: */
- for (thr_index = 0; thr_index < thrdarg.numthrds; thr_index++) {
+ for (thr_index = 0; thr_index < num; thr_index++) {
thr_shm = glob_data->shm[thr_index];
CU_ASSERT(odp_shm_free(thr_shm) == 0);
}
/* wait for all thread endings: */
- CU_ASSERT(odp_cunit_thread_exit(&thrdarg) >= 0);
+ CU_ASSERT(odp_cunit_thread_join(num) >= 0);
/* just glob_data should remain: */
@@ -734,13 +730,10 @@ static int shmem_check_flag_single_va(void)
*/
static void shmem_test_singleva_after_fork(void)
{
- pthrd_arg thrdarg;
odp_shm_t shm;
odp_shm_t thr_shm;
shared_test_data_t *glob_data;
- odp_cpumask_t unused;
- int thr_index;
- int i;
+ int thr_index, i, num;
void *address;
shared_test_data_small_t *pattern_small;
shared_test_data_medium_t *pattern_medium;
@@ -752,30 +745,30 @@ static void shmem_test_singleva_after_fork(void)
glob_data = odp_shm_addr(shm);
CU_ASSERT_PTR_NOT_NULL(glob_data);
- thrdarg.numthrds = odp_cpumask_default_worker(&unused, 3);
- if (thrdarg.numthrds > MAX_WORKERS)
- thrdarg.numthrds = MAX_WORKERS;
+ num = odp_cpumask_default_worker(NULL, 3);
+ if (num > MAX_WORKERS)
+ num = MAX_WORKERS;
- glob_data->nb_threads = thrdarg.numthrds;
- odp_barrier_init(&glob_data->test_barrier1, thrdarg.numthrds + 1);
- odp_barrier_init(&glob_data->test_barrier2, thrdarg.numthrds + 1);
- odp_barrier_init(&glob_data->test_barrier3, thrdarg.numthrds + 1);
- odp_barrier_init(&glob_data->test_barrier4, thrdarg.numthrds + 1);
+ glob_data->nb_threads = num;
+ odp_barrier_init(&glob_data->test_barrier1, num + 1);
+ odp_barrier_init(&glob_data->test_barrier2, num + 1);
+ odp_barrier_init(&glob_data->test_barrier3, num + 1);
+ odp_barrier_init(&glob_data->test_barrier4, num + 1);
odp_atomic_store_u32(&glob_data->index, 0);
- odp_cunit_thread_create(run_test_singleva_after_fork, &thrdarg);
+ odp_cunit_thread_create(num, run_test_singleva_after_fork, NULL, 0);
/* wait until all threads have made their shm_reserve: */
odp_barrier_wait(&glob_data->test_barrier1);
/* perform a lookup of all memories: */
- for (thr_index = 0; thr_index < thrdarg.numthrds; thr_index++) {
+ for (thr_index = 0; thr_index < num; thr_index++) {
thr_shm = odp_shm_lookup(glob_data->name[thr_index]);
CU_ASSERT(thr_shm == glob_data->shm[thr_index]);
}
/* check that the patterns are correct: */
- for (thr_index = 0; thr_index < thrdarg.numthrds; thr_index++) {
+ for (thr_index = 0; thr_index < num; thr_index++) {
switch (thr_index % 3) {
case 0:
pattern_small =
@@ -804,7 +797,7 @@ static void shmem_test_singleva_after_fork(void)
/*
* check that the mapping address is common to all (SINGLE_VA):
*/
- for (thr_index = 0; thr_index < thrdarg.numthrds; thr_index++) {
+ for (thr_index = 0; thr_index < num; thr_index++) {
address = odp_shm_addr(glob_data->shm[thr_index]);
CU_ASSERT(glob_data->address[thr_index] == address);
}
@@ -819,7 +812,7 @@ static void shmem_test_singleva_after_fork(void)
odp_barrier_wait(&glob_data->test_barrier4);
/* wait for all thread endings: */
- CU_ASSERT(odp_cunit_thread_exit(&thrdarg) >= 0);
+ CU_ASSERT(odp_cunit_thread_join(num) >= 0);
/* just glob_data should remain: */
@@ -976,12 +969,11 @@ static int run_test_stress(void *arg ODP_UNUSED)
*/
static void shmem_test_stress(void)
{
- pthrd_arg thrdarg;
odp_shm_t shm;
odp_shm_t globshm;
shared_test_data_t *glob_data;
- odp_cpumask_t unused;
uint32_t i;
+ int num;
globshm = odp_shm_reserve(MEM_NAME, sizeof(shared_test_data_t),
0, 0);
@@ -989,12 +981,12 @@ static void shmem_test_stress(void)
glob_data = odp_shm_addr(globshm);
CU_ASSERT_PTR_NOT_NULL(glob_data);
- thrdarg.numthrds = odp_cpumask_default_worker(&unused, 0);
- if (thrdarg.numthrds > MAX_WORKERS)
- thrdarg.numthrds = MAX_WORKERS;
+ num = odp_cpumask_default_worker(NULL, 0);
+ if (num > MAX_WORKERS)
+ num = MAX_WORKERS;
- glob_data->nb_threads = thrdarg.numthrds;
- odp_barrier_init(&glob_data->test_barrier1, thrdarg.numthrds);
+ glob_data->nb_threads = num;
+ odp_barrier_init(&glob_data->test_barrier1, num);
odp_spinlock_init(&glob_data->stress_lock);
/* before starting the threads, mark all entries as free: */
@@ -1002,10 +994,10 @@ static void shmem_test_stress(void)
glob_data->stress[i].state = STRESS_FREE;
/* create threads */
- odp_cunit_thread_create(run_test_stress, &thrdarg);
+ odp_cunit_thread_create(num, run_test_stress, NULL, 0);
/* wait for all thread endings: */
- CU_ASSERT(odp_cunit_thread_exit(&thrdarg) >= 0);
+ CU_ASSERT(odp_cunit_thread_join(num) >= 0);
/* release left overs: */
for (i = 0; i < STRESS_SIZE; i++) {
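
A side note on the repeated cpumask change above: odp_cpumask_default_worker() accepts a NULL mask pointer when only the number of available worker CPUs is needed (the second argument 0 requests all of them). A minimal sketch of the resulting setup, reusing the names from the fork test above:

	int num = odp_cpumask_default_worker(NULL, 0);	/* count only, no mask */

	if (num > MAX_WORKERS)
		num = MAX_WORKERS;

	/* workers plus the main thread meet at the barrier */
	odp_barrier_init(&glob_data->test_barrier1, num + 1);
	odp_cunit_thread_create(num, run_test_reserve_after_fork, NULL, 0);
	odp_barrier_wait(&glob_data->test_barrier1);
	/* ... lookups and pattern checks ... */
	CU_ASSERT(odp_cunit_thread_join(num) >= 0);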
diff --git a/test/validation/api/stash/stash.c b/test/validation/api/stash/stash.c
index 4057156c7..f1de7ec00 100644
--- a/test/validation/api/stash/stash.c
+++ b/test/validation/api/stash/stash.c
@@ -139,6 +139,9 @@ static void param_defaults(uint8_t fill)
CU_ASSERT(param.put_mode == ODP_STASH_OP_MT);
CU_ASSERT(param.get_mode == ODP_STASH_OP_MT);
CU_ASSERT(param.cache_size == 0);
+ CU_ASSERT(param.stats.all == 0);
+ CU_ASSERT(param.stats.bit.count == 0);
+ CU_ASSERT(param.stats.bit.cache_count == 0);
}
static void stash_param_defaults(void)
@@ -187,6 +190,9 @@ static void stash_create_u32(void)
printf("\n Stash handle: 0x%" PRIx64 "\n", odp_stash_to_u64(stash));
+ printf("\n--- Stash print ----\n");
+ odp_stash_print(stash);
+
lookup = odp_stash_lookup("test_stash_u32");
CU_ASSERT(lookup != ODP_STASH_INVALID);
CU_ASSERT(stash == lookup);
@@ -371,6 +377,73 @@ static void stash_create_fifo_u32_all(void)
CU_ASSERT_FATAL(odp_stash_destroy(stash[i]) == 0);
}
+static void stash_stats_u32(void)
+{
+ odp_stash_t stash;
+ odp_stash_param_t param;
+ odp_stash_stats_t stats;
+ int capa_count, capa_cache_count;
+ uint32_t i, input, output;
+ uint32_t max_num = 10;
+ uint32_t num = max_num / 2;
+ uint32_t num_put = 0;
+
+ capa_count = global.capa_default.stats.bit.count;
+ capa_cache_count = global.capa_default.stats.bit.cache_count;
+
+ odp_stash_param_init(&param);
+ param.num_obj = max_num;
+ param.obj_size = sizeof(uint32_t);
+ param.stats.bit.count = capa_count;
+ param.stats.bit.cache_count = capa_cache_count;
+
+ stash = odp_stash_create("test_stats_u32", &param);
+ CU_ASSERT_FATAL(stash != ODP_STASH_INVALID);
+
+ memset(&stats, 0xff, sizeof(odp_stash_stats_t));
+
+ CU_ASSERT_FATAL(odp_stash_stats(stash, &stats) == 0);
+ CU_ASSERT(stats.count == 0);
+ CU_ASSERT(stats.cache_count == 0);
+
+ for (i = 0; i < num; i++) {
+ input = i;
+ if (odp_stash_put_u32(stash, &input, 1) == 1)
+ num_put++;
+ }
+
+ CU_ASSERT(num_put == num);
+
+ memset(&stats, 0xff, sizeof(odp_stash_stats_t));
+
+ CU_ASSERT_FATAL(odp_stash_stats(stash, &stats) == 0);
+
+ if (capa_count) {
+ /* CU_ASSERT needs extra brackets */
+ CU_ASSERT(stats.count <= num_put);
+ } else {
+ CU_ASSERT(stats.count == 0);
+ }
+
+ if (capa_cache_count) {
+ /* CU_ASSERT needs extra brackets */
+ CU_ASSERT(stats.cache_count <= num_put);
+ } else {
+ CU_ASSERT(stats.cache_count == 0);
+ }
+
+ if (capa_count && capa_cache_count)
+ CU_ASSERT((stats.count + stats.cache_count) == num_put);
+
+ for (i = 0; i < num_put; i++) {
+ output = -1;
+ CU_ASSERT(odp_stash_get_u32(stash, &output, 1) == 1);
+ CU_ASSERT(output < num);
+ }
+
+ CU_ASSERT_FATAL(odp_stash_destroy(stash) == 0);
+}
+
static void stash_default_put(uint32_t size, int32_t burst, stash_op_t op)
{
odp_stash_t stash;
@@ -891,6 +964,7 @@ odp_testinfo_t stash_suite[] = {
ODP_TEST_INFO(stash_default_put_u8_n),
ODP_TEST_INFO_CONDITIONAL(stash_create_u64_all, check_support_64),
ODP_TEST_INFO(stash_create_u32_all),
+ ODP_TEST_INFO(stash_stats_u32),
ODP_TEST_INFO_CONDITIONAL(stash_fifo_put_u64_1, check_support_fifo_64),
ODP_TEST_INFO_CONDITIONAL(stash_fifo_put_u64_n, check_support_fifo_64),
ODP_TEST_INFO_CONDITIONAL(stash_fifo_u64_put_u64_1, check_support_fifo_64),
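
A minimal sketch of the capability-gated statistics pattern that the new stash_stats_u32() test follows; the test itself uses its cached global.capa_default instead of querying again, and the stash name below is made up:

	odp_stash_capability_t capa;
	odp_stash_param_t param;
	odp_stash_stats_t stats;
	odp_stash_t stash;

	if (odp_stash_capability(&capa, ODP_STASH_TYPE_DEFAULT))
		return;

	odp_stash_param_init(&param);
	param.num_obj  = 10;
	param.obj_size = sizeof(uint32_t);
	/* request only the counters the implementation advertises */
	param.stats.bit.count       = capa.stats.bit.count;
	param.stats.bit.cache_count = capa.stats.bit.cache_count;

	stash = odp_stash_create("stats_sketch", &param);

	if (stash != ODP_STASH_INVALID && odp_stash_stats(stash, &stats) == 0) {
		/* a counter that was not requested reads back as zero */
		printf("count %" PRIu64 ", cache %" PRIu64 "\n",
		       stats.count, stats.cache_count);
	}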
diff --git a/test/validation/api/thread/thread.c b/test/validation/api/thread/thread.c
index 6499140a3..f279dd16f 100644
--- a/test/validation/api/thread/thread.c
+++ b/test/validation/api/thread/thread.c
@@ -1,4 +1,5 @@
/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2022, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -81,20 +82,29 @@ static int thread_global_term(odp_instance_t inst)
static void thread_test_odp_cpu_id(void)
{
- (void)odp_cpu_id();
- CU_PASS();
+ CU_ASSERT(odp_cpu_id() >= 0);
}
static void thread_test_odp_thread_id(void)
{
- (void)odp_thread_id();
- CU_PASS();
+ int id = odp_thread_id();
+
+ CU_ASSERT(id >= 0);
+ CU_ASSERT(id < odp_thread_count_max());
+ CU_ASSERT(id < ODP_THREAD_COUNT_MAX);
}
static void thread_test_odp_thread_count(void)
{
- (void)odp_thread_count();
- CU_PASS();
+ int count = odp_thread_count();
+
+ /* One thread running */
+ CU_ASSERT(count == 1);
+
+ CU_ASSERT(count >= 1);
+ CU_ASSERT(count <= odp_thread_count_max());
+ CU_ASSERT(count <= ODP_THREAD_COUNT_MAX);
+ CU_ASSERT(odp_thread_count_max() <= ODP_THREAD_COUNT_MAX);
}
static int thread_func(void *arg ODP_UNUSED)
@@ -114,12 +124,12 @@ static void thread_test_odp_thrmask_worker(void)
{
odp_thrmask_t mask;
int ret;
- pthrd_arg args = { .testcase = 0, .numthrds = 1 };
+ int num = 1;
CU_ASSERT_FATAL(odp_thread_type() == ODP_THREAD_CONTROL);
- odp_barrier_init(&global_mem->bar_entry, args.numthrds + 1);
- odp_barrier_init(&global_mem->bar_exit, args.numthrds + 1);
+ odp_barrier_init(&global_mem->bar_entry, num + 1);
+ odp_barrier_init(&global_mem->bar_exit, num + 1);
/* should start out with 0 worker threads */
ret = odp_thrmask_worker(&mask);
@@ -127,10 +137,10 @@ static void thread_test_odp_thrmask_worker(void)
CU_ASSERT(ret == 0);
/* start the test thread(s) */
- ret = odp_cunit_thread_create(thread_func, &args);
- CU_ASSERT(ret == args.numthrds);
+ ret = odp_cunit_thread_create(num, thread_func, NULL, 0);
+ CU_ASSERT(ret == num);
- if (ret != args.numthrds)
+ if (ret != num)
return;
/* wait for thread(s) to start */
@@ -138,13 +148,13 @@ static void thread_test_odp_thrmask_worker(void)
ret = odp_thrmask_worker(&mask);
CU_ASSERT(ret == odp_thrmask_count(&mask));
- CU_ASSERT(ret == args.numthrds);
+ CU_ASSERT(ret == num);
CU_ASSERT(ret <= odp_thread_count_max());
/* allow thread(s) to exit */
odp_barrier_wait(&global_mem->bar_exit);
- odp_cunit_thread_exit(&args);
+ odp_cunit_thread_join(num);
}
static void thread_test_odp_thrmask_control(void)
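
Condensed, the invariants that the strengthened thread tests above now assert (at this point exactly one thread, the control thread, is running):

	/* odp_cpu_id()    >= 0
	 * odp_thread_id() >= 0 and < odp_thread_count_max()
	 * odp_thread_count() == 1 here, and always
	 * 1 <= odp_thread_count() <= odp_thread_count_max() <= ODP_THREAD_COUNT_MAX
	 */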
diff --git a/test/validation/api/time/time.c b/test/validation/api/time/time.c
index 4974dcb5d..45bfc8264 100644
--- a/test/validation/api/time/time.c
+++ b/test/validation/api/time/time.c
@@ -1,4 +1,5 @@
/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2019-2022, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -14,7 +15,6 @@
#include "odp_cunit_common.h"
#define BUSY_LOOP_CNT 30000000 /* used for t > min resolution */
-#define BUSY_LOOP_CNT_LONG 6000000000 /* used for t > 4 sec */
#define MIN_TIME_RATE 32000
#define MAX_TIME_RATE 15000000000
#define DELAY_TOLERANCE 40000000 /* deviation for delay */
@@ -139,7 +139,7 @@ static void time_test_monotony(void)
lns_t2 = odp_time_local_ns();
gns_t2 = odp_time_global_ns();
- while (count < BUSY_LOOP_CNT_LONG) {
+ while (count < BUSY_LOOP_CNT) {
count++;
};
@@ -479,19 +479,37 @@ static void time_test_wait_ns(void)
}
}
-static void time_test_accuracy(time_cb time_cur, time_from_ns_cb time_from_ns)
+/* Check that ODP time is within +-5% of system time */
+static void check_time_diff(double t_odp, double t_system,
+ const char *test, int id)
+{
+ if (t_odp > t_system * 1.05) {
+ CU_FAIL("ODP time too high");
+ fprintf(stderr, "ODP time too high (%s/%d): t_odp: %f, t_system: %f\n",
+ test, id, t_odp, t_system);
+ }
+ if (t_odp < t_system * 0.95) {
+ CU_FAIL("ODP time too low");
+ fprintf(stderr, "ODP time too low (%s/%d): t_odp: %f, t_system: %f\n",
+ test, id, t_odp, t_system);
+ }
+}
+
+static void time_test_accuracy(time_cb time_cur,
+ time_cb time_cur_strict, time_from_ns_cb time_from_ns)
{
int i;
- odp_time_t t1, t2, wait, diff;
+ odp_time_t t1[2], t2[2], wait;
struct timespec ts1, ts2, tsdiff;
- double sec_t, sec_c;
+ double sec_c;
odp_time_t sec = time_from_ns(ODP_TIME_SEC_IN_NS);
i = clock_gettime(CLOCK_MONOTONIC, &ts1);
CU_ASSERT(i == 0);
- t1 = time_cur();
+ t1[0] = time_cur_strict();
+ t1[1] = time_cur();
- wait = odp_time_sum(t1, sec);
+ wait = odp_time_sum(t1[0], sec);
for (i = 0; i < 5; i++) {
odp_time_wait_until(wait);
wait = odp_time_sum(wait, sec);
@@ -499,7 +517,8 @@ static void time_test_accuracy(time_cb time_cur, time_from_ns_cb time_from_ns)
i = clock_gettime(CLOCK_MONOTONIC, &ts2);
CU_ASSERT(i == 0);
- t2 = time_cur();
+ t2[0] = time_cur_strict();
+ t2[1] = time_cur();
if (ts2.tv_nsec < ts1.tv_nsec) {
tsdiff.tv_nsec = 1000000000L + ts2.tv_nsec - ts1.tv_nsec;
@@ -508,33 +527,49 @@ static void time_test_accuracy(time_cb time_cur, time_from_ns_cb time_from_ns)
tsdiff.tv_nsec = ts2.tv_nsec - ts1.tv_nsec;
tsdiff.tv_sec = ts2.tv_sec - ts1.tv_sec;
}
-
- diff = odp_time_diff(t2, t1);
- sec_t = ((double)odp_time_to_ns(diff)) / ODP_TIME_SEC_IN_NS;
sec_c = ((double)(tsdiff.tv_nsec) / 1000000000L) + tsdiff.tv_sec;
- /* Check that ODP time is within +-5% of system time */
- CU_ASSERT(sec_t < sec_c * 1.05);
- CU_ASSERT(sec_t > sec_c * 0.95);
+ for (i = 0; i < 2; i++) {
+ odp_time_t diff = odp_time_diff(t2[i], t1[i]);
+ double sec_t = ((double)odp_time_to_ns(diff)) / ODP_TIME_SEC_IN_NS;
+
+ check_time_diff(sec_t, sec_c, __func__, i);
+ }
}
-static void time_test_accuracy_nsec(time_nsec_cb time_nsec)
+static void time_test_local_accuracy(void)
{
- uint64_t t1, t2, diff;
+ time_test_accuracy(odp_time_local, odp_time_local_strict, odp_time_local_from_ns);
+}
+
+static void time_test_global_accuracy(void)
+{
+ time_test_accuracy(odp_time_global, odp_time_global_strict, odp_time_global_from_ns);
+}
+
+static void time_test_accuracy_nsec(void)
+{
+ uint64_t t1[4], t2[4];
struct timespec ts1, ts2, tsdiff;
- double sec_t, sec_c;
+ double sec_c;
int i, ret;
ret = clock_gettime(CLOCK_MONOTONIC, &ts1);
CU_ASSERT(ret == 0);
- t1 = time_nsec();
+ t1[0] = odp_time_global_strict_ns();
+ t1[1] = odp_time_local_strict_ns();
+ t1[2] = odp_time_global_ns();
+ t1[3] = odp_time_local_ns();
for (i = 0; i < 5; i++)
odp_time_wait_ns(ODP_TIME_SEC_IN_NS);
ret = clock_gettime(CLOCK_MONOTONIC, &ts2);
CU_ASSERT(ret == 0);
- t2 = time_nsec();
+ t2[0] = odp_time_global_strict_ns();
+ t2[1] = odp_time_local_strict_ns();
+ t2[2] = odp_time_global_ns();
+ t2[3] = odp_time_local_ns();
if (ts2.tv_nsec < ts1.tv_nsec) {
tsdiff.tv_nsec = 1000000000L + ts2.tv_nsec - ts1.tv_nsec;
@@ -543,54 +578,14 @@ static void time_test_accuracy_nsec(time_nsec_cb time_nsec)
tsdiff.tv_nsec = ts2.tv_nsec - ts1.tv_nsec;
tsdiff.tv_sec = ts2.tv_sec - ts1.tv_sec;
}
-
- diff = t2 - t1;
- sec_t = ((double)diff) / ODP_TIME_SEC_IN_NS;
sec_c = ((double)(tsdiff.tv_nsec) / 1000000000L) + tsdiff.tv_sec;
- /* Check that ODP time is within +-5% of system time */
- CU_ASSERT(sec_t < sec_c * 1.05);
- CU_ASSERT(sec_t > sec_c * 0.95);
-}
-
-static void time_test_local_accuracy(void)
-{
- time_test_accuracy(odp_time_local, odp_time_local_from_ns);
-}
-
-static void time_test_global_accuracy(void)
-{
- time_test_accuracy(odp_time_global, odp_time_global_from_ns);
-}
-
-static void time_test_local_strict_accuracy(void)
-{
- time_test_accuracy(odp_time_local_strict, odp_time_local_from_ns);
-}
-
-static void time_test_global_strict_accuracy(void)
-{
- time_test_accuracy(odp_time_global_strict, odp_time_global_from_ns);
-}
+ for (i = 0; i < 4; i++) {
+ uint64_t diff = t2[i] - t1[i];
+ double sec_t = ((double)diff) / ODP_TIME_SEC_IN_NS;
-static void time_test_local_accuracy_nsec(void)
-{
- time_test_accuracy_nsec(odp_time_local_ns);
-}
-
-static void time_test_global_accuracy_nsec(void)
-{
- time_test_accuracy_nsec(odp_time_global_ns);
-}
-
-static void time_test_local_strict_accuracy_nsec(void)
-{
- time_test_accuracy_nsec(odp_time_local_strict_ns);
-}
-
-static void time_test_global_strict_accuracy_nsec(void)
-{
- time_test_accuracy_nsec(odp_time_global_strict_ns);
+ check_time_diff(sec_t, sec_c, __func__, i);
+ }
}
odp_testinfo_t time_suite_time[] = {
@@ -611,18 +606,13 @@ odp_testinfo_t time_suite_time[] = {
ODP_TEST_INFO(time_test_global_wait_until),
ODP_TEST_INFO(time_test_local_accuracy),
ODP_TEST_INFO(time_test_global_accuracy),
- ODP_TEST_INFO(time_test_local_accuracy_nsec),
- ODP_TEST_INFO(time_test_global_accuracy_nsec),
+ ODP_TEST_INFO(time_test_accuracy_nsec),
ODP_TEST_INFO(time_test_local_strict_diff),
ODP_TEST_INFO(time_test_local_strict_sum),
ODP_TEST_INFO(time_test_local_strict_cmp),
ODP_TEST_INFO(time_test_global_strict_diff),
ODP_TEST_INFO(time_test_global_strict_sum),
ODP_TEST_INFO(time_test_global_strict_cmp),
- ODP_TEST_INFO(time_test_local_strict_accuracy),
- ODP_TEST_INFO(time_test_global_strict_accuracy),
- ODP_TEST_INFO(time_test_local_strict_accuracy_nsec),
- ODP_TEST_INFO(time_test_global_strict_accuracy_nsec),
ODP_TEST_INFO_NULL
};
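
As a worked example of the consolidated +-5% accuracy check above: if clock_gettime(CLOCK_MONOTONIC) measures sec_c = 5.0 s across the wait loop, check_time_diff() accepts any ODP-derived interval between 4.75 s and 5.25 s and otherwise records a CU failure plus an stderr line naming the calling test and the clock index (in the nsec variant, indexes 0..3 are global strict, local strict, global and local time).

	/* hypothetical numbers, not from a real run */
	double sec_c = 5.0;		/* elapsed seconds per CLOCK_MONOTONIC */
	double lo    = sec_c * 0.95;	/* 4.75 s, smallest accepted ODP time */
	double hi    = sec_c * 1.05;	/* 5.25 s, largest accepted ODP time */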
diff --git a/test/validation/api/timer/timer.c b/test/validation/api/timer/timer.c
index 00ef13c89..bf0fd3baf 100644
--- a/test/validation/api/timer/timer.c
+++ b/test/validation/api/timer/timer.c
@@ -15,6 +15,8 @@
#include <odp/helper/odph_api.h>
#include "odp_cunit_common.h"
+#define MAX_WORKERS 32
+
#define GLOBAL_SHM_NAME "GlobalTimerTest"
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
@@ -51,11 +53,6 @@ struct test_timer {
uint64_t tick; /* Expiration tick or TICK_INVALID */
};
-struct thread_args {
- pthrd_arg thrdarg;
- odp_queue_type_t queue_type;
-};
-
typedef struct {
/* Clock source support flags */
uint8_t clk_supported[ODP_CLOCK_NUM_SRC];
@@ -91,6 +88,9 @@ typedef struct {
/* Periodic timers supported */
int periodic;
+ /* Queue type to be tested */
+ odp_queue_type_t test_queue_type;
+
} global_shared_mem_t;
static global_shared_mem_t *global_mem;
@@ -1824,7 +1824,7 @@ static void handle_tmo(odp_event_t ev, bool stale, uint64_t prev_tick)
/* Worker thread entrypoint which performs timer alloc/set/cancel/free
* tests */
-static int worker_entrypoint(void *arg)
+static int worker_entrypoint(void *arg ODP_UNUSED)
{
int thr = odp_thread_id();
uint32_t i, allocated;
@@ -1849,27 +1849,23 @@ static int worker_entrypoint(void *arg)
uint32_t num_timers = global_mem->timers_per_thread;
uint64_t min_tmo = global_mem->param.min_tmo;
odp_queue_param_t queue_param;
- odp_queue_type_t queue_type = ODP_QUEUE_TYPE_PLAIN;
odp_thrmask_t thr_mask;
odp_schedule_group_t group;
- struct thread_args *thr_args = arg;
uint64_t sched_tmo;
- uint64_t res_ns = global_mem->param.res_ns;
+ uint64_t res_ns = global_mem->param.res_ns;
+ odp_queue_type_t queue_type = global_mem->test_queue_type;
odp_queue_param_init(&queue_param);
- if (thr_args->queue_type == ODP_QUEUE_TYPE_PLAIN) {
- queue_param.type = ODP_QUEUE_TYPE_PLAIN;
- queue_type = ODP_QUEUE_TYPE_PLAIN;
- } else {
+ queue_param.type = queue_type;
+
+ if (queue_type == ODP_QUEUE_TYPE_SCHED) {
odp_thrmask_zero(&thr_mask);
odp_thrmask_set(&thr_mask, odp_thread_id());
group = odp_schedule_group_create(NULL, &thr_mask);
if (group == ODP_SCHED_GROUP_INVALID)
CU_FAIL_FATAL("Schedule group create failed");
- queue_param.type = ODP_QUEUE_TYPE_SCHED;
queue_param.sched.sync = ODP_SCHED_SYNC_PARALLEL;
- queue_type = ODP_QUEUE_TYPE_SCHED;
queue_param.sched.group = group;
}
@@ -2077,12 +2073,10 @@ static void timer_test_all(odp_queue_type_t queue_type)
int rc;
odp_pool_param_t params;
odp_timer_pool_param_t tparam;
- odp_cpumask_t unused;
odp_timer_pool_info_t tpinfo;
uint64_t ns, tick, ns2;
uint64_t res_ns, min_tmo, max_tmo;
uint32_t timers_allocated;
- struct thread_args thr_args;
odp_pool_capability_t pool_capa;
odp_timer_capability_t timer_capa;
odp_schedule_capability_t sched_capa;
@@ -2096,7 +2090,7 @@ static void timer_test_all(odp_queue_type_t queue_type)
/* Reserve at least one core for running other processes so the timer
* test hopefully can run undisturbed and thus get better timing
* results. */
- num_workers = odp_cpumask_default_worker(&unused, 0);
+ num_workers = odp_cpumask_default_worker(NULL, 0);
/* force to max CPU count */
if (num_workers > MAX_WORKERS)
@@ -2205,13 +2199,11 @@ static void timer_test_all(odp_queue_type_t queue_type)
odp_atomic_init_u32(&global_mem->timers_allocated, 0);
/* Create and start worker threads */
- thr_args.thrdarg.testcase = 0;
- thr_args.thrdarg.numthrds = num_workers;
- thr_args.queue_type = queue_type;
- odp_cunit_thread_create(worker_entrypoint, &thr_args.thrdarg);
+ global_mem->test_queue_type = queue_type;
+ odp_cunit_thread_create(num_workers, worker_entrypoint, NULL, 0);
/* Wait for worker threads to exit */
- odp_cunit_thread_exit(&thr_args.thrdarg);
+ odp_cunit_thread_join(num_workers);
ODPH_DBG("Number of timeouts delivered/received too late: "
"%" PRIu32 "\n",
odp_atomic_load_u32(&global_mem->ndelivtoolate));