-rw-r--r--  doc/implementers-guide/implementers-guide.adoc |  4
-rw-r--r--  example/ipfragreass/odp_ipfragreass.c          | 26
-rw-r--r--  example/ipsec_api/odp_ipsec.c                  | 26
-rw-r--r--  example/ipsec_crypto/odp_ipsec.c               | 26
-rw-r--r--  example/l3fwd/odp_l3fwd.c                      | 37
-rw-r--r--  example/packet/odp_pktio.c                     | 40
-rw-r--r--  example/switch/odp_switch.c                    | 31
-rw-r--r--  example/timer/odp_timer_test.c                 | 23
-rw-r--r--  test/performance/odp_bench_packet.c            | 29
-rw-r--r--  test/performance/odp_cpu_bench.c               | 36
-rw-r--r--  test/performance/odp_crypto.c                  | 28
-rw-r--r--  test/performance/odp_ipsec.c                   | 28
-rw-r--r--  test/performance/odp_pktio_ordered.c           | 33
-rw-r--r--  test/performance/odp_pktio_perf.c              | 50
-rw-r--r--  test/performance/odp_pool_perf.c               | 27
-rw-r--r--  test/performance/odp_queue_perf.c              | 27
-rw-r--r--  test/performance/odp_sched_latency.c           | 26
-rw-r--r--  test/performance/odp_sched_pktio.c             | 51
18 files changed, 294 insertions(+), 254 deletions(-)
diff --git a/doc/implementers-guide/implementers-guide.adoc b/doc/implementers-guide/implementers-guide.adoc
index 922188770..398934482 100644
--- a/doc/implementers-guide/implementers-guide.adoc
+++ b/doc/implementers-guide/implementers-guide.adoc
@@ -556,9 +556,9 @@ could (hopefully) be as simple as changing the OS related helper lib.
In the linux helper, two functions are given to create and join ODP threads:
-`odph_odpthreads_create()`
+`odph_thread_create()`
-`odph_odpthreads_join()`
+`odph_thread_join()`
These two functions abstract what an ODP thread really is and their usage
is recommended as they would be implemented in other OSes' helper libs.
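
The hunks that follow all apply the same migration: the old odph_odpthread_params_t is split into a common part (odph_thread_common_param_t, carrying the ODP instance and CPU mask) and a per-thread part (odph_thread_param_t), and a whole thread table is created and joined with a single call each. Below is a minimal sketch of the shared-parameter pattern used by most of these files; worker_fn(), MAX_WORKERS and the error handling are placeholders rather than part of this patch, while the helper calls and struct fields are exactly the ones introduced in the hunks.

#include <string.h>

#include <odp_api.h>
#include <odp/helper/odph_api.h>

#define MAX_WORKERS 8 /* hypothetical upper bound */

/* Hypothetical worker entry point */
static int worker_fn(void *arg)
{
	(void)arg;
	return 0;
}

static int launch_workers(odp_instance_t instance, odp_cpumask_t *cpumask,
			  int num_workers)
{
	odph_thread_t thread_tbl[MAX_WORKERS];
	odph_thread_common_param_t thr_common;
	odph_thread_param_t thr_param;

	/* Parameters common to all threads */
	odph_thread_common_param_init(&thr_common);
	thr_common.instance = instance;
	thr_common.cpumask = cpumask;
	thr_common.share_param = 1; /* every thread uses the same thr_param */

	/* Per-thread parameters (shared here because share_param = 1) */
	odph_thread_param_init(&thr_param);
	thr_param.start = worker_fn;
	thr_param.arg = NULL;
	thr_param.thr_type = ODP_THREAD_WORKER;

	memset(thread_tbl, 0, sizeof(thread_tbl));
	if (odph_thread_create(thread_tbl, &thr_common, &thr_param,
			       num_workers) != num_workers)
		return -1;

	/* Block until all created threads have exited */
	odph_thread_join(thread_tbl, num_workers);
	return 0;
}

When each worker needs its own arguments (as in odp_l3fwd.c, odp_pktio.c and odp_switch.c below), the same calls are used with an array of odph_thread_param_t, one entry per worker, and share_param is simply left at its default.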
diff --git a/example/ipfragreass/odp_ipfragreass.c b/example/ipfragreass/odp_ipfragreass.c
index 00e7b8643..828f11002 100644
--- a/example/ipfragreass/odp_ipfragreass.c
+++ b/example/ipfragreass/odp_ipfragreass.c
@@ -230,8 +230,9 @@ int main(void)
odp_pool_t fragment_pool;
odp_shm_t shm;
odp_cpumask_t cpumask;
- odph_odpthread_t threads[MAX_WORKERS];
- odph_odpthread_params_t thread_params;
+ odph_thread_t thread_tbl[MAX_WORKERS];
+ odph_thread_common_param_t thr_common;
+ odph_thread_param_t thr_param;
odp_packet_t dequeued_pkts[NUM_PACKETS];
odp_event_t ev;
odp_u16be_t ip_id = 0;
@@ -242,7 +243,6 @@ int main(void)
int num_workers = MAX_WORKERS;
int reassembled;
- memset(&threads, 0, sizeof(threads));
init(&instance, &fragment_pool, &shm, &cpumask, &num_workers);
/* Packet generation & fragmentation */
@@ -290,19 +290,25 @@ int main(void)
}
/* Spawn the worker threads for reassembly */
- memset(&thread_params, 0, sizeof(thread_params));
- thread_params.start = run_worker;
- thread_params.arg = 0;
- thread_params.thr_type = ODP_THREAD_WORKER;
- thread_params.instance = instance;
- odph_odpthreads_create(threads, &cpumask, &thread_params);
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = instance;
+ thr_common.cpumask = &cpumask;
+ thr_common.share_param = 1;
+
+ odph_thread_param_init(&thr_param);
+ thr_param.start = run_worker;
+ thr_param.arg = 0;
+ thr_param.thr_type = ODP_THREAD_WORKER;
+
+ memset(thread_tbl, 0, sizeof(thread_tbl));
+ odph_thread_create(thread_tbl, &thr_common, &thr_param, num_workers);
/* Go! */
printf("\n= Starting reassembly...\n");
odp_barrier_wait(&barrier);
/* Wait for all threads to complete and output statistics */
- odph_odpthreads_join(threads);
+ odph_thread_join(thread_tbl, num_workers);
for (i = 0; i < num_workers; ++i)
printf("=== Thread %02d processed %3d fragments\n", i,
thread_stats[i].frags);
diff --git a/example/ipsec_api/odp_ipsec.c b/example/ipsec_api/odp_ipsec.c
index 40fa0795a..cebc733ae 100644
--- a/example/ipsec_api/odp_ipsec.c
+++ b/example/ipsec_api/odp_ipsec.c
@@ -742,7 +742,7 @@ pkt_disposition_e do_ipsec_out_classify(odp_packet_t *ppkt, pkt_ctx_t *ctx)
* - Sequence number assignment queue
* - Per packet crypto API completion queue
*
- * @param arg Required by "odph_odpthreads_create", unused
+ * @param arg Required by "odph_thread_create", unused
*
* @return NULL (should never return)
*/
@@ -914,7 +914,9 @@ int
main(int argc, char *argv[])
{
odph_helper_options_t helper_options;
- odph_odpthread_t thread_tbl[MAX_WORKERS];
+ odph_thread_t thread_tbl[MAX_WORKERS];
+ odph_thread_common_param_t thr_common;
+ odph_thread_param_t thr_param;
int num_workers;
int i;
int stream_count;
@@ -925,7 +927,6 @@ main(int argc, char *argv[])
odp_pool_param_t params;
odp_instance_t instance;
odp_init_t init_param;
- odph_odpthread_params_t thr_params;
odp_event_t ev;
/* create by default scheduled queues */
@@ -1078,13 +1079,18 @@ main(int argc, char *argv[])
/*
* Create and init worker threads
*/
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = instance;
+ thr_common.cpumask = &cpumask;
+ thr_common.share_param = 1;
+
+ odph_thread_param_init(&thr_param);
+ thr_param.start = pktio_thread;
+ thr_param.arg = NULL;
+ thr_param.thr_type = ODP_THREAD_WORKER;
+
memset(thread_tbl, 0, sizeof(thread_tbl));
- memset(&thr_params, 0, sizeof(thr_params));
- thr_params.start = pktio_thread;
- thr_params.arg = NULL;
- thr_params.thr_type = ODP_THREAD_WORKER;
- thr_params.instance = instance;
- odph_odpthreads_create(thread_tbl, &cpumask, &thr_params);
+ odph_thread_create(thread_tbl, &thr_common, &thr_param, num_workers);
/* If there are streams attempt to verify them. Otherwise, run until
* SIGINT is received. */
@@ -1098,7 +1104,7 @@ main(int argc, char *argv[])
printf("All received\n");
odp_atomic_store_u32(&global->exit_threads, 1);
}
- odph_odpthreads_join(thread_tbl);
+ odph_thread_join(thread_tbl, num_workers);
/* Stop and close used pktio devices */
for (i = 0; i < global->appl.if_count; i++) {
diff --git a/example/ipsec_crypto/odp_ipsec.c b/example/ipsec_crypto/odp_ipsec.c
index a55aa6aba..9fec94620 100644
--- a/example/ipsec_crypto/odp_ipsec.c
+++ b/example/ipsec_crypto/odp_ipsec.c
@@ -1042,7 +1042,7 @@ pkt_disposition_e do_ipsec_out_finish(odp_packet_t pkt,
* - Sequence number assignment queue
* - Per packet crypto API completion queue
*
- * @param arg Required by "odph_odpthreads_create", unused
+ * @param arg Required by "odph_thread_create", unused
*
* @return NULL (should never return)
*/
@@ -1203,7 +1203,9 @@ int
main(int argc, char *argv[])
{
odph_helper_options_t helper_options;
- odph_odpthread_t thread_tbl[MAX_WORKERS];
+ odph_thread_t thread_tbl[MAX_WORKERS];
+ odph_thread_common_param_t thr_common;
+ odph_thread_param_t thr_param;
int num_workers;
int i;
int stream_count;
@@ -1214,7 +1216,6 @@ main(int argc, char *argv[])
odp_pool_param_t params;
odp_instance_t instance;
odp_init_t init_param;
- odph_odpthread_params_t thr_params;
/* create by default scheduled queues */
queue_create = odp_queue_create;
@@ -1364,13 +1365,18 @@ main(int argc, char *argv[])
/*
* Create and init worker threads
*/
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = instance;
+ thr_common.cpumask = &cpumask;
+ thr_common.share_param = 1;
+
+ odph_thread_param_init(&thr_param);
+ thr_param.start = pktio_thread;
+ thr_param.arg = NULL;
+ thr_param.thr_type = ODP_THREAD_WORKER;
+
memset(thread_tbl, 0, sizeof(thread_tbl));
- memset(&thr_params, 0, sizeof(thr_params));
- thr_params.start = pktio_thread;
- thr_params.arg = NULL;
- thr_params.thr_type = ODP_THREAD_WORKER;
- thr_params.instance = instance;
- odph_odpthreads_create(thread_tbl, &cpumask, &thr_params);
+ odph_thread_create(thread_tbl, &thr_common, &thr_param, num_workers);
/* If there are streams attempt to verify them. Otherwise, run until
* SIGINT is received. */
@@ -1384,7 +1390,7 @@ main(int argc, char *argv[])
printf("All received\n");
odp_atomic_store_u32(&global->exit_threads, 1);
}
- odph_odpthreads_join(thread_tbl);
+ odph_thread_join(thread_tbl, num_workers);
/* Stop and close used pktio devices */
for (i = 0; i < global->appl.if_count; i++) {
diff --git a/example/l3fwd/odp_l3fwd.c b/example/l3fwd/odp_l3fwd.c
index 367e09a24..8debb2010 100644
--- a/example/l3fwd/odp_l3fwd.c
+++ b/example/l3fwd/odp_l3fwd.c
@@ -82,7 +82,6 @@ typedef struct {
typedef struct {
app_args_t cmd_args;
struct l3fwd_pktio_s l3fwd_pktios[MAX_NB_PKTIO];
- odph_odpthread_t l3fwd_workers[MAX_NB_WORKER];
struct thread_arg_s worker_args[MAX_NB_WORKER];
odph_ethaddr_t eth_dest_mac[MAX_NB_PKTIO];
/** Global barrier to synchronize main and workers */
@@ -937,14 +936,15 @@ static int print_speed_stats(int num_workers, int duration, int timeout)
int main(int argc, char **argv)
{
- odph_odpthread_t thread_tbl[MAX_NB_WORKER];
+ odph_thread_t thread_tbl[MAX_NB_WORKER];
+ odph_thread_common_param_t thr_common;
+ odph_thread_param_t thr_param[MAX_NB_WORKER];
odp_pool_t pool;
odp_pool_param_t params;
odp_shm_t shm;
odp_instance_t instance;
- odph_odpthread_params_t thr_params;
odp_cpumask_t cpumask;
- int cpu, i, j, nb_worker;
+ int i, j, nb_worker;
uint8_t mac[ODPH_ETHADDR_LEN];
uint8_t *dst_mac;
app_args_t *args;
@@ -1101,32 +1101,25 @@ int main(int argc, char **argv)
odp_barrier_init(&global->barrier, nb_worker + 1);
- memset(&thr_params, 0, sizeof(thr_params));
- thr_params.start = run_worker;
- thr_params.thr_type = ODP_THREAD_WORKER;
- thr_params.instance = instance;
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = instance;
+ thr_common.cpumask = &cpumask;
- memset(thread_tbl, 0, sizeof(thread_tbl));
- cpu = odp_cpumask_first(&cpumask);
for (i = 0; i < nb_worker; i++) {
- struct thread_arg_s *arg;
- odp_cpumask_t thr_mask;
-
- arg = &global->worker_args[i];
- odp_cpumask_zero(&thr_mask);
- odp_cpumask_set(&thr_mask, cpu);
- thr_params.arg = arg;
- odph_odpthreads_create(&thread_tbl[i], &thr_mask,
- &thr_params);
- cpu = odp_cpumask_next(&cpumask, cpu);
+ odph_thread_param_init(&thr_param[i]);
+ thr_param[i].start = run_worker;
+ thr_param[i].arg = &global->worker_args[i];
+ thr_param[i].thr_type = ODP_THREAD_WORKER;
}
+ memset(thread_tbl, 0, sizeof(thread_tbl));
+ odph_thread_create(thread_tbl, &thr_common, thr_param, nb_worker);
+
print_speed_stats(nb_worker, args->duration, PRINT_INTERVAL);
odp_atomic_store_u32(&global->exit_threads, 1);
/* wait for other threads to join */
- for (i = 0; i < nb_worker; i++)
- odph_odpthreads_join(&thread_tbl[i]);
+ odph_thread_join(thread_tbl, nb_worker);
/* Stop and close used pktio devices */
for (i = 0; i < args->if_count; i++) {
diff --git a/example/packet/odp_pktio.c b/example/packet/odp_pktio.c
index 6065bcc88..8b2bb55e3 100644
--- a/example/packet/odp_pktio.c
+++ b/example/packet/odp_pktio.c
@@ -342,17 +342,17 @@ static int pktio_ifburst_thread(void *arg)
int main(int argc, char *argv[])
{
odph_helper_options_t helper_options;
- odph_odpthread_t thread_tbl[MAX_WORKERS];
+ odph_thread_t thread_tbl[MAX_WORKERS];
+ odph_thread_common_param_t thr_common;
+ odph_thread_param_t thr_param[MAX_WORKERS];
odp_pool_t pool;
int num_workers;
int i;
- int cpu;
odp_cpumask_t cpumask;
char cpumaskstr[ODP_CPUMASK_STR_SIZE];
odp_pool_param_t params;
odp_instance_t instance;
odp_init_t init_param;
- odph_odpthread_params_t thr_params;
odp_shm_t shm;
/* Let helper collect its own arguments (e.g. --odph_proc) */
@@ -436,15 +436,11 @@ int main(int argc, char *argv[])
create_pktio(args->appl.if_names[i], pool, args->appl.mode);
/* Create and init worker threads */
- memset(thread_tbl, 0, sizeof(thread_tbl));
-
- memset(&thr_params, 0, sizeof(thr_params));
- thr_params.thr_type = ODP_THREAD_WORKER;
- thr_params.instance = instance;
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = instance;
+ thr_common.cpumask = &cpumask;
- cpu = odp_cpumask_first(&cpumask);
for (i = 0; i < num_workers; ++i) {
- odp_cpumask_t thd_mask;
int (*thr_run_func)(void *);
int if_idx;
@@ -457,21 +453,16 @@ int main(int argc, char *argv[])
thr_run_func = pktio_ifburst_thread;
else /* APPL_MODE_PKT_QUEUE */
thr_run_func = pktio_queue_thread;
- /*
- * Create threads one-by-one instead of all-at-once,
- * because each thread might get different arguments.
- * Calls odp_thread_create(cpu) for each thread
- */
- odp_cpumask_zero(&thd_mask);
- odp_cpumask_set(&thd_mask, cpu);
-
- thr_params.start = thr_run_func;
- thr_params.arg = &args->thread[i];
-
- odph_odpthreads_create(&thread_tbl[i], &thd_mask, &thr_params);
- cpu = odp_cpumask_next(&cpumask, cpu);
+
+ odph_thread_param_init(&thr_param[i]);
+ thr_param[i].start = thr_run_func;
+ thr_param[i].arg = &args->thread[i];
+ thr_param[i].thr_type = ODP_THREAD_WORKER;
}
+ memset(thread_tbl, 0, sizeof(thread_tbl));
+ odph_thread_create(thread_tbl, &thr_common, thr_param, num_workers);
+
if (args->appl.time) {
odp_time_wait_ns(args->appl.time *
ODP_TIME_SEC_IN_NS);
@@ -487,8 +478,7 @@ int main(int argc, char *argv[])
}
/* Master thread waits for other threads to exit */
- for (i = 0; i < num_workers; ++i)
- odph_odpthreads_join(&thread_tbl[i]);
+ odph_thread_join(thread_tbl, num_workers);
for (i = 0; i < args->appl.if_count; ++i)
odp_pktio_close(odp_pktio_lookup(args->thread[i].pktio_dev));
diff --git a/example/switch/odp_switch.c b/example/switch/odp_switch.c
index 989016fc8..f30d468f7 100644
--- a/example/switch/odp_switch.c
+++ b/example/switch/odp_switch.c
@@ -971,9 +971,10 @@ static void gbl_args_init(args_t *args)
int main(int argc, char **argv)
{
odph_helper_options_t helper_options;
- odph_odpthread_t thread_tbl[MAX_WORKERS];
+ odph_thread_t thread_tbl[MAX_WORKERS];
+ odph_thread_common_param_t thr_common;
+ odph_thread_param_t thr_param[MAX_WORKERS];
int i, j;
- int cpu;
int num_workers;
odp_shm_t shm;
odp_cpumask_t cpumask;
@@ -984,7 +985,6 @@ int main(int argc, char **argv)
int if_count;
odp_instance_t instance;
odp_init_t init_param;
- odph_odpthread_params_t thr_params;
signal(SIGINT, sig_handler);
@@ -1097,27 +1097,23 @@ int main(int argc, char **argv)
stats = gbl_args->stats;
- memset(&thr_params, 0, sizeof(thr_params));
- thr_params.thr_type = ODP_THREAD_WORKER;
- thr_params.instance = instance;
- thr_params.start = run_worker;
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = instance;
+ thr_common.cpumask = &cpumask;
/* Create worker threads */
- cpu = odp_cpumask_first(&cpumask);
for (i = 0; i < num_workers; ++i) {
- odp_cpumask_t thd_mask;
-
for (j = 0; j < MAX_PKTIOS; j++)
gbl_args->thread[i].stats[j] = &stats[i][j];
- thr_params.arg = &gbl_args->thread[i];
-
- odp_cpumask_zero(&thd_mask);
- odp_cpumask_set(&thd_mask, cpu);
- odph_odpthreads_create(&thread_tbl[i], &thd_mask, &thr_params);
- cpu = odp_cpumask_next(&cpumask, cpu);
+ odph_thread_param_init(&thr_param[i]);
+ thr_param[i].start = run_worker;
+ thr_param[i].arg = &gbl_args->thread[i];
+ thr_param[i].thr_type = ODP_THREAD_WORKER;
}
+ odph_thread_create(thread_tbl, &thr_common, thr_param, num_workers);
+
/* Start packet receive and transmit */
for (i = 0; i < if_count; ++i) {
odp_pktio_t pktio;
@@ -1136,8 +1132,7 @@ int main(int argc, char **argv)
odp_atomic_store_u32(&gbl_args->exit_threads, 1);
/* Master thread waits for other threads to exit */
- for (i = 0; i < num_workers; ++i)
- odph_odpthreads_join(&thread_tbl[i]);
+ odph_thread_join(thread_tbl, num_workers);
/* Stop and close used pktio devices */
for (i = 0; i < if_count; i++) {
diff --git a/example/timer/odp_timer_test.c b/example/timer/odp_timer_test.c
index 8bfb99d8f..b10559e6e 100644
--- a/example/timer/odp_timer_test.c
+++ b/example/timer/odp_timer_test.c
@@ -332,7 +332,9 @@ static int parse_args(int argc, char *argv[], test_args_t *args)
int main(int argc, char *argv[])
{
odph_helper_options_t helper_options;
- odph_odpthread_t thread_tbl[MAX_WORKERS];
+ odph_thread_t thread_tbl[MAX_WORKERS];
+ odph_thread_common_param_t thr_common;
+ odph_thread_param_t thr_param;
int num_workers;
odp_queue_t queue;
uint64_t tick, ns;
@@ -344,7 +346,6 @@ int main(int argc, char *argv[])
char cpumaskstr[ODP_CPUMASK_STR_SIZE];
odp_instance_t instance;
odp_init_t init_param;
- odph_odpthread_params_t thr_params;
odp_shm_t shm = ODP_SHM_INVALID;
test_globals_t *gbls = NULL;
int err = 0;
@@ -509,16 +510,20 @@ int main(int argc, char *argv[])
odp_barrier_init(&gbls->test_barrier, num_workers);
/* Create and launch worker threads */
- memset(&thr_params, 0, sizeof(thr_params));
- thr_params.start = run_thread;
- thr_params.arg = gbls;
- thr_params.thr_type = ODP_THREAD_WORKER;
- thr_params.instance = instance;
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = instance;
+ thr_common.cpumask = &cpumask;
+ thr_common.share_param = 1;
- odph_odpthreads_create(thread_tbl, &cpumask, &thr_params);
+ odph_thread_param_init(&thr_param);
+ thr_param.start = run_thread;
+ thr_param.arg = gbls;
+ thr_param.thr_type = ODP_THREAD_WORKER;
+
+ odph_thread_create(thread_tbl, &thr_common, &thr_param, num_workers);
/* Wait for worker threads to exit */
- odph_odpthreads_join(thread_tbl);
+ odph_thread_join(thread_tbl, num_workers);
/* free resources */
if (odp_queue_destroy(queue))
diff --git a/test/performance/odp_bench_packet.c b/test/performance/odp_bench_packet.c
index e80e823f6..0354ef9b8 100644
--- a/test/performance/odp_bench_packet.c
+++ b/test/performance/odp_bench_packet.c
@@ -1739,7 +1739,9 @@ bench_info_t test_suite[] = {
int main(int argc, char *argv[])
{
odph_helper_options_t helper_options;
- odph_odpthread_t worker_thread;
+ odph_thread_t worker_thread;
+ odph_thread_common_param_t thr_common;
+ odph_thread_param_t thr_param;
int cpu;
odp_shm_t shm;
odp_cpumask_t cpumask;
@@ -1864,7 +1866,7 @@ int main(int argc, char *argv[])
odp_pool_print(gbl_args->pool);
- memset(&worker_thread, 0, sizeof(odph_odpthread_t));
+ memset(&worker_thread, 0, sizeof(odph_thread_t));
signal(SIGINT, sig_handler);
@@ -1872,20 +1874,23 @@ int main(int argc, char *argv[])
cpu = odp_cpumask_first(&cpumask);
odp_cpumask_t thd_mask;
- odph_odpthread_params_t thr_params;
-
- memset(&thr_params, 0, sizeof(thr_params));
- thr_params.start = run_benchmarks;
- thr_params.arg = gbl_args;
- thr_params.thr_type = ODP_THREAD_WORKER;
- thr_params.instance = instance;
odp_cpumask_zero(&thd_mask);
odp_cpumask_set(&thd_mask, cpu);
- odph_odpthreads_create(&worker_thread, &thd_mask,
- &thr_params);
- odph_odpthreads_join(&worker_thread);
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = instance;
+ thr_common.cpumask = &thd_mask;
+ thr_common.share_param = 1;
+
+ odph_thread_param_init(&thr_param);
+ thr_param.start = run_benchmarks;
+ thr_param.arg = gbl_args;
+ thr_param.thr_type = ODP_THREAD_WORKER;
+
+ odph_thread_create(&worker_thread, &thr_common, &thr_param, 1);
+
+ odph_thread_join(&worker_thread, 1);
ret = gbl_args->bench_failed;
diff --git a/test/performance/odp_cpu_bench.c b/test/performance/odp_cpu_bench.c
index a4999ae27..e0ac82846 100644
--- a/test/performance/odp_cpu_bench.c
+++ b/test/performance/odp_cpu_bench.c
@@ -521,7 +521,9 @@ int main(int argc, char *argv[])
{
stats_t *stats[MAX_WORKERS];
odph_helper_options_t helper_options;
- odph_odpthread_t thread_tbl[MAX_WORKERS];
+ odph_thread_t thread_tbl[MAX_WORKERS];
+ odph_thread_common_param_t thr_common;
+ odph_thread_param_t thr_param[MAX_WORKERS];
odp_cpumask_t cpumask;
odp_pool_capability_t pool_capa;
odp_pool_t pool;
@@ -540,7 +542,6 @@ int main(int argc, char *argv[])
uint32_t init_val;
unsigned int num_workers;
unsigned int i, j;
- int cpu;
int ret = 0;
/* Let helper collect its own arguments (e.g. --odph_proc) */
@@ -743,7 +744,6 @@ int main(int argc, char *argv[])
}
}
- memset(thread_tbl, 0, sizeof(thread_tbl));
odp_barrier_init(&gbl_args->init_barrier, num_workers + 1);
odp_barrier_init(&gbl_args->term_barrier, num_workers + 1);
@@ -762,34 +762,28 @@ int main(int argc, char *argv[])
}
/* Create worker threads */
- cpu = odp_cpumask_first(&cpumask);
- for (i = 0; i < num_workers; i++) {
- odp_cpumask_t thd_mask;
- odph_odpthread_params_t thr_params;
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = instance;
+ thr_common.cpumask = &cpumask;
+ for (i = 0; i < num_workers; i++) {
gbl_args->thread[i].idx = i;
-
- memset(&thr_params, 0, sizeof(thr_params));
- thr_params.start = run_thread;
- thr_params.arg = &gbl_args->thread[i];
- thr_params.thr_type = ODP_THREAD_WORKER;
- thr_params.instance = instance;
-
stats[i] = &gbl_args->thread[i].stats;
- odp_cpumask_zero(&thd_mask);
- odp_cpumask_set(&thd_mask, cpu);
- odph_odpthreads_create(&thread_tbl[i], &thd_mask,
- &thr_params);
- cpu = odp_cpumask_next(&cpumask, cpu);
+ odph_thread_param_init(&thr_param[i]);
+ thr_param[i].start = run_thread;
+ thr_param[i].arg = &gbl_args->thread[i];
+ thr_param[i].thr_type = ODP_THREAD_WORKER;
}
+ memset(thread_tbl, 0, sizeof(thread_tbl));
+ odph_thread_create(thread_tbl, &thr_common, thr_param, num_workers);
+
ret = print_stats(num_workers, stats, gbl_args->appl.time,
gbl_args->appl.accuracy);
/* Master thread waits for other threads to exit */
- for (i = 0; i < num_workers; ++i)
- odph_odpthreads_join(&thread_tbl[i]);
+ odph_thread_join(thread_tbl, num_workers);
for (i = 0; i < num_groups; i++) {
for (j = 0; j < QUEUES_PER_GROUP; j++) {
diff --git a/test/performance/odp_crypto.c b/test/performance/odp_crypto.c
index 36324622a..4f81dab17 100644
--- a/test/performance/odp_crypto.c
+++ b/test/performance/odp_crypto.c
@@ -1032,7 +1032,9 @@ int main(int argc, char *argv[])
char cpumaskstr[ODP_CPUMASK_STR_SIZE];
int num_workers = 1;
odph_helper_options_t helper_options;
- odph_odpthread_t thr[num_workers];
+ odph_thread_t thread_tbl[num_workers];
+ odph_thread_common_param_t thr_common;
+ odph_thread_param_t thr_param;
odp_instance_t instance;
odp_init_t init_param;
odp_pool_capability_t pool_capa;
@@ -1146,24 +1148,26 @@ int main(int argc, char *argv[])
printf("Run in sync mode\n");
}
- memset(thr, 0, sizeof(thr));
-
test_run_arg.crypto_args = cargs;
test_run_arg.crypto_alg_config = cargs.alg_config;
test_run_arg.crypto_capa = crypto_capa;
if (cargs.alg_config) {
- odph_odpthread_params_t thr_params;
-
- memset(&thr_params, 0, sizeof(thr_params));
- thr_params.start = run_thr_func;
- thr_params.arg = &test_run_arg;
- thr_params.thr_type = ODP_THREAD_WORKER;
- thr_params.instance = instance;
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = instance;
+ thr_common.cpumask = &cpumask;
+ thr_common.share_param = 1;
if (cargs.schedule) {
- odph_odpthreads_create(&thr[0], &cpumask, &thr_params);
- odph_odpthreads_join(&thr[0]);
+ odph_thread_param_init(&thr_param);
+ thr_param.start = run_thr_func;
+ thr_param.arg = &test_run_arg;
+ thr_param.thr_type = ODP_THREAD_WORKER;
+
+ memset(thread_tbl, 0, sizeof(thread_tbl));
+ odph_thread_create(thread_tbl, &thr_common, &thr_param, num_workers);
+
+ odph_thread_join(thread_tbl, num_workers);
} else {
run_measure_one_config(&test_run_arg);
}
diff --git a/test/performance/odp_ipsec.c b/test/performance/odp_ipsec.c
index 40591092a..04788995e 100644
--- a/test/performance/odp_ipsec.c
+++ b/test/performance/odp_ipsec.c
@@ -1027,7 +1027,9 @@ int main(int argc, char *argv[])
char cpumaskstr[ODP_CPUMASK_STR_SIZE];
int num_workers = 1;
odph_helper_options_t helper_options;
- odph_odpthread_t thr[num_workers];
+ odph_thread_t thread_tbl[num_workers];
+ odph_thread_common_param_t thr_common;
+ odph_thread_param_t thr_param;
odp_instance_t instance;
odp_init_t init_param;
odp_ipsec_capability_t ipsec_capa;
@@ -1164,20 +1166,22 @@ int main(int argc, char *argv[])
printf("Run in sync mode\n");
}
- memset(thr, 0, sizeof(thr));
-
if (cargs.alg_config) {
- odph_odpthread_params_t thr_param;
-
- memset(&thr_param, 0, sizeof(thr_param));
- thr_param.start = run_thr_func;
- thr_param.arg = &thr_arg;
- thr_param.thr_type = ODP_THREAD_WORKER;
- thr_param.instance = instance;
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = instance;
+ thr_common.cpumask = &cpumask;
+ thr_common.share_param = 1;
if (cargs.schedule) {
- odph_odpthreads_create(&thr[0], &cpumask, &thr_param);
- odph_odpthreads_join(&thr[0]);
+ odph_thread_param_init(&thr_param);
+ thr_param.start = run_thr_func;
+ thr_param.arg = &thr_arg;
+ thr_param.thr_type = ODP_THREAD_WORKER;
+
+ memset(thread_tbl, 0, sizeof(thread_tbl));
+ odph_thread_create(thread_tbl, &thr_common, &thr_param, num_workers);
+
+ odph_thread_join(thread_tbl, num_workers);
} else {
run_measure_one_config(&cargs, cargs.alg_config);
}
diff --git a/test/performance/odp_pktio_ordered.c b/test/performance/odp_pktio_ordered.c
index d5ffcc8ab..e35386d52 100644
--- a/test/performance/odp_pktio_ordered.c
+++ b/test/performance/odp_pktio_ordered.c
@@ -1060,10 +1060,11 @@ int main(int argc, char *argv[])
odp_pool_capability_t pool_capa;
odph_ethaddr_t new_addr;
odph_helper_options_t helper_options;
- odph_odpthread_t thread_tbl[MAX_WORKERS];
+ odph_thread_t thread_tbl[MAX_WORKERS];
+ odph_thread_common_param_t thr_common;
+ odph_thread_param_t thr_param[MAX_WORKERS];
stats_t *stats;
char cpumaskstr[ODP_CPUMASK_STR_SIZE];
- int cpu;
int i, j;
int if_count;
int ret;
@@ -1281,26 +1282,21 @@ int main(int argc, char *argv[])
odp_barrier_init(&gbl_args->barrier, num_workers + 1);
/* Create worker threads */
- cpu = odp_cpumask_first(&cpumask);
- for (i = 0; i < num_workers; ++i) {
- odp_cpumask_t thd_mask;
- odph_odpthread_params_t thr_params;
-
- memset(&thr_params, 0, sizeof(thr_params));
- thr_params.start = run_worker;
- thr_params.arg = &gbl_args->thread[i];
- thr_params.thr_type = ODP_THREAD_WORKER;
- thr_params.instance = instance;
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = instance;
+ thr_common.cpumask = &cpumask;
+ for (i = 0; i < num_workers; ++i) {
gbl_args->thread[i].stats = &stats[i];
- odp_cpumask_zero(&thd_mask);
- odp_cpumask_set(&thd_mask, cpu);
- odph_odpthreads_create(&thread_tbl[i], &thd_mask,
- &thr_params);
- cpu = odp_cpumask_next(&cpumask, cpu);
+ odph_thread_param_init(&thr_param[i]);
+ thr_param[i].start = run_worker;
+ thr_param[i].arg = &gbl_args->thread[i];
+ thr_param[i].thr_type = ODP_THREAD_WORKER;
}
+ odph_thread_create(thread_tbl, &thr_common, thr_param, num_workers);
+
/* Start packet receive and transmit */
for (i = 0; i < if_count; ++i) {
odp_pktio_t pktio;
@@ -1324,8 +1320,7 @@ int main(int argc, char *argv[])
odp_atomic_store_u32(&gbl_args->exit_threads, 1);
/* Master thread waits for other threads to exit */
- for (i = 0; i < num_workers; ++i)
- odph_odpthreads_join(&thread_tbl[i]);
+ odph_thread_join(thread_tbl, num_workers);
for (i = 0; i < if_count; i++) {
odp_pktio_close(gbl_args->pktios[i].pktio);
diff --git a/test/performance/odp_pktio_perf.c b/test/performance/odp_pktio_perf.c
index 3d70d7d2b..593465f4f 100644
--- a/test/performance/odp_pktio_perf.c
+++ b/test/performance/odp_pktio_perf.c
@@ -603,45 +603,61 @@ static int run_test_single(odp_cpumask_t *thd_mask_tx,
odp_cpumask_t *thd_mask_rx,
test_status_t *status)
{
- odph_odpthread_t thd_tbl[MAX_WORKERS];
+ odph_thread_t thread_tbl[MAX_WORKERS];
+ odph_thread_common_param_t thr_common;
+ odph_thread_param_t thr_param;
thread_args_t args_tx, args_rx;
uint64_t expected_tx_cnt;
int num_tx_workers, num_rx_workers;
- odph_odpthread_params_t thr_params;
-
- memset(&thr_params, 0, sizeof(thr_params));
- thr_params.thr_type = ODP_THREAD_WORKER;
- thr_params.instance = gbl_args->instance;
odp_atomic_store_u32(&gbl_args->shutdown, 0);
- memset(thd_tbl, 0, sizeof(thd_tbl));
+ memset(thread_tbl, 0, sizeof(thread_tbl));
memset(gbl_args->rx_stats, 0, gbl_args->rx_stats_size);
memset(gbl_args->tx_stats, 0, gbl_args->tx_stats_size);
expected_tx_cnt = status->pps_curr * gbl_args->args.duration;
/* start receiver threads first */
- thr_params.start = run_thread_rx;
- thr_params.arg = &args_rx;
+
+ num_rx_workers = odp_cpumask_count(thd_mask_rx);
args_rx.batch_len = gbl_args->args.rx_batch_len;
- odph_odpthreads_create(&thd_tbl[0], thd_mask_rx, &thr_params);
+
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = gbl_args->instance;
+ thr_common.cpumask = thd_mask_rx;
+ thr_common.share_param = 1;
+
+ odph_thread_param_init(&thr_param);
+ thr_param.start = run_thread_rx;
+ thr_param.arg = &args_rx;
+ thr_param.thr_type = ODP_THREAD_WORKER;
+
+ odph_thread_create(thread_tbl, &thr_common, &thr_param, num_rx_workers);
odp_barrier_wait(&gbl_args->rx_barrier);
- num_rx_workers = odp_cpumask_count(thd_mask_rx);
/* then start transmitters */
- thr_params.start = run_thread_tx;
- thr_params.arg = &args_tx;
+
num_tx_workers = odp_cpumask_count(thd_mask_tx);
args_tx.pps = status->pps_curr / num_tx_workers;
args_tx.duration = gbl_args->args.duration;
args_tx.batch_len = gbl_args->args.tx_batch_len;
- odph_odpthreads_create(&thd_tbl[num_rx_workers], thd_mask_tx,
- &thr_params);
+
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = gbl_args->instance;
+ thr_common.cpumask = thd_mask_tx;
+ thr_common.share_param = 1;
+
+ odph_thread_param_init(&thr_param);
+ thr_param.start = run_thread_tx;
+ thr_param.arg = &args_tx;
+ thr_param.thr_type = ODP_THREAD_WORKER;
+
+ odph_thread_create(&thread_tbl[num_rx_workers], &thr_common, &thr_param, num_tx_workers);
odp_barrier_wait(&gbl_args->tx_barrier);
/* wait for transmitter threads to terminate */
- odph_odpthreads_join(&thd_tbl[num_rx_workers]);
+ odph_thread_join(&thread_tbl[num_rx_workers], num_tx_workers);
/* delay to allow transmitted packets to reach the receivers */
odp_time_wait_ns(SHUTDOWN_DELAY_NS);
@@ -650,7 +666,7 @@ static int run_test_single(odp_cpumask_t *thd_mask_tx,
odp_atomic_store_u32(&gbl_args->shutdown, 1);
/* wait for receivers */
- odph_odpthreads_join(&thd_tbl[0]);
+ odph_thread_join(thread_tbl, num_rx_workers);
if (!status->warmup)
return process_results(expected_tx_cnt, status);
diff --git a/test/performance/odp_pool_perf.c b/test/performance/odp_pool_perf.c
index ee97af519..957b1de00 100644
--- a/test/performance/odp_pool_perf.c
+++ b/test/performance/odp_pool_perf.c
@@ -43,7 +43,7 @@ typedef struct test_global_t {
odp_barrier_t barrier;
odp_pool_t pool;
odp_cpumask_t cpumask;
- odph_odpthread_t thread_tbl[ODP_THREAD_COUNT_MAX];
+ odph_thread_t thread_tbl[ODP_THREAD_COUNT_MAX];
test_stat_t stat[ODP_THREAD_COUNT_MAX];
} test_global_t;
@@ -445,23 +445,28 @@ static int test_packet_pool(void *arg)
static int start_workers(test_global_t *global, odp_instance_t instance)
{
- odph_odpthread_params_t thr_params;
+ odph_thread_common_param_t thr_common;
+ odph_thread_param_t thr_param;
test_options_t *test_options = &global->test_options;
int num_cpu = test_options->num_cpu;
int packet_pool = test_options->pool_type;
- memset(&thr_params, 0, sizeof(thr_params));
- thr_params.thr_type = ODP_THREAD_WORKER;
- thr_params.instance = instance;
- thr_params.arg = global;
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = instance;
+ thr_common.cpumask = &global->cpumask;
+ thr_common.share_param = 1;
+
+ odph_thread_param_init(&thr_param);
+ thr_param.arg = global;
+ thr_param.thr_type = ODP_THREAD_WORKER;
if (packet_pool)
- thr_params.start = test_packet_pool;
+ thr_param.start = test_packet_pool;
else
- thr_params.start = test_buffer_pool;
+ thr_param.start = test_buffer_pool;
- if (odph_odpthreads_create(global->thread_tbl, &global->cpumask,
- &thr_params) != num_cpu)
+ if (odph_thread_create(global->thread_tbl, &thr_common, &thr_param,
+ num_cpu) != num_cpu)
return -1;
return 0;
@@ -608,7 +613,7 @@ int main(int argc, char **argv)
start_workers(global, instance);
/* Wait workers to exit */
- odph_odpthreads_join(global->thread_tbl);
+ odph_thread_join(global->thread_tbl, global->test_options.num_cpu);
print_stat(global);
diff --git a/test/performance/odp_queue_perf.c b/test/performance/odp_queue_perf.c
index 33284d312..320f2f35a 100644
--- a/test/performance/odp_queue_perf.c
+++ b/test/performance/odp_queue_perf.c
@@ -44,7 +44,7 @@ typedef struct test_global_t {
odp_shm_t shm;
odp_pool_t pool;
odp_queue_t queue[MAX_QUEUES];
- odph_odpthread_t thread_tbl[ODP_THREAD_COUNT_MAX];
+ odph_thread_t thread_tbl[ODP_THREAD_COUNT_MAX];
test_stat_t stat[ODP_THREAD_COUNT_MAX];
} test_global_t;
@@ -423,18 +423,13 @@ error:
static int start_workers(test_global_t *global)
{
- odph_odpthread_params_t thr_params;
+ odph_thread_common_param_t thr_common;
+ odph_thread_param_t thr_param;
odp_cpumask_t cpumask;
int ret;
test_options_t *test_options = &global->options;
int num_cpu = test_options->num_cpu;
- memset(&thr_params, 0, sizeof(thr_params));
- thr_params.thr_type = ODP_THREAD_WORKER;
- thr_params.instance = global->instance;
- thr_params.start = run_test;
- thr_params.arg = global;
-
ret = odp_cpumask_default_worker(&cpumask, num_cpu);
if (num_cpu && ret != num_cpu) {
@@ -452,8 +447,18 @@ static int start_workers(test_global_t *global)
odp_barrier_init(&global->barrier, num_cpu);
- if (odph_odpthreads_create(global->thread_tbl, &cpumask, &thr_params)
- != num_cpu)
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = global->instance;
+ thr_common.cpumask = &cpumask;
+ thr_common.share_param = 1;
+
+ odph_thread_param_init(&thr_param);
+ thr_param.start = run_test;
+ thr_param.arg = global;
+ thr_param.thr_type = ODP_THREAD_WORKER;
+
+ if (odph_thread_create(global->thread_tbl, &thr_common, &thr_param,
+ num_cpu) != num_cpu)
return -1;
return 0;
@@ -596,7 +601,7 @@ int main(int argc, char **argv)
}
/* Wait workers to exit */
- odph_odpthreads_join(global->thread_tbl);
+ odph_thread_join(global->thread_tbl, global->options.num_cpu);
print_stat(global);
diff --git a/test/performance/odp_sched_latency.c b/test/performance/odp_sched_latency.c
index c6b659aac..2910dcdbc 100644
--- a/test/performance/odp_sched_latency.c
+++ b/test/performance/odp_sched_latency.c
@@ -705,8 +705,9 @@ int main(int argc, char *argv[])
odp_instance_t instance;
odp_init_t init_param;
odph_helper_options_t helper_options;
- odph_odpthread_t *thread_tbl;
- odph_odpthread_params_t thr_params;
+ odph_thread_t *thread_tbl;
+ odph_thread_common_param_t thr_common;
+ odph_thread_param_t thr_param;
odp_cpumask_t cpumask;
odp_pool_t pool;
odp_pool_capability_t pool_capa;
@@ -766,7 +767,7 @@ int main(int argc, char *argv[])
printf(" First CPU: %i\n", odp_cpumask_first(&cpumask));
printf(" CPU mask: %s\n", cpumaskstr);
- thread_tbl = calloc(sizeof(odph_odpthread_t), num_workers);
+ thread_tbl = calloc(sizeof(odph_thread_t), num_workers);
if (!thread_tbl) {
ODPH_ERR("no memory for thread_tbl\n");
return -1;
@@ -858,15 +859,20 @@ int main(int argc, char *argv[])
odp_barrier_init(&globals->barrier, num_workers);
/* Create and launch worker threads */
- memset(&thr_params, 0, sizeof(thr_params));
- thr_params.thr_type = ODP_THREAD_WORKER;
- thr_params.instance = instance;
- thr_params.start = run_thread;
- thr_params.arg = NULL;
- odph_odpthreads_create(thread_tbl, &cpumask, &thr_params);
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = instance;
+ thr_common.cpumask = &cpumask;
+ thr_common.share_param = 1;
+
+ odph_thread_param_init(&thr_param);
+ thr_param.start = run_thread;
+ thr_param.arg = NULL;
+ thr_param.thr_type = ODP_THREAD_WORKER;
+
+ odph_thread_create(thread_tbl, &thr_common, &thr_param, num_workers);
/* Wait for worker threads to terminate */
- odph_odpthreads_join(thread_tbl);
+ odph_thread_join(thread_tbl, num_workers);
free(thread_tbl);
printf("ODP scheduling latency test complete\n\n");
diff --git a/test/performance/odp_sched_pktio.c b/test/performance/odp_sched_pktio.c
index cbdbdf4aa..589b58d97 100644
--- a/test/performance/odp_sched_pktio.c
+++ b/test/performance/odp_sched_pktio.c
@@ -1396,45 +1396,50 @@ static void destroy_timers(test_global_t *test_global)
odp_timer_pool_destroy(timer_pool);
}
-static void start_workers(odph_odpthread_t thread[],
+static void start_workers(odph_thread_t thread[],
test_global_t *test_global)
{
int i;
odp_cpumask_t cpumask;
- odph_odpthread_params_t param;
+ odph_thread_common_param_t thr_common;
+ odph_thread_param_t thr_param[MAX_WORKERS];
int num = test_global->opt.num_worker;
- memset(&param, 0, sizeof(odph_odpthread_params_t));
+ odp_cpumask_zero(&cpumask);
- if (test_global->opt.timeout_us)
- param.start = worker_thread_timers;
- else if (test_global->opt.pipe_stages)
- param.start = worker_thread_pipeline;
- else
- param.start = worker_thread_direct;
-
- param.thr_type = ODP_THREAD_WORKER;
- param.instance = test_global->instance;
-
- memset(thread, 0, num * sizeof(odph_odpthread_t));
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = test_global->instance;
+ thr_common.cpumask = &cpumask;
for (i = 0; i < num; i++) {
- odp_cpumask_zero(&cpumask);
odp_cpumask_set(&cpumask, test_global->worker_cpu[i]);
test_global->worker_arg[i].worker_id = i;
test_global->worker_arg[i].test_global_ptr = test_global;
- param.arg = &test_global->worker_arg[i];
- odph_odpthreads_create(&thread[i], &cpumask, &param);
+ odph_thread_param_init(&thr_param[i]);
+
+ if (!i) {
+ if (test_global->opt.timeout_us)
+ thr_param[0].start = worker_thread_timers;
+ else if (test_global->opt.pipe_stages)
+ thr_param[0].start = worker_thread_pipeline;
+ else
+ thr_param[0].start = worker_thread_direct;
+ } else {
+ thr_param[i].start = thr_param[0].start;
+ }
+
+ thr_param[i].arg = &test_global->worker_arg[i];
+ thr_param[i].thr_type = ODP_THREAD_WORKER;
}
+
+ memset(thread, 0, num * sizeof(odph_thread_t));
+ odph_thread_create(thread, &thr_common, thr_param, num);
}
-static void wait_workers(odph_odpthread_t thread[], test_global_t *test_global)
+static void wait_workers(odph_thread_t thread[], test_global_t *test_global)
{
- int i;
-
- for (i = 0; i < test_global->opt.num_worker; ++i)
- odph_odpthreads_join(&thread[i]);
+ odph_thread_join(thread, test_global->opt.num_worker);
}
int main(int argc, char *argv[])
@@ -1444,7 +1449,7 @@ int main(int argc, char *argv[])
odp_shm_t shm;
odp_time_t t1 = ODP_TIME_NULL, t2 = ODP_TIME_NULL;
odph_helper_options_t helper_options;
- odph_odpthread_t thread[MAX_WORKERS];
+ odph_thread_t thread[MAX_WORKERS];
test_options_t test_options;
int ret = 0;