Diffstat (limited to 'test')
-rw-r--r--  test/performance/odp_l2fwd.c                                        469
-rw-r--r--  test/performance/odp_packet_gen.c                                    48
-rw-r--r--  test/performance/odp_sched_latency.c                                245
-rw-r--r--  test/validation/api/buffer/buffer.c                                 187
-rw-r--r--  test/validation/api/classification/odp_classification_common.c       5
-rw-r--r--  test/validation/api/classification/odp_classification_test_pmr.c   119
-rw-r--r--  test/validation/api/classification/odp_classification_testsuites.h   3
-rw-r--r--  test/validation/api/crypto/odp_crypto_test_inp.c                      3
-rw-r--r--  test/validation/api/pktio/pktio.c                                     1
-rw-r--r--  test/validation/api/scheduler/scheduler.c                            52
-rw-r--r--  test/validation/api/timer/timer.c                                    43
11 files changed, 915 insertions, 260 deletions
diff --git a/test/performance/odp_l2fwd.c b/test/performance/odp_l2fwd.c
index b9e3106da..1833dcf78 100644
--- a/test/performance/odp_l2fwd.c
+++ b/test/performance/odp_l2fwd.c
@@ -36,6 +36,9 @@
/* Maximum number of pktio queues per interface */
#define MAX_QUEUES 32
+/* Maximum number of schedule groups */
+#define MAX_GROUPS 32
+
/* Maximum number of pktio interfaces */
#define MAX_PKTIOS 8
@@ -48,6 +51,9 @@
/* Default vector timeout */
#define DEFAULT_VEC_TMO ODP_TIME_MSEC_IN_NS
+/* Maximum thread info string length */
+#define EXTRA_STR_LEN 32
+
/* Packet input mode */
typedef enum pktin_mode_t {
DIRECT_RECV,
@@ -97,6 +103,7 @@ typedef struct {
int chksum; /* Checksum offload */
int sched_mode; /* Scheduler mode */
int num_groups; /* Number of scheduling groups */
+ int group_mode; /* How threads join groups */
int burst_rx; /* Receive burst size */
int pool_per_if; /* Create pool per interface */
uint32_t num_pkt; /* Number of packets per pool */
@@ -110,6 +117,9 @@ typedef struct {
int promisc_mode; /* Promiscuous mode enabled */
int flow_aware; /* Flow aware scheduling enabled */
int mtu; /* Interface MTU */
+ int num_prio;
+ odp_schedule_prio_t prio[MAX_PKTIOS]; /* Priority of input queues of an interface */
+
} appl_args_t;
/* Statistics */
@@ -144,11 +154,12 @@ typedef struct thread_args_t {
} pktio[MAX_PKTIOS];
/* Groups to join */
- odp_schedule_group_t group[MAX_PKTIOS];
+ odp_schedule_group_t group[MAX_GROUPS];
int thr_idx;
int num_pktio;
- int num_groups;
+ int num_grp_join;
+
} thread_args_t;
/*
@@ -192,6 +203,13 @@ typedef struct {
/* Break workers loop if set to 1 */
odp_atomic_u32_t exit_threads;
+ uint32_t pkt_len;
+ uint32_t num_pkt;
+ uint32_t seg_len;
+ uint32_t vector_num;
+ uint32_t vector_max_size;
+ char cpumaskstr[ODP_CPUMASK_STR_SIZE];
+
} args_t;
/* Global pointer to args */
@@ -399,6 +417,7 @@ static int run_worker_sched_mode_vector(void *arg)
int i;
int pktio, num_pktio;
uint16_t max_burst;
+ odp_thrmask_t mask;
odp_pktout_queue_t pktout[MAX_PKTIOS];
odp_queue_t tx_queue[MAX_PKTIOS];
thread_args_t *thr_args = arg;
@@ -409,19 +428,14 @@ static int run_worker_sched_mode_vector(void *arg)
thr = odp_thread_id();
max_burst = gbl_args->appl.burst_rx;
- if (gbl_args->appl.num_groups > 0) {
- odp_thrmask_t mask;
+ odp_thrmask_zero(&mask);
+ odp_thrmask_set(&mask, thr);
- odp_thrmask_zero(&mask);
- odp_thrmask_set(&mask, thr);
-
- /* Join non-default groups */
- for (i = 0; i < thr_args->num_groups; i++) {
- if (odp_schedule_group_join(thr_args->group[i],
- &mask)) {
- ODPH_ERR("Join failed\n");
- return -1;
- }
+ /* Join non-default groups */
+ for (i = 0; i < thr_args->num_grp_join; i++) {
+ if (odp_schedule_group_join(thr_args->group[i], &mask)) {
+ ODPH_ERR("Join failed: %i\n", i);
+ return -1;
}
}
@@ -548,8 +562,10 @@ static int run_worker_sched_mode(void *arg)
int i;
int pktio, num_pktio;
uint16_t max_burst;
+ odp_thrmask_t mask;
odp_pktout_queue_t pktout[MAX_PKTIOS];
odp_queue_t tx_queue[MAX_PKTIOS];
+ char extra_str[EXTRA_STR_LEN];
thread_args_t *thr_args = arg;
stats_t *stats = &thr_args->stats;
int use_event_queue = gbl_args->appl.out_mode;
@@ -558,22 +574,31 @@ static int run_worker_sched_mode(void *arg)
thr = odp_thread_id();
max_burst = gbl_args->appl.burst_rx;
- if (gbl_args->appl.num_groups > 0) {
- odp_thrmask_t mask;
+ memset(extra_str, 0, EXTRA_STR_LEN);
+ odp_thrmask_zero(&mask);
+ odp_thrmask_set(&mask, thr);
+
+ /* Join non-default groups */
+ for (i = 0; i < thr_args->num_grp_join; i++) {
+ if (odp_schedule_group_join(thr_args->group[i], &mask)) {
+ ODPH_ERR("Join failed: %i\n", i);
+ return -1;
+ }
- odp_thrmask_zero(&mask);
- odp_thrmask_set(&mask, thr);
+ if (gbl_args->appl.verbose) {
+ uint64_t tmp = (uint64_t)(uintptr_t)thr_args->group[i];
- /* Join non-default groups */
- for (i = 0; i < thr_args->num_groups; i++) {
- if (odp_schedule_group_join(thr_args->group[i],
- &mask)) {
- ODPH_ERR("Join failed\n");
- return -1;
- }
+ printf("[%02i] Joined group 0x%" PRIx64 "\n", thr, tmp);
}
}
+ if (thr_args->num_grp_join)
+ snprintf(extra_str, EXTRA_STR_LEN, ", joined %i groups", thr_args->num_grp_join);
+ else if (gbl_args->appl.num_groups == 0)
+ snprintf(extra_str, EXTRA_STR_LEN, ", GROUP_ALL");
+ else if (gbl_args->appl.num_groups)
+ snprintf(extra_str, EXTRA_STR_LEN, ", GROUP_WORKER");
+
num_pktio = thr_args->num_pktio;
if (num_pktio > MAX_PKTIOS) {
@@ -586,10 +611,10 @@ static int run_worker_sched_mode(void *arg)
pktout[pktio] = thr_args->pktio[pktio].pktout;
}
- printf("[%02i] PKTIN_SCHED_%s, %s\n", thr,
+ printf("[%02i] PKTIN_SCHED_%s, %s%s\n", thr,
(in_mode == SCHED_PARALLEL) ? "PARALLEL" :
((in_mode == SCHED_ATOMIC) ? "ATOMIC" : "ORDERED"),
- (use_event_queue) ? "PKTOUT_QUEUE" : "PKTOUT_DIRECT");
+ (use_event_queue) ? "PKTOUT_QUEUE" : "PKTOUT_DIRECT", extra_str);
odp_barrier_wait(&gbl_args->init_barrier);
@@ -851,7 +876,7 @@ static int set_pktin_vector_params(odp_pktin_queue_param_t *pktin_param, odp_poo
pktio_capa.vector.max_size : pktio_capa.vector.min_size;
printf("\nWarning: Modified vector size to %u\n\n", vec_size);
} else {
- ODPH_ERR("Error: Invalid pktio vector size %u, valid range [%u, %u]\n",
+ ODPH_ERR("Invalid pktio vector size %u, valid range [%u, %u]\n",
vec_size, pktio_capa.vector.min_size, pktio_capa.vector.max_size);
return -1;
}
@@ -870,7 +895,7 @@ static int set_pktin_vector_params(odp_pktin_queue_param_t *pktin_param, odp_poo
pktio_capa.vector.max_tmo_ns : pktio_capa.vector.min_tmo_ns;
printf("\nWarning: Modified vector timeout to %" PRIu64 "\n\n", vec_tmo_ns);
} else {
- ODPH_ERR("Error: Invalid vector timeout %" PRIu64 ", valid range [%" PRIu64
+ ODPH_ERR("Invalid vector timeout %" PRIu64 ", valid range [%" PRIu64
", %" PRIu64 "]\n", vec_tmo_ns,
pktio_capa.vector.min_tmo_ns, pktio_capa.vector.max_tmo_ns);
return -1;
@@ -917,12 +942,12 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx, odp_po
pktio = odp_pktio_open(dev, pool, &pktio_param);
if (pktio == ODP_PKTIO_INVALID) {
- ODPH_ERR("Error: failed to open %s\n", dev);
+ ODPH_ERR("Pktio open failed: %s\n", dev);
return -1;
}
if (odp_pktio_info(pktio, &info)) {
- ODPH_ERR("Error: pktio info failed %s\n", dev);
+ ODPH_ERR("Pktio info failed: %s\n", dev);
return -1;
}
@@ -933,7 +958,7 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx, odp_po
odp_pktio_print(pktio);
if (odp_pktio_capability(pktio, &pktio_capa)) {
- ODPH_ERR("Error: pktio capability query failed %s\n", dev);
+ ODPH_ERR("Pktio capability query failed: %s\n", dev);
return -1;
}
@@ -957,14 +982,13 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx, odp_po
if (gbl_args->appl.promisc_mode) {
if (!pktio_capa.set_op.op.promisc_mode) {
- ODPH_ERR("Error: promisc mode set not supported %s\n",
- dev);
+ ODPH_ERR("Promisc mode set not supported: %s\n", dev);
return -1;
}
/* Enable promisc mode */
if (odp_pktio_promisc_mode_set(pktio, true)) {
- ODPH_ERR("Error: promisc mode enable failed %s\n", dev);
+ ODPH_ERR("Promisc mode enable failed: %s\n", dev);
return -1;
}
}
@@ -974,14 +998,14 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx, odp_po
uint32_t maxlen_output = pktio_capa.maxlen.max_output ? gbl_args->appl.mtu : 0;
if (!pktio_capa.set_op.op.maxlen) {
- ODPH_ERR("Error: modifying interface MTU not supported %s\n", dev);
+ ODPH_ERR("Modifying interface MTU not supported: %s\n", dev);
return -1;
}
if (maxlen_input &&
(maxlen_input < pktio_capa.maxlen.min_input ||
maxlen_input > pktio_capa.maxlen.max_input)) {
- ODPH_ERR("Error: unsupported MTU value %" PRIu32 " for %s "
+ ODPH_ERR("Unsupported MTU value %" PRIu32 " for %s "
"(min %" PRIu32 ", max %" PRIu32 ")\n", maxlen_input, dev,
pktio_capa.maxlen.min_input, pktio_capa.maxlen.max_input);
return -1;
@@ -989,14 +1013,14 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx, odp_po
if (maxlen_output &&
(maxlen_output < pktio_capa.maxlen.min_output ||
maxlen_output > pktio_capa.maxlen.max_output)) {
- ODPH_ERR("Error: unsupported MTU value %" PRIu32 " for %s "
+ ODPH_ERR("Unsupported MTU value %" PRIu32 " for %s "
"(min %" PRIu32 ", max %" PRIu32 ")\n", maxlen_output, dev,
pktio_capa.maxlen.min_output, pktio_capa.maxlen.max_output);
return -1;
}
if (odp_pktio_maxlen_set(pktio, maxlen_input, maxlen_output)) {
- ODPH_ERR("Error: setting MTU failed %s\n", dev);
+ ODPH_ERR("Setting MTU failed: %s\n", dev);
return -1;
}
}
@@ -1010,6 +1034,15 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx, odp_po
mode_tx = ODP_PKTIO_OP_MT_UNSAFE;
if (gbl_args->appl.sched_mode) {
+ odp_schedule_prio_t prio;
+
+ if (gbl_args->appl.num_prio) {
+ prio = gbl_args->appl.prio[idx];
+ } else {
+ prio = odp_schedule_default_prio();
+ gbl_args->appl.prio[idx] = prio;
+ }
+
if (gbl_args->appl.in_mode == SCHED_ATOMIC)
sync_mode = ODP_SCHED_SYNC_ATOMIC;
else if (gbl_args->appl.in_mode == SCHED_ORDERED)
@@ -1017,7 +1050,7 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx, odp_po
else
sync_mode = ODP_SCHED_SYNC_PARALLEL;
- pktin_param.queue_param.sched.prio = odp_schedule_default_prio();
+ pktin_param.queue_param.sched.prio = prio;
pktin_param.queue_param.sched.sync = sync_mode;
pktin_param.queue_param.sched.group = group;
}
@@ -1046,7 +1079,7 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx, odp_po
if (gbl_args->appl.vector_mode) {
if (!pktio_capa.vector.supported) {
- ODPH_ERR("Error: packet vector input not supported %s\n", dev);
+ ODPH_ERR("Packet vector input not supported: %s\n", dev);
return -1;
}
if (set_pktin_vector_params(&pktin_param, vec_pool, pktio_capa))
@@ -1054,43 +1087,35 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx, odp_po
}
if (odp_pktin_queue_config(pktio, &pktin_param)) {
- ODPH_ERR("Error: input queue config failed %s\n", dev);
+ ODPH_ERR("Input queue config failed: %s\n", dev);
return -1;
}
if (odp_pktout_queue_config(pktio, &pktout_param)) {
- ODPH_ERR("Error: output queue config failed %s\n", dev);
+ ODPH_ERR("Output queue config failed: %s\n", dev);
return -1;
}
if (gbl_args->appl.in_mode == DIRECT_RECV) {
- if (odp_pktin_queue(pktio, gbl_args->pktios[idx].pktin,
- num_rx) != num_rx) {
- ODPH_ERR("Error: pktin queue query failed %s\n", dev);
+ if (odp_pktin_queue(pktio, gbl_args->pktios[idx].pktin, num_rx) != num_rx) {
+ ODPH_ERR("Pktin queue query failed: %s\n", dev);
return -1;
}
} else {
- if (odp_pktin_event_queue(pktio,
- gbl_args->pktios[idx].rx_q,
- num_rx) != num_rx) {
- ODPH_ERR("Error: pktin event queue query failed %s\n",
- dev);
+ if (odp_pktin_event_queue(pktio, gbl_args->pktios[idx].rx_q, num_rx) != num_rx) {
+ ODPH_ERR("Pktin event queue query failed: %s\n", dev);
return -1;
}
}
if (gbl_args->appl.out_mode == PKTOUT_DIRECT) {
- if (odp_pktout_queue(pktio,
- gbl_args->pktios[idx].pktout,
- num_tx) != num_tx) {
- ODPH_ERR("Error: pktout queue query failed %s\n", dev);
+ if (odp_pktout_queue(pktio, gbl_args->pktios[idx].pktout, num_tx) != num_tx) {
+ ODPH_ERR("Pktout queue query failed: %s\n", dev);
return -1;
}
} else {
- if (odp_pktout_event_queue(pktio,
- gbl_args->pktios[idx].tx_q,
- num_tx) != num_tx) {
- ODPH_ERR("Error: event queue query failed %s\n", dev);
+ if (odp_pktout_event_queue(pktio, gbl_args->pktios[idx].tx_q, num_tx) != num_tx) {
+ ODPH_ERR("Event queue query failed: %s\n", dev);
return -1;
}
}
@@ -1435,11 +1460,25 @@ static void usage(char *progname)
" -e, --error_check <arg> 0: Don't check packet errors (default)\n"
" 1: Check packet errors\n"
" -k, --chksum <arg> 0: Don't use checksum offload (default)\n"
- " 1: Use checksum offload\n"
- " -g, --groups <num> Number of groups to use: 0 ... num\n"
- " -1: SCHED_GROUP_WORKER\n"
- " 0: SCHED_GROUP_ALL (default)\n"
- " num: must not exceed number of interfaces or workers\n"
+ " 1: Use checksum offload\n",
+ NO_PATH(progname), NO_PATH(progname), MAX_PKTIOS);
+
+ printf(" -g, --groups <num> Number of new groups to create (1 ... num). Interfaces\n"
+ " are placed into the groups in round robin.\n"
+ " 0: Use SCHED_GROUP_ALL (default)\n"
+ " -1: Use SCHED_GROUP_WORKER\n"
+ " -G, --group_mode <arg> Select how threads join new groups (when -g > 0)\n"
+ " 0: All threads join all created groups (default)\n"
+ " 1: All threads join first N created groups.\n"
+ " N is number of interfaces (== active groups).\n"
+ " 2: Each thread joins a part of the first N groups\n"
+ " (in round robin).\n"
+ " -I, --prio <prio list> Schedule priority of packet input queues.\n"
+ " Comma separated list of priorities (no spaces). A value\n"
+ " per interface. All queues of an interface have the same\n"
+ " priority. Values must be between odp_schedule_min_prio\n"
+ " and odp_schedule_max_prio. odp_schedule_default_prio is\n"
+ " used by default.\n"
" -b, --burst_rx <num> 0: Use max burst size (default)\n"
" num: Max number of packets per receive call\n"
" -p, --packet_copy 0: Don't copy packet (default)\n"
@@ -1464,9 +1503,7 @@ static void usage(char *progname)
" -f, --flow_aware Enable flow aware scheduling.\n"
" -v, --verbose Verbose output.\n"
" -h, --help Display help and exit.\n\n"
- "\n", NO_PATH(progname), NO_PATH(progname), MAX_PKTIOS, DEFAULT_VEC_SIZE,
- DEFAULT_VEC_TMO, POOL_PKT_LEN
- );
+ "\n", DEFAULT_VEC_SIZE, DEFAULT_VEC_TMO, POOL_PKT_LEN);
}
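Note: example invocation combining the new options (the interface names are hypothetical; -m 1 selects one of the scheduled pktin modes 1-3 that the group options require):

  ./odp_l2fwd -i eth0,eth1 -m 1 -g 4 -G 2 -I 3,5

This creates four schedule groups, places the two interfaces into groups 0 and 1 in round robin, lets each worker join a share of the first two (active) groups, and sets the pktin queue priority to 3 on the first interface and 5 on the second, assuming both values lie between odp_schedule_min_prio() and odp_schedule_max_prio().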
/*
@@ -1481,8 +1518,8 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
int opt;
int long_index;
char *token;
- char *addr_str;
- size_t len;
+ char *tmp_str;
+ size_t str_len, len;
int i;
static const struct option longopts[] = {
{"count", required_argument, NULL, 'c'},
@@ -1497,6 +1534,8 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
{"error_check", required_argument, NULL, 'e'},
{"chksum", required_argument, NULL, 'k'},
{"groups", required_argument, NULL, 'g'},
+ {"group_mode", required_argument, NULL, 'G'},
+ {"prio", required_argument, NULL, 'I'},
{"burst_rx", required_argument, NULL, 'b'},
{"packet_copy", required_argument, NULL, 'p'},
{"pool_per_if", required_argument, NULL, 'y'},
@@ -1515,7 +1554,7 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
{NULL, 0, NULL, 0}
};
- static const char *shortopts = "+c:t:a:i:m:o:r:d:s:e:k:g:b:p:y:n:l:L:w:x:z:M:uPfvh";
+ static const char *shortopts = "+c:t:a:i:m:o:r:d:s:e:k:g:G:I:b:p:y:n:l:L:w:x:z:M:uPfvh";
appl_args->time = 0; /* loop forever if time to run is 0 */
appl_args->accuracy = 1; /* get and print pps stats second */
@@ -1523,6 +1562,7 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
appl_args->dst_change = 1; /* change eth dst address by default */
appl_args->src_change = 1; /* change eth src address by default */
appl_args->num_groups = 0; /* use default group */
+ appl_args->group_mode = 0;
appl_args->error_check = 0; /* don't check packet errors by default */
appl_args->packet_copy = 0;
appl_args->burst_rx = 0;
@@ -1539,6 +1579,7 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
appl_args->vec_size = 0;
appl_args->vec_tmo_ns = 0;
appl_args->flow_aware = 0;
+ appl_args->num_prio = 0;
while (1) {
opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
@@ -1556,60 +1597,58 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
case 'a':
appl_args->accuracy = atoi(optarg);
break;
- /* parse packet-io interface names */
case 'r':
len = strlen(optarg);
if (len == 0) {
- usage(argv[0]);
+ ODPH_ERR("Bad dest address string\n");
exit(EXIT_FAILURE);
}
- len += 1; /* add room for '\0' */
- addr_str = malloc(len);
- if (addr_str == NULL) {
- usage(argv[0]);
+ str_len = len + 1;
+
+ tmp_str = malloc(str_len);
+ if (tmp_str == NULL) {
+ ODPH_ERR("Dest address malloc() failed\n");
exit(EXIT_FAILURE);
}
/* store the mac addresses names */
- strcpy(addr_str, optarg);
- for (token = strtok(addr_str, ","), i = 0;
+ memcpy(tmp_str, optarg, str_len);
+ for (token = strtok(tmp_str, ","), i = 0;
token != NULL; token = strtok(NULL, ","), i++) {
if (i >= MAX_PKTIOS) {
- printf("too many MAC addresses\n");
- usage(argv[0]);
+ ODPH_ERR("Too many MAC addresses\n");
exit(EXIT_FAILURE);
}
- if (odph_eth_addr_parse(&appl_args->addrs[i],
- token) != 0) {
- printf("invalid MAC address\n");
- usage(argv[0]);
+ if (odph_eth_addr_parse(&appl_args->addrs[i], token) != 0) {
+ ODPH_ERR("Invalid MAC address\n");
exit(EXIT_FAILURE);
}
}
appl_args->addr_count = i;
if (appl_args->addr_count < 1) {
- usage(argv[0]);
+ ODPH_ERR("Bad dest address count\n");
exit(EXIT_FAILURE);
}
- free(addr_str);
+ free(tmp_str);
break;
case 'i':
len = strlen(optarg);
if (len == 0) {
- usage(argv[0]);
+ ODPH_ERR("Bad pktio interface string\n");
exit(EXIT_FAILURE);
}
- len += 1; /* add room for '\0' */
- appl_args->if_str = malloc(len);
+ str_len = len + 1;
+
+ appl_args->if_str = malloc(str_len);
if (appl_args->if_str == NULL) {
- usage(argv[0]);
+ ODPH_ERR("Pktio interface malloc() failed\n");
exit(EXIT_FAILURE);
}
/* count the number of tokens separated by ',' */
- strcpy(appl_args->if_str, optarg);
+ memcpy(appl_args->if_str, optarg, str_len);
for (token = strtok(appl_args->if_str, ","), i = 0;
token != NULL;
token = strtok(NULL, ","), i++)
@@ -1617,18 +1656,16 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
appl_args->if_count = i;
- if (appl_args->if_count < 1 ||
- appl_args->if_count > MAX_PKTIOS) {
- usage(argv[0]);
+ if (appl_args->if_count < 1 || appl_args->if_count > MAX_PKTIOS) {
+ ODPH_ERR("Bad pktio interface count: %i\n", appl_args->if_count);
exit(EXIT_FAILURE);
}
/* allocate storage for the if names */
- appl_args->if_names =
- calloc(appl_args->if_count, sizeof(char *));
+ appl_args->if_names = calloc(appl_args->if_count, sizeof(char *));
/* store the if names (reset names string) */
- strcpy(appl_args->if_str, optarg);
+ memcpy(appl_args->if_str, optarg, str_len);
for (token = strtok(appl_args->if_str, ","), i = 0;
token != NULL; token = strtok(NULL, ","), i++) {
appl_args->if_names[i] = token;
@@ -1667,6 +1704,44 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
case 'g':
appl_args->num_groups = atoi(optarg);
break;
+ case 'G':
+ appl_args->group_mode = atoi(optarg);
+ break;
+ case 'I':
+ len = strlen(optarg);
+ if (len == 0) {
+ ODPH_ERR("Bad priority list\n");
+ exit(EXIT_FAILURE);
+ }
+
+ str_len = len + 1;
+
+ tmp_str = malloc(str_len);
+ if (tmp_str == NULL) {
+ ODPH_ERR("Priority list malloc() failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ memcpy(tmp_str, optarg, str_len);
+ token = strtok(tmp_str, ",");
+
+ for (i = 0; token != NULL; token = strtok(NULL, ","), i++) {
+ if (i >= MAX_PKTIOS) {
+ ODPH_ERR("Too many priorities\n");
+ exit(EXIT_FAILURE);
+ }
+
+ appl_args->prio[i] = atoi(token);
+ appl_args->num_prio++;
+ }
+
+ if (appl_args->num_prio == 0) {
+ ODPH_ERR("Bad priority list\n");
+ exit(EXIT_FAILURE);
+ }
+
+ free(tmp_str);
+ break;
case 'b':
appl_args->burst_rx = atoi(optarg);
break;
@@ -1719,20 +1794,23 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
}
if (appl_args->if_count == 0) {
- usage(argv[0]);
+ ODPH_ERR("No pktio interfaces\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (appl_args->num_prio && appl_args->num_prio != appl_args->if_count) {
+ ODPH_ERR("Different number of priorities and pktio interfaces\n");
exit(EXIT_FAILURE);
}
- if (appl_args->addr_count != 0 &&
- appl_args->addr_count != appl_args->if_count) {
- printf("Number of destination addresses differs from number"
- " of interfaces\n");
- usage(argv[0]);
+
+ if (appl_args->addr_count != 0 && appl_args->addr_count != appl_args->if_count) {
+ ODPH_ERR("Number of dest addresses differs from number of interfaces\n");
exit(EXIT_FAILURE);
}
if (appl_args->burst_rx > MAX_PKT_BURST) {
- printf("Error: Burst size (%i) too large. Maximum is %i.\n",
- appl_args->burst_rx, MAX_PKT_BURST);
+ ODPH_ERR("Burst size (%i) too large. Maximum is %i.\n",
+ appl_args->burst_rx, MAX_PKT_BURST);
exit(EXIT_FAILURE);
}
@@ -1750,11 +1828,10 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
/*
* Print system and application info
*/
-static void print_info(appl_args_t *appl_args)
+static void print_info(void)
{
int i;
-
- odp_sys_info_print();
+ appl_args_t *appl_args = &gbl_args->appl;
printf("\n"
"odp_l2fwd options\n"
@@ -1801,6 +1878,28 @@ static void print_info(appl_args_t *appl_args)
appl_args->chksum ? "chksum " : "",
appl_args->packet_copy ? "packet_copy" : "");
}
+
+ printf("Num worker threads: %i\n", appl_args->num_workers);
+ printf("CPU mask: %s\n", gbl_args->cpumaskstr);
+
+ if (appl_args->num_groups > 0)
+ printf("num groups: %i\n", appl_args->num_groups);
+ else if (appl_args->num_groups == 0)
+ printf("group: ODP_SCHED_GROUP_ALL\n");
+ else
+ printf("group: ODP_SCHED_GROUP_WORKER\n");
+
+ printf("Packets per pool: %u\n", gbl_args->num_pkt);
+ printf("Packet length: %u\n", gbl_args->pkt_len);
+ printf("Segment length: %u\n", gbl_args->seg_len);
+ printf("Vectors per pool: %u\n", gbl_args->vector_num);
+ printf("Vector size: %u\n", gbl_args->vector_max_size);
+ printf("Priority per IF: ");
+
+ for (i = 0; i < appl_args->if_count; i++)
+ printf(" %i", appl_args->prio[i]);
+
+ printf("\n\n");
}
static void gbl_args_init(args_t *args)
@@ -1851,7 +1950,7 @@ static int set_vector_pool_params(odp_pool_param_t *params, odp_pool_capability_
vec_size = pool_capa.vector.max_size;
printf("\nWarning: Vector size reduced to %u\n\n", vec_size);
} else {
- ODPH_ERR("Error: Vector size too big %u. Maximum is %u.\n",
+ ODPH_ERR("Vector size too big %u. Maximum is %u.\n",
vec_size, pool_capa.vector.max_size);
return -1;
}
@@ -1871,7 +1970,7 @@ static int set_vector_pool_params(odp_pool_param_t *params, odp_pool_capability_
num_vec = pool_capa.vector.max_num;
printf("\nWarning: number of vectors reduced to %u\n\n", num_vec);
} else {
- ODPH_ERR("Error: Too many vectors (%u) per pool. Maximum is %u.\n",
+ ODPH_ERR("Too many vectors (%u) per pool. Maximum is %u.\n",
num_vec, pool_capa.vector.max_num);
return -1;
}
@@ -1896,7 +1995,6 @@ int main(int argc, char *argv[])
int num_workers, num_thr;
odp_shm_t shm;
odp_cpumask_t cpumask;
- char cpumaskstr[ODP_CPUMASK_STR_SIZE];
odph_ethaddr_t new_addr;
odp_pool_param_t params;
int ret;
@@ -1904,8 +2002,8 @@ int main(int argc, char *argv[])
int if_count, num_pools, num_vec_pools;
int (*thr_run_func)(void *);
odp_instance_t instance;
- int num_groups;
- odp_schedule_group_t group[MAX_PKTIOS];
+ int num_groups, max_groups;
+ odp_schedule_group_t group[MAX_GROUPS];
odp_pool_t pool_tbl[MAX_PKTIOS], vec_pool_tbl[MAX_PKTIOS];
odp_pool_t pool, vec_pool;
odp_init_t init;
@@ -1917,7 +2015,7 @@ int main(int argc, char *argv[])
/* Let helper collect its own arguments (e.g. --odph_proc) */
argc = odph_parse_options(argc, argv);
if (odph_options(&helper_options)) {
- ODPH_ERR("Error: reading ODP helper options failed.\n");
+ ODPH_ERR("Reading ODP helper options failed.\n");
exit(EXIT_FAILURE);
}
@@ -1939,13 +2037,13 @@ int main(int argc, char *argv[])
/* Init ODP before calling anything else */
if (odp_init_global(&instance, &init, NULL)) {
- ODPH_ERR("Error: ODP global init failed.\n");
+ ODPH_ERR("ODP global init failed.\n");
exit(EXIT_FAILURE);
}
/* Init this thread */
if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
- ODPH_ERR("Error: ODP local init failed.\n");
+ ODPH_ERR("ODP local init failed.\n");
exit(EXIT_FAILURE);
}
@@ -1954,14 +2052,14 @@ int main(int argc, char *argv[])
ODP_CACHE_LINE_SIZE, 0);
if (shm == ODP_SHM_INVALID) {
- ODPH_ERR("Error: shared mem reserve failed.\n");
+ ODPH_ERR("Shared mem reserve failed.\n");
exit(EXIT_FAILURE);
}
gbl_args = odp_shm_addr(shm);
if (gbl_args == NULL) {
- ODPH_ERR("Error: shared mem alloc failed.\n");
+ ODPH_ERR("Shared mem addr failed.\n");
exit(EXIT_FAILURE);
}
gbl_args_init(gbl_args);
@@ -1969,19 +2067,18 @@ int main(int argc, char *argv[])
/* Parse and store the application arguments */
parse_args(argc, argv, &gbl_args->appl);
+ odp_sys_info_print();
+
if (sched_mode(gbl_args->appl.in_mode))
gbl_args->appl.sched_mode = 1;
- /* Print both system and application information */
- print_info(&gbl_args->appl);
-
num_workers = MAX_WORKERS;
if (gbl_args->appl.cpu_count && gbl_args->appl.cpu_count < MAX_WORKERS)
num_workers = gbl_args->appl.cpu_count;
/* Get default worker cpumask */
num_workers = odp_cpumask_default_worker(&cpumask, num_workers);
- (void)odp_cpumask_to_str(&cpumask, cpumaskstr, sizeof(cpumaskstr));
+ (void)odp_cpumask_to_str(&cpumask, gbl_args->cpumaskstr, sizeof(gbl_args->cpumaskstr));
gbl_args->appl.num_workers = num_workers;
@@ -1990,37 +2087,17 @@ int main(int argc, char *argv[])
if_count = gbl_args->appl.if_count;
- num_groups = gbl_args->appl.num_groups;
-
- printf("Num worker threads: %i\n", num_workers);
- printf("First CPU: %i\n", odp_cpumask_first(&cpumask));
- printf("CPU mask: %s\n", cpumaskstr);
-
- if (num_groups > 0)
- printf("num groups: %i\n", num_groups);
- else if (num_groups == 0)
- printf("group: ODP_SCHED_GROUP_ALL\n");
- else
- printf("group: ODP_SCHED_GROUP_WORKER\n");
-
-
- if (num_groups > if_count || num_groups > num_workers) {
- ODPH_ERR("Too many groups. Number of groups may not exceed "
- "number of interfaces or workers.\n");
- exit(EXIT_FAILURE);
- }
-
num_pools = 1;
if (gbl_args->appl.pool_per_if)
num_pools = if_count;
if (odp_pool_capability(&pool_capa)) {
- ODPH_ERR("Error: pool capability failed\n");
+ ODPH_ERR("Pool capability failed\n");
return -1;
}
if (num_pools > (int)pool_capa.pkt.max_pools) {
- ODPH_ERR("Error: Too many pools %i\n", num_pools);
+ ODPH_ERR("Too many pools %i\n", num_pools);
return -1;
}
@@ -2063,16 +2140,15 @@ int main(int argc, char *argv[])
printf("\nWarning: number of packets reduced to %u\n\n",
num_pkt);
} else {
- ODPH_ERR("Error: Too many packets %u. Maximum is %u.\n",
+ ODPH_ERR("Too many packets %u. Maximum is %u.\n",
num_pkt, pool_capa.pkt.max_num);
return -1;
}
}
- printf("Packets per pool: %u\n", num_pkt);
- printf("Packet length: %u\n", pkt_len);
- printf("Segment length: %u\n", seg_len);
- printf("\n\n");
+ gbl_args->num_pkt = num_pkt;
+ gbl_args->pkt_len = pkt_len;
+ gbl_args->seg_len = seg_len;
/* Create packet pool */
odp_pool_param_init(&params);
@@ -2085,7 +2161,7 @@ int main(int argc, char *argv[])
pool_tbl[i] = odp_pool_create("packet pool", &params);
if (pool_tbl[i] == ODP_POOL_INVALID) {
- ODPH_ERR("Error: pool create failed %i\n", i);
+ ODPH_ERR("Pool create failed %i\n", i);
exit(EXIT_FAILURE);
}
@@ -2097,13 +2173,13 @@ int main(int argc, char *argv[])
num_vec_pools = 0;
if (gbl_args->appl.vector_mode) {
if (!sched_mode(gbl_args->appl.in_mode)) {
- ODPH_ERR("Error: vector mode only supports scheduler pktin modes (1-3)\n");
+ ODPH_ERR("Vector mode only supports scheduler pktin modes (1-3)\n");
return -1;
}
num_vec_pools = gbl_args->appl.pool_per_if ? if_count : 1;
if (num_vec_pools > (int)pool_capa.vector.max_pools) {
- ODPH_ERR("Error: Too many vector pools %i\n", num_vec_pools);
+ ODPH_ERR("Too many vector pools %i\n", num_vec_pools);
return -1;
}
@@ -2111,15 +2187,14 @@ int main(int argc, char *argv[])
if (set_vector_pool_params(&params, pool_capa))
return -1;
- printf("Vectors per pool: %u\n", params.vector.num);
- printf("Vector size: %u\n", params.vector.max_size);
- printf("\n\n");
+ gbl_args->vector_num = params.vector.num;
+ gbl_args->vector_max_size = params.vector.max_size;
for (i = 0; i < num_vec_pools; i++) {
vec_pool_tbl[i] = odp_pool_create("vector pool", &params);
if (vec_pool_tbl[i] == ODP_POOL_INVALID) {
- ODPH_ERR("Error: vector pool create failed %i\n", i);
+ ODPH_ERR("Vector pool create failed %i\n", i);
exit(EXIT_FAILURE);
}
@@ -2137,7 +2212,7 @@ int main(int argc, char *argv[])
odp_schedule_config_init(&sched_config);
if (odp_schedule_capability(&sched_capa)) {
- ODPH_ERR("Error: schedule capability failed\n");
+ ODPH_ERR("Schedule capability failed\n");
exit(EXIT_FAILURE);
}
@@ -2145,11 +2220,22 @@ int main(int argc, char *argv[])
if (sched_capa.max_flow_id) {
sched_config.max_flow_id = sched_capa.max_flow_id;
} else {
- ODPH_ERR("Error: flow aware mode not supported\n");
+ ODPH_ERR("Flow aware mode not supported\n");
exit(EXIT_FAILURE);
}
}
+ num_groups = gbl_args->appl.num_groups;
+ /* Predefined groups are enabled by default */
+ max_groups = sched_capa.max_groups - 3;
+ if (max_groups > MAX_GROUPS)
+ max_groups = MAX_GROUPS;
+
+ if (num_groups > max_groups) {
+ ODPH_ERR("Too many groups. Maximum is %i.\n", max_groups);
+ exit(EXIT_FAILURE);
+ }
+
odp_schedule_config(&sched_config);
/* Default */
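Note: the "- 3" above reserves room for the predefined groups, presumably ODP_SCHED_GROUP_ALL, ODP_SCHED_GROUP_WORKER and ODP_SCHED_GROUP_CONTROL, which exist by default and count against sched_capa.max_groups.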
@@ -2196,7 +2282,7 @@ int main(int argc, char *argv[])
if (odp_pktio_mac_addr(gbl_args->pktios[i].pktio,
gbl_args->port_eth_addr[i].addr,
ODPH_ETHADDR_LEN) != ODPH_ETHADDR_LEN) {
- ODPH_ERR("Error: interface ethernet address unknown\n");
+ ODPH_ERR("Interface ethernet address unknown\n");
exit(EXIT_FAILURE);
}
@@ -2217,6 +2303,9 @@ int main(int argc, char *argv[])
gbl_args->pktios[i].pktio = ODP_PKTIO_INVALID;
+ /* Print application information */
+ print_info();
+
bind_queues();
init_port_lookup_tbl();
@@ -2245,14 +2334,47 @@ int main(int argc, char *argv[])
thr_common.sync = 1;
for (i = 0; i < num_workers; ++i) {
+ int j;
+ int num_join;
+ int mode = gbl_args->appl.group_mode;
+
odph_thread_param_init(&thr_param[i]);
thr_param[i].start = thr_run_func;
thr_param[i].arg = &gbl_args->thread_args[i];
thr_param[i].thr_type = ODP_THREAD_WORKER;
- /* Round robin threads to groups */
- gbl_args->thread_args[i].num_groups = 1;
- gbl_args->thread_args[i].group[0] = group[i % num_groups];
+ gbl_args->thread_args[i].num_grp_join = 0;
+
+ /* Fill in list of groups to join */
+ if (gbl_args->appl.num_groups > 0) {
+ num_join = if_count < num_groups ? if_count : num_groups;
+
+ if (mode == 0 || mode == 1) {
+ /* All threads join all groups */
+ if (mode == 0)
+ num_join = num_groups;
+
+ gbl_args->thread_args[i].num_grp_join = num_join;
+
+ for (j = 0; j < num_join; j++)
+ gbl_args->thread_args[i].group[j] = group[j];
+ } else {
+ /* Thread joins first groups in round robin */
+ if (num_workers >= num_join) {
+ gbl_args->thread_args[i].num_grp_join = 1;
+ gbl_args->thread_args[i].group[0] = group[i % num_join];
+ } else {
+ int cnt = 0;
+
+ for (j = 0; i + j < num_join; j += num_workers) {
+ gbl_args->thread_args[i].group[cnt] = group[i + j];
+ cnt++;
+ }
+
+ gbl_args->thread_args[i].num_grp_join = cnt;
+ }
+ }
+ }
stats[i] = &gbl_args->thread_args[i].stats;
}
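Note: worked example of the group_mode 2 round robin above: with num_workers = 2 and num_join = 5, worker 0 joins groups 0, 2 and 4 while worker 1 joins groups 1 and 3 (the j += num_workers loop); when num_workers >= num_join, each worker joins exactly one group, group[i % num_join].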
@@ -2261,7 +2383,7 @@ int main(int argc, char *argv[])
thr_param, num_workers);
if (num_thr != num_workers) {
- ODPH_ERR("Error: worker create failed %i\n", num_thr);
+ ODPH_ERR("Worker create failed: %i\n", num_thr);
exit(EXIT_FAILURE);
}
@@ -2275,8 +2397,7 @@ int main(int argc, char *argv[])
pktio = gbl_args->pktios[i].pktio;
ret = odp_pktio_start(pktio);
if (ret) {
- ODPH_ERR("Error: unable to start %s\n",
- gbl_args->appl.if_names[i]);
+ ODPH_ERR("Pktio start failed: %s\n", gbl_args->appl.if_names[i]);
exit(EXIT_FAILURE);
}
}
@@ -2286,8 +2407,7 @@ int main(int argc, char *argv[])
for (i = 0; i < if_count; ++i) {
if (odp_pktio_stop(gbl_args->pktios[i].pktio)) {
- ODPH_ERR("Error: unable to stop %s\n",
- gbl_args->appl.if_names[i]);
+ ODPH_ERR("Pktio stop failed: %s\n", gbl_args->appl.if_names[i]);
exit(EXIT_FAILURE);
}
}
@@ -2299,14 +2419,13 @@ int main(int argc, char *argv[])
/* Master thread waits for other threads to exit */
num_thr = odph_thread_join(gbl_args->thread_tbl, num_workers);
if (num_thr != num_workers) {
- ODPH_ERR("Error: worker join failed %i\n", num_thr);
+ ODPH_ERR("Worker join failed: %i\n", num_thr);
exit(EXIT_FAILURE);
}
for (i = 0; i < if_count; ++i) {
if (odp_pktio_close(gbl_args->pktios[i].pktio)) {
- ODPH_ERR("Error: unable to close %s\n",
- gbl_args->appl.if_names[i]);
+ ODPH_ERR("Pktio close failed: %s\n", gbl_args->appl.if_names[i]);
exit(EXIT_FAILURE);
}
}
@@ -2318,30 +2437,30 @@ int main(int argc, char *argv[])
for (i = 0; i < num_pools; i++) {
if (odp_pool_destroy(pool_tbl[i])) {
- ODPH_ERR("Error: pool destroy failed %i\n", i);
+ ODPH_ERR("Pool destroy failed: %i\n", i);
exit(EXIT_FAILURE);
}
}
for (i = 0; i < num_vec_pools; i++) {
if (odp_pool_destroy(vec_pool_tbl[i])) {
- ODPH_ERR("Error: vector pool destroy failed %i\n", i);
+ ODPH_ERR("Vector pool destroy failed: %i\n", i);
exit(EXIT_FAILURE);
}
}
if (odp_shm_free(shm)) {
- ODPH_ERR("Error: shm free\n");
+ ODPH_ERR("Shm free failed\n");
exit(EXIT_FAILURE);
}
if (odp_term_local()) {
- ODPH_ERR("Error: term local\n");
+ ODPH_ERR("Term local failed\n");
exit(EXIT_FAILURE);
}
if (odp_term_global(instance)) {
- ODPH_ERR("Error: term global\n");
+ ODPH_ERR("Term global failed\n");
exit(EXIT_FAILURE);
}
diff --git a/test/performance/odp_packet_gen.c b/test/performance/odp_packet_gen.c
index 77b6a27a7..1407887e4 100644
--- a/test/performance/odp_packet_gen.c
+++ b/test/performance/odp_packet_gen.c
@@ -30,10 +30,14 @@
#define RAND_16BIT_WORDS 128
/* Max retries to generate random data */
#define MAX_RAND_RETRIES 1000
+/* Maximum pktio index table size */
+#define MAX_PKTIO_INDEXES 1024
/* Minimum number of packets to receive in CI test */
#define MIN_RX_PACKETS_CI 800
+ODP_STATIC_ASSERT(MAX_PKTIOS <= UINT8_MAX, "Interface index must fit into uint8_t\n");
+
typedef struct test_options_t {
uint64_t gap_nsec;
uint64_t quit;
@@ -58,6 +62,7 @@ typedef struct test_options_t {
uint16_t udp_dst;
uint32_t wait_sec;
uint32_t mtu;
+ odp_bool_t promisc_mode;
struct vlan_hdr {
uint16_t tpid;
@@ -125,6 +130,9 @@ typedef struct test_global_t {
} pktio[MAX_PKTIOS];
+ /* Interface lookup table. Table index is pktio_index of the API. */
+ uint8_t if_from_pktio_idx[MAX_PKTIO_INDEXES];
+
} test_global_t;
static test_global_t *test_global;
@@ -173,6 +181,7 @@ static void print_usage(void)
" -d, --ipv4_dst IPv4 destination address. Default: 192.168.0.2\n"
" -o, --udp_src UDP source port. Default: 10000\n"
" -p, --udp_dst UDP destination port. Default: 20000\n"
+ " -P, --promisc_mode Enable promiscuous mode.\n"
" -c, --c_mode <counts> Counter mode for incrementing UDP port numbers.\n"
" Specify the number of port numbers used starting from\n"
" udp_src/udp_dst. Comma-separated (no spaces) list of\n"
@@ -254,6 +263,7 @@ static int parse_options(int argc, char *argv[], test_global_t *global)
{"ipv4_dst", required_argument, NULL, 'd'},
{"udp_src", required_argument, NULL, 'o'},
{"udp_dst", required_argument, NULL, 'p'},
+ {"promisc_mode", no_argument, NULL, 'P'},
{"c_mode", required_argument, NULL, 'c'},
{"mtu", required_argument, NULL, 'M'},
{"quit", required_argument, NULL, 'q'},
@@ -263,7 +273,7 @@ static int parse_options(int argc, char *argv[], test_global_t *global)
{NULL, 0, NULL, 0}
};
- static const char *shortopts = "+i:e:r:t:n:l:L:M:b:x:g:v:s:d:o:p:c:q:u:w:h";
+ static const char *shortopts = "+i:e:r:t:n:l:L:M:b:x:g:v:s:d:o:p:c:q:u:w:Ph";
test_options->num_pktio = 0;
test_options->num_rx = 1;
@@ -275,6 +285,7 @@ static int parse_options(int argc, char *argv[], test_global_t *global)
test_options->bursts = 1;
test_options->gap_nsec = 1000000;
test_options->num_vlan = 0;
+ test_options->promisc_mode = 0;
strncpy(test_options->ipv4_src_s, "192.168.0.1",
sizeof(test_options->ipv4_src_s) - 1);
strncpy(test_options->ipv4_dst_s, "192.168.0.2",
@@ -385,6 +396,9 @@ static int parse_options(int argc, char *argv[], test_global_t *global)
}
test_options->udp_dst = udp_port;
break;
+ case 'P':
+ test_options->promisc_mode = 1;
+ break;
case 'r':
test_options->num_rx = atoi(optarg);
break;
@@ -621,7 +635,7 @@ static int open_pktios(test_global_t *global)
odp_pktout_queue_param_t pktout_param;
char *name;
uint32_t i, seg_len;
- int j;
+ int j, pktio_idx;
test_options_t *test_options = &global->test_options;
uint32_t num_rx = test_options->num_rx;
int num_tx = test_options->num_tx;
@@ -649,6 +663,7 @@ static int open_pktios(test_global_t *global)
printf("%u bytes\n", test_options->mtu);
else
printf("interface default\n");
+ printf(" promisc mode: %s\n", test_options->promisc_mode ? "enabled" : "disabled");
printf(" tx burst size %u\n", test_options->burst_size);
printf(" tx bursts %u\n", test_options->bursts);
printf(" tx burst gap %" PRIu64 " nsec\n",
@@ -729,6 +744,9 @@ static int open_pktios(test_global_t *global)
global->pool = pool;
+ if (odp_pktio_max_index() >= MAX_PKTIO_INDEXES)
+ printf("Warning: max pktio index (%u) is too large\n", odp_pktio_max_index());
+
odp_pktio_param_init(&pktio_param);
pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
pktio_param.out_mode = ODP_PKTOUT_MODE_DIRECT;
@@ -750,6 +768,13 @@ static int open_pktios(test_global_t *global)
odp_pktio_print(pktio);
+ pktio_idx = odp_pktio_index(pktio);
+ if (pktio_idx < 0 || pktio_idx >= MAX_PKTIO_INDEXES) {
+ printf("Error (%s): Bad pktio index: %i\n", name, pktio_idx);
+ return -1;
+ }
+ global->if_from_pktio_idx[pktio_idx] = i;
+
if (odp_pktio_capability(pktio, &pktio_capa)) {
printf("Error (%s): Pktio capability failed.\n", name);
return -1;
@@ -814,6 +839,18 @@ static int open_pktios(test_global_t *global)
odp_pktio_config(pktio, &pktio_config);
+ if (test_options->promisc_mode) {
+ if (!pktio_capa.set_op.op.promisc_mode) {
+ ODPH_ERR("Error (%s): promisc mode set not supported\n", name);
+ return -1;
+ }
+
+ if (odp_pktio_promisc_mode_set(pktio, true)) {
+ ODPH_ERR("Error (%s): promisc mode enable failed\n", name);
+ return -1;
+ }
+ }
+
odp_pktin_queue_param_init(&pktin_param);
pktin_param.queue_param.sched.prio = odp_schedule_default_prio();
@@ -1056,8 +1093,11 @@ static int rx_thread(void *arg)
/* All packets from the same queue are from the same pktio interface */
int index = odp_packet_input_index(odp_packet_from_event(ev[0]));
- if (index >= 0)
- global->stat[thr].pktio[index].rx_packets += num;
+ if (index >= 0) {
+ int if_idx = global->if_from_pktio_idx[index];
+
+ global->stat[thr].pktio[if_idx].rx_packets += num;
+ }
}
odp_event_free_multi(ev, num);
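Note: the lookup table exists because odp_packet_input_index() returns the ODP pktio index, which ranges up to odp_pktio_max_index() and is unrelated to the application's 0..MAX_PKTIOS-1 interface numbering. A minimal sketch of the pattern, using the names from this patch:

  /* open time: remember which application slot owns this ODP pktio index */
  global->if_from_pktio_idx[odp_pktio_index(pktio)] = i;

  /* receive time: translate the index before updating per-interface stats */
  int if_idx = global->if_from_pktio_idx[odp_packet_input_index(pkt)];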
diff --git a/test/performance/odp_sched_latency.c b/test/performance/odp_sched_latency.c
index aae08bfe0..0894a403d 100644
--- a/test/performance/odp_sched_latency.c
+++ b/test/performance/odp_sched_latency.c
@@ -25,6 +25,7 @@
#include <getopt.h>
#define MAX_QUEUES 4096 /**< Maximum number of queues */
+#define MAX_GROUPS 64
#define EVENT_POOL_SIZE (1024 * 1024) /**< Event pool size */
#define TEST_ROUNDS 10 /**< Test rounds for each thread (millions) */
#define MAIN_THREAD 1 /**< Thread ID performing maintenance tasks */
@@ -81,6 +82,8 @@ typedef struct {
unsigned int cpu_count; /**< CPU count */
odp_schedule_sync_t sync_type; /**< Scheduler sync type */
int forward_mode; /**< Event forwarding mode */
+ int num_group;
+ int isolate;
int test_rounds; /**< Number of test rounds (millions) */
int warm_up_rounds; /**< Number of warm-up rounds */
struct {
@@ -117,6 +120,9 @@ typedef struct {
odp_pool_t pool; /**< Pool for allocating test events */
test_args_t args; /**< Parsed command line arguments */
odp_queue_t queue[NUM_PRIOS][MAX_QUEUES]; /**< Scheduled queues */
+
+ odp_schedule_group_t group[NUM_PRIOS][MAX_GROUPS];
+
} test_globals_t;
/**
@@ -343,6 +349,38 @@ static void print_results(test_globals_t *globals)
}
}
+static int join_groups(test_globals_t *globals, int thr)
+{
+ odp_thrmask_t thrmask;
+ odp_schedule_group_t group;
+ int i, num;
+ int num_group = globals->args.num_group;
+
+ if (num_group <= 0)
+ return 0;
+
+ num = num_group;
+ if (globals->args.isolate)
+ num = 2 * num_group;
+
+ odp_thrmask_zero(&thrmask);
+ odp_thrmask_set(&thrmask, thr);
+
+ for (i = 0; i < num; i++) {
+ if (globals->args.isolate)
+ group = globals->group[i % 2][i / 2];
+ else
+ group = globals->group[0][i];
+
+ if (odp_schedule_group_join(group, &thrmask)) {
+ ODPH_ERR("Group join failed %i (thr %i)\n", i, thr);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
/**
* Measure latency of scheduled ODP events
*
@@ -485,6 +523,9 @@ static int run_thread(void *arg ODP_UNUSED)
return -1;
}
+ if (join_groups(globals, thr))
+ return -1;
+
if (thr == MAIN_THREAD) {
args = &globals->args;
@@ -528,6 +569,12 @@ static void usage(void)
" 0: Random (default)\n"
" 1: Incremental\n"
" 2: Use source queue\n"
+ " -g, --num_group <num> Number of schedule groups. Round robins queues into groups.\n"
+ " -1: SCHED_GROUP_WORKER\n"
+ " 0: SCHED_GROUP_ALL (default)\n"
+ " -i, --isolate <mode> Select if shared or isolated groups are used. Ignored when num_group <= 0.\n"
+ " 0: All queues share groups (default)\n"
+ " 1: Separate groups for high and low priority queues. Creates 2xnum_group groups.\n"
" -l, --lo-prio-queues <number> Number of low priority scheduled queues\n"
" -t, --hi-prio-queues <number> Number of high priority scheduled queues\n"
" -m, --lo-prio-events-per-queue <number> Number of events per low priority queue\n"
@@ -563,24 +610,29 @@ static void parse_args(int argc, char *argv[], test_args_t *args)
static const struct option longopts[] = {
{"count", required_argument, NULL, 'c'},
+ {"duration", required_argument, NULL, 'd'},
{"forward-mode", required_argument, NULL, 'f'},
+ {"num_group", required_argument, NULL, 'g'},
+ {"isolate", required_argument, NULL, 'i'},
{"lo-prio-queues", required_argument, NULL, 'l'},
{"hi-prio-queues", required_argument, NULL, 't'},
{"lo-prio-events-per-queue", required_argument, NULL, 'm'},
{"hi-prio-events-per-queue", required_argument, NULL, 'n'},
{"lo-prio-events", required_argument, NULL, 'o'},
{"hi-prio-events", required_argument, NULL, 'p'},
- {"sample-per-prio", no_argument, NULL, 'r'},
{"sync", required_argument, NULL, 's'},
{"warm-up", required_argument, NULL, 'w'},
+ {"sample-per-prio", no_argument, NULL, 'r'},
{"help", no_argument, NULL, 'h'},
{NULL, 0, NULL, 0}
};
- static const char *shortopts = "+c:d:f:s:l:t:m:n:o:p:rw:h";
+ static const char *shortopts = "+c:d:f:g:i:l:t:m:n:o:p:s:w:rh";
args->cpu_count = 1;
args->forward_mode = EVENT_FORWARD_RAND;
+ args->num_group = 0;
+ args->isolate = 0;
args->test_rounds = TEST_ROUNDS;
args->warm_up_rounds = WARM_UP_ROUNDS;
args->sync_type = ODP_SCHED_SYNC_PARALLEL;
@@ -608,6 +660,12 @@ static void parse_args(int argc, char *argv[], test_args_t *args)
case 'f':
args->forward_mode = atoi(optarg);
break;
+ case 'g':
+ args->num_group = atoi(optarg);
+ break;
+ case 'i':
+ args->isolate = atoi(optarg);
+ break;
case 'l':
args->prio[LO_PRIO].queues = atoi(optarg);
break;
@@ -676,6 +734,11 @@ static void parse_args(int argc, char *argv[], test_args_t *args)
usage();
exit(EXIT_FAILURE);
}
+
+ if (args->num_group > MAX_GROUPS) {
+ ODPH_ERR("Too many groups. Max supported %i.\n", MAX_GROUPS);
+ exit(EXIT_FAILURE);
+ }
}
static void randomize_queues(odp_queue_t queues[], uint32_t num, uint64_t *seed)
@@ -697,6 +760,68 @@ static void randomize_queues(odp_queue_t queues[], uint32_t num, uint64_t *seed)
}
}
+static int create_groups(test_globals_t *globals, odp_schedule_group_t group[], int num)
+{
+ odp_schedule_capability_t sched_capa;
+ odp_thrmask_t zeromask;
+ int i, j, max;
+
+ if (num <= 0)
+ return 0;
+
+ if (odp_schedule_capability(&sched_capa)) {
+ ODPH_ERR("Schedule capability failed\n");
+ return 0;
+ }
+
+ max = sched_capa.max_groups - 3;
+ if (num > max) {
+ printf("Too many schedule groups %i (max %u)\n", num, max);
+ return 0;
+ }
+
+ for (i = 0; i < NUM_PRIOS; i++)
+ for (j = 0; j < MAX_GROUPS; j++)
+ globals->group[i][j] = ODP_SCHED_GROUP_INVALID;
+
+ odp_thrmask_zero(&zeromask);
+
+ for (i = 0; i < num; i++) {
+ group[i] = odp_schedule_group_create("test_group", &zeromask);
+
+ if (group[i] == ODP_SCHED_GROUP_INVALID) {
+ ODPH_ERR("Group create failed %i\n", i);
+ break;
+ }
+
+ if (globals->args.isolate) {
+ globals->group[i % 2][i / 2] = group[i];
+ } else {
+ globals->group[0][i] = group[i];
+ globals->group[1][i] = group[i];
+ }
+ }
+
+ return i;
+}
+
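Note: with --isolate, the assignment group[i % 2][i / 2] above interleaves the created groups between the two priority rows: tot_group = 4 fills group[0][0], group[1][0], group[0][1], group[1][1], so high and low priority queues get disjoint groups. Without isolation both rows alias the same groups and the two priorities share them.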
+static int destroy_groups(odp_schedule_group_t group[], int num)
+{
+ int i;
+
+ if (num <= 0)
+ return 0;
+
+ for (i = 0; i < num; i++) {
+ if (odp_schedule_group_destroy(group[i])) {
+ ODPH_ERR("Group destroy failed %i\n", i);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
/**
* Test main function
*/
@@ -705,21 +830,23 @@ int main(int argc, char *argv[])
odp_instance_t instance;
odp_init_t init_param;
odph_helper_options_t helper_options;
- odph_thread_t *thread_tbl;
odph_thread_common_param_t thr_common;
odph_thread_param_t thr_param;
odp_cpumask_t cpumask;
- odp_pool_t pool;
odp_pool_capability_t pool_capa;
odp_pool_param_t params;
- odp_shm_t shm;
test_globals_t *globals;
test_args_t args;
char cpumaskstr[ODP_CPUMASK_STR_SIZE];
uint32_t pool_size;
- int i, j;
- int ret = 0;
+ int i, j, ret;
+ int num_group, tot_group;
+ odp_schedule_group_t group[2 * MAX_GROUPS];
+ odph_thread_t thread_tbl[ODP_THREAD_COUNT_MAX];
+ int err = 0;
int num_workers = 0;
+ odp_shm_t shm = ODP_SHM_INVALID;
+ odp_pool_t pool = ODP_POOL_INVALID;
printf("\nODP scheduling latency benchmark starts\n\n");
@@ -739,7 +866,7 @@ int main(int argc, char *argv[])
/* ODP global init */
if (odp_init_global(&instance, &init_param, NULL)) {
ODPH_ERR("ODP global init failed.\n");
- return -1;
+ exit(EXIT_FAILURE);
}
/*
@@ -748,11 +875,17 @@ int main(int argc, char *argv[])
*/
if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
ODPH_ERR("ODP global init failed.\n");
- return -1;
+ exit(EXIT_FAILURE);
}
odp_sys_info_print();
+ num_group = args.num_group;
+
+ tot_group = 0;
+ if (num_group > 0)
+ tot_group = args.isolate ? 2 * num_group : num_group;
+
/* Get default worker cpumask */
if (args.cpu_count)
num_workers = args.cpu_count;
@@ -762,22 +895,22 @@ int main(int argc, char *argv[])
(void)odp_cpumask_to_str(&cpumask, cpumaskstr, sizeof(cpumaskstr));
- printf("CPU mask info:\n");
- printf(" Worker threads: %i\n", num_workers);
- printf(" First CPU: %i\n", odp_cpumask_first(&cpumask));
- printf(" CPU mask: %s\n", cpumaskstr);
-
- thread_tbl = calloc(sizeof(odph_thread_t), num_workers);
- if (!thread_tbl) {
- ODPH_ERR("no memory for thread_tbl\n");
- return -1;
- }
-
- shm = odp_shm_reserve("test_globals",
- sizeof(test_globals_t), ODP_CACHE_LINE_SIZE, 0);
+ printf("Test options:\n");
+ printf(" Worker threads: %i\n", num_workers);
+ printf(" First CPU: %i\n", odp_cpumask_first(&cpumask));
+ printf(" CPU mask: %s\n", cpumaskstr);
+ printf(" Test rounds: %iM\n", args.test_rounds);
+ printf(" Warm-up rounds: %i\n", args.warm_up_rounds);
+ printf(" Isolated groups: %i\n", args.isolate);
+ printf(" Number of groups: %i\n", num_group);
+ printf(" Created groups: %i\n", tot_group);
+ printf("\n");
+
+ shm = odp_shm_reserve("test_globals", sizeof(test_globals_t), ODP_CACHE_LINE_SIZE, 0);
if (shm == ODP_SHM_INVALID) {
ODPH_ERR("Shared memory reserve failed.\n");
- return -1;
+ err = -1;
+ goto error;
}
globals = odp_shm_addr(shm);
@@ -791,7 +924,8 @@ int main(int argc, char *argv[])
*/
if (odp_pool_capability(&pool_capa)) {
ODPH_ERR("pool capa failed\n");
- return -1;
+ err = -1;
+ goto error;
}
pool_size = EVENT_POOL_SIZE;
@@ -808,10 +942,20 @@ int main(int argc, char *argv[])
if (pool == ODP_POOL_INVALID) {
ODPH_ERR("Pool create failed.\n");
- return -1;
+ err = -1;
+ goto error;
}
globals->pool = pool;
+ /* Create groups */
+ ret = create_groups(globals, group, tot_group);
+ if (ret != tot_group) {
+ ODPH_ERR("Group create failed.\n");
+ tot_group = ret;
+ err = -1;
+ goto error;
+ }
+
/*
* Create queues for schedule test
*/
@@ -819,8 +963,13 @@ int main(int argc, char *argv[])
char name[] = "sched_XX_YY";
odp_queue_t queue;
odp_queue_param_t param;
+ odp_schedule_group_t grp;
int prio;
+ grp = ODP_SCHED_GROUP_ALL;
+ if (num_group < 0)
+ grp = ODP_SCHED_GROUP_WORKER;
+
if (i == HI_PRIO)
prio = odp_schedule_max_prio();
else
@@ -833,17 +982,22 @@ int main(int argc, char *argv[])
param.type = ODP_QUEUE_TYPE_SCHED;
param.sched.prio = prio;
param.sched.sync = args.sync_type;
- param.sched.group = ODP_SCHED_GROUP_ALL;
for (j = 0; j < args.prio[i].queues; j++) {
name[9] = '0' + j / 10;
name[10] = '0' + j - 10 * (j / 10);
+ /* Round robin queues into groups */
+ if (num_group > 0)
+ grp = globals->group[i][j % num_group];
+
+ param.sched.group = grp;
+
queue = odp_queue_create(name, &param);
if (queue == ODP_QUEUE_INVALID) {
ODPH_ERR("Scheduled queue create failed.\n");
- return -1;
+ exit(EXIT_FAILURE);
}
globals->queue[i][j] = queue;
@@ -859,6 +1013,8 @@ int main(int argc, char *argv[])
odp_barrier_init(&globals->barrier, num_workers);
/* Create and launch worker threads */
+ memset(thread_tbl, 0, sizeof(thread_tbl));
+
odph_thread_common_param_init(&thr_common);
thr_common.instance = instance;
thr_common.cpumask = &cpumask;
@@ -873,7 +1029,6 @@ int main(int argc, char *argv[])
/* Wait for worker threads to terminate */
odph_thread_join(thread_tbl, num_workers);
- free(thread_tbl);
printf("ODP scheduling latency test complete\n\n");
@@ -885,14 +1040,36 @@ int main(int argc, char *argv[])
for (j = 0; j < num_queues; j++) {
queue = globals->queue[i][j];
- ret += odp_queue_destroy(queue);
+ if (odp_queue_destroy(queue)) {
+ ODPH_ERR("Queue destroy failed [%i][%i]\n", i, j);
+ err = -1;
+ break;
+ }
+ }
+ }
+
+error:
+ if (destroy_groups(group, tot_group)) {
+ ODPH_ERR("Group destroy failed\n");
+ err = -1;
+ }
+
+ if (pool != ODP_POOL_INVALID) {
+ if (odp_pool_destroy(pool)) {
+ ODPH_ERR("Pool destroy failed\n");
+ err = -1;
+ }
+ }
+
+ if (shm != ODP_SHM_INVALID) {
+ if (odp_shm_free(shm)) {
+ ODPH_ERR("SHM destroy failed\n");
+ err = -1;
}
}
- ret += odp_shm_free(shm);
- ret += odp_pool_destroy(pool);
- ret += odp_term_local();
- ret += odp_term_global(instance);
+ err += odp_term_local();
+ err += odp_term_global(instance);
- return ret;
+ return err;
}
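Note: the reworked error handling relies on sentinel initialization: shm and pool start as ODP_SHM_INVALID / ODP_POOL_INVALID and thread_tbl became a fixed ODP_THREAD_COUNT_MAX array, so the single error label frees only what was actually created and still falls through to odp_term_local() / odp_term_global().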
diff --git a/test/validation/api/buffer/buffer.c b/test/validation/api/buffer/buffer.c
index 07b671228..19f39e1d3 100644
--- a/test/validation/api/buffer/buffer.c
+++ b/test/validation/api/buffer/buffer.c
@@ -1,5 +1,6 @@
/* Copyright (c) 2014-2018, Linaro Limited
* Copyright (c) 2019, Nokia
+ * Copyright (c) 2022, Marvell
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -51,7 +52,7 @@ static int buffer_suite_init(void)
return 0;
}
-static void buffer_test_pool_alloc_free(void)
+static void test_pool_alloc_free(const odp_pool_param_t *param)
{
odp_pool_t pool;
odp_event_t ev;
@@ -59,14 +60,15 @@ static void buffer_test_pool_alloc_free(void)
uint32_t num_buf = 0;
void *addr;
odp_event_subtype_t subtype;
- uint32_t num = default_param.buf.num;
- uint32_t size = default_param.buf.size;
- uint32_t align = default_param.buf.align;
+ uint32_t num = param->buf.num;
+ uint32_t size = param->buf.size;
+ uint32_t align = param->buf.align;
+
odp_buffer_t buffer[num];
odp_bool_t wrong_type = false, wrong_subtype = false;
odp_bool_t wrong_size = false, wrong_align = false;
- pool = odp_pool_create("default pool", &default_param);
+ pool = odp_pool_create("default pool", param);
CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
odp_pool_print(pool);
@@ -123,7 +125,7 @@ static void buffer_test_pool_alloc_free(void)
CU_ASSERT(odp_pool_destroy(pool) == 0);
}
-static void buffer_test_pool_alloc_free_multi(void)
+static void test_pool_alloc_free_multi(const odp_pool_param_t *param)
{
odp_pool_t pool;
uint32_t i, num_buf;
@@ -131,14 +133,15 @@ static void buffer_test_pool_alloc_free_multi(void)
odp_event_t ev;
void *addr;
odp_event_subtype_t subtype;
- uint32_t num = default_param.buf.num;
- uint32_t size = default_param.buf.size;
- uint32_t align = default_param.buf.align;
+ uint32_t num = param->buf.num;
+ uint32_t size = param->buf.size;
+ uint32_t align = param->buf.align;
+
odp_buffer_t buffer[num + BURST];
odp_bool_t wrong_type = false, wrong_subtype = false;
odp_bool_t wrong_size = false, wrong_align = false;
- pool = odp_pool_create("default pool", &default_param);
+ pool = odp_pool_create("default pool", param);
CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
ret = 0;
@@ -203,16 +206,14 @@ static void buffer_test_pool_alloc_free_multi(void)
CU_ASSERT(odp_pool_destroy(pool) == 0);
}
-static void buffer_test_pool_single_pool(void)
+static void test_pool_single_pool(odp_pool_param_t *param)
{
odp_pool_t pool;
odp_buffer_t buffer;
- odp_pool_param_t param;
- memcpy(&param, &default_param, sizeof(odp_pool_param_t));
- param.buf.num = 1;
+ param->buf.num = 1;
- pool = odp_pool_create("pool 0", &param);
+ pool = odp_pool_create("pool 0", param);
CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
odp_pool_print(pool);
@@ -246,23 +247,21 @@ static void buffer_test_pool_single_pool(void)
CU_ASSERT(odp_pool_destroy(pool) == 0);
}
-static void buffer_test_pool_two_pools(void)
+static void test_pool_two_pools(odp_pool_param_t *param)
{
odp_pool_t pool0, pool1;
odp_buffer_t buf, buffer[2];
- odp_pool_param_t param;
int num = 0;
if (pool_capa.buf.max_pools < 2)
return;
- memcpy(&param, &default_param, sizeof(odp_pool_param_t));
- param.buf.num = 1;
+ param->buf.num = 1;
- pool0 = odp_pool_create("pool 0", &param);
+ pool0 = odp_pool_create("pool 0", param);
CU_ASSERT_FATAL(pool0 != ODP_POOL_INVALID);
- pool1 = odp_pool_create("pool 1", &param);
+ pool1 = odp_pool_create("pool 1", param);
CU_ASSERT_FATAL(pool1 != ODP_POOL_INVALID);
buffer[0] = odp_buffer_alloc(pool0);
@@ -309,15 +308,14 @@ static void buffer_test_pool_two_pools(void)
CU_ASSERT(odp_pool_destroy(pool1) == 0);
}
-static void buffer_test_pool_max_pools(void)
+static void test_pool_max_pools(odp_pool_param_t *param)
{
- odp_pool_param_t param;
uint32_t i, num_pool, num_buf;
void *addr;
odp_event_t ev;
uint32_t max_pools = pool_capa.buf.max_pools;
- uint32_t size = default_param.buf.size;
- uint32_t align = default_param.buf.align;
+ uint32_t size = param->buf.size;
+ uint32_t align = param->buf.align;
odp_pool_t pool[max_pools];
odp_buffer_t buffer[max_pools];
@@ -325,11 +323,10 @@ static void buffer_test_pool_max_pools(void)
printf("\n Creating %u pools\n", max_pools);
- memcpy(&param, &default_param, sizeof(odp_pool_param_t));
- param.buf.num = 1;
+ param->buf.num = 1;
for (i = 0; i < max_pools; i++) {
- pool[i] = odp_pool_create(NULL, &param);
+ pool[i] = odp_pool_create(NULL, param);
if (pool[i] == ODP_POOL_INVALID)
break;
@@ -370,12 +367,146 @@ static void buffer_test_pool_max_pools(void)
CU_ASSERT(odp_pool_destroy(pool[i]) == 0);
}
+static void buffer_test_pool_alloc_free(void)
+{
+ test_pool_alloc_free(&default_param);
+}
+
+static void buffer_test_pool_alloc_free_min_cache(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ param.buf.cache_size = pool_capa.buf.min_cache_size;
+ test_pool_alloc_free(&param);
+}
+
+static void buffer_test_pool_alloc_free_max_cache(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ param.buf.cache_size = pool_capa.buf.max_cache_size;
+ test_pool_alloc_free(&param);
+}
+
+static void buffer_test_pool_alloc_free_multi(void)
+{
+ test_pool_alloc_free_multi(&default_param);
+}
+
+static void buffer_test_pool_alloc_free_multi_min_cache(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ param.buf.cache_size = pool_capa.buf.min_cache_size;
+ test_pool_alloc_free_multi(&param);
+}
+
+static void buffer_test_pool_alloc_free_multi_max_cache(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ param.buf.cache_size = pool_capa.buf.max_cache_size;
+ test_pool_alloc_free_multi(&param);
+}
+
+static void buffer_test_pool_single_pool(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ test_pool_single_pool(&param);
+}
+
+static void buffer_test_pool_single_pool_min_cache(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ param.buf.cache_size = pool_capa.buf.min_cache_size;
+ test_pool_single_pool(&param);
+}
+
+static void buffer_test_pool_single_pool_max_cache(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ param.buf.cache_size = pool_capa.buf.max_cache_size;
+ test_pool_single_pool(&param);
+}
+
+static void buffer_test_pool_two_pools(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ test_pool_two_pools(&param);
+}
+
+static void buffer_test_pool_two_pools_min_cache(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ param.buf.cache_size = pool_capa.buf.min_cache_size;
+ test_pool_two_pools(&param);
+}
+
+static void buffer_test_pool_two_pools_max_cache(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ param.buf.cache_size = pool_capa.buf.max_cache_size;
+ test_pool_two_pools(&param);
+}
+
+static void buffer_test_pool_max_pools(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ test_pool_max_pools(&param);
+}
+
+static void buffer_test_pool_max_pools_min_cache(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ param.buf.cache_size = pool_capa.buf.min_cache_size;
+ test_pool_max_pools(&param);
+}
+
+static void buffer_test_pool_max_pools_max_cache(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ param.buf.cache_size = pool_capa.buf.max_cache_size;
+ test_pool_max_pools(&param);
+}
+
odp_testinfo_t buffer_suite[] = {
ODP_TEST_INFO(buffer_test_pool_alloc_free),
+ ODP_TEST_INFO(buffer_test_pool_alloc_free_min_cache),
+ ODP_TEST_INFO(buffer_test_pool_alloc_free_max_cache),
ODP_TEST_INFO(buffer_test_pool_alloc_free_multi),
+ ODP_TEST_INFO(buffer_test_pool_alloc_free_multi_min_cache),
+ ODP_TEST_INFO(buffer_test_pool_alloc_free_multi_max_cache),
ODP_TEST_INFO(buffer_test_pool_single_pool),
+ ODP_TEST_INFO(buffer_test_pool_single_pool_min_cache),
+ ODP_TEST_INFO(buffer_test_pool_single_pool_max_cache),
ODP_TEST_INFO(buffer_test_pool_two_pools),
+ ODP_TEST_INFO(buffer_test_pool_two_pools_min_cache),
+ ODP_TEST_INFO(buffer_test_pool_two_pools_max_cache),
ODP_TEST_INFO(buffer_test_pool_max_pools),
+ ODP_TEST_INFO(buffer_test_pool_max_pools_min_cache),
+ ODP_TEST_INFO(buffer_test_pool_max_pools_max_cache),
ODP_TEST_INFO_NULL,
};
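
All of the *_min_cache and *_max_cache wrappers above share one pattern: copy the default pool parameters, override buf.cache_size with a bound reported by odp_pool_capability(), and reuse the common test body. A minimal standalone sketch of that pattern; the pool name and buffer geometry here are illustrative, not taken from the patch:

    #include <odp_api.h>

    /* Sketch: run one alloc/free cycle with a given per-thread cache size.
     * Pool name and buffer geometry below are illustrative only. */
    static int run_with_cache_size(uint32_t cache_size)
    {
        odp_pool_param_t param;
        odp_pool_t pool;
        odp_buffer_t buf;

        odp_pool_param_init(&param);
        param.type = ODP_POOL_BUFFER;
        param.buf.num = 64;
        param.buf.size = 256;
        param.buf.cache_size = cache_size; /* the knob the new tests vary */

        pool = odp_pool_create("cache demo", &param);
        if (pool == ODP_POOL_INVALID)
            return -1;

        buf = odp_buffer_alloc(pool);
        if (buf != ODP_BUFFER_INVALID)
            odp_buffer_free(buf);

        return odp_pool_destroy(pool);
    }

    /* Exercise both ends of the supported cache size range */
    static int run_cache_bounds(void)
    {
        odp_pool_capability_t capa;

        if (odp_pool_capability(&capa))
            return -1;

        if (run_with_cache_size(capa.buf.min_cache_size))
            return -1;

        return run_with_cache_size(capa.buf.max_cache_size);
    }
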
diff --git a/test/validation/api/classification/odp_classification_common.c b/test/validation/api/classification/odp_classification_common.c
index dd8373b04..8eac41a1e 100644
--- a/test/validation/api/classification/odp_classification_common.c
+++ b/test/validation/api/classification/odp_classification_common.c
@@ -417,7 +417,7 @@ odp_packet_t create_packet(cls_packet_info_t pkt_info)
ip->tot_len = odp_cpu_to_be_16(l3_len);
ip->ttl = DEFAULT_TTL;
ip->frag_offset = 0;
- ip->tos = 0;
+ ip->tos = pkt_info.dscp << ODPH_IP_TOS_DSCP_SHIFT;
odp_packet_has_ipv4_set(pkt, 1);
odph_ipv4_csum_update(pkt);
} else {
@@ -425,7 +425,8 @@ odp_packet_t create_packet(cls_packet_info_t pkt_info)
odp_packet_has_ipv6_set(pkt, 1);
ipv6 = (odph_ipv6hdr_t *)odp_packet_l3_ptr(pkt, NULL);
version = ODPH_IPV6 << ODPH_IPV6HDR_VERSION_SHIFT;
- tc = DEFAULT_TOS << ODPH_IPV6HDR_TC_SHIFT;
+ tc = pkt_info.dscp << ODPH_IP_TOS_DSCP_SHIFT;
+ tc <<= ODPH_IPV6HDR_TC_SHIFT;
flow = seqno << ODPH_IPV6HDR_FLOW_LABEL_SHIFT;
ver_tc_flow = version | tc | flow;
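
The tos/tc changes above place the caller-supplied DSCP into the same bit positions in both IP versions: DSCP is the upper six bits of the IPv4 TOS octet, and that octet value in turn forms the traffic class bits of the first IPv6 word. A plain-shift sketch of the layout (the test code uses the ODPH_* shift macros rather than the literal shift counts written out here):

    #include <stdint.h>

    /* DSCP occupies TOS bits 7..2; bits 1..0 are ECN (RFC 2474) */
    static uint8_t ipv4_tos_from_dscp(uint8_t dscp)
    {
        return (uint8_t)(dscp << 2);
    }

    /* First 32-bit IPv6 word: version (bits 31..28) | traffic class
     * (bits 27..20) | flow label (bits 19..0). The TOS octet value
     * doubles as the IPv6 traffic class. */
    static uint32_t ipv6_first_word(uint8_t dscp, uint32_t flow_label)
    {
        uint32_t version = 6u << 28;
        uint32_t tc = (uint32_t)ipv4_tos_from_dscp(dscp) << 20;

        return version | tc | (flow_label & 0xfffffu);
    }
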
diff --git a/test/validation/api/classification/odp_classification_test_pmr.c b/test/validation/api/classification/odp_classification_test_pmr.c
index 068e2112c..e69f077a2 100644
--- a/test/validation/api/classification/odp_classification_test_pmr.c
+++ b/test/validation/api/classification/odp_classification_test_pmr.c
@@ -13,6 +13,8 @@
#define MAX_NUM_UDP 4
#define MARK_IP 1
#define MARK_UDP 2
+#define TEST_IPV4 false
+#define TEST_IPV6 true
static odp_pool_t pkt_pool;
/** sequence number of IP packets */
@@ -560,7 +562,7 @@ static void classification_test_pmr_term_udp_sport(void)
test_pmr(&pmr_param, pkt, NO_MATCH);
}
-static void classification_test_pmr_term_ipproto(void)
+static void classification_test_pmr_term_proto(odp_bool_t ipv6)
{
odp_packet_t pkt;
uint8_t val;
@@ -578,18 +580,73 @@ static void classification_test_pmr_term_ipproto(void)
pmr_param.val_sz = sizeof(val);
pkt_info = default_pkt_info;
+ pkt_info.ipv6 = ipv6;
pkt_info.l4_type = CLS_PKT_L4_UDP;
pkt = create_packet(pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
test_pmr(&pmr_param, pkt, MATCH);
- pkt = create_packet(default_pkt_info);
+ pkt_info.l4_type = CLS_PKT_L4_TCP;
+ pkt = create_packet(pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
test_pmr(&pmr_param, pkt, NO_MATCH);
}
+static void classification_test_pmr_term_ipv4_proto(void)
+{
+ classification_test_pmr_term_proto(TEST_IPV4);
+}
+
+static void classification_test_pmr_term_ipv6_proto(void)
+{
+ classification_test_pmr_term_proto(TEST_IPV6);
+}
+
+static void classification_test_pmr_term_dscp(odp_bool_t ipv6)
+{
+ odp_packet_t pkt;
+ uint8_t val;
+ uint8_t mask;
+ odp_pmr_param_t pmr_param;
+ cls_packet_info_t pkt_info;
+
+ val = DSCP_CLASS4;
+ mask = 0x3f;
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_IP_DSCP;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ pkt_info = default_pkt_info;
+ pkt_info.ipv6 = ipv6;
+ pkt_info.l4_type = CLS_PKT_L4_UDP;
+ pkt_info.dscp = DSCP_CLASS4;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ test_pmr(&pmr_param, pkt, MATCH);
+
+ pkt_info.dscp = 0;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+}
+
+static void classification_test_pmr_term_ipv4_dscp(void)
+{
+ classification_test_pmr_term_dscp(TEST_IPV4);
+}
+
+static void classification_test_pmr_term_ipv6_dscp(void)
+{
+ classification_test_pmr_term_dscp(TEST_IPV6);
+}
+
static void classification_test_pmr_term_dmac(void)
{
odp_packet_t pkt;
@@ -797,6 +854,44 @@ static void classification_test_pmr_term_vlan_id_x(void)
test_pmr(&pmr_param, pkt, NO_MATCH);
}
+static void classification_test_pmr_term_vlan_pcp_0(void)
+{
+ odp_packet_t pkt;
+ uint8_t val;
+ uint8_t mask;
+ uint16_t tci;
+ odp_pmr_param_t pmr_param;
+ odph_ethhdr_t *eth;
+ odph_vlanhdr_t *vlan_0;
+ cls_packet_info_t pkt_info;
+
+ val = 5;
+ mask = 0x7;
+ tci = ((uint16_t)val) << ODPH_VLANHDR_PCP_SHIFT;
+ tci |= 0x123;
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_VLAN_PCP_0;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ pkt_info = default_pkt_info;
+ pkt_info.vlan = true;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ vlan_0 = (odph_vlanhdr_t *)(eth + 1);
+ vlan_0->tci = odp_cpu_to_be_16(tci);
+
+ test_pmr(&pmr_param, pkt, MATCH);
+
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+}
+
static void classification_test_pmr_term_eth_type_0(void)
{
odp_packet_t pkt;
@@ -1803,6 +1898,11 @@ static int check_capa_ip_proto(void)
return cls_capa.supported_terms.bit.ip_proto;
}
+static int check_capa_ip_dscp(void)
+{
+ return cls_capa.supported_terms.bit.ip_dscp;
+}
+
static int check_capa_dmac(void)
{
return cls_capa.supported_terms.bit.dmac;
@@ -1843,6 +1943,11 @@ static int check_capa_vlan_id_x(void)
return cls_capa.supported_terms.bit.vlan_id_x;
}
+static int check_capa_vlan_pcp_0(void)
+{
+ return cls_capa.supported_terms.bit.vlan_pcp_0;
+}
+
static int check_capa_ethtype_0(void)
{
return cls_capa.supported_terms.bit.ethtype_0;
@@ -1945,8 +2050,14 @@ odp_testinfo_t classification_suite_pmr[] = {
check_capa_icmp_code),
ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_icmp_id,
check_capa_icmp_id),
- ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_ipproto,
+ ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_ipv4_proto,
+ check_capa_ip_proto),
+ ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_ipv6_proto,
check_capa_ip_proto),
+ ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_ipv4_dscp,
+ check_capa_ip_dscp),
+ ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_ipv6_dscp,
+ check_capa_ip_dscp),
ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_dmac,
check_capa_dmac),
ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_pool_set,
@@ -1967,6 +2078,8 @@ odp_testinfo_t classification_suite_pmr[] = {
check_capa_vlan_id_0),
ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_vlan_id_x,
check_capa_vlan_id_x),
+ ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_vlan_pcp_0,
+ check_capa_vlan_pcp_0),
ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_eth_type_0,
check_capa_ethtype_0),
ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_eth_type_x,
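
The new DSCP and PCP cases reuse the generic value/mask PMR machinery. For context, this is roughly how an application would install the same DSCP match between two classes of service, guarded by the capability bit the suite checks. The sketch is an assumption-laden illustration, not code from the patch: the CoS handles are presumed to exist, and 0x20 (CS4) is just an example value.

    #include <odp_api.h>

    /* Sketch: steer packets with a given DSCP from src_cos to dst_cos.
     * src_cos and dst_cos are assumed to have been created elsewhere. */
    static odp_pmr_t add_dscp_rule(odp_cos_t src_cos, odp_cos_t dst_cos)
    {
        odp_cls_capability_t capa;
        odp_pmr_param_t pmr_param;
        uint8_t val = 0x20;  /* illustrative DSCP value (CS4) */
        uint8_t mask = 0x3f; /* match all six DSCP bits */

        if (odp_cls_capability(&capa) || !capa.supported_terms.bit.ip_dscp)
            return ODP_PMR_INVALID; /* term not supported */

        odp_cls_pmr_param_init(&pmr_param);
        pmr_param.term = ODP_PMR_IP_DSCP;
        pmr_param.match.value = &val;
        pmr_param.match.mask = &mask;
        pmr_param.val_sz = sizeof(val);

        return odp_cls_pmr_create(&pmr_param, 1, src_cos, dst_cos);
    }
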
diff --git a/test/validation/api/classification/odp_classification_testsuites.h b/test/validation/api/classification/odp_classification_testsuites.h
index 6b00e138b..592f37cd6 100644
--- a/test/validation/api/classification/odp_classification_testsuites.h
+++ b/test/validation/api/classification/odp_classification_testsuites.h
@@ -27,7 +27,8 @@ typedef struct cls_packet_info {
bool vlan_qinq;
odp_atomic_u32_t *seq;
cls_packet_l4_info l4_type;
- bool ipv6;
+ odp_bool_t ipv6;
+ uint8_t dscp;
uint32_t len;
} cls_packet_info_t;
diff --git a/test/validation/api/crypto/odp_crypto_test_inp.c b/test/validation/api/crypto/odp_crypto_test_inp.c
index 97f721dd5..e3eff88b9 100644
--- a/test/validation/api/crypto/odp_crypto_test_inp.c
+++ b/test/validation/api/crypto/odp_crypto_test_inp.c
@@ -353,9 +353,6 @@ static int alg_packet_op(odp_packet_t pkt,
return rc;
}
- if (!result.ok)
- CU_ASSERT(odp_packet_has_error(pkt));
-
*ok = result.ok;
return 0;
diff --git a/test/validation/api/pktio/pktio.c b/test/validation/api/pktio/pktio.c
index f6408c788..d3ce41a2c 100644
--- a/test/validation/api/pktio/pktio.c
+++ b/test/validation/api/pktio/pktio.c
@@ -3078,6 +3078,7 @@ static void pktio_test_pktin_ts(void)
ns1 = 100;
ts = odp_pktio_ts_from_ns(pktio_tx, ns1);
ns2 = odp_time_to_ns(ts);
+ CU_ASSERT_FATAL(res != 0);
res_ns = ODP_TIME_SEC_IN_NS / res;
if (ODP_TIME_SEC_IN_NS % res)
res_ns++;
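
The added assertion protects the division that follows it: res_ns is the timestamp resolution period rounded up to a whole nanosecond, which is a plain ceiling division. The same rounding written as a standalone helper (names here are illustrative):

    #include <stdint.h>

    /* Smallest nanosecond period that covers one tick at freq_hz.
     * Equivalent to the divide-then-round-up done in the test. */
    static uint64_t period_ns_ceil(uint64_t sec_in_ns, uint64_t freq_hz)
    {
        /* freq_hz must be non-zero, as the added assertion enforces */
        return (sec_in_ns + freq_hz - 1) / freq_hz;
    }
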
diff --git a/test/validation/api/scheduler/scheduler.c b/test/validation/api/scheduler/scheduler.c
index 0dc2db360..490ac9fea 100644
--- a/test/validation/api/scheduler/scheduler.c
+++ b/test/validation/api/scheduler/scheduler.c
@@ -2297,6 +2297,57 @@ static void scheduler_test_ordered_lock(void)
CU_ASSERT(odp_queue_destroy(queue) == 0);
}
+static void enqueue_event(odp_queue_t queue)
+{
+ odp_pool_t pool;
+ odp_buffer_t buf;
+ odp_event_t ev;
+ int ret;
+
+ pool = odp_pool_lookup(MSG_POOL_NAME);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ buf = odp_buffer_alloc(pool);
+ CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+ ev = odp_buffer_to_event(buf);
+ ret = odp_queue_enq(queue, ev);
+ CU_ASSERT_FATAL(ret == 0);
+}
+
+static void scheduler_test_order_wait_1_thread(void)
+{
+ odp_schedule_capability_t sched_capa;
+ odp_queue_param_t queue_param;
+ odp_queue_t queue;
+ odp_event_t ev;
+
+ CU_ASSERT(!odp_schedule_capability(&sched_capa));
+
+ sched_queue_param_init(&queue_param);
+ queue_param.sched.sync = ODP_SCHED_SYNC_ORDERED;
+ queue = odp_queue_create("ordered queue", &queue_param);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+ CU_ASSERT_FATAL(odp_queue_type(queue) == ODP_QUEUE_TYPE_SCHED);
+ CU_ASSERT_FATAL(odp_queue_sched_type(queue) == ODP_SCHED_SYNC_ORDERED);
+
+ /* Set up an ordered scheduling context */
+ enqueue_event(queue);
+ ev = odp_schedule(NULL, ODP_SCHED_WAIT);
+ CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
+ odp_event_free(ev);
+
+	/* Reference the capability field so the build fails if it does not exist */
+	printf(" (capa=%d) ", sched_capa.order_wait);
+ /* Check that order wait does not get stuck or crash */
+ odp_schedule_order_wait();
+
+ /* Release the context */
+ ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+ CU_ASSERT(ev == ODP_EVENT_INVALID);
+
+ CU_ASSERT(odp_queue_destroy(queue) == 0);
+}
+
static int sched_and_plain_thread(void *arg)
{
odp_event_t ev1, ev2;
@@ -3226,6 +3277,7 @@ odp_testinfo_t scheduler_basic_suite[] = {
ODP_TEST_INFO(scheduler_test_pause_resume),
ODP_TEST_INFO(scheduler_test_pause_enqueue),
ODP_TEST_INFO(scheduler_test_ordered_lock),
+ ODP_TEST_INFO(scheduler_test_order_wait_1_thread),
ODP_TEST_INFO_CONDITIONAL(scheduler_test_flow_aware,
check_flow_aware_support),
ODP_TEST_INFO(scheduler_test_parallel),
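
scheduler_test_order_wait_1_thread exercises odp_schedule_order_wait() inside an ordered scheduling context on a single thread. In application code the call is used from within such a context to block until the calling thread's events are next in queue order, so order-sensitive work can follow. A minimal worker-loop sketch under that assumption; queue creation and the exit condition are omitted:

    #include <odp_api.h>

    /* Sketch: worker consuming from an ODP_SCHED_SYNC_ORDERED queue.
     * Setup, teardown and the loop exit condition are omitted. */
    static void ordered_worker(void)
    {
        odp_event_t ev;

        while (1) {
            ev = odp_schedule(NULL, ODP_SCHED_WAIT);
            if (ev == ODP_EVENT_INVALID)
                continue;

            /* Order-free, parallel per-event work can run here */

            /* Block until this thread's events are next in order */
            odp_schedule_order_wait();

            /* Order-sensitive side effects (e.g. transmit) go here */

            odp_event_free(ev);
        }
    }
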
diff --git a/test/validation/api/timer/timer.c b/test/validation/api/timer/timer.c
index a8af3f4fa..ccfbf5558 100644
--- a/test/validation/api/timer/timer.c
+++ b/test/validation/api/timer/timer.c
@@ -36,11 +36,12 @@
#define TICK_INVALID (~(uint64_t)0)
/* Test case options */
-#define PRIV 1
-#define EXP_RELAX 1
-#define WAIT 0
-#define CANCEL 1
-#define RESTART 1
+#define PRIV 1
+#define EXP_RELAX 1
+#define WAIT 0
+#define CANCEL 1
+#define RESTART 1
+#define FIRST_TICK 1
/* Timer helper structure */
struct test_timer {
@@ -2235,7 +2236,7 @@ static void timer_test_sched_all(void)
timer_test_all(ODP_QUEUE_TYPE_SCHED);
}
-static void timer_test_periodic(odp_queue_type_t queue_type)
+static void timer_test_periodic(odp_queue_type_t queue_type, int use_first)
{
odp_timer_capability_t timer_capa;
odp_timer_periodic_capability_t periodic_capa;
@@ -2369,7 +2370,9 @@ static void timer_test_periodic(odp_queue_type_t queue_type)
cur_tick = odp_timer_current_tick(timer_pool);
tick = cur_tick + odp_timer_ns_to_tick(timer_pool, period_ns / 2);
- start_param.first_tick = tick;
+ if (use_first)
+ start_param.first_tick = tick;
+
start_param.freq_multiplier = multiplier;
start_param.tmo_ev = ev;
@@ -2424,7 +2427,9 @@ static void timer_test_periodic(odp_queue_type_t queue_type)
}
CU_ASSERT(num_tmo == num);
- CU_ASSERT(diff_ns < 2 * duration_ns);
+
+ /* Allow +-30% error on test duration */
+ CU_ASSERT((diff_ns > 0.7 * duration_ns) && (diff_ns < 1.3 * duration_ns));
/* Stop periodic timer */
ret = odp_timer_periodic_cancel(timer);
@@ -2465,6 +2470,10 @@ static void timer_test_periodic(odp_queue_type_t queue_type)
}
}
+ /* Check that ack() returned 2 on the last event */
+ CU_ASSERT(done);
+ CU_ASSERT(ret == 2);
+
CU_ASSERT(odp_timer_free(timer) == ODP_EVENT_INVALID);
odp_timer_pool_destroy(timer_pool);
CU_ASSERT(odp_queue_destroy(queue) == 0);
@@ -2473,12 +2482,22 @@ static void timer_test_periodic(odp_queue_type_t queue_type)
static void timer_test_periodic_sched(void)
{
- timer_test_periodic(ODP_QUEUE_TYPE_SCHED);
+ timer_test_periodic(ODP_QUEUE_TYPE_SCHED, 0);
}
static void timer_test_periodic_plain(void)
{
- timer_test_periodic(ODP_QUEUE_TYPE_PLAIN);
+ timer_test_periodic(ODP_QUEUE_TYPE_PLAIN, 0);
+}
+
+static void timer_test_periodic_sched_first(void)
+{
+ timer_test_periodic(ODP_QUEUE_TYPE_SCHED, FIRST_TICK);
+}
+
+static void timer_test_periodic_plain_first(void)
+{
+ timer_test_periodic(ODP_QUEUE_TYPE_PLAIN, FIRST_TICK);
}
odp_testinfo_t timer_suite[] = {
@@ -2553,8 +2572,12 @@ odp_testinfo_t timer_suite[] = {
check_sched_queue_support),
ODP_TEST_INFO_CONDITIONAL(timer_test_periodic_sched,
check_periodic_sched_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_periodic_sched_first,
+ check_periodic_sched_support),
ODP_TEST_INFO_CONDITIONAL(timer_test_periodic_plain,
check_periodic_plain_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_periodic_plain_first,
+ check_periodic_plain_support),
ODP_TEST_INFO_NULL,
};
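
The new *_first variants pass FIRST_TICK so that start_param.first_tick is set to an absolute tick half a period ahead, while the originals leave the field at zero and let the implementation place the first expiration per the API default. A condensed sketch of the start sequence under the same assumptions as the test, with the pool, timer, event, multiplier and period already resolved:

    #include <string.h>
    #include <odp_api.h>

    /* Sketch: start a periodic timer, optionally pinning the first expiry
     * half a period from now. All handles and values are assumed to be
     * set up as in timer_test_periodic() above. */
    static int start_periodic(odp_timer_pool_t tp, odp_timer_t timer,
                              odp_event_t ev, uint64_t multiplier,
                              uint64_t period_ns, int use_first)
    {
        odp_timer_periodic_start_t start_param;

        memset(&start_param, 0, sizeof(start_param));

        if (use_first) {
            uint64_t cur = odp_timer_current_tick(tp);

            /* Absolute tick for the first expiration */
            start_param.first_tick =
                cur + odp_timer_ns_to_tick(tp, period_ns / 2);
        }

        start_param.freq_multiplier = multiplier;
        start_param.tmo_ev = ev;

        return odp_timer_periodic_start(timer, &start_param);
    }
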