author    Matias Elo <matias.elo@nokia.com>  2021-07-13 10:37:51 +0300
committer Matias Elo <matias.elo@nokia.com>  2021-07-13 10:37:51 +0300
commit    b7ecb5f8c1a9cbe7d0f3f25344f747fd54cd43a8 (patch)
tree      07d70843cc36e84b9716de356591ce1fa1078134 /test
parent    6c7ac017886e2f1f63a27871254326d7cd1b48d1 (diff)
parent    d0def8ae0e184ae5596bc6bcd30b90c83ae49452 (diff)
Merge tag 'v1.30.1.0' of https://github.com/OpenDataPlane/odp into odp-dpdk
Signed-off-by: Matias Elo <matias.elo@nokia.com>
Diffstat (limited to 'test')
-rw-r--r--  test/performance/odp_l2fwd.c               |  17
-rw-r--r--  test/performance/odp_sched_perf.c          |  81
-rw-r--r--  test/validation/api/ipsec/ipsec_test_out.c |   7
-rw-r--r--  test/validation/api/pktio/pktio.c          | 543
-rw-r--r--  test/validation/api/scheduler/scheduler.c  |  64
-rw-r--r--  test/validation/api/timer/timer.c          |  89
6 files changed, 737 insertions(+), 64 deletions(-)
diff --git a/test/performance/odp_l2fwd.c b/test/performance/odp_l2fwd.c
index c7950cfd9..3da08661c 100644
--- a/test/performance/odp_l2fwd.c
+++ b/test/performance/odp_l2fwd.c
@@ -344,7 +344,7 @@ static int run_worker_sched_mode_vector(void *arg)
thr = odp_thread_id();
max_burst = gbl_args->appl.burst_rx;
- if (gbl_args->appl.num_groups) {
+ if (gbl_args->appl.num_groups > 0) {
odp_thrmask_t mask;
odp_thrmask_zero(&mask);
@@ -527,7 +527,7 @@ static int run_worker_sched_mode(void *arg)
thr = odp_thread_id();
max_burst = gbl_args->appl.burst_rx;
- if (gbl_args->appl.num_groups) {
+ if (gbl_args->appl.num_groups > 0) {
odp_thrmask_t mask;
odp_thrmask_zero(&mask);
@@ -1523,7 +1523,8 @@ static void usage(char *progname)
" -k, --chksum <arg> 0: Don't use checksum offload (default)\n"
" 1: Use checksum offload\n"
" -g, --groups <num> Number of groups to use: 0 ... num\n"
- " 0: SCHED_GROUP_ALL (default)\n"
+ " -1: SCHED_GROUP_WORKER\n"
+ " 0: SCHED_GROUP_ALL (default)\n"
" num: must not exceed number of interfaces or workers\n"
" -b, --burst_rx <num> 0: Use max burst size (default)\n"
" num: Max number of packets per receive call\n"
@@ -2071,8 +2072,13 @@ int main(int argc, char *argv[])
printf("First CPU: %i\n", odp_cpumask_first(&cpumask));
printf("CPU mask: %s\n", cpumaskstr);
- if (num_groups)
+ if (num_groups > 0)
printf("num groups: %i\n", num_groups);
+ else if (num_groups == 0)
+ printf("group: ODP_SCHED_GROUP_ALL\n");
+ else
+ printf("group: ODP_SCHED_GROUP_WORKER\n");
+
if (num_groups > if_count || num_groups > num_workers) {
ODPH_ERR("Too many groups. Number of groups may not exceed "
@@ -2210,6 +2216,9 @@ int main(int argc, char *argv[])
if (num_groups == 0) {
group[0] = ODP_SCHED_GROUP_ALL;
num_groups = 1;
+ } else if (num_groups == -1) {
+ group[0] = ODP_SCHED_GROUP_WORKER;
+ num_groups = 1;
} else {
create_groups(num_groups, group);
}
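
The hunks above let -g take -1 to select the predefined worker-only group. As a reference, a minimal sketch of the resulting selection and join logic, condensed from the hunks above (opt_groups stands in for gbl_args->appl.num_groups; create_groups() and MAX_GROUPS are the helper and constant from odp_l2fwd.c; error handling omitted):

    /* main(): map the -g option to a schedule group table.
     * Predefined groups need no creation or explicit join. */
    odp_schedule_group_t group[MAX_GROUPS];
    int num = 1;

    if (opt_groups == 0)
            group[0] = ODP_SCHED_GROUP_ALL;      /* default */
    else if (opt_groups == -1)
            group[0] = ODP_SCHED_GROUP_WORKER;   /* workers only */
    else {
            create_groups(opt_groups, group);    /* helper in odp_l2fwd.c */
            num = opt_groups;
    }

    /* Worker thread: join one of the created groups (opt_groups > 0);
     * round-robin threads into groups (simplified). */
    if (opt_groups > 0) {
            odp_thrmask_t mask;

            odp_thrmask_zero(&mask);
            odp_thrmask_set(&mask, odp_thread_id());
            odp_schedule_group_join(group[thr % num], &mask);
    }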
diff --git a/test/performance/odp_sched_perf.c b/test/performance/odp_sched_perf.c
index 148bf11d5..4ec4f4352 100644
--- a/test/performance/odp_sched_perf.c
+++ b/test/performance/odp_sched_perf.c
@@ -33,7 +33,7 @@ typedef struct test_options_t {
uint32_t num_dummy;
uint32_t num_event;
uint32_t num_sched;
- uint32_t num_group;
+ int num_group;
uint32_t num_join;
uint32_t max_burst;
int queue_type;
@@ -81,6 +81,7 @@ typedef struct test_global_t {
odph_thread_t thread_tbl[ODP_THREAD_COUNT_MAX];
test_stat_t stat[ODP_THREAD_COUNT_MAX];
thread_arg_t thread_arg[ODP_THREAD_COUNT_MAX];
+ odp_atomic_u32_t num_worker;
} test_global_t;
@@ -95,9 +96,10 @@ static void print_usage(void)
" -q, --num_queue Number of queues. Default: 1.\n"
" -d, --num_dummy Number of empty queues. Default: 0.\n"
" -e, --num_event Number of events per queue. Default: 100.\n"
- " -s, --num_sched Number of events to schedule per thread\n"
+ " -s, --num_sched Number of events to schedule per thread. Default: 100 000.\n"
" -g, --num_group Number of schedule groups. Round robins threads and queues into groups.\n"
- " 0: SCHED_GROUP_ALL (default)\n"
+ " -1: SCHED_GROUP_WORKER\n"
+ " 0: SCHED_GROUP_ALL (default)\n"
" -j, --num_join Number of groups a thread joins. Threads are divide evenly into groups,\n"
" if num_cpu is multiple of num_group and num_group is multiple of num_join.\n"
" 0: join all groups (default)\n"
@@ -115,9 +117,7 @@ static void print_usage(void)
static int parse_options(int argc, char *argv[], test_options_t *test_options)
{
- int opt;
- int long_index;
- uint32_t num_group, num_join;
+ int opt, long_index, num_group, num_join;
int ret = 0;
uint32_t ctx_size = 0;
@@ -232,21 +232,18 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
num_group = test_options->num_group;
num_join = test_options->num_join;
if (num_group > MAX_GROUPS) {
- printf("Error: Too many groups. Max supported %i.\n",
- MAX_GROUPS);
+ ODPH_ERR("Too many groups. Max supported %i.\n", MAX_GROUPS);
ret = -1;
}
- if (num_join > num_group) {
- printf("Error: num_join (%u) larger than num_group (%u).\n",
- num_join, num_group);
+ if (num_group > 0 && num_join > num_group) {
+ ODPH_ERR("num_join (%i) larger than num_group (%i).\n", num_join, num_group);
ret = -1;
}
- if (num_join && num_group > (test_options->num_cpu * num_join)) {
- printf("WARNING: Too many groups (%u). Some groups (%u) are not served.\n\n",
- num_group,
- num_group - (test_options->num_cpu * num_join));
+ if (num_join && num_group > (int)(test_options->num_cpu * num_join)) {
+ printf("WARNING: Too many groups (%i). Some groups (%i) are not served.\n\n",
+ num_group, num_group - (test_options->num_cpu * num_join));
if (test_options->forward) {
printf("Error: Cannot forward when some queues are not served.\n");
@@ -333,7 +330,7 @@ static int create_pool(test_global_t *global)
uint32_t tot_queue = test_options->tot_queue;
uint32_t tot_event = test_options->tot_event;
uint32_t queue_size = test_options->queue_size;
- uint32_t num_group = test_options->num_group;
+ int num_group = test_options->num_group;
uint32_t num_join = test_options->num_join;
int forward = test_options->forward;
uint64_t wait_ns = test_options->wait_ns;
@@ -352,7 +349,14 @@ static int create_pool(test_global_t *global)
printf(" num queues %u\n", num_queue);
printf(" num empty queues %u\n", num_dummy);
printf(" total queues %u\n", tot_queue);
- printf(" num groups %u\n", num_group);
+ printf(" num groups %i", num_group);
+ if (num_group == -1)
+ printf(" (ODP_SCHED_GROUP_WORKER)\n");
+ else if (num_group == 0)
+ printf(" (ODP_SCHED_GROUP_ALL)\n");
+ else
+ printf("\n");
+
printf(" num join %u\n", num_join);
printf(" forward events %i\n", forward ? 1 : 0);
printf(" wait nsec %" PRIu64 "\n", wait_ns);
@@ -422,7 +426,7 @@ static int create_groups(test_global_t *global)
test_options_t *test_options = &global->test_options;
uint32_t num_group = test_options->num_group;
- if (num_group == 0)
+ if (test_options->num_group <= 0)
return 0;
if (odp_schedule_capability(&sched_capa)) {
@@ -466,7 +470,7 @@ static int create_queues(test_global_t *global)
uint32_t num_event = test_options->num_event;
uint32_t queue_size = test_options->queue_size;
uint32_t tot_queue = test_options->tot_queue;
- uint32_t num_group = test_options->num_group;
+ int num_group = test_options->num_group;
int type = test_options->queue_type;
odp_pool_t pool = global->pool;
uint8_t *ctx = NULL;
@@ -510,11 +514,14 @@ static int create_queues(test_global_t *global)
queue_param.type = ODP_QUEUE_TYPE_SCHED;
queue_param.sched.prio = ODP_SCHED_PRIO_DEFAULT;
queue_param.sched.sync = sync;
- queue_param.sched.group = ODP_SCHED_GROUP_ALL;
queue_param.size = queue_size;
+ if (num_group == -1)
+ queue_param.sched.group = ODP_SCHED_GROUP_WORKER;
+ else
+ queue_param.sched.group = ODP_SCHED_GROUP_ALL;
for (i = 0; i < tot_queue; i++) {
- if (num_group) {
+ if (num_group > 0) {
odp_schedule_group_t group;
/* Divide all queues evenly into groups */
@@ -598,11 +605,11 @@ static int join_group(test_global_t *global, int grp_index, int thr)
static int join_all_groups(test_global_t *global, int thr)
{
- uint32_t i;
+ int i;
test_options_t *test_options = &global->test_options;
- uint32_t num_group = test_options->num_group;
+ int num_group = test_options->num_group;
- if (num_group == 0)
+ if (num_group <= 0)
return 0;
for (i = 0; i < num_group; i++) {
@@ -647,11 +654,11 @@ static int destroy_queues(test_global_t *global)
static int destroy_groups(test_global_t *global)
{
- uint32_t i;
+ int i;
test_options_t *test_options = &global->test_options;
- uint32_t num_group = test_options->num_group;
+ int num_group = test_options->num_group;
- if (num_group == 0)
+ if (num_group <= 0)
return 0;
for (i = 0; i < num_group; i++) {
@@ -725,7 +732,7 @@ static int test_sched(void *arg)
test_options_t *test_options = &global->test_options;
uint32_t num_sched = test_options->num_sched;
uint32_t max_burst = test_options->max_burst;
- uint32_t num_group = test_options->num_group;
+ int num_group = test_options->num_group;
int forward = test_options->forward;
int touch_data = test_options->touch_data;
uint32_t rd_words = test_options->rd_words;
@@ -746,7 +753,7 @@ static int test_sched(void *arg)
if (forward)
ctx_offset = ROUNDUP(sizeof(odp_queue_t), 8);
- if (num_group) {
+ if (num_group > 0) {
uint32_t num_join = test_options->num_join;
if (num_join) {
@@ -890,6 +897,16 @@ static int test_sched(void *arg)
global->stat[thr].dummy_sum = data_sum + ctx_sum;
global->stat[thr].failed = ret;
+ if (odp_atomic_fetch_dec_u32(&global->num_worker) == 1) {
+ /* The last worker frees all events. This is needed when the main
+ * thread cannot do the cleanup (ODP_SCHED_GROUP_WORKER). */
+ odp_event_t event;
+ uint64_t sched_wait = odp_schedule_wait_time(200 * ODP_TIME_MSEC_IN_NS);
+
+ while ((event = odp_schedule(NULL, sched_wait)) != ODP_EVENT_INVALID)
+ odp_event_free(event);
+ }
+
/* Pause scheduling before thread exit */
odp_schedule_pause();
@@ -919,11 +936,13 @@ static int start_workers(test_global_t *global, odp_instance_t instance)
odph_thread_common_param_t thr_common;
int i, ret;
test_options_t *test_options = &global->test_options;
- uint32_t num_group = test_options->num_group;
+ int num_group = test_options->num_group;
uint32_t num_join = test_options->num_join;
int num_cpu = test_options->num_cpu;
odph_thread_param_t thr_param[num_cpu];
+ odp_atomic_init_u32(&global->num_worker, num_cpu);
+
memset(global->thread_tbl, 0, sizeof(global->thread_tbl));
memset(thr_param, 0, sizeof(thr_param));
memset(&thr_common, 0, sizeof(thr_common));
@@ -939,7 +958,7 @@ static int start_workers(test_global_t *global, odp_instance_t instance)
global->thread_arg[i].global = global;
global->thread_arg[i].first_group = 0;
- if (num_group && num_join) {
+ if (num_group > 0 && num_join) {
/* Each thread joins only num_join groups, starting
* from this group index and wrapping around the group
* table. */
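
The new num_worker atomic in odp_sched_perf.c implements a last-worker-drains pattern: with ODP_SCHED_GROUP_WORKER the main thread is not a member of the group and cannot free the remaining events itself. A condensed sketch of that pattern (in the test the counter is a test_global_t member, initialized to the worker count in start_workers(); shown standalone here):

    static odp_atomic_u32_t num_worker;  /* odp_atomic_init_u32(&num_worker, num_cpu) */

    /* At the end of each worker's test loop */
    if (odp_atomic_fetch_dec_u32(&num_worker) == 1) {
            /* Last worker out: drain and free all remaining events */
            odp_event_t ev;
            uint64_t wait = odp_schedule_wait_time(200 * ODP_TIME_MSEC_IN_NS);

            while ((ev = odp_schedule(NULL, wait)) != ODP_EVENT_INVALID)
                    odp_event_free(ev);
    }

    odp_schedule_pause();  /* pause scheduling before thread exit */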
diff --git a/test/validation/api/ipsec/ipsec_test_out.c b/test/validation/api/ipsec/ipsec_test_out.c
index 7c1121579..3349ded99 100644
--- a/test/validation/api/ipsec/ipsec_test_out.c
+++ b/test/validation/api/ipsec/ipsec_test_out.c
@@ -1570,6 +1570,12 @@ static void ipsec_test_default_values(void)
CU_ASSERT(config.inbound.retain_outer == ODP_PROTO_LAYER_NONE);
CU_ASSERT(config.inbound.parse_level == ODP_PROTO_LAYER_NONE);
CU_ASSERT(config.inbound.chksums.all_chksum == 0);
+ CU_ASSERT(!config.inbound.reassembly.en_ipv4);
+ CU_ASSERT(!config.inbound.reassembly.en_ipv6);
+ CU_ASSERT(config.inbound.reassembly.max_wait_time == 0);
+ CU_ASSERT(config.inbound.reassembly.max_num_frags == 2);
+ CU_ASSERT(!config.inbound.reass_async);
+ CU_ASSERT(!config.inbound.reass_inline);
CU_ASSERT(config.outbound.all_chksum == 0);
CU_ASSERT(!config.stats_en);
@@ -1592,6 +1598,7 @@ static void ipsec_test_default_values(void)
CU_ASSERT(sa_param.inbound.lookup_mode == ODP_IPSEC_LOOKUP_DISABLED);
CU_ASSERT(sa_param.inbound.antireplay_ws == 0);
CU_ASSERT(sa_param.inbound.pipeline == ODP_IPSEC_PIPELINE_NONE);
+ CU_ASSERT(!sa_param.inbound.reassembly_en);
CU_ASSERT(sa_param.outbound.tunnel.type == ODP_IPSEC_TUNNEL_IPV4);
CU_ASSERT(sa_param.outbound.tunnel.ipv4.dscp == 0);
CU_ASSERT(sa_param.outbound.tunnel.ipv4.df == 0);
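
The added assertions pin down the defaults that odp_ipsec_config_init() must set for the new reassembly fields. A minimal sketch of the init-then-check pattern (the 0xff prefill is an assumption borrowed from the pktio config test below; it ensures the init call, not prior memory contents, produces the zeros):

    odp_ipsec_config_t config;

    memset(&config, 0xff, sizeof(config));  /* poison first (assumed) */
    odp_ipsec_config_init(&config);

    CU_ASSERT(!config.inbound.reassembly.en_ipv4);
    CU_ASSERT(config.inbound.reassembly.max_num_frags == 2);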
diff --git a/test/validation/api/pktio/pktio.c b/test/validation/api/pktio/pktio.c
index 9a47dbe8c..838d50fd8 100644
--- a/test/validation/api/pktio/pktio.c
+++ b/test/validation/api/pktio/pktio.c
@@ -91,6 +91,11 @@ typedef enum {
RECV_MQ_TMO_NO_IDX,
} recv_tmo_mode_e;
+typedef enum {
+ ETH_UNICAST,
+ ETH_BROADCAST,
+} eth_addr_type_e;
+
/** size of transmitted packets */
static uint32_t packet_len = PKT_LEN_NORMAL;
@@ -157,7 +162,8 @@ static void set_pool_len(odp_pool_param_t *params, odp_pool_capability_t *capa)
}
}
-static void pktio_pkt_set_macs(odp_packet_t pkt, odp_pktio_t src, odp_pktio_t dst)
+static void pktio_pkt_set_macs(odp_packet_t pkt, odp_pktio_t src, odp_pktio_t dst,
+ eth_addr_type_e dst_addr_type)
{
uint32_t len;
odph_ethhdr_t *eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, &len);
@@ -167,9 +173,13 @@ static void pktio_pkt_set_macs(odp_packet_t pkt, odp_pktio_t src, odp_pktio_t ds
CU_ASSERT(ret == ODPH_ETHADDR_LEN);
CU_ASSERT(ret <= ODP_PKTIO_MACADDR_MAXSIZE);
- ret = odp_pktio_mac_addr(dst, &eth->dst, ODP_PKTIO_MACADDR_MAXSIZE);
- CU_ASSERT(ret == ODPH_ETHADDR_LEN);
- CU_ASSERT(ret <= ODP_PKTIO_MACADDR_MAXSIZE);
+ if (dst_addr_type == ETH_UNICAST) {
+ ret = odp_pktio_mac_addr(dst, &eth->dst, ODP_PKTIO_MACADDR_MAXSIZE);
+ CU_ASSERT(ret == ODPH_ETHADDR_LEN);
+ CU_ASSERT(ret <= ODP_PKTIO_MACADDR_MAXSIZE);
+ } else {
+ CU_ASSERT(odph_eth_addr_parse(&eth->dst, "ff:ff:ff:ff:ff:ff") == 0);
+ }
}
static uint32_t pktio_pkt_set_seq(odp_packet_t pkt, size_t l4_hdr_len)
@@ -561,7 +571,8 @@ static int create_packets_udp(odp_packet_t pkt_tbl[],
int num,
odp_pktio_t pktio_src,
odp_pktio_t pktio_dst,
- odp_bool_t fix_cs)
+ odp_bool_t fix_cs,
+ eth_addr_type_e dst_addr_type)
{
int i, ret;
@@ -576,7 +587,7 @@ static int create_packets_udp(odp_packet_t pkt_tbl[],
break;
}
- pktio_pkt_set_macs(pkt_tbl[i], pktio_src, pktio_dst);
+ pktio_pkt_set_macs(pkt_tbl[i], pktio_src, pktio_dst, dst_addr_type);
/* Set user pointer. It should be NULL on receive side. */
odp_packet_user_ptr_set(pkt_tbl[i], (void *)1);
@@ -613,7 +624,7 @@ static int create_packets_sctp(odp_packet_t pkt_tbl[],
break;
}
- pktio_pkt_set_macs(pkt_tbl[i], pktio_src, pktio_dst);
+ pktio_pkt_set_macs(pkt_tbl[i], pktio_src, pktio_dst, ETH_UNICAST);
ret = pktio_zero_checksums(pkt_tbl[i]);
if (ret != 0) {
@@ -629,7 +640,7 @@ static int create_packets(odp_packet_t pkt_tbl[], uint32_t pkt_seq[], int num,
odp_pktio_t pktio_src, odp_pktio_t pktio_dst)
{
return create_packets_udp(pkt_tbl, pkt_seq, num, pktio_src, pktio_dst,
- true);
+ true, ETH_UNICAST);
}
static int get_packets(pktio_info_t *pktio_rx, odp_packet_t pkt_tbl[],
@@ -1658,13 +1669,25 @@ static void pktio_test_pktio_config(void)
pktio = create_pktio(0, ODP_PKTIN_MODE_DIRECT, ODP_PKTOUT_MODE_DIRECT);
CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+ memset(&config, 0xff, sizeof(config));
odp_pktio_config_init(&config);
+ /* Check default values */
+ CU_ASSERT(config.pktin.all_bits == 0);
+ CU_ASSERT(config.pktout.all_bits == 0);
+ CU_ASSERT(config.parser.layer == ODP_PROTO_LAYER_ALL);
+ CU_ASSERT(!config.enable_loop);
+ CU_ASSERT(!config.inbound_ipsec);
+ CU_ASSERT(!config.outbound_ipsec);
+ CU_ASSERT(!config.enable_lso);
+ CU_ASSERT(!config.reassembly.en_ipv4);
+ CU_ASSERT(!config.reassembly.en_ipv6);
+ CU_ASSERT(config.reassembly.max_wait_time == 0);
+ CU_ASSERT(config.reassembly.max_num_frags == 2);
+
/* Indicate packet refs might be used */
config.pktout.bit.no_packet_refs = 0;
- CU_ASSERT(config.parser.layer == ODP_PROTO_LAYER_ALL);
-
CU_ASSERT(odp_pktio_config(pktio, NULL) == 0);
CU_ASSERT(odp_pktio_config(pktio, &config) == 0);
@@ -1944,53 +1967,56 @@ static void _print_pktio_stats(odp_pktio_stats_t *s, const char *name)
" in_octets %" PRIu64 "\n"
" in_packets %" PRIu64 "\n"
" in_ucast_pkts %" PRIu64 "\n"
+ " in_mcast_pkts %" PRIu64 "\n"
+ " in_bcast_pkts %" PRIu64 "\n"
" in_discards %" PRIu64 "\n"
" in_errors %" PRIu64 "\n"
" out_octets %" PRIu64 "\n"
" out_packets %" PRIu64 "\n"
" out_ucast_pkts %" PRIu64 "\n"
+ " out_mcast_pkts %" PRIu64 "\n"
+ " out_bcast_pkts %" PRIu64 "\n"
" out_discards %" PRIu64 "\n"
" out_errors %" PRIu64 "\n",
name,
s->in_octets,
s->in_packets,
s->in_ucast_pkts,
+ s->in_mcast_pkts,
+ s->in_bcast_pkts,
s->in_discards,
s->in_errors,
s->out_octets,
s->out_packets,
s->out_ucast_pkts,
+ s->out_mcast_pkts,
+ s->out_bcast_pkts,
s->out_discards,
s->out_errors);
}
#endif
-/* some pktio like netmap support various methods to
- * get statistics counters. ethtool strings are not standardised
- * and sysfs may not be supported. skip pktio_stats test until
- * we will solve that.*/
static int pktio_check_statistics_counters(void)
{
odp_pktio_t pktio;
- odp_pktio_stats_t stats;
- int ret;
+ odp_pktio_capability_t capa;
odp_pktio_param_t pktio_param;
- const char *iface = iface_name[0];
+ int ret;
odp_pktio_param_init(&pktio_param);
pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
- pktio = odp_pktio_open(iface, pool[0], &pktio_param);
+ pktio = odp_pktio_open(iface_name[0], pool[0], &pktio_param);
if (pktio == ODP_PKTIO_INVALID)
return ODP_TEST_INACTIVE;
- ret = odp_pktio_stats(pktio, &stats);
+ ret = odp_pktio_capability(pktio, &capa);
(void)odp_pktio_close(pktio);
- if (ret == 0)
- return ODP_TEST_ACTIVE;
+ if (ret < 0 || capa.stats.pktio.all_counters == 0)
+ return ODP_TEST_INACTIVE;
- return ODP_TEST_INACTIVE;
+ return ODP_TEST_ACTIVE;
}
static void pktio_test_statistics_counters(void)
@@ -2008,6 +2034,7 @@ static void pktio_test_statistics_counters(void)
uint64_t wait = odp_schedule_wait_time(ODP_TIME_MSEC_IN_NS);
odp_pktio_stats_t stats[2];
odp_pktio_stats_t *rx_stats, *tx_stats;
+ odp_pktio_capability_t rx_capa, tx_capa;
for (i = 0; i < num_ifaces; i++) {
pktio[i] = create_pktio(i, ODP_PKTIN_MODE_SCHED,
@@ -2018,6 +2045,9 @@ static void pktio_test_statistics_counters(void)
pktio_tx = pktio[0];
pktio_rx = (num_ifaces > 1) ? pktio[1] : pktio_tx;
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio_tx, &tx_capa) == 0);
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio_rx, &rx_capa) == 0);
+
CU_ASSERT_FATAL(odp_pktout_queue(pktio_tx, &pktout, 1) == 1);
ret = odp_pktio_start(pktio_tx);
@@ -2066,11 +2096,11 @@ static void pktio_test_statistics_counters(void)
CU_ASSERT(ret == 0);
tx_stats = &stats[0];
- CU_ASSERT((tx_stats->out_octets == 0) ||
+ CU_ASSERT((tx_capa.stats.pktio.counter.out_octets == 0) ||
(tx_stats->out_octets >= (PKT_LEN_NORMAL * (uint64_t)pkts)));
- CU_ASSERT((tx_stats->out_packets == 0) ||
+ CU_ASSERT((tx_capa.stats.pktio.counter.out_packets == 0) ||
(tx_stats->out_packets >= (uint64_t)pkts));
- CU_ASSERT((tx_stats->out_ucast_pkts == 0) ||
+ CU_ASSERT((tx_capa.stats.pktio.counter.out_ucast_pkts == 0) ||
(tx_stats->out_ucast_pkts >= (uint64_t)pkts));
CU_ASSERT(tx_stats->out_discards == 0);
CU_ASSERT(tx_stats->out_errors == 0);
@@ -2081,15 +2111,46 @@ static void pktio_test_statistics_counters(void)
ret = odp_pktio_stats(pktio_rx, rx_stats);
CU_ASSERT(ret == 0);
}
- CU_ASSERT((rx_stats->in_octets == 0) ||
+ CU_ASSERT((rx_capa.stats.pktio.counter.in_octets == 0) ||
(rx_stats->in_octets >= (PKT_LEN_NORMAL * (uint64_t)pkts)));
- CU_ASSERT((rx_stats->in_packets == 0) ||
+ CU_ASSERT((rx_capa.stats.pktio.counter.in_packets == 0) ||
(rx_stats->in_packets >= (uint64_t)pkts));
- CU_ASSERT((rx_stats->in_ucast_pkts == 0) ||
+ CU_ASSERT((rx_capa.stats.pktio.counter.in_ucast_pkts == 0) ||
(rx_stats->in_ucast_pkts >= (uint64_t)pkts));
CU_ASSERT(rx_stats->in_discards == 0);
CU_ASSERT(rx_stats->in_errors == 0);
+ /* Check that all unsupported counters are still zero */
+ if (!rx_capa.stats.pktio.counter.in_octets)
+ CU_ASSERT(rx_stats->in_octets == 0);
+ if (!rx_capa.stats.pktio.counter.in_packets)
+ CU_ASSERT(rx_stats->in_packets == 0);
+ if (!rx_capa.stats.pktio.counter.in_ucast_pkts)
+ CU_ASSERT(rx_stats->in_ucast_pkts == 0);
+ if (!rx_capa.stats.pktio.counter.in_mcast_pkts)
+ CU_ASSERT(rx_stats->in_mcast_pkts == 0);
+ if (!rx_capa.stats.pktio.counter.in_bcast_pkts)
+ CU_ASSERT(rx_stats->in_bcast_pkts == 0);
+ if (!rx_capa.stats.pktio.counter.in_discards)
+ CU_ASSERT(rx_stats->in_discards == 0);
+ if (!rx_capa.stats.pktio.counter.in_errors)
+ CU_ASSERT(rx_stats->in_errors == 0);
+
+ if (!tx_capa.stats.pktio.counter.out_octets)
+ CU_ASSERT(tx_stats->out_octets == 0);
+ if (!tx_capa.stats.pktio.counter.out_packets)
+ CU_ASSERT(tx_stats->out_packets == 0);
+ if (!tx_capa.stats.pktio.counter.out_ucast_pkts)
+ CU_ASSERT(tx_stats->out_ucast_pkts == 0);
+ if (!tx_capa.stats.pktio.counter.out_mcast_pkts)
+ CU_ASSERT(tx_stats->out_mcast_pkts == 0);
+ if (!tx_capa.stats.pktio.counter.out_bcast_pkts)
+ CU_ASSERT(tx_stats->out_bcast_pkts == 0);
+ if (!tx_capa.stats.pktio.counter.out_discards)
+ CU_ASSERT(tx_stats->out_discards == 0);
+ if (!tx_capa.stats.pktio.counter.out_errors)
+ CU_ASSERT(tx_stats->out_errors == 0);
+
for (i = 0; i < num_ifaces; i++) {
CU_ASSERT(odp_pktio_stop(pktio[i]) == 0);
#ifdef DEBUG_STATS
@@ -2100,6 +2161,423 @@ static void pktio_test_statistics_counters(void)
}
}
+static int pktio_check_statistics_counters_bcast(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_capability_t capa;
+ odp_pktio_param_t pktio_param;
+ int ret;
+
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
+
+ pktio = odp_pktio_open(iface_name[0], pool[0], &pktio_param);
+ if (pktio == ODP_PKTIO_INVALID)
+ return ODP_TEST_INACTIVE;
+
+ ret = odp_pktio_capability(pktio, &capa);
+ (void)odp_pktio_close(pktio);
+
+ if (ret < 0 || (capa.stats.pktio.counter.in_bcast_pkts == 0 &&
+ capa.stats.pktio.counter.out_bcast_pkts == 0))
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+static void pktio_test_statistics_counters_bcast(void)
+{
+ odp_pktio_t pktio_rx, pktio_tx;
+ odp_pktio_t pktio[MAX_NUM_IFACES] = {
+ ODP_PKTIO_INVALID, ODP_PKTIO_INVALID
+ };
+ odp_packet_t pkt;
+ odp_packet_t tx_pkt[1000];
+ uint32_t pkt_seq[1000];
+ odp_event_t ev;
+ int i, pkts, tx_pkts, ret, alloc = 0;
+ odp_pktout_queue_t pktout;
+ uint64_t wait = odp_schedule_wait_time(ODP_TIME_MSEC_IN_NS);
+ odp_pktio_stats_t stats[2];
+ odp_pktio_stats_t *rx_stats, *tx_stats;
+ odp_pktio_capability_t rx_capa, tx_capa;
+
+ for (i = 0; i < num_ifaces; i++) {
+ pktio[i] = create_pktio(i, ODP_PKTIN_MODE_SCHED,
+ ODP_PKTOUT_MODE_DIRECT);
+
+ CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
+ }
+ pktio_tx = pktio[0];
+ pktio_rx = (num_ifaces > 1) ? pktio[1] : pktio_tx;
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio_tx, &tx_capa) == 0);
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio_rx, &rx_capa) == 0);
+
+ CU_ASSERT_FATAL(odp_pktout_queue(pktio_tx, &pktout, 1) == 1);
+
+ CU_ASSERT_FATAL(odp_pktio_start(pktio_tx) == 0);
+ if (num_ifaces > 1)
+ CU_ASSERT_FATAL(odp_pktio_start(pktio_rx) == 0);
+
+ alloc = create_packets_udp(tx_pkt, pkt_seq, 1000, pktio_tx, pktio_rx,
+ true, ETH_BROADCAST);
+
+ CU_ASSERT(odp_pktio_stats_reset(pktio_tx) == 0);
+ if (num_ifaces > 1)
+ CU_ASSERT(odp_pktio_stats_reset(pktio_rx) == 0);
+
+ /* send */
+ for (pkts = 0; pkts != alloc; ) {
+ ret = odp_pktout_send(pktout, &tx_pkt[pkts], alloc - pkts);
+ if (ret < 0) {
+ CU_FAIL("unable to send packet\n");
+ break;
+ }
+ pkts += ret;
+ }
+ tx_pkts = pkts;
+
+ /* get */
+ for (i = 0, pkts = 0; i < 1000 && pkts != tx_pkts; i++) {
+ ev = odp_schedule(NULL, wait);
+ if (ev != ODP_EVENT_INVALID) {
+ if (odp_event_type(ev) == ODP_EVENT_PACKET) {
+ pkt = odp_packet_from_event(ev);
+ if (pktio_pkt_seq(pkt) != TEST_SEQ_INVALID)
+ pkts++;
+ }
+ odp_event_free(ev);
+ }
+ }
+
+ CU_ASSERT(pkts == tx_pkts);
+
+ CU_ASSERT(odp_pktio_stats(pktio_tx, &stats[0]) == 0);
+ tx_stats = &stats[0];
+
+ CU_ASSERT((tx_capa.stats.pktio.counter.out_bcast_pkts == 0) ||
+ (tx_stats->out_bcast_pkts >= (uint64_t)pkts));
+ CU_ASSERT((tx_capa.stats.pktio.counter.out_octets == 0) ||
+ (tx_stats->out_octets >= (PKT_LEN_NORMAL * (uint64_t)pkts)));
+ CU_ASSERT((tx_capa.stats.pktio.counter.out_packets == 0) ||
+ (tx_stats->out_packets >= (uint64_t)pkts));
+
+ rx_stats = &stats[0];
+ if (num_ifaces > 1) {
+ rx_stats = &stats[1];
+ CU_ASSERT(odp_pktio_stats(pktio_rx, rx_stats) == 0);
+ }
+ CU_ASSERT((rx_capa.stats.pktio.counter.in_bcast_pkts == 0) ||
+ (rx_stats->in_bcast_pkts >= (uint64_t)pkts));
+ CU_ASSERT((rx_capa.stats.pktio.counter.in_octets == 0) ||
+ (rx_stats->in_octets >= (PKT_LEN_NORMAL * (uint64_t)pkts)));
+ CU_ASSERT((rx_capa.stats.pktio.counter.in_packets == 0) ||
+ (rx_stats->in_packets >= (uint64_t)pkts));
+
+ for (i = 0; i < num_ifaces; i++) {
+ CU_ASSERT(odp_pktio_stop(pktio[i]) == 0);
+#ifdef DEBUG_STATS
+ _print_pktio_stats(&stats[i], iface_name[i]);
+#endif
+ flush_input_queue(pktio[i], ODP_PKTIN_MODE_SCHED);
+ CU_ASSERT(odp_pktio_close(pktio[i]) == 0);
+ }
+}
+
+static int pktio_check_queue_statistics_counters(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_capability_t capa;
+ odp_pktio_param_t pktio_param;
+ int ret;
+
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT;
+ pktio_param.out_mode = ODP_PKTOUT_MODE_DIRECT;
+
+ pktio = odp_pktio_open(iface_name[0], pool[0], &pktio_param);
+ if (pktio == ODP_PKTIO_INVALID)
+ return ODP_TEST_INACTIVE;
+
+ ret = odp_pktio_capability(pktio, &capa);
+ (void)odp_pktio_close(pktio);
+
+ if (ret < 0 || (capa.stats.pktin_queue.all_counters == 0 &&
+ capa.stats.pktout_queue.all_counters == 0))
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+static void pktio_test_queue_statistics_counters(void)
+{
+ odp_pktio_t pktio_rx, pktio_tx;
+ odp_pktio_t pktio[MAX_NUM_IFACES] = {
+ ODP_PKTIO_INVALID, ODP_PKTIO_INVALID
+ };
+ odp_packet_t tx_pkt[1000];
+ uint32_t pkt_seq[1000];
+ int i, pkts, tx_pkts, ret, alloc = 0;
+ odp_pktout_queue_t pktout;
+ odp_pktin_queue_t pktin;
+ uint64_t wait = odp_pktin_wait_time(ODP_TIME_SEC_IN_NS);
+ odp_pktin_queue_stats_t rx_stats;
+ odp_pktout_queue_stats_t tx_stats;
+ odp_pktio_capability_t rx_capa, tx_capa;
+
+ for (i = 0; i < num_ifaces; i++) {
+ pktio[i] = create_pktio(i, ODP_PKTIN_MODE_DIRECT,
+ ODP_PKTOUT_MODE_DIRECT);
+
+ CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
+ }
+ pktio_tx = pktio[0];
+ pktio_rx = (num_ifaces > 1) ? pktio[1] : pktio_tx;
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio_tx, &tx_capa) == 0);
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio_rx, &rx_capa) == 0);
+
+ CU_ASSERT_FATAL(odp_pktin_queue(pktio_rx, &pktin, 1) == 1);
+ CU_ASSERT_FATAL(odp_pktout_queue(pktio_tx, &pktout, 1) == 1);
+
+ CU_ASSERT_FATAL(odp_pktio_start(pktio_tx) == 0);
+ if (num_ifaces > 1)
+ CU_ASSERT_FATAL(odp_pktio_start(pktio_rx) == 0);
+
+ alloc = create_packets(tx_pkt, pkt_seq, 1000, pktio_tx, pktio_rx);
+
+ CU_ASSERT(odp_pktio_stats_reset(pktio_tx) == 0);
+ if (num_ifaces > 1)
+ CU_ASSERT(odp_pktio_stats_reset(pktio_rx) == 0);
+
+ for (pkts = 0; pkts != alloc; ) {
+ ret = odp_pktout_send(pktout, &tx_pkt[pkts], alloc - pkts);
+ if (ret < 0) {
+ CU_FAIL("unable to send packet\n");
+ break;
+ }
+ pkts += ret;
+ }
+ tx_pkts = pkts;
+
+ for (i = 0, pkts = 0; i < 1000 && pkts != tx_pkts; i++) {
+ odp_packet_t pkt;
+
+ if (odp_pktin_recv_tmo(pktin, &pkt, 1, wait) != 1)
+ break;
+
+ if (pktio_pkt_seq(pkt) != TEST_SEQ_INVALID)
+ pkts++;
+
+ odp_packet_free(pkt);
+ }
+
+ CU_ASSERT(pkts == tx_pkts);
+
+ CU_ASSERT_FATAL(odp_pktout_queue_stats(pktout, &tx_stats) == 0);
+ CU_ASSERT((!tx_capa.stats.pktout_queue.counter.octets) ||
+ (tx_stats.octets >= (PKT_LEN_NORMAL * (uint64_t)pkts)));
+ CU_ASSERT((!tx_capa.stats.pktout_queue.counter.packets) ||
+ (tx_stats.packets >= (uint64_t)pkts));
+ CU_ASSERT(tx_stats.discards == 0);
+ CU_ASSERT(tx_stats.errors == 0);
+
+ CU_ASSERT_FATAL(odp_pktin_queue_stats(pktin, &rx_stats) == 0);
+ CU_ASSERT((!rx_capa.stats.pktin_queue.counter.octets) ||
+ (rx_stats.octets >= (PKT_LEN_NORMAL * (uint64_t)pkts)));
+ CU_ASSERT((!rx_capa.stats.pktin_queue.counter.packets) ||
+ (rx_stats.packets >= (uint64_t)pkts));
+ CU_ASSERT(rx_stats.discards == 0);
+ CU_ASSERT(rx_stats.errors == 0);
+
+ /* Check that all unsupported counters are still zero */
+ if (!rx_capa.stats.pktin_queue.counter.octets)
+ CU_ASSERT(rx_stats.octets == 0);
+ if (!rx_capa.stats.pktin_queue.counter.packets)
+ CU_ASSERT(rx_stats.packets == 0);
+ if (!tx_capa.stats.pktout_queue.counter.octets)
+ CU_ASSERT(tx_stats.octets == 0);
+ if (!tx_capa.stats.pktout_queue.counter.packets)
+ CU_ASSERT(tx_stats.packets == 0);
+
+ for (i = 0; i < num_ifaces; i++) {
+ CU_ASSERT(odp_pktio_stop(pktio[i]) == 0);
+ CU_ASSERT(odp_pktio_close(pktio[i]) == 0);
+ }
+}
+
+static int pktio_check_event_queue_statistics_counters(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_capability_t capa;
+ odp_pktio_param_t pktio_param;
+ int ret;
+
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
+ pktio_param.out_mode = ODP_PKTOUT_MODE_QUEUE;
+
+ pktio = odp_pktio_open(iface_name[0], pool[0], &pktio_param);
+ if (pktio == ODP_PKTIO_INVALID)
+ return ODP_TEST_INACTIVE;
+
+ ret = odp_pktio_capability(pktio, &capa);
+ (void)odp_pktio_close(pktio);
+
+ if (ret < 0 || (capa.stats.pktin_queue.all_counters == 0 &&
+ capa.stats.pktout_queue.all_counters == 0))
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+static void pktio_test_event_queue_statistics_counters(void)
+{
+ odp_pktio_t pktio_rx, pktio_tx;
+ odp_pktio_t pktio[MAX_NUM_IFACES] = {
+ ODP_PKTIO_INVALID, ODP_PKTIO_INVALID
+ };
+ odp_packet_t pkt;
+ odp_packet_t tx_pkt[1000];
+ uint32_t pkt_seq[1000];
+ odp_event_t ev;
+ int i, pkts, tx_pkts;
+ odp_queue_t pktout;
+ odp_queue_t pktin;
+ uint64_t wait = odp_schedule_wait_time(ODP_TIME_MSEC_IN_NS);
+ odp_pktin_queue_stats_t rx_stats;
+ odp_pktout_queue_stats_t tx_stats;
+ odp_pktio_capability_t rx_capa, tx_capa;
+
+ for (i = 0; i < num_ifaces; i++) {
+ pktio[i] = create_pktio(i, ODP_PKTIN_MODE_SCHED,
+ ODP_PKTOUT_MODE_QUEUE);
+
+ CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
+ }
+ pktio_tx = pktio[0];
+ pktio_rx = (num_ifaces > 1) ? pktio[1] : pktio_tx;
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio_tx, &tx_capa) == 0);
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio_rx, &rx_capa) == 0);
+
+ CU_ASSERT_FATAL(odp_pktin_event_queue(pktio_rx, &pktin, 1) == 1);
+ CU_ASSERT_FATAL(odp_pktout_event_queue(pktio_tx, &pktout, 1) == 1);
+
+ CU_ASSERT_FATAL(odp_pktio_start(pktio_tx) == 0);
+ if (num_ifaces > 1)
+ CU_ASSERT_FATAL(odp_pktio_start(pktio_rx) == 0);
+
+ tx_pkts = create_packets(tx_pkt, pkt_seq, 1000, pktio_tx, pktio_rx);
+
+ CU_ASSERT(odp_pktio_stats_reset(pktio_tx) == 0);
+ if (num_ifaces > 1)
+ CU_ASSERT(odp_pktio_stats_reset(pktio_rx) == 0);
+
+ CU_ASSERT_FATAL(send_packet_events(pktout, tx_pkt, tx_pkts) == 0);
+
+ /* Receive */
+ for (i = 0, pkts = 0; i < 1000 && pkts != tx_pkts; i++) {
+ ev = odp_schedule(NULL, wait);
+ if (ev != ODP_EVENT_INVALID) {
+ if (odp_event_type(ev) == ODP_EVENT_PACKET) {
+ pkt = odp_packet_from_event(ev);
+ if (pktio_pkt_seq(pkt) != TEST_SEQ_INVALID)
+ pkts++;
+ }
+ odp_event_free(ev);
+ }
+ }
+ CU_ASSERT(pkts == tx_pkts);
+
+ CU_ASSERT_FATAL(odp_pktout_event_queue_stats(pktio_tx, pktout, &tx_stats) == 0);
+ CU_ASSERT((!tx_capa.stats.pktout_queue.counter.octets) ||
+ (tx_stats.octets >= (PKT_LEN_NORMAL * (uint64_t)pkts)));
+ CU_ASSERT((!tx_capa.stats.pktout_queue.counter.packets) ||
+ (tx_stats.packets >= (uint64_t)pkts));
+ CU_ASSERT(tx_stats.discards == 0);
+ CU_ASSERT(tx_stats.errors == 0);
+
+ CU_ASSERT_FATAL(odp_pktin_event_queue_stats(pktio_rx, pktin, &rx_stats) == 0);
+ CU_ASSERT((!rx_capa.stats.pktin_queue.counter.octets) ||
+ (rx_stats.octets >= (PKT_LEN_NORMAL * (uint64_t)pkts)));
+ CU_ASSERT((!rx_capa.stats.pktin_queue.counter.packets) ||
+ (rx_stats.packets >= (uint64_t)pkts));
+ CU_ASSERT(rx_stats.discards == 0);
+ CU_ASSERT(rx_stats.errors == 0);
+
+ /* Check that all unsupported counters are still zero */
+ if (!rx_capa.stats.pktin_queue.counter.octets)
+ CU_ASSERT(rx_stats.octets == 0);
+ if (!rx_capa.stats.pktin_queue.counter.packets)
+ CU_ASSERT(rx_stats.packets == 0);
+ if (!tx_capa.stats.pktout_queue.counter.octets)
+ CU_ASSERT(tx_stats.octets == 0);
+ if (!tx_capa.stats.pktout_queue.counter.packets)
+ CU_ASSERT(tx_stats.packets == 0);
+
+ for (i = 0; i < num_ifaces; i++) {
+ CU_ASSERT(odp_pktio_stop(pktio[i]) == 0);
+ flush_input_queue(pktio[i], ODP_PKTIN_MODE_SCHED);
+ CU_ASSERT(odp_pktio_close(pktio[i]) == 0);
+ }
+}
+
+static void pktio_test_extra_stats(void)
+{
+ odp_pktio_t pktio;
+ int num_info, num_stats, i, ret;
+
+ pktio = create_pktio(0, ODP_PKTIN_MODE_DIRECT, ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+ CU_ASSERT_FATAL(odp_pktio_start(pktio) == 0);
+
+ num_info = odp_pktio_extra_stat_info(pktio, NULL, 0);
+ CU_ASSERT_FATAL(num_info >= 0);
+
+ num_stats = odp_pktio_extra_stats(pktio, NULL, 0);
+ CU_ASSERT_FATAL(num_stats >= 0);
+
+ CU_ASSERT_FATAL(num_info == num_stats);
+
+ /* No extra statistics supported */
+ if (num_stats == 0) {
+ CU_ASSERT(odp_pktio_stop(pktio) == 0);
+ CU_ASSERT(odp_pktio_close(pktio) == 0);
+ return;
+ }
+
+ odp_pktio_extra_stat_info_t stats_info[num_stats];
+ uint64_t extra_stats[num_stats];
+
+ ret = odp_pktio_extra_stat_info(pktio, stats_info, num_stats);
+ CU_ASSERT(ret == num_stats);
+ num_info = ret;
+
+ ret = odp_pktio_extra_stats(pktio, extra_stats, num_stats);
+ CU_ASSERT(ret == num_stats);
+ CU_ASSERT_FATAL(ret <= num_stats);
+ num_stats = ret;
+
+ CU_ASSERT_FATAL(num_info == num_stats);
+
+ printf("\nPktio extra statistics\n----------------------\n");
+ for (i = 0; i < num_stats; i++)
+ printf(" %s=%" PRIu64 "\n", stats_info[i].name, extra_stats[i]);
+
+ for (i = 0; i < num_stats; i++) {
+ uint64_t stat = 0;
+
+ CU_ASSERT(odp_pktio_extra_stat_counter(pktio, i, &stat) == 0);
+ }
+
+ odp_pktio_extra_stats_print(pktio);
+
+ CU_ASSERT(odp_pktio_stop(pktio) == 0);
+ CU_ASSERT(odp_pktio_close(pktio) == 0);
+}
+
static int pktio_check_start_stop(void)
{
if (getenv("ODP_PKTIO_TEST_DISABLE_START_STOP"))
@@ -2776,7 +3254,7 @@ static void pktio_test_chksum(void (*config_fn)(odp_pktio_t, odp_pktio_t),
}
ret = create_packets_udp(pkt_tbl, pkt_seq, TX_BATCH_LEN, pktio_tx,
- pktio_rx, false);
+ pktio_rx, false, ETH_UNICAST);
CU_ASSERT(ret == TX_BATCH_LEN);
if (ret != TX_BATCH_LEN) {
for (i = 0; i < num_ifaces; i++) {
@@ -4132,6 +4610,13 @@ odp_testinfo_t pktio_suite_unsegmented[] = {
pktio_check_pktin_event_sched),
ODP_TEST_INFO_CONDITIONAL(pktio_test_statistics_counters,
pktio_check_statistics_counters),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_statistics_counters_bcast,
+ pktio_check_statistics_counters_bcast),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_queue_statistics_counters,
+ pktio_check_queue_statistics_counters),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_event_queue_statistics_counters,
+ pktio_check_event_queue_statistics_counters),
+ ODP_TEST_INFO(pktio_test_extra_stats),
ODP_TEST_INFO_CONDITIONAL(pktio_test_pktin_ts,
pktio_check_pktin_ts),
ODP_TEST_INFO_CONDITIONAL(pktio_test_pktout_ts,
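
All of the new statistics tests follow the same capability-gated pattern: a counter's value is asserted only when the capability reports it as implemented, and an unsupported counter must read back as zero. A condensed sketch (expected is a hypothetical lower bound computed by the test):

    odp_pktio_capability_t capa;
    odp_pktio_stats_t stats;

    CU_ASSERT_FATAL(odp_pktio_capability(pktio, &capa) == 0);
    CU_ASSERT_FATAL(odp_pktio_stats(pktio, &stats) == 0);

    if (capa.stats.pktio.counter.in_packets)
            CU_ASSERT(stats.in_packets >= expected);  /* supported */
    else
            CU_ASSERT(stats.in_packets == 0);         /* must stay zero */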
diff --git a/test/validation/api/scheduler/scheduler.c b/test/validation/api/scheduler/scheduler.c
index 37f3b4f0b..9c84eacd3 100644
--- a/test/validation/api/scheduler/scheduler.c
+++ b/test/validation/api/scheduler/scheduler.c
@@ -68,6 +68,7 @@ typedef struct {
int buf_count;
int buf_count_cpy;
int queues_per_prio;
+ int test_debug_print;
odp_shm_t shm_glb;
odp_shm_t shm_args;
odp_pool_t pool;
@@ -860,6 +861,44 @@ static void scheduler_test_order_ignore(void)
CU_ASSERT_FATAL(odp_pool_destroy(pool) == 0);
}
+static void scheduler_test_group_info_predef(void)
+{
+ odp_schedule_group_info_t info;
+ odp_thrmask_t thrmask;
+ odp_schedule_group_t group;
+ int thr;
+
+ thr = odp_thread_id();
+
+ group = ODP_SCHED_GROUP_ALL;
+ odp_thrmask_zero(&thrmask);
+ CU_ASSERT(odp_schedule_group_thrmask(group, &thrmask) == 0);
+ CU_ASSERT(odp_thrmask_isset(&thrmask, thr));
+ memset(&info, 0, sizeof(odp_schedule_group_info_t));
+ CU_ASSERT(odp_schedule_group_info(group, &info) == 0);
+ CU_ASSERT(odp_thrmask_equal(&info.thrmask, &thrmask));
+ printf("\n Schedule group all name: %s\n", info.name);
+
+ /* This test case runs a control thread */
+ group = ODP_SCHED_GROUP_CONTROL;
+ odp_thrmask_zero(&thrmask);
+ CU_ASSERT(odp_schedule_group_thrmask(group, &thrmask) == 0);
+ CU_ASSERT(odp_thrmask_isset(&thrmask, thr));
+ memset(&info, 0, sizeof(odp_schedule_group_info_t));
+ CU_ASSERT(odp_schedule_group_info(group, &info) == 0);
+ CU_ASSERT(odp_thrmask_equal(&info.thrmask, &thrmask));
+ printf(" Schedule group control name: %s\n", info.name);
+
+ group = ODP_SCHED_GROUP_WORKER;
+ odp_thrmask_zero(&thrmask);
+ CU_ASSERT(odp_schedule_group_thrmask(group, &thrmask) == 0);
+ CU_ASSERT(!odp_thrmask_isset(&thrmask, thr));
+ memset(&info, 0, sizeof(odp_schedule_group_info_t));
+ CU_ASSERT(odp_schedule_group_info(group, &info) == 0);
+ CU_ASSERT(odp_thrmask_equal(&info.thrmask, &thrmask));
+ printf(" Schedule group worker name: %s\n", info.name);
+}
+
static void scheduler_test_create_group(void)
{
odp_thrmask_t mask;
@@ -1787,6 +1826,9 @@ static void parallel_execute(odp_schedule_sync_t sync, int num_queues,
fill_queues(args);
+ if (globals->test_debug_print)
+ odp_schedule_print();
+
/* Create and launch worker threads */
/* Test runs also on the main thread */
@@ -2956,6 +2998,25 @@ static void scheduler_test_flow_aware(void)
CU_ASSERT(odp_pool_destroy(pool) == 0);
}
+/* Queues created but no events */
+static void scheduler_test_print(void)
+{
+ odp_schedule_print();
+}
+
+/* Queues with initial events enqueued */
+static void scheduler_test_mq_mt_prio_a_print(void)
+{
+ int prio = odp_schedule_num_prio();
+
+ globals->test_debug_print = 1;
+
+ parallel_execute(ODP_SCHED_SYNC_ATOMIC, globals->queues_per_prio, prio,
+ SCHD_ONE, DISABLE_EXCL_ATOMIC);
+
+ globals->test_debug_print = 0;
+}
+
static int scheduler_test_global_init(void)
{
odp_cpumask_t mask;
@@ -3158,6 +3219,7 @@ odp_testinfo_t scheduler_basic_suite[] = {
ODP_TEST_INFO(scheduler_test_max_queues_a),
ODP_TEST_INFO(scheduler_test_max_queues_o),
ODP_TEST_INFO(scheduler_test_order_ignore),
+ ODP_TEST_INFO(scheduler_test_group_info_predef),
ODP_TEST_INFO(scheduler_test_create_group),
ODP_TEST_INFO(scheduler_test_create_max_groups),
ODP_TEST_INFO(scheduler_test_groups),
@@ -3178,6 +3240,7 @@ odp_testinfo_t scheduler_basic_suite[] = {
/* Scheduler test suite which runs events through hundreds of queues. Queues are created once
* in suite init phase. */
odp_testinfo_t scheduler_multi_suite[] = {
+ ODP_TEST_INFO(scheduler_test_print),
ODP_TEST_INFO(scheduler_test_chaos),
ODP_TEST_INFO(scheduler_test_1q_1t_n),
ODP_TEST_INFO(scheduler_test_1q_1t_a),
@@ -3205,6 +3268,7 @@ odp_testinfo_t scheduler_multi_suite[] = {
ODP_TEST_INFO(scheduler_test_multi_mq_mt_prio_a),
ODP_TEST_INFO(scheduler_test_multi_mq_mt_prio_o),
ODP_TEST_INFO(scheduler_test_multi_1q_mt_a_excl),
+ ODP_TEST_INFO(scheduler_test_mq_mt_prio_a_print),
ODP_TEST_INFO_NULL
};
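
scheduler_test_group_info_predef() runs the same query sequence for each predefined group; condensed, the per-group check looks like this (shown for the worker group, the one the calling control thread must not appear in):

    odp_schedule_group_info_t info;
    odp_thrmask_t mask;
    int thr = odp_thread_id();

    odp_thrmask_zero(&mask);
    CU_ASSERT(odp_schedule_group_thrmask(ODP_SCHED_GROUP_WORKER, &mask) == 0);
    CU_ASSERT(!odp_thrmask_isset(&mask, thr));  /* control thread is not a worker */

    CU_ASSERT(odp_schedule_group_info(ODP_SCHED_GROUP_WORKER, &info) == 0);
    CU_ASSERT(odp_thrmask_equal(&info.thrmask, &mask));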
diff --git a/test/validation/api/timer/timer.c b/test/validation/api/timer/timer.c
index 177f6f82b..0716b7999 100644
--- a/test/validation/api/timer/timer.c
+++ b/test/validation/api/timer/timer.c
@@ -554,6 +554,94 @@ static void timer_pool_max_res(void)
CU_ASSERT(odp_pool_destroy(pool) == 0);
}
+static void timer_pool_tick_info_run(odp_timer_clk_src_t clk_src)
+{
+ odp_timer_capability_t capa;
+ odp_timer_pool_param_t tp_param;
+ odp_timer_pool_t tp;
+ odp_timer_pool_info_t info;
+ uint64_t ticks_per_sec;
+ double tick_hz, tick_nsec, tick_to_nsec, tick_low;
+
+ memset(&capa, 0, sizeof(capa));
+ CU_ASSERT_FATAL(odp_timer_capability(clk_src, &capa) == 0);
+
+ /* Highest resolution */
+ memset(&tp_param, 0, sizeof(odp_timer_pool_param_t));
+ tp_param.res_hz = capa.max_res.res_hz;
+ tp_param.min_tmo = capa.max_res.min_tmo;
+ tp_param.max_tmo = capa.max_res.max_tmo;
+ tp_param.num_timers = 100;
+ tp_param.priv = 0;
+ tp_param.clk_src = clk_src;
+
+ tp = odp_timer_pool_create("tick_info_tp", &tp_param);
+ CU_ASSERT_FATAL(tp != ODP_TIMER_POOL_INVALID);
+
+ odp_timer_pool_start();
+
+ memset(&info, 0, sizeof(odp_timer_pool_info_t));
+ CU_ASSERT_FATAL(odp_timer_pool_info(tp, &info) == 0);
+
+ /* Tick frequency in hertz. Allow 1 hz rounding error between odp_timer_ns_to_tick()
+ * and tick_info. */
+ ticks_per_sec = odp_timer_ns_to_tick(tp, ODP_TIME_SEC_IN_NS);
+ tick_hz = odp_fract_u64_to_dbl(&info.tick_info.freq);
+
+ CU_ASSERT(((double)(ticks_per_sec - 1)) <= tick_hz);
+ CU_ASSERT(((double)(ticks_per_sec + 1)) >= tick_hz);
+
+ printf("\nClock source %i\n", clk_src);
+ printf(" Ticks per second: %" PRIu64 "\n", ticks_per_sec);
+ printf(" Tick info freq: %" PRIu64 " + %" PRIu64 " / %" PRIu64 "\n",
+ info.tick_info.freq.integer,
+ info.tick_info.freq.numer,
+ info.tick_info.freq.denom);
+ printf(" Tick info freq dbl: %f\n", tick_hz);
+
+ /* One tick in nsec. For better resolution, convert 1000 ticks (and use double)
+ * instead of one tick. Allow 1 nsec rounding error between odp_timer_tick_to_ns()
+ * and tick_info. */
+ tick_to_nsec = odp_timer_tick_to_ns(tp, 1000) / 1000.0;
+ tick_nsec = odp_fract_u64_to_dbl(&info.tick_info.nsec);
+ tick_low = tick_to_nsec - 1.0;
+ if (tick_to_nsec < 1.0)
+ tick_low = 0.0;
+
+ CU_ASSERT(tick_low <= tick_nsec);
+ CU_ASSERT((tick_to_nsec + 1.0) >= tick_nsec);
+
+ printf(" Tick in nsec: %f\n", tick_to_nsec);
+ printf(" Tick info nsec: %" PRIu64 " + %" PRIu64 " / %" PRIu64 "\n",
+ info.tick_info.nsec.integer,
+ info.tick_info.nsec.numer,
+ info.tick_info.nsec.denom);
+ printf(" Tick info nsec dbl: %f\n", tick_nsec);
+
+ /* One tick in source clock cycles. Depending on clock source it may be zero.
+ * Print the values to have a reference to the fields. */
+ printf(" Tick info clk cycles: %" PRIu64 " + %" PRIu64 " / %" PRIu64 "\n",
+ info.tick_info.clk_cycle.integer,
+ info.tick_info.clk_cycle.numer,
+ info.tick_info.clk_cycle.denom);
+
+ odp_timer_pool_destroy(tp);
+}
+
+static void timer_pool_tick_info(void)
+{
+ odp_timer_clk_src_t clk_src;
+ int i;
+
+ for (i = 0; i < ODP_CLOCK_NUM_SRC; i++) {
+ clk_src = ODP_CLOCK_SRC_0 + i;
+ if (global_mem->clk_supported[i]) {
+ ODPH_DBG("\nTesting clock source: %i\n", clk_src);
+ timer_pool_tick_info_run(clk_src);
+ }
+ }
+}
+
static void timer_test_event_type(odp_queue_type_t queue_type,
odp_event_type_t event_type)
{
@@ -1734,6 +1822,7 @@ odp_testinfo_t timer_suite[] = {
ODP_TEST_INFO(timer_test_timeout_pool_free),
ODP_TEST_INFO(timer_pool_create_destroy),
ODP_TEST_INFO(timer_pool_max_res),
+ ODP_TEST_INFO(timer_pool_tick_info),
ODP_TEST_INFO_CONDITIONAL(timer_test_tmo_event_plain,
check_plain_queue_support),
ODP_TEST_INFO_CONDITIONAL(timer_test_tmo_event_sched,
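
The tick_info fields added above are odp_fract_u64_t values (integer + numer/denom); the test cross-checks them against the tick conversion functions with a one-unit rounding allowance. The frequency check, condensed from timer_pool_tick_info_run():

    odp_timer_pool_info_t info;
    uint64_t ticks_per_sec;
    double tick_hz;

    CU_ASSERT_FATAL(odp_timer_pool_info(tp, &info) == 0);

    ticks_per_sec = odp_timer_ns_to_tick(tp, ODP_TIME_SEC_IN_NS);
    tick_hz = odp_fract_u64_to_dbl(&info.tick_info.freq);

    /* Allow 1 Hz rounding difference between the two views */
    CU_ASSERT((double)(ticks_per_sec - 1) <= tick_hz);
    CU_ASSERT((double)(ticks_per_sec + 1) >= tick_hz);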