Diffstat (limited to 'test')
-rw-r--r--  test/common/odp_cunit_common.c                  |  45
-rw-r--r--  test/common/odp_cunit_common.h                  |   4
-rw-r--r--  test/performance/odp_ipsec.c                    |   7
-rw-r--r--  test/performance/odp_sched_pktio.c              |   6
-rw-r--r--  test/performance/odp_timer_perf.c               |   6
-rw-r--r--  test/validation/api/ipsec/ipsec.c               |   9
-rw-r--r--  test/validation/api/ipsec/ipsec.h               |   2
-rw-r--r--  test/validation/api/ipsec/ipsec_test_in.c       | 322
-rw-r--r--  test/validation/api/ipsec/ipsec_test_out.c      |  40
-rw-r--r--  test/validation/api/packet/packet.c             |  32
-rw-r--r--  test/validation/api/pktio/pktio.c               | 165
-rw-r--r--  test/validation/api/scheduler/scheduler.c       | 124
-rw-r--r--  test/validation/api/time/time.c                 |  88
-rw-r--r--  test/validation/api/timer/timer.c               | 122
-rw-r--r--  test/validation/api/traffic_mngr/traffic_mngr.c | 277
15 files changed, 897 insertions, 352 deletions
diff --git a/test/common/odp_cunit_common.c b/test/common/odp_cunit_common.c
index 62418c356..f5c437344 100644
--- a/test/common/odp_cunit_common.c
+++ b/test/common/odp_cunit_common.c
@@ -259,7 +259,8 @@ int odp_cunit_print_inactive(void)
continue;
if (first) {
- printf("\n\n Inactive tests:\n");
+ printf("\n\nSuite: %s\n", sinfo->name);
+ printf(" Inactive tests:\n");
first = 0;
}
@@ -269,6 +270,34 @@ int odp_cunit_print_inactive(void)
return 0;
}
+int odp_cunit_set_inactive(void)
+{
+ CU_pSuite cur_suite;
+ CU_pTest ptest;
+ odp_suiteinfo_t *sinfo;
+ odp_testinfo_t *tinfo;
+
+ cur_suite = CU_get_current_suite();
+ if (cur_suite == NULL)
+ return -1;
+
+ sinfo = cunit_get_suite_info(cur_suite->pName);
+ if (sinfo == NULL)
+ return -1;
+
+ for (tinfo = sinfo->testinfo_tbl; tinfo->name; tinfo++) {
+ ptest = CU_get_test_by_name(tinfo->name, cur_suite);
+ if (ptest == NULL) {
+ fprintf(stderr, "%s: test not found: %s\n",
+ __func__, tinfo->name);
+ return -1;
+ }
+ CU_set_test_active(ptest, false);
+ }
+
+ return 0;
+}
+
static int default_term_func(void)
{
return odp_cunit_print_inactive();
@@ -526,3 +555,17 @@ int odp_cunit_ret(int val)
{
return allow_skip_result ? 0 : val;
}
+
+int odp_cunit_ci_skip(const char *test_name)
+{
+ const char *ci_skip;
+ const char *found;
+
+ ci_skip = getenv("CI_SKIP");
+ if (ci_skip == NULL)
+ return 0;
+
+ found = strstr(ci_skip, test_name);
+
+ return found != NULL;
+}
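
A minimal usage sketch of the new CI skip helper (example_test/example_check are hypothetical names; the pktio suite later in this patch uses the same pattern, and odp_cunit_set_inactive() is used similarly from the traffic_mngr suite init). The CI is assumed to export CI_SKIP as a list of test case names before running the test binary.

/* Sketch only: in a test source that includes odp_cunit_common.h.
 * CI would run e.g.: CI_SKIP="example_test other_test" ./test_binary
 */
static void example_test(void)
{
	CU_ASSERT(1);
}

static int example_check(void)
{
	/* Deactivate the test when its name is listed in CI_SKIP */
	if (odp_cunit_ci_skip("example_test"))
		return ODP_TEST_INACTIVE;

	return ODP_TEST_ACTIVE;
}

odp_testinfo_t example_suite[] = {
	ODP_TEST_INFO_CONDITIONAL(example_test, example_check),
	ODP_TEST_INFO_NULL
};
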
diff --git a/test/common/odp_cunit_common.h b/test/common/odp_cunit_common.h
index 55e52ce1c..3e06ba10c 100644
--- a/test/common/odp_cunit_common.h
+++ b/test/common/odp_cunit_common.h
@@ -105,6 +105,10 @@ void odp_cunit_register_global_term(int (*func_term_ptr)(odp_instance_t inst));
int odp_cunit_ret(int val);
int odp_cunit_print_inactive(void);
+int odp_cunit_set_inactive(void);
+
+/* Check the CI_SKIP environment variable to see whether CI should skip the test case */
+int odp_cunit_ci_skip(const char *test_name);
/*
* Wrapper for CU_ASSERT_FATAL implementation to show the compiler that
diff --git a/test/performance/odp_ipsec.c b/test/performance/odp_ipsec.c
index 2e283a63b..05a22ff0d 100644
--- a/test/performance/odp_ipsec.c
+++ b/test/performance/odp_ipsec.c
@@ -866,7 +866,12 @@ run_measure_one_config(ipsec_args_t *cargs,
odp_ipsec_status_t status;
while (1) {
- odp_event_t event = odp_queue_deq(out_queue);
+ odp_event_t event;
+
+ if (cargs->poll)
+ event = odp_queue_deq(out_queue);
+ else
+ event = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
if (event != ODP_EVENT_INVALID &&
odp_event_type(event) == ODP_EVENT_IPSEC_STATUS &&
diff --git a/test/performance/odp_sched_pktio.c b/test/performance/odp_sched_pktio.c
index c752e2a91..cbdbdf4aa 100644
--- a/test/performance/odp_sched_pktio.c
+++ b/test/performance/odp_sched_pktio.c
@@ -490,7 +490,7 @@ static int worker_thread_timers(void *arg)
ret = odp_timer_set_rel(timer, tick, NULL);
if (odp_unlikely(ret != ODP_TIMER_SUCCESS &&
- ret != ODP_TIMER_NOEVENT)) {
+ ret != ODP_TIMER_FAIL)) {
/* Tick period is too short or long. Normally,
* reset either succeeds or fails due to timer
* expiration, in which case timeout event will
@@ -1253,7 +1253,7 @@ static int create_timers(test_global_t *test_global)
if (test_global->opt.timeout_us == 0)
return 0;
- if (odp_timer_capability(ODP_CLOCK_CPU, &timer_capa)) {
+ if (odp_timer_capability(ODP_CLOCK_DEFAULT, &timer_capa)) {
printf("Timer capa failed\n");
return -1;
}
@@ -1272,7 +1272,7 @@ static int create_timers(test_global_t *test_global)
timer_param.min_tmo = timeout_ns;
timer_param.max_tmo = timeout_ns;
timer_param.num_timers = num_timer;
- timer_param.clk_src = ODP_CLOCK_CPU;
+ timer_param.clk_src = ODP_CLOCK_DEFAULT;
timer_pool = odp_timer_pool_create("sched_pktio_timer", &timer_param);
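
The hunk above replaces the removed ODP_TIMER_NOEVENT code with ODP_TIMER_FAIL. A minimal sketch of the renamed return-code handling, assuming the timer, tick value and timeout event already exist in the caller:

/* Sketch: arm a relative timer and handle the renamed return codes */
static void arm_timer_example(odp_timer_t timer, uint64_t tick, odp_event_t ev)
{
	int ret = odp_timer_set_rel(timer, tick, &ev);

	if (ret == ODP_TIMER_TOO_NEAR)
		ODPH_DBG("Timer set failed. Too near.\n");     /* below min_tmo */
	else if (ret == ODP_TIMER_TOO_FAR)
		ODPH_DBG("Timer set failed. Too far.\n");      /* beyond max_tmo */
	else if (ret == ODP_TIMER_FAIL)
		ODPH_DBG("Timer set failed, e.g. timer already expired.\n");
}
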
diff --git a/test/performance/odp_timer_perf.c b/test/performance/odp_timer_perf.c
index 4b8318557..6989d0d4c 100644
--- a/test/performance/odp_timer_perf.c
+++ b/test/performance/odp_timer_perf.c
@@ -307,14 +307,14 @@ static int create_timer_pools(test_global_t *global)
global->timer[i][j] = ODP_TIMER_INVALID;
}
- if (odp_timer_capability(ODP_CLOCK_CPU, &timer_capa)) {
+ if (odp_timer_capability(ODP_CLOCK_DEFAULT, &timer_capa)) {
ODPH_ERR("Timer capability failed\n");
return -1;
}
memset(&timer_res_capa, 0, sizeof(odp_timer_res_capability_t));
timer_res_capa.res_ns = res_ns;
- if (odp_timer_res_capability(ODP_CLOCK_CPU, &timer_res_capa)) {
+ if (odp_timer_res_capability(ODP_CLOCK_DEFAULT, &timer_res_capa)) {
ODPH_ERR("Timer resolution capability failed\n");
return -1;
}
@@ -352,7 +352,7 @@ static int create_timer_pools(test_global_t *global)
timer_pool_param.max_tmo = max_tmo_ns;
timer_pool_param.num_timers = num_timer;
timer_pool_param.priv = priv;
- timer_pool_param.clk_src = ODP_CLOCK_CPU;
+ timer_pool_param.clk_src = ODP_CLOCK_DEFAULT;
odp_pool_param_init(&pool_param);
pool_param.type = ODP_POOL_TIMEOUT;
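
ODP_CLOCK_CPU is replaced by ODP_CLOCK_DEFAULT throughout the performance tests. A short sketch of the updated capability-check and pool-creation pattern; the pool name and timer count are placeholders:

static int create_default_clk_timer_pool(void)
{
	odp_timer_capability_t capa;
	odp_timer_pool_param_t tp_param;
	odp_timer_pool_t tp;

	memset(&capa, 0, sizeof(capa));
	if (odp_timer_capability(ODP_CLOCK_DEFAULT, &capa))
		return -1;

	memset(&tp_param, 0, sizeof(tp_param));
	tp_param.res_ns     = capa.max_res.res_ns;
	tp_param.min_tmo    = capa.max_res.min_tmo;
	tp_param.max_tmo    = capa.max_res.max_tmo;
	tp_param.num_timers = 100; /* placeholder */
	tp_param.clk_src    = ODP_CLOCK_DEFAULT;

	tp = odp_timer_pool_create("example_tp", &tp_param);
	if (tp == ODP_TIMER_POOL_INVALID)
		return -1;

	odp_timer_pool_start();
	return 0;
}
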
diff --git a/test/validation/api/ipsec/ipsec.c b/test/validation/api/ipsec/ipsec.c
index a76bac973..981bc9155 100644
--- a/test/validation/api/ipsec/ipsec.c
+++ b/test/validation/api/ipsec/ipsec.c
@@ -217,20 +217,17 @@ int ipsec_check_esp_aes_gcm_256(void)
int ipsec_check_ah_aes_gmac_128(void)
{
- return ipsec_check_esp(ODP_CIPHER_ALG_NULL, 0,
- ODP_AUTH_ALG_AES_GMAC, 128);
+ return ipsec_check_ah(ODP_AUTH_ALG_AES_GMAC, 128);
}
int ipsec_check_ah_aes_gmac_192(void)
{
- return ipsec_check_esp(ODP_CIPHER_ALG_NULL, 0,
- ODP_AUTH_ALG_AES_GMAC, 192);
+ return ipsec_check_ah(ODP_AUTH_ALG_AES_GMAC, 192);
}
int ipsec_check_ah_aes_gmac_256(void)
{
- return ipsec_check_esp(ODP_CIPHER_ALG_NULL, 0,
- ODP_AUTH_ALG_AES_GMAC, 256);
+ return ipsec_check_ah(ODP_AUTH_ALG_AES_GMAC, 256);
}
int ipsec_check_esp_null_aes_gmac_128(void)
diff --git a/test/validation/api/ipsec/ipsec.h b/test/validation/api/ipsec/ipsec.h
index 161835a33..b899fce48 100644
--- a/test/validation/api/ipsec/ipsec.h
+++ b/test/validation/api/ipsec/ipsec.h
@@ -16,6 +16,8 @@
((c) << 8) | \
((d) << 0))
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
/* test arrays: */
extern odp_testinfo_t ipsec_in_suite[];
extern odp_testinfo_t ipsec_out_suite[];
diff --git a/test/validation/api/ipsec/ipsec_test_in.c b/test/validation/api/ipsec/ipsec_test_in.c
index 4eafed6a9..08512c8fb 100644
--- a/test/validation/api/ipsec/ipsec_test_in.c
+++ b/test/validation/api/ipsec/ipsec_test_in.c
@@ -1,10 +1,13 @@
/* Copyright (c) 2017-2018, Linaro Limited
* Copyright (c) 2020-2021, Marvell
+ * Copyright (c) 2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
+#include <odp/helper/odph_api.h>
+
#include "ipsec.h"
#include "reass_test_vectors.h"
@@ -1764,132 +1767,109 @@ static void test_ipsec_sa_print(void)
ipsec_sa_destroy(in_sa);
}
-static void test_in_ipv4_esp_reass_success_two_frags(odp_ipsec_sa_t out_sa,
- odp_ipsec_sa_t in_sa)
+static void test_multi_out_in(odp_ipsec_sa_t out_sa,
+ odp_ipsec_sa_t in_sa,
+ uint8_t tunnel_ip_ver,
+ int num_input_packets,
+ ipsec_test_packet *input_packets[],
+ ipsec_test_packet *result_packet)
{
- ipsec_test_part test_out[MAX_FRAGS], test_in[MAX_FRAGS];
+ uint8_t ver_ihl = result_packet->data[result_packet->l3_offset];
+ odp_bool_t is_result_ipv6 = (ODPH_IPV4HDR_VER(ver_ihl) == ODPH_IPV6);
int i;
- memset(test_in, 0, sizeof(test_in));
-
- CU_ASSERT(MAX_FRAGS >= 2);
-
- part_prep_esp(test_out, 2, false);
-
- test_out[0].pkt_in = &pkt_ipv4_udp_p1_f1;
- test_out[1].pkt_in = &pkt_ipv4_udp_p1_f2;
-
- part_prep_plain(&test_in[1], 1, false, true);
- test_in[1].out[0].pkt_res = &pkt_ipv4_udp_p1;
-
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < num_input_packets; i++) {
+ ipsec_test_part test_out;
+ ipsec_test_part test_in;
ipsec_test_packet test_pkt;
odp_packet_t pkt;
- CU_ASSERT_EQUAL(ipsec_check_out(&test_out[i], out_sa, &pkt), 1);
-
+ /*
+ * Convert plain text packet to IPsec packet through
+ * outbound IPsec processing.
+ */
+ part_prep_esp(&test_out, 1, tunnel_ip_ver == ODPH_IPV6);
+ test_out.pkt_in = input_packets[i];
+ CU_ASSERT_EQUAL(ipsec_check_out(&test_out, out_sa, &pkt), 1);
+
+ /*
+ * Perform inbound IPsec processing for the IPsec packet.
+ * Expect result packet only for the last packet.
+ */
+ memset(&test_in, 0, sizeof(test_in));
+ if (i == num_input_packets - 1) {
+ part_prep_plain(&test_in, 1, is_result_ipv6, true);
+ test_in.out[0].pkt_res = result_packet;
+ }
ipsec_test_packet_from_pkt(&test_pkt, &pkt);
- test_in[i].pkt_in = &test_pkt;
+ test_in.pkt_in = &test_pkt;
- ipsec_check_in_one(&test_in[i], in_sa);
+ ipsec_check_in_one(&test_in, in_sa);
}
}
-static void test_in_ipv4_esp_reass_success_four_frags(odp_ipsec_sa_t out_sa,
- odp_ipsec_sa_t in_sa)
+static void test_in_ipv4_esp_reass_success_two_frags(odp_ipsec_sa_t out_sa,
+ odp_ipsec_sa_t in_sa)
{
- ipsec_test_part test_out[MAX_FRAGS], test_in[MAX_FRAGS];
- int i;
-
- memset(test_in, 0, sizeof(test_in));
-
- CU_ASSERT(MAX_FRAGS >= 4);
-
- part_prep_esp(test_out, 4, false);
-
- test_out[0].pkt_in = &pkt_ipv4_udp_p2_f1;
- test_out[1].pkt_in = &pkt_ipv4_udp_p2_f2;
- test_out[2].pkt_in = &pkt_ipv4_udp_p2_f3;
- test_out[3].pkt_in = &pkt_ipv4_udp_p2_f4;
-
- part_prep_plain(&test_in[3], 1, false, true);
- test_in[3].out[0].pkt_res = &pkt_ipv4_udp_p2;
-
- for (i = 0; i < 4; i++) {
- ipsec_test_packet test_pkt;
- odp_packet_t pkt;
+ ipsec_test_packet *input_packets[] = {
+ &pkt_ipv4_udp_p1_f1,
+ &pkt_ipv4_udp_p1_f2,
+ };
+ ipsec_test_packet *result_packet = &pkt_ipv4_udp_p1;
- CU_ASSERT_EQUAL(ipsec_check_out(&test_out[i], out_sa, &pkt), 1);
+ test_multi_out_in(out_sa, in_sa, ODPH_IPV4,
+ ARRAY_SIZE(input_packets),
+ input_packets,
+ result_packet);
+}
- ipsec_test_packet_from_pkt(&test_pkt, &pkt);
- test_in[i].pkt_in = &test_pkt;
+static void test_in_ipv4_esp_reass_success_four_frags(odp_ipsec_sa_t out_sa,
+ odp_ipsec_sa_t in_sa)
+{
+ ipsec_test_packet *input_packets[] = {
+ &pkt_ipv4_udp_p2_f1,
+ &pkt_ipv4_udp_p2_f2,
+ &pkt_ipv4_udp_p2_f3,
+ &pkt_ipv4_udp_p2_f4,
+ };
+ ipsec_test_packet *result_packet = &pkt_ipv4_udp_p2;
- ipsec_check_in_one(&test_in[i], in_sa);
- }
+ test_multi_out_in(out_sa, in_sa, ODPH_IPV4,
+ ARRAY_SIZE(input_packets),
+ input_packets,
+ result_packet);
}
static void test_in_ipv4_esp_reass_success_two_frags_ooo(odp_ipsec_sa_t out_sa,
odp_ipsec_sa_t in_sa)
{
- ipsec_test_part test_out[MAX_FRAGS], test_in[MAX_FRAGS];
- int i;
-
- memset(test_in, 0, sizeof(test_in));
-
- CU_ASSERT(MAX_FRAGS >= 2);
-
- part_prep_esp(test_out, 2, false);
-
- test_out[0].pkt_in = &pkt_ipv4_udp_p1_f2;
- test_out[1].pkt_in = &pkt_ipv4_udp_p1_f1;
-
- part_prep_plain(&test_in[1], 1, false, true);
- test_in[1].out[0].pkt_res = &pkt_ipv4_udp_p1;
-
- for (i = 0; i < 2; i++) {
- ipsec_test_packet test_pkt;
- odp_packet_t pkt;
-
- CU_ASSERT_EQUAL(ipsec_check_out(&test_out[i], out_sa, &pkt), 1);
-
- ipsec_test_packet_from_pkt(&test_pkt, &pkt);
- test_in[i].pkt_in = &test_pkt;
+ ipsec_test_packet *input_packets[] = {
+ &pkt_ipv4_udp_p1_f2,
+ &pkt_ipv4_udp_p1_f1,
+ };
+ ipsec_test_packet *result_packet = &pkt_ipv4_udp_p1;
- ipsec_check_in_one(&test_in[i], in_sa);
- }
+ test_multi_out_in(out_sa, in_sa, ODPH_IPV4,
+ ARRAY_SIZE(input_packets),
+ input_packets,
+ result_packet);
}
static void test_in_ipv4_esp_reass_success_four_frags_ooo(odp_ipsec_sa_t out_sa,
odp_ipsec_sa_t in_sa)
{
- ipsec_test_part test_out[MAX_FRAGS], test_in[MAX_FRAGS];
- int i;
-
- memset(test_in, 0, sizeof(test_in));
-
- CU_ASSERT(MAX_FRAGS >= 4);
-
- part_prep_esp(test_out, 4, false);
-
- test_out[0].pkt_in = &pkt_ipv4_udp_p2_f4;
- test_out[1].pkt_in = &pkt_ipv4_udp_p2_f1;
- test_out[2].pkt_in = &pkt_ipv4_udp_p2_f2;
- test_out[3].pkt_in = &pkt_ipv4_udp_p2_f3;
-
- part_prep_plain(&test_in[3], 1, false, true);
- test_in[3].out[0].pkt_res = &pkt_ipv4_udp_p2;
-
- for (i = 0; i < 4; i++) {
- ipsec_test_packet test_pkt;
- odp_packet_t pkt;
-
- CU_ASSERT_EQUAL(ipsec_check_out(&test_out[i], out_sa, &pkt), 1);
-
- ipsec_test_packet_from_pkt(&test_pkt, &pkt);
- test_in[i].pkt_in = &test_pkt;
+ ipsec_test_packet *input_packets[] = {
+ &pkt_ipv4_udp_p2_f4,
+ &pkt_ipv4_udp_p2_f1,
+ &pkt_ipv4_udp_p2_f2,
+ &pkt_ipv4_udp_p2_f3,
+ };
+ ipsec_test_packet *result_packet = &pkt_ipv4_udp_p2;
- ipsec_check_in_one(&test_in[i], in_sa);
- }
+ test_multi_out_in(out_sa, in_sa, ODPH_IPV4,
+ ARRAY_SIZE(input_packets),
+ input_packets,
+ result_packet);
}
static void test_in_ipv4_esp_reass_incomp_missing(odp_ipsec_sa_t out_sa,
@@ -2024,129 +2004,65 @@ static void test_in_ipv4_esp_reass_incomp(void)
static void test_in_ipv6_esp_reass_success_two_frags(odp_ipsec_sa_t out_sa,
odp_ipsec_sa_t in_sa)
{
- ipsec_test_part test_out[MAX_FRAGS], test_in[MAX_FRAGS];
- int i;
-
- memset(test_in, 0, sizeof(test_in));
-
- CU_ASSERT(MAX_FRAGS >= 2);
-
- part_prep_esp(test_out, 2, true);
-
- test_out[0].pkt_in = &pkt_ipv6_udp_p1_f1;
- test_out[1].pkt_in = &pkt_ipv6_udp_p1_f2;
-
- part_prep_plain(&test_in[1], 1, true, true);
- test_in[1].out[0].pkt_res = &pkt_ipv6_udp_p1;
-
- for (i = 0; i < 2; i++) {
- ipsec_test_packet test_pkt;
- odp_packet_t pkt;
-
- CU_ASSERT_EQUAL(ipsec_check_out(&test_out[i], out_sa, &pkt), 1);
-
- ipsec_test_packet_from_pkt(&test_pkt, &pkt);
- test_in[i].pkt_in = &test_pkt;
+ ipsec_test_packet *input_packets[] = {
+ &pkt_ipv6_udp_p1_f1,
+ &pkt_ipv6_udp_p1_f2,
+ };
+ ipsec_test_packet *result_packet = &pkt_ipv6_udp_p1;
- ipsec_check_in_one(&test_in[i], in_sa);
- }
+ test_multi_out_in(out_sa, in_sa, ODPH_IPV6,
+ ARRAY_SIZE(input_packets),
+ input_packets,
+ result_packet);
}
static void test_in_ipv6_esp_reass_success_four_frags(odp_ipsec_sa_t out_sa,
odp_ipsec_sa_t in_sa)
{
- ipsec_test_part test_out[MAX_FRAGS], test_in[MAX_FRAGS];
- int i;
-
- memset(test_in, 0, sizeof(test_in));
-
- CU_ASSERT(MAX_FRAGS >= 4);
-
- part_prep_esp(test_out, 4, true);
-
- test_out[0].pkt_in = &pkt_ipv6_udp_p2_f1;
- test_out[1].pkt_in = &pkt_ipv6_udp_p2_f2;
- test_out[2].pkt_in = &pkt_ipv6_udp_p2_f3;
- test_out[3].pkt_in = &pkt_ipv6_udp_p2_f4;
-
- part_prep_plain(&test_in[3], 1, true, true);
- test_in[3].out[0].pkt_res = &pkt_ipv6_udp_p2;
-
- for (i = 0; i < 4; i++) {
- ipsec_test_packet test_pkt;
- odp_packet_t pkt;
-
- CU_ASSERT_EQUAL(ipsec_check_out(&test_out[i], out_sa, &pkt), 1);
-
- ipsec_test_packet_from_pkt(&test_pkt, &pkt);
- test_in[i].pkt_in = &test_pkt;
+ ipsec_test_packet *input_packets[] = {
+ &pkt_ipv6_udp_p2_f1,
+ &pkt_ipv6_udp_p2_f2,
+ &pkt_ipv6_udp_p2_f3,
+ &pkt_ipv6_udp_p2_f4,
+ };
+ ipsec_test_packet *result_packet = &pkt_ipv6_udp_p2;
- ipsec_check_in_one(&test_in[i], in_sa);
- }
+ test_multi_out_in(out_sa, in_sa, ODPH_IPV6,
+ ARRAY_SIZE(input_packets),
+ input_packets,
+ result_packet);
}
static void test_in_ipv6_esp_reass_success_two_frags_ooo(odp_ipsec_sa_t out_sa,
odp_ipsec_sa_t in_sa)
{
- ipsec_test_part test_out[MAX_FRAGS], test_in[MAX_FRAGS];
- int i;
-
- memset(test_in, 0, sizeof(test_in));
-
- CU_ASSERT(MAX_FRAGS >= 2);
-
- part_prep_esp(test_out, 2, true);
-
- test_out[0].pkt_in = &pkt_ipv6_udp_p1_f2;
- test_out[1].pkt_in = &pkt_ipv6_udp_p1_f1;
-
- part_prep_plain(&test_in[1], 1, true, true);
- test_in[1].out[0].pkt_res = &pkt_ipv6_udp_p1;
-
- for (i = 0; i < 2; i++) {
- ipsec_test_packet test_pkt;
- odp_packet_t pkt;
-
- CU_ASSERT_EQUAL(ipsec_check_out(&test_out[i], out_sa, &pkt), 1);
-
- ipsec_test_packet_from_pkt(&test_pkt, &pkt);
- test_in[i].pkt_in = &test_pkt;
+ ipsec_test_packet *input_packets[] = {
+ &pkt_ipv6_udp_p1_f2,
+ &pkt_ipv6_udp_p1_f1,
+ };
+ ipsec_test_packet *result_packet = &pkt_ipv6_udp_p1;
- ipsec_check_in_one(&test_in[i], in_sa);
- }
+ test_multi_out_in(out_sa, in_sa, ODPH_IPV6,
+ ARRAY_SIZE(input_packets),
+ input_packets,
+ result_packet);
}
static void test_in_ipv6_esp_reass_success_four_frags_ooo(odp_ipsec_sa_t out_sa,
odp_ipsec_sa_t in_sa)
{
- ipsec_test_part test_out[MAX_FRAGS], test_in[MAX_FRAGS];
- int i;
-
- memset(test_in, 0, sizeof(test_in));
-
- CU_ASSERT(MAX_FRAGS >= 4);
-
- part_prep_esp(test_out, 4, true);
-
- test_out[1].pkt_in = &pkt_ipv6_udp_p2_f2;
- test_out[2].pkt_in = &pkt_ipv6_udp_p2_f3;
- test_out[3].pkt_in = &pkt_ipv6_udp_p2_f4;
- test_out[0].pkt_in = &pkt_ipv6_udp_p2_f1;
-
- part_prep_plain(&test_in[3], 1, true, true);
- test_in[3].out[0].pkt_res = &pkt_ipv6_udp_p2;
-
- for (i = 0; i < 4; i++) {
- ipsec_test_packet test_pkt;
- odp_packet_t pkt;
-
- CU_ASSERT_EQUAL(ipsec_check_out(&test_out[i], out_sa, &pkt), 1);
-
- ipsec_test_packet_from_pkt(&test_pkt, &pkt);
- test_in[i].pkt_in = &test_pkt;
+ ipsec_test_packet *input_packets[] = {
+ &pkt_ipv6_udp_p2_f2,
+ &pkt_ipv6_udp_p2_f3,
+ &pkt_ipv6_udp_p2_f4,
+ &pkt_ipv6_udp_p2_f1,
+ };
+ ipsec_test_packet *result_packet = &pkt_ipv6_udp_p2;
- ipsec_check_in_one(&test_in[i], in_sa);
- }
+ test_multi_out_in(out_sa, in_sa, ODPH_IPV6,
+ ARRAY_SIZE(input_packets),
+ input_packets,
+ result_packet);
}
static void test_in_ipv6_esp_reass_incomp_missing(odp_ipsec_sa_t out_sa,
diff --git a/test/validation/api/ipsec/ipsec_test_out.c b/test/validation/api/ipsec/ipsec_test_out.c
index b48dd0d6c..7c1121579 100644
--- a/test/validation/api/ipsec/ipsec_test_out.c
+++ b/test/validation/api/ipsec/ipsec_test_out.c
@@ -119,8 +119,6 @@ static struct cipher_auth_comb_param cipher_auth_comb[] = {
},
};
-#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
-
static void test_out_ipv4_ah_sha256(void)
{
odp_ipsec_sa_param_t param;
@@ -361,17 +359,19 @@ static void test_ipsec_stats_zero_assert(odp_ipsec_stats_t *stats)
CU_ASSERT_EQUAL(stats->mtu_err, 0);
CU_ASSERT_EQUAL(stats->hard_exp_bytes_err, 0);
CU_ASSERT_EQUAL(stats->hard_exp_pkts_err, 0);
+ CU_ASSERT_EQUAL(stats->success_bytes, 0);
}
static void test_ipsec_stats_test_assert(odp_ipsec_stats_t *stats,
- enum ipsec_test_stats test)
+ enum ipsec_test_stats test,
+ uint64_t succ_bytes)
{
if (test == IPSEC_TEST_STATS_SUCCESS) {
- /* Braces needed by CU macro */
CU_ASSERT_EQUAL(stats->success, 1);
+ CU_ASSERT(stats->success_bytes >= succ_bytes);
} else {
- /* Braces needed by CU macro */
CU_ASSERT_EQUAL(stats->success, 0);
+ CU_ASSERT_EQUAL(stats->success_bytes, 0);
}
if (test == IPSEC_TEST_STATS_PROTO_ERR) {
@@ -608,20 +608,36 @@ static void test_out_in_common(const ipsec_test_flags *flags,
ipsec_check_out_in_one(&test_out, &test_in, sa_out, sa_in, flags);
- if (flags->stats == IPSEC_TEST_STATS_SUCCESS) {
- CU_ASSERT_EQUAL(odp_ipsec_stats(sa_in, &stats), 0);
- test_ipsec_stats_test_assert(&stats, flags->stats);
- }
-
if (flags->stats != IPSEC_TEST_STATS_NONE) {
+ uint64_t succ_bytes = 0;
+
+ /* Minimum bytes to be counted for stats.success_bytes */
+ if (!flags->ah) {
+ succ_bytes = test_out.pkt_in[0].len -
+ test_out.pkt_in[0].l4_offset;
+
+ if (flags->tunnel)
+ succ_bytes += test_out.pkt_in[0].l4_offset -
+ test_out.pkt_in[0].l3_offset;
+ } else {
+ succ_bytes = test_out.pkt_in[0].len -
+ test_out.pkt_in[0].l3_offset;
+
+ if (flags->tunnel)
+ succ_bytes += (flags->tunnel_is_v6 ?
+ ODPH_IPV6HDR_LEN :
+ ODPH_IPV4HDR_LEN);
+ }
+
/* All stats tests have outbound operation success and inbound
* varying.
*/
CU_ASSERT_EQUAL(odp_ipsec_stats(sa_out, &stats), 0);
- test_ipsec_stats_test_assert(&stats, IPSEC_TEST_STATS_SUCCESS);
+ test_ipsec_stats_test_assert(&stats, IPSEC_TEST_STATS_SUCCESS,
+ succ_bytes);
CU_ASSERT_EQUAL(odp_ipsec_stats(sa_in, &stats), 0);
- test_ipsec_stats_test_assert(&stats, flags->stats);
+ test_ipsec_stats_test_assert(&stats, flags->stats, succ_bytes);
}
ipsec_sa_destroy(sa_out);
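
A worked example of the minimum success_bytes computed above, using hypothetical offsets (Ethernet l3_offset 14, IPv4 l4_offset 34, total packet length 142):

/* Hypothetical packet: len = 142, l3_offset = 14, l4_offset = 34.
 *
 * ESP transport:  succ_bytes = 142 - 34                = 108 (L4 header + payload)
 * ESP tunnel:     succ_bytes = 108 + (34 - 14)         = 128 (adds inner IP header)
 * AH transport:   succ_bytes = 142 - 14                = 128 (whole IP packet)
 * AH IPv4 tunnel: succ_bytes = 128 + ODPH_IPV4HDR_LEN  = 148 (adds outer IPv4 header)
 * AH IPv6 tunnel: succ_bytes = 128 + ODPH_IPV6HDR_LEN  = 168 (adds outer IPv6 header)
 */
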
diff --git a/test/validation/api/packet/packet.c b/test/validation/api/packet/packet.c
index cd3009ce3..e25260b1d 100644
--- a/test/validation/api/packet/packet.c
+++ b/test/validation/api/packet/packet.c
@@ -929,13 +929,7 @@ static void _verify_headroom_shift(odp_packet_t *pkt,
CU_ASSERT_PTR_NOT_NULL(data);
if (extended) {
CU_ASSERT(rc >= 0);
- if (shift >= 0) {
- CU_ASSERT(odp_packet_seg_len(*pkt) == shift - room);
- } else {
- CU_ASSERT(odp_packet_headroom(*pkt) >=
- (uint32_t)abs(shift) - seg_data_len);
- }
- CU_ASSERT(odp_packet_head(*pkt) != head_orig);
+ CU_ASSERT(odp_packet_seg_len(*pkt) == seg_len);
} else {
CU_ASSERT(odp_packet_headroom(*pkt) == room - shift);
CU_ASSERT(odp_packet_seg_len(*pkt) == seg_data_len + shift);
@@ -1032,15 +1026,13 @@ static void _verify_tailroom_shift(odp_packet_t *pkt,
CU_ASSERT_PTR_NOT_NULL(tail);
if (extended) {
CU_ASSERT(rc >= 0);
- CU_ASSERT(odp_packet_last_seg(*pkt) != seg);
- seg = odp_packet_last_seg(*pkt);
- if (shift > 0) {
- CU_ASSERT(odp_packet_seg_data_len(*pkt, seg) ==
- shift - room);
+
+ if (shift >= 0) {
+ if (rc == 0)
+ CU_ASSERT(tail == tail_orig);
} else {
- CU_ASSERT(odp_packet_tailroom(*pkt) >=
- (uint32_t)abs(shift) - seg_data_len);
- CU_ASSERT(seg_len == odp_packet_tailroom(*pkt));
+ CU_ASSERT(odp_packet_tail(*pkt) == tail);
+ CU_ASSERT(odp_packet_tailroom(*pkt) == seg_len);
}
} else {
CU_ASSERT(odp_packet_seg_data_len(*pkt, seg) ==
@@ -1049,19 +1041,15 @@ static void _verify_tailroom_shift(odp_packet_t *pkt,
if (room == 0 || (room - shift) == 0)
return;
if (shift >= 0) {
- CU_ASSERT(odp_packet_tail(*pkt) ==
- tail_orig + shift);
+ CU_ASSERT(odp_packet_tail(*pkt) == tail_orig + shift);
+ CU_ASSERT(tail == tail_orig);
} else {
+ CU_ASSERT(odp_packet_tail(*pkt) == tail);
CU_ASSERT(tail == tail_orig + shift);
}
}
CU_ASSERT(odp_packet_len(*pkt) == pkt_data_len + shift);
- if (shift >= 0) {
- CU_ASSERT(tail == tail_orig);
- } else {
- CU_ASSERT(odp_packet_tail(*pkt) == tail);
- }
}
static void packet_test_tailroom(void)
diff --git a/test/validation/api/pktio/pktio.c b/test/validation/api/pktio/pktio.c
index 7549e16ef..9a47dbe8c 100644
--- a/test/validation/api/pktio/pktio.c
+++ b/test/validation/api/pktio/pktio.c
@@ -3810,6 +3810,169 @@ static void pktio_test_pktout_aging_tmo(void)
}
}
+static void pktio_test_pktin_event_queue(odp_pktin_mode_t pktin_mode)
+{
+ odp_pktio_t pktio_tx, pktio_rx;
+ odp_pktin_queue_param_t in_queue_param;
+ odp_pktout_queue_param_t out_queue_param;
+ odp_pktout_queue_t pktout_queue;
+ odp_queue_t queue, from;
+ odp_pool_t buf_pool;
+ odp_pool_param_t pool_param;
+ odp_packet_t pkt_tbl[TX_BATCH_LEN];
+ odp_packet_t pkt;
+ odp_buffer_t buf;
+ odp_event_t ev;
+ uint32_t pkt_seq[TX_BATCH_LEN];
+ int ret, i;
+ odp_time_t t1, t2;
+ int inactive = 0;
+ int num_pkt = 0;
+ int num_buf = 0;
+ int num_bad = 0;
+ odp_pktio_t pktio[MAX_NUM_IFACES] = {0};
+ uint64_t wait_sec = odp_schedule_wait_time(ODP_TIME_SEC_IN_NS);
+
+ CU_ASSERT_FATAL(num_ifaces >= 1);
+
+ odp_pool_param_init(&pool_param);
+ pool_param.type = ODP_POOL_BUFFER;
+ pool_param.buf.num = 2 * TX_BATCH_LEN;
+ pool_param.buf.size = 100;
+
+ buf_pool = odp_pool_create("buffer pool", &pool_param);
+ CU_ASSERT_FATAL(buf_pool != ODP_POOL_INVALID);
+
+ buf = odp_buffer_alloc(buf_pool);
+ CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+
+ odp_pktin_queue_param_init(&in_queue_param);
+ in_queue_param.num_queues = 1;
+ in_queue_param.hash_enable = 0;
+ in_queue_param.classifier_enable = 0;
+
+ if (pktin_mode == ODP_PKTIN_MODE_SCHED) {
+ in_queue_param.queue_param.type = ODP_QUEUE_TYPE_SCHED;
+ in_queue_param.queue_param.sched.prio = ODP_SCHED_PRIO_DEFAULT;
+ in_queue_param.queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+ in_queue_param.queue_param.sched.group = ODP_SCHED_GROUP_ALL;
+ }
+
+ odp_pktout_queue_param_init(&out_queue_param);
+ out_queue_param.num_queues = 1;
+
+ /* Open and configure interfaces */
+ for (i = 0; i < num_ifaces; ++i) {
+ pktio[i] = create_pktio(i, pktin_mode, ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
+
+ ret = odp_pktin_queue_config(pktio[i], &in_queue_param);
+ CU_ASSERT_FATAL(ret == 0);
+
+ ret = odp_pktout_queue_config(pktio[i], &out_queue_param);
+ CU_ASSERT_FATAL(ret == 0);
+
+ CU_ASSERT_FATAL(odp_pktio_start(pktio[i]) == 0);
+ }
+
+ for (i = 0; i < num_ifaces; ++i)
+ _pktio_wait_linkup(pktio[i]);
+
+ pktio_tx = pktio[0];
+ if (num_ifaces > 1)
+ pktio_rx = pktio[1];
+ else
+ pktio_rx = pktio_tx;
+
+ CU_ASSERT_FATAL(odp_pktin_event_queue(pktio_rx, &queue, 1) == 1);
+ CU_ASSERT_FATAL(odp_pktout_queue(pktio_tx, &pktout_queue, 1) == 1);
+
+ /* Allocate and initialize test packets */
+ ret = create_packets(pkt_tbl, pkt_seq, TX_BATCH_LEN, pktio_tx, pktio_rx);
+ if (ret != TX_BATCH_LEN) {
+ CU_FAIL("Failed to generate test packets");
+ return;
+ }
+
+ /* Send packets */
+ ret = odp_pktout_send(pktout_queue, pkt_tbl, TX_BATCH_LEN);
+ CU_ASSERT_FATAL(ret == TX_BATCH_LEN);
+
+ /* Send buffer event */
+ ret = odp_queue_enq(queue, odp_buffer_to_event(buf));
+ CU_ASSERT_FATAL(ret == 0);
+
+ /* Receive events */
+ while (1) {
+ /* Break after 1 sec of inactivity */
+ if (pktin_mode == ODP_PKTIN_MODE_SCHED) {
+ ev = odp_schedule(&from, wait_sec);
+
+ if (ev == ODP_EVENT_INVALID)
+ break;
+
+ CU_ASSERT(from == queue);
+ } else {
+ ev = odp_queue_deq(queue);
+
+ if (ev == ODP_EVENT_INVALID) {
+ if (inactive == 0) {
+ inactive = 1;
+ t1 = odp_time_local();
+ continue;
+ } else {
+ t2 = odp_time_local();
+ if (odp_time_diff_ns(t2, t1) > ODP_TIME_SEC_IN_NS)
+ break;
+
+ continue;
+ }
+ }
+
+ inactive = 0;
+ }
+
+ if (odp_event_type(ev) == ODP_EVENT_PACKET) {
+ pkt = odp_packet_from_event(ev);
+
+ if (pktio_pkt_seq(pkt) != TEST_SEQ_INVALID)
+ num_pkt++;
+
+ } else if (odp_event_type(ev) == ODP_EVENT_BUFFER) {
+ num_buf++;
+ } else {
+ CU_FAIL("Bad event type");
+ num_bad++;
+ }
+
+ odp_event_free(ev);
+ }
+
+ CU_ASSERT(num_pkt == TX_BATCH_LEN);
+ CU_ASSERT(num_buf == 1);
+ CU_ASSERT(num_bad == 0);
+
+ for (i = 0; i < num_ifaces; i++) {
+ CU_ASSERT_FATAL(odp_pktio_stop(pktio[i]) == 0);
+ CU_ASSERT_FATAL(odp_pktio_close(pktio[i]) == 0);
+ }
+
+ CU_ASSERT_FATAL(odp_pool_destroy(buf_pool) == 0);
+}
+
+static void pktio_test_pktin_event_sched(void)
+{
+ pktio_test_pktin_event_queue(ODP_PKTIN_MODE_SCHED);
+}
+
+static int pktio_check_pktin_event_sched(void)
+{
+ if (odp_cunit_ci_skip("pktio_test_pktin_event_sched"))
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
static int pktio_suite_init(void)
{
int i;
@@ -3965,6 +4128,8 @@ odp_testinfo_t pktio_suite_unsegmented[] = {
ODP_TEST_INFO(pktio_test_plain_multi_event),
ODP_TEST_INFO(pktio_test_sched_multi_event),
ODP_TEST_INFO(pktio_test_recv_multi_event),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_pktin_event_sched,
+ pktio_check_pktin_event_sched),
ODP_TEST_INFO_CONDITIONAL(pktio_test_statistics_counters,
pktio_check_statistics_counters),
ODP_TEST_INFO_CONDITIONAL(pktio_test_pktin_ts,
diff --git a/test/validation/api/scheduler/scheduler.c b/test/validation/api/scheduler/scheduler.c
index 8800350f2..37f3b4f0b 100644
--- a/test/validation/api/scheduler/scheduler.c
+++ b/test/validation/api/scheduler/scheduler.c
@@ -59,6 +59,7 @@
#define WAIT_1MS_RETRIES 1000
#define SCHED_AND_PLAIN_ROUNDS 10000
+#define ATOMICITY_ROUNDS 100
/* Test global variables */
typedef struct {
@@ -77,6 +78,10 @@ typedef struct {
odp_spinlock_t atomic_lock;
struct {
odp_queue_t handle;
+ odp_atomic_u32_t state;
+ } atomicity_q;
+ struct {
+ odp_queue_t handle;
char name[ODP_QUEUE_NAME_LEN];
} chaos_q[CHAOS_NUM_QUEUES];
struct {
@@ -145,10 +150,13 @@ static void scheduler_test_init(void)
odp_schedule_config_init(&default_config);
+ CU_ASSERT(default_config.max_flow_id == 0);
+
CU_ASSERT(default_config.sched_group.all);
CU_ASSERT(default_config.sched_group.control);
CU_ASSERT(default_config.sched_group.worker);
}
+
static void scheduler_test_capa(void)
{
odp_schedule_capability_t sched_capa;
@@ -2481,6 +2489,121 @@ static void scheduler_test_ordered_and_plain(void)
scheduler_test_sched_and_plain(ODP_SCHED_SYNC_ORDERED);
}
+static int atomicity_test_run(void *arg)
+{
+ thread_args_t *args = (thread_args_t *)arg;
+ odp_event_t ev;
+ odp_queue_t atomic_queue = args->globals->atomicity_q.handle;
+ odp_queue_t from;
+ odp_atomic_u32_t *state;
+ uint32_t old;
+ uint32_t num_processed = 0;
+
+ if (args->num_workers > 1)
+ odp_barrier_wait(&globals->barrier);
+
+ while (num_processed < ATOMICITY_ROUNDS) {
+ ev = odp_schedule(&from, ODP_SCHED_NO_WAIT);
+ if (ev == ODP_EVENT_INVALID)
+ continue;
+
+ CU_ASSERT(from == atomic_queue);
+ if (from != atomic_queue) {
+ odp_event_free(ev);
+ continue;
+ }
+
+ state = odp_queue_context(from);
+ CU_ASSERT_FATAL(state != NULL);
+
+ old = 0;
+ CU_ASSERT_FATAL(odp_atomic_cas_acq_rel_u32(state, &old, 1));
+
+ /* Hold atomic context a while to better reveal possible atomicity bugs */
+ odp_time_wait_ns(ODP_TIME_MSEC_IN_NS);
+
+ old = 1;
+ CU_ASSERT_FATAL(odp_atomic_cas_acq_rel_u32(state, &old, 0));
+
+ CU_ASSERT_FATAL(odp_queue_enq(from, ev) == 0);
+
+ num_processed++;
+ }
+
+ /* Release atomic context and get rid of possible prescheduled events */
+ odp_schedule_pause();
+ while ((ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT)) != ODP_EVENT_INVALID)
+ CU_ASSERT_FATAL(odp_queue_enq(atomic_queue, ev) == 0);
+
+ if (args->num_workers > 1)
+ odp_barrier_wait(&globals->barrier);
+
+ odp_schedule_resume();
+ drain_queues();
+
+ return 0;
+}
+
+static void scheduler_test_atomicity(void)
+{
+ odp_shm_t shm;
+ test_globals_t *globals;
+ thread_args_t *args;
+ odp_pool_t pool;
+ odp_queue_t queue;
+ odp_queue_param_t queue_param;
+ int i;
+
+ shm = odp_shm_lookup(GLOBALS_SHM_NAME);
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+ globals = odp_shm_addr(shm);
+ CU_ASSERT_FATAL(globals != NULL);
+
+ shm = odp_shm_lookup(SHM_THR_ARGS_NAME);
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+ args = odp_shm_addr(shm);
+ CU_ASSERT_FATAL(args != NULL);
+
+ pool = odp_pool_lookup(MSG_POOL_NAME);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ odp_queue_param_init(&queue_param);
+ queue_param.type = ODP_QUEUE_TYPE_SCHED;
+ queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+ queue_param.sched.group = ODP_SCHED_GROUP_ALL;
+ queue_param.size = globals->max_sched_queue_size;
+ queue_param.context = &globals->atomicity_q.state;
+ queue_param.context_len = sizeof(globals->atomicity_q.state);
+
+ queue = odp_queue_create("atomicity_test", &queue_param);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ for (i = 0; i < BUFS_PER_QUEUE; i++) {
+ odp_buffer_t buf = odp_buffer_alloc(pool);
+
+ CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+
+ CU_ASSERT_FATAL(odp_queue_enq(queue, odp_buffer_to_event(buf)) == 0);
+ }
+ globals->atomicity_q.handle = queue;
+ odp_atomic_init_u32(&globals->atomicity_q.state, 0);
+
+ /* Create and launch worker threads */
+ /* Test runs also on the main thread */
+ args->num_workers = globals->num_workers;
+ args->cu_thr.numthrds = globals->num_workers - 1;
+ if (args->cu_thr.numthrds > 0)
+ odp_cunit_thread_create(atomicity_test_run, &args->cu_thr);
+
+ atomicity_test_run(args);
+
+ /* Wait for worker threads to terminate */
+ if (args->cu_thr.numthrds > 0)
+ odp_cunit_thread_exit(&args->cu_thr);
+
+ odp_queue_destroy(globals->atomicity_q.handle);
+}
+
static int create_queues(test_globals_t *globals)
{
int i, j, prios, rc;
@@ -3048,6 +3171,7 @@ odp_testinfo_t scheduler_basic_suite[] = {
ODP_TEST_INFO(scheduler_test_ordered),
ODP_TEST_INFO(scheduler_test_atomic_and_plain),
ODP_TEST_INFO(scheduler_test_ordered_and_plain),
+ ODP_TEST_INFO(scheduler_test_atomicity),
ODP_TEST_INFO_NULL
};
diff --git a/test/validation/api/time/time.c b/test/validation/api/time/time.c
index 2b1efa8af..4974dcb5d 100644
--- a/test/validation/api/time/time.c
+++ b/test/validation/api/time/time.c
@@ -223,6 +223,16 @@ static void time_test_global_cmp(void)
time_test_cmp(odp_time_global, odp_time_global_from_ns);
}
+static void time_test_local_strict_cmp(void)
+{
+ time_test_cmp(odp_time_local_strict, odp_time_local_from_ns);
+}
+
+static void time_test_global_strict_cmp(void)
+{
+ time_test_cmp(odp_time_global_strict, odp_time_global_from_ns);
+}
+
/* check that a time difference gives a reasonable result */
static void time_test_diff(time_cb time_cur,
time_from_ns_cb time_from_ns,
@@ -318,6 +328,16 @@ static void time_test_global_diff(void)
time_test_diff(odp_time_global, odp_time_global_from_ns, global_res);
}
+static void time_test_local_strict_diff(void)
+{
+ time_test_diff(odp_time_local_strict, odp_time_local_from_ns, local_res);
+}
+
+static void time_test_global_strict_diff(void)
+{
+ time_test_diff(odp_time_global_strict, odp_time_global_from_ns, global_res);
+}
+
/* check that a time sum gives a reasonable result */
static void time_test_sum(time_cb time_cur,
time_from_ns_cb time_from_ns,
@@ -370,6 +390,16 @@ static void time_test_global_sum(void)
time_test_sum(odp_time_global, odp_time_global_from_ns, global_res);
}
+static void time_test_local_strict_sum(void)
+{
+ time_test_sum(odp_time_local_strict, odp_time_local_from_ns, local_res);
+}
+
+static void time_test_global_strict_sum(void)
+{
+ time_test_sum(odp_time_global_strict, odp_time_global_from_ns, global_res);
+}
+
static void time_test_wait_until(time_cb time_cur, time_from_ns_cb time_from_ns)
{
int i;
@@ -488,16 +518,6 @@ static void time_test_accuracy(time_cb time_cur, time_from_ns_cb time_from_ns)
CU_ASSERT(sec_t > sec_c * 0.95);
}
-static void time_test_local_accuracy(void)
-{
- time_test_accuracy(odp_time_local, odp_time_local_from_ns);
-}
-
-static void time_test_global_accuracy(void)
-{
- time_test_accuracy(odp_time_global, odp_time_global_from_ns);
-}
-
static void time_test_accuracy_nsec(time_nsec_cb time_nsec)
{
uint64_t t1, t2, diff;
@@ -533,6 +553,26 @@ static void time_test_accuracy_nsec(time_nsec_cb time_nsec)
CU_ASSERT(sec_t > sec_c * 0.95);
}
+static void time_test_local_accuracy(void)
+{
+ time_test_accuracy(odp_time_local, odp_time_local_from_ns);
+}
+
+static void time_test_global_accuracy(void)
+{
+ time_test_accuracy(odp_time_global, odp_time_global_from_ns);
+}
+
+static void time_test_local_strict_accuracy(void)
+{
+ time_test_accuracy(odp_time_local_strict, odp_time_local_from_ns);
+}
+
+static void time_test_global_strict_accuracy(void)
+{
+ time_test_accuracy(odp_time_global_strict, odp_time_global_from_ns);
+}
+
static void time_test_local_accuracy_nsec(void)
{
time_test_accuracy_nsec(odp_time_local_ns);
@@ -543,26 +583,46 @@ static void time_test_global_accuracy_nsec(void)
time_test_accuracy_nsec(odp_time_global_ns);
}
+static void time_test_local_strict_accuracy_nsec(void)
+{
+ time_test_accuracy_nsec(odp_time_local_strict_ns);
+}
+
+static void time_test_global_strict_accuracy_nsec(void)
+{
+ time_test_accuracy_nsec(odp_time_global_strict_ns);
+}
+
odp_testinfo_t time_suite_time[] = {
ODP_TEST_INFO(time_test_constants),
ODP_TEST_INFO(time_test_local_res),
ODP_TEST_INFO(time_test_local_conversion),
- ODP_TEST_INFO(time_test_monotony),
ODP_TEST_INFO(time_test_local_cmp),
ODP_TEST_INFO(time_test_local_diff),
ODP_TEST_INFO(time_test_local_sum),
- ODP_TEST_INFO(time_test_local_wait_until),
- ODP_TEST_INFO(time_test_wait_ns),
- ODP_TEST_INFO(time_test_local_accuracy),
ODP_TEST_INFO(time_test_global_res),
ODP_TEST_INFO(time_test_global_conversion),
ODP_TEST_INFO(time_test_global_cmp),
ODP_TEST_INFO(time_test_global_diff),
ODP_TEST_INFO(time_test_global_sum),
+ ODP_TEST_INFO(time_test_wait_ns),
+ ODP_TEST_INFO(time_test_monotony),
+ ODP_TEST_INFO(time_test_local_wait_until),
ODP_TEST_INFO(time_test_global_wait_until),
+ ODP_TEST_INFO(time_test_local_accuracy),
ODP_TEST_INFO(time_test_global_accuracy),
ODP_TEST_INFO(time_test_local_accuracy_nsec),
ODP_TEST_INFO(time_test_global_accuracy_nsec),
+ ODP_TEST_INFO(time_test_local_strict_diff),
+ ODP_TEST_INFO(time_test_local_strict_sum),
+ ODP_TEST_INFO(time_test_local_strict_cmp),
+ ODP_TEST_INFO(time_test_global_strict_diff),
+ ODP_TEST_INFO(time_test_global_strict_sum),
+ ODP_TEST_INFO(time_test_global_strict_cmp),
+ ODP_TEST_INFO(time_test_local_strict_accuracy),
+ ODP_TEST_INFO(time_test_global_strict_accuracy),
+ ODP_TEST_INFO(time_test_local_strict_accuracy_nsec),
+ ODP_TEST_INFO(time_test_global_strict_accuracy_nsec),
ODP_TEST_INFO_NULL
};
diff --git a/test/validation/api/timer/timer.c b/test/validation/api/timer/timer.c
index f2cc93cb8..177f6f82b 100644
--- a/test/validation/api/timer/timer.c
+++ b/test/validation/api/timer/timer.c
@@ -47,6 +47,9 @@ struct thread_args {
};
typedef struct {
+ /* Clock source support flags */
+ uint8_t clk_supported[ODP_CLOCK_NUM_SRC];
+
/* Default resolution / timeout parameters */
struct {
uint64_t res_ns;
@@ -88,6 +91,7 @@ static int timer_global_init(odp_instance_t *inst)
odp_timer_res_capability_t res_capa;
uint64_t res_ns, min_tmo, max_tmo;
unsigned int range;
+ int i;
if (odph_options(&helper_options)) {
fprintf(stderr, "error: odph_options() failed.\n");
@@ -121,7 +125,7 @@ static int timer_global_init(odp_instance_t *inst)
odp_schedule_config(NULL);
memset(&capa, 0, sizeof(capa));
- if (odp_timer_capability(ODP_CLOCK_CPU, &capa)) {
+ if (odp_timer_capability(ODP_CLOCK_DEFAULT, &capa)) {
fprintf(stderr, "Timer capability failed\n");
return -1;
}
@@ -134,7 +138,7 @@ static int timer_global_init(odp_instance_t *inst)
memset(&res_capa, 0, sizeof(res_capa));
res_capa.res_ns = res_ns;
- if (odp_timer_res_capability(ODP_CLOCK_CPU, &res_capa)) {
+ if (odp_timer_res_capability(ODP_CLOCK_DEFAULT, &res_capa)) {
fprintf(stderr, "Timer resolution capability failed\n");
return -1;
}
@@ -156,12 +160,19 @@ static int timer_global_init(odp_instance_t *inst)
}
/* Default parameters for test cases */
+ global_mem->clk_supported[0] = 1;
global_mem->param.res_ns = res_ns;
global_mem->param.min_tmo = min_tmo;
global_mem->param.max_tmo = max_tmo;
global_mem->param.queue_type_plain = capa.queue_type_plain;
global_mem->param.queue_type_sched = capa.queue_type_sched;
+ /* Check which other clock sources are supported */
+ for (i = 1; i < ODP_CLOCK_NUM_SRC; i++) {
+ if (odp_timer_capability(ODP_CLOCK_SRC_0 + i, &capa) == 0)
+ global_mem->clk_supported[i] = 1;
+ }
+
return 0;
}
@@ -206,14 +217,14 @@ check_plain_queue_support(void)
return ODP_TEST_INACTIVE;
}
-static void timer_test_capa(void)
+static void timer_test_capa_run(odp_timer_clk_src_t clk_src)
{
odp_timer_capability_t capa;
odp_timer_res_capability_t res_capa;
int ret;
memset(&capa, 0, sizeof(capa));
- ret = odp_timer_capability(ODP_CLOCK_CPU, &capa);
+ ret = odp_timer_capability(clk_src, &capa);
CU_ASSERT_FATAL(ret == 0);
CU_ASSERT(capa.highest_res_ns == capa.max_res.res_ns);
@@ -234,7 +245,7 @@ static void timer_test_capa(void)
memset(&res_capa, 0, sizeof(res_capa));
res_capa.res_ns = capa.max_res.res_ns;
- ret = odp_timer_res_capability(ODP_CLOCK_CPU, &res_capa);
+ ret = odp_timer_res_capability(clk_src, &res_capa);
CU_ASSERT_FATAL(ret == 0);
CU_ASSERT(res_capa.res_ns == capa.max_res.res_ns);
CU_ASSERT(res_capa.min_tmo == capa.max_res.min_tmo);
@@ -244,7 +255,7 @@ static void timer_test_capa(void)
memset(&res_capa, 0, sizeof(res_capa));
res_capa.res_hz = capa.max_res.res_hz;
- ret = odp_timer_res_capability(ODP_CLOCK_CPU, &res_capa);
+ ret = odp_timer_res_capability(clk_src, &res_capa);
CU_ASSERT_FATAL(ret == 0);
CU_ASSERT(res_capa.res_hz == capa.max_res.res_hz);
CU_ASSERT(res_capa.min_tmo == capa.max_res.min_tmo);
@@ -254,7 +265,7 @@ static void timer_test_capa(void)
memset(&res_capa, 0, sizeof(res_capa));
res_capa.max_tmo = capa.max_tmo.max_tmo;
- ret = odp_timer_res_capability(ODP_CLOCK_CPU, &res_capa);
+ ret = odp_timer_res_capability(clk_src, &res_capa);
CU_ASSERT_FATAL(ret == 0);
CU_ASSERT(res_capa.max_tmo == capa.max_tmo.max_tmo);
CU_ASSERT(res_capa.min_tmo == capa.max_tmo.min_tmo);
@@ -262,6 +273,31 @@ static void timer_test_capa(void)
CU_ASSERT(res_capa.res_hz == capa.max_tmo.res_hz);
}
+static void timer_test_capa(void)
+{
+ odp_timer_clk_src_t clk_src;
+ int i;
+
+ /* Check that all API clock source enumeration values exist */
+ CU_ASSERT_FATAL(ODP_CLOCK_DEFAULT == ODP_CLOCK_SRC_0);
+ CU_ASSERT_FATAL(ODP_CLOCK_SRC_0 + 1 == ODP_CLOCK_SRC_1);
+ CU_ASSERT_FATAL(ODP_CLOCK_SRC_0 + 2 == ODP_CLOCK_SRC_2);
+ CU_ASSERT_FATAL(ODP_CLOCK_SRC_0 + 3 == ODP_CLOCK_SRC_3);
+ CU_ASSERT_FATAL(ODP_CLOCK_SRC_0 + 4 == ODP_CLOCK_SRC_4);
+ CU_ASSERT_FATAL(ODP_CLOCK_SRC_0 + 5 == ODP_CLOCK_SRC_5);
+ CU_ASSERT_FATAL(ODP_CLOCK_SRC_5 + 1 == ODP_CLOCK_NUM_SRC);
+ CU_ASSERT_FATAL(ODP_CLOCK_CPU == ODP_CLOCK_DEFAULT);
+ CU_ASSERT_FATAL(ODP_CLOCK_EXT == ODP_CLOCK_SRC_1);
+
+ for (i = 0; i < ODP_CLOCK_NUM_SRC; i++) {
+ clk_src = ODP_CLOCK_SRC_0 + i;
+ if (global_mem->clk_supported[i]) {
+ ODPH_DBG("\nTesting clock source: %i\n", clk_src);
+ timer_test_capa_run(clk_src);
+ }
+ }
+}
+
static void timer_test_timeout_pool_alloc(void)
{
odp_pool_t pool;
@@ -359,7 +395,7 @@ static void timer_pool_create_destroy(void)
int ret;
memset(&capa, 0, sizeof(capa));
- ret = odp_timer_capability(ODP_CLOCK_CPU, &capa);
+ ret = odp_timer_capability(ODP_CLOCK_DEFAULT, &capa);
CU_ASSERT_FATAL(ret == 0);
odp_queue_param_init(&queue_param);
@@ -378,7 +414,7 @@ static void timer_pool_create_destroy(void)
tparam.max_tmo = global_mem->param.max_tmo;
tparam.num_timers = 100;
tparam.priv = 0;
- tparam.clk_src = ODP_CLOCK_CPU;
+ tparam.clk_src = ODP_CLOCK_DEFAULT;
tp[0] = odp_timer_pool_create("timer_pool_a", &tparam);
CU_ASSERT(tp[0] != ODP_TIMER_POOL_INVALID);
@@ -444,7 +480,7 @@ static void timer_pool_max_res(void)
int ret, i;
memset(&capa, 0, sizeof(capa));
- ret = odp_timer_capability(ODP_CLOCK_CPU, &capa);
+ ret = odp_timer_capability(ODP_CLOCK_DEFAULT, &capa);
CU_ASSERT_FATAL(ret == 0);
odp_pool_param_init(&pool_param);
@@ -481,7 +517,7 @@ static void timer_pool_max_res(void)
tp_param.max_tmo = capa.max_res.max_tmo;
tp_param.num_timers = 100;
tp_param.priv = 0;
- tp_param.clk_src = ODP_CLOCK_CPU;
+ tp_param.clk_src = ODP_CLOCK_DEFAULT;
tp = odp_timer_pool_create("high_res_tp", &tp_param);
CU_ASSERT_FATAL(tp != ODP_TIMER_POOL_INVALID);
@@ -545,7 +581,7 @@ static void timer_test_event_type(odp_queue_type_t queue_type,
period_ns = 2 * global_mem->param.min_tmo;
timer_param.max_tmo = global_mem->param.max_tmo;
timer_param.num_timers = num;
- timer_param.clk_src = ODP_CLOCK_CPU;
+ timer_param.clk_src = ODP_CLOCK_DEFAULT;
timer_pool = odp_timer_pool_create("timer_pool", &timer_param);
if (timer_pool == ODP_TIMER_POOL_INVALID)
@@ -614,12 +650,12 @@ static void timer_test_event_type(odp_queue_type_t queue_type,
ret = odp_timer_set_rel(timer[i], (i + 1) * period_tick, &ev);
- if (ret == ODP_TIMER_TOOEARLY)
- ODPH_DBG("Too early %i\n", i);
- else if (ret == ODP_TIMER_TOOLATE)
- ODPH_DBG("Too late %i\n", i);
- else if (ret == ODP_TIMER_NOEVENT)
- ODPH_DBG("No event %i\n", i);
+ if (ret == ODP_TIMER_TOO_NEAR)
+ ODPH_DBG("Timer set failed. Too near %i.\n", i);
+ else if (ret == ODP_TIMER_TOO_FAR)
+ ODPH_DBG("Timer set failed. Too far %i.\n", i);
+ else if (ret == ODP_TIMER_FAIL)
+ ODPH_DBG("Timer set failed %i\n", i);
CU_ASSERT(ret == ODP_TIMER_SUCCESS);
}
@@ -740,7 +776,7 @@ static void timer_test_queue_type(odp_queue_type_t queue_type, int priv)
tparam.max_tmo = global_mem->param.max_tmo;
tparam.num_timers = num + 1;
tparam.priv = priv;
- tparam.clk_src = ODP_CLOCK_CPU;
+ tparam.clk_src = ODP_CLOCK_DEFAULT;
ODPH_DBG("\nTimer pool parameters:\n");
ODPH_DBG(" res_ns %" PRIu64 "\n", tparam.res_ns);
@@ -792,12 +828,12 @@ static void timer_test_queue_type(odp_queue_type_t queue_type, int priv)
target_tick[i] = tick;
ODPH_DBG("abs timer tick %" PRIu64 "\n", tick);
- if (ret == ODP_TIMER_TOOEARLY)
- ODPH_DBG("Too early %" PRIu64 "\n", tick);
- else if (ret == ODP_TIMER_TOOLATE)
- ODPH_DBG("Too late %" PRIu64 "\n", tick);
- else if (ret == ODP_TIMER_NOEVENT)
- ODPH_DBG("No event %" PRIu64 "\n", tick);
+ if (ret == ODP_TIMER_TOO_NEAR)
+ ODPH_DBG("Timer set failed. Too near %" PRIu64 ".\n", tick);
+ else if (ret == ODP_TIMER_TOO_FAR)
+ ODPH_DBG("Timer set failed. Too far %" PRIu64 ".\n", tick);
+ else if (ret == ODP_TIMER_FAIL)
+ ODPH_DBG("Timer set failed %" PRIu64 "\n", tick);
CU_ASSERT(ret == ODP_TIMER_SUCCESS);
}
@@ -902,7 +938,7 @@ static void timer_test_cancel(void)
int ret;
memset(&capa, 0, sizeof(capa));
- ret = odp_timer_capability(ODP_CLOCK_CPU, &capa);
+ ret = odp_timer_capability(ODP_CLOCK_DEFAULT, &capa);
CU_ASSERT_FATAL(ret == 0);
odp_pool_param_init(&params);
@@ -920,7 +956,7 @@ static void timer_test_cancel(void)
tparam.max_tmo = global_mem->param.max_tmo;
tparam.num_timers = 1;
tparam.priv = 0;
- tparam.clk_src = ODP_CLOCK_CPU;
+ tparam.clk_src = ODP_CLOCK_DEFAULT;
tp = odp_timer_pool_create(NULL, &tparam);
if (tp == ODP_TIMER_POOL_INVALID)
CU_FAIL_FATAL("Timer pool create failed");
@@ -1008,7 +1044,7 @@ static void timer_test_tmo_limit(odp_queue_type_t queue_type,
odp_timer_t timer[num];
memset(&timer_capa, 0, sizeof(timer_capa));
- ret = odp_timer_capability(ODP_CLOCK_CPU, &timer_capa);
+ ret = odp_timer_capability(ODP_CLOCK_DEFAULT, &timer_capa);
CU_ASSERT_FATAL(ret == 0);
if (max_res) {
@@ -1028,7 +1064,7 @@ static void timer_test_tmo_limit(odp_queue_type_t queue_type,
timer_param.min_tmo = min_tmo;
timer_param.max_tmo = max_tmo;
timer_param.num_timers = num;
- timer_param.clk_src = ODP_CLOCK_CPU;
+ timer_param.clk_src = ODP_CLOCK_DEFAULT;
timer_pool = odp_timer_pool_create("timer_pool", &timer_param);
if (timer_pool == ODP_TIMER_POOL_INVALID)
@@ -1085,12 +1121,12 @@ static void timer_test_tmo_limit(odp_queue_type_t queue_type,
t1 = odp_time_local();
ret = odp_timer_set_rel(timer[i], tmo_tick, &ev);
- if (ret == ODP_TIMER_TOOEARLY)
- ODPH_DBG("Too early %i\n", i);
- else if (ret == ODP_TIMER_TOOLATE)
- ODPH_DBG("Too late %i\n", i);
- else if (ret == ODP_TIMER_NOEVENT)
- ODPH_DBG("No event %i\n", i);
+ if (ret == ODP_TIMER_TOO_NEAR)
+ ODPH_DBG("Timer set failed. Too near %i.\n", i);
+ else if (ret == ODP_TIMER_TOO_FAR)
+ ODPH_DBG("Timer set failed. Too late %i.\n", i);
+ else if (ret == ODP_TIMER_FAIL)
+ ODPH_DBG("Timer set failed %i\n", i);
CU_ASSERT(ret == ODP_TIMER_SUCCESS);
@@ -1364,7 +1400,7 @@ static int worker_entrypoint(void *arg)
tck = odp_timer_current_tick(tp) +
odp_timer_ns_to_tick(tp, nsec);
timer_rc = odp_timer_set_abs(tt[i].tim, tck, &tt[i].ev);
- if (timer_rc == ODP_TIMER_TOOEARLY) {
+ if (timer_rc == ODP_TIMER_TOO_NEAR) {
ODPH_ERR("Missed tick, setting timer\n");
} else if (timer_rc != ODP_TIMER_SUCCESS) {
ODPH_ERR("Failed to set timer: %d\n", timer_rc);
@@ -1431,11 +1467,11 @@ static int worker_entrypoint(void *arg)
cur_tick = odp_timer_current_tick(tp);
rc = odp_timer_set_rel(tt[i].tim, tck, &tt[i].ev);
- if (rc == ODP_TIMER_TOOEARLY) {
- CU_FAIL("Failed to set timer: TOO EARLY");
- } else if (rc == ODP_TIMER_TOOLATE) {
- CU_FAIL("Failed to set timer: TOO LATE");
- } else if (rc == ODP_TIMER_NOEVENT) {
+ if (rc == ODP_TIMER_TOO_NEAR) {
+ CU_FAIL("Failed to set timer: TOO NEAR");
+ } else if (rc == ODP_TIMER_TOO_FAR) {
+ CU_FAIL("Failed to set timer: TOO FAR");
+ } else if (rc == ODP_TIMER_FAIL) {
/* Set/reset failed, timer already expired */
ntoolate++;
} else if (rc == ODP_TIMER_SUCCESS) {
@@ -1561,7 +1597,7 @@ static void timer_test_all(odp_queue_type_t queue_type)
num_workers = 1;
num_timers = num_workers * NTIMERS;
- CU_ASSERT_FATAL(!odp_timer_capability(ODP_CLOCK_CPU, &timer_capa));
+ CU_ASSERT_FATAL(!odp_timer_capability(ODP_CLOCK_DEFAULT, &timer_capa));
if (timer_capa.max_timers && timer_capa.max_timers < num_timers)
num_timers = timer_capa.max_timers;
@@ -1594,7 +1630,7 @@ static void timer_test_all(odp_queue_type_t queue_type)
tparam.max_tmo = max_tmo;
tparam.num_timers = num_timers;
tparam.priv = 0;
- tparam.clk_src = ODP_CLOCK_CPU;
+ tparam.clk_src = ODP_CLOCK_DEFAULT;
global_mem->tp = odp_timer_pool_create(NAME, &tparam);
if (global_mem->tp == ODP_TIMER_POOL_INVALID)
CU_FAIL_FATAL("Timer pool create failed");
diff --git a/test/validation/api/traffic_mngr/traffic_mngr.c b/test/validation/api/traffic_mngr/traffic_mngr.c
index 13f39457d..55004692d 100644
--- a/test/validation/api/traffic_mngr/traffic_mngr.c
+++ b/test/validation/api/traffic_mngr/traffic_mngr.c
@@ -113,6 +113,8 @@
#define TM_PERCENT(percent) ((uint32_t)(100 * percent))
+#define ARRAY_SIZE(a) (sizeof((a)) / sizeof((a)[0]))
+
typedef enum {
SHAPER_PROFILE, SCHED_PROFILE, THRESHOLD_PROFILE, WRED_PROFILE
} profile_kind_t;
@@ -281,6 +283,11 @@ static uint32_t num_odp_tm_systems;
static odp_tm_capabilities_t tm_capabilities;
+static bool dynamic_shaper_update = true;
+static bool dynamic_sched_update = true;
+static bool dynamic_threshold_update = true;
+static bool dynamic_wred_update = true;
+
static odp_tm_shaper_t shaper_profiles[NUM_SHAPER_PROFILES];
static odp_tm_sched_t sched_profiles[NUM_SCHED_PROFILES];
static odp_tm_threshold_t threshold_profiles[NUM_THRESHOLD_PROFILES];
@@ -317,8 +324,8 @@ static uint32_t num_ifaces;
static odp_pool_t pools[MAX_NUM_IFACES] = {ODP_POOL_INVALID, ODP_POOL_INVALID};
static odp_pktio_t pktios[MAX_NUM_IFACES];
+static odp_bool_t pktio_started[MAX_NUM_IFACES];
static odp_pktin_queue_t pktins[MAX_NUM_IFACES];
-static odp_pktout_queue_t pktouts[MAX_NUM_IFACES];
static odp_pktin_queue_t rcv_pktin;
static odp_pktio_t xmt_pktio;
@@ -328,6 +335,8 @@ static odph_ethaddr_t dst_mac;
static uint32_t cpu_unique_id;
static uint32_t cpu_tcp_seq_num;
+static int8_t suite_inactive;
+
static void busy_wait(uint64_t nanoseconds)
{
odp_time_t start_time, end_time;
@@ -481,7 +490,6 @@ static int open_pktios(void)
odp_pktio_param_init(&pktio_param);
pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT;
- pktio_param.out_mode = ODP_PKTOUT_MODE_DIRECT;
for (iface = 0; iface < num_ifaces; iface++) {
snprintf(pool_name, sizeof(pool_name), "pkt_pool_%s",
@@ -494,10 +502,37 @@ static int open_pktios(void)
}
pools[iface] = pkt_pool;
- pktio = odp_pktio_open(iface_name[iface], pkt_pool,
- &pktio_param);
- if (pktio == ODP_PKTIO_INVALID)
- pktio = odp_pktio_lookup(iface_name[iface]);
+
+ /* Zeroth device is always PKTOUT TM as we use it for transmit */
+ if (iface == 0) {
+ pktio_param.out_mode = ODP_PKTOUT_MODE_TM;
+
+ pktio = odp_pktio_open(iface_name[iface], pkt_pool,
+ &pktio_param);
+
+ /* On failure check if pktio can be opened in non-TM mode.
+ * If non-TM mode works, then we can assume that PKTIO
+ * does not support TM
+ */
+ if (pktio == ODP_PKTIO_INVALID) {
+ pktio_param.out_mode = ODP_PKTOUT_MODE_DIRECT;
+ pktio = odp_pktio_open(iface_name[iface], pkt_pool,
+ &pktio_param);
+
+ /* Return >0 to indicate no TM support */
+ if (pktio != ODP_PKTIO_INVALID) {
+ odp_pktio_close(pktio);
+ return 1;
+ }
+ }
+ } else {
+ pktio_param.out_mode = ODP_PKTOUT_MODE_DISABLED;
+
+ pktio = odp_pktio_open(iface_name[iface], pkt_pool,
+ &pktio_param);
+ }
+
+ pktios[iface] = pktio;
if (pktio == ODP_PKTIO_INVALID) {
ODPH_ERR("odp_pktio_open() failed\n");
return -1;
@@ -505,25 +540,16 @@ static int open_pktios(void)
/* Set defaults for PktIn and PktOut queues */
(void)odp_pktin_queue_config(pktio, NULL);
- (void)odp_pktout_queue_config(pktio, NULL);
rc = odp_pktio_promisc_mode_set(pktio, true);
if (rc != 0)
printf("****** promisc_mode_set failed ******\n");
- pktios[iface] = pktio;
-
if (odp_pktin_queue(pktio, &pktins[iface], 1) != 1) {
odp_pktio_close(pktio);
ODPH_ERR("odp_pktio_open() failed: no pktin queue\n");
return -1;
}
- if (odp_pktout_queue(pktio, &pktouts[iface], 1) != 1) {
- odp_pktio_close(pktio);
- ODPH_ERR("odp_pktio_open() failed: no pktout queue\n");
- return -1;
- }
-
rc = -1;
if (iface == 0)
rc = odp_pktio_mac_addr(pktio, &src_mac,
@@ -547,6 +573,7 @@ static int open_pktios(void)
ODPH_ERR("odp_pktio_start() failed\n");
return -1;
}
+ pktio_started[1] = true;
} else {
xmt_pktio = pktios[0];
rcv_pktin = pktins[0];
@@ -557,6 +584,7 @@ static int open_pktios(void)
ODPH_ERR("odp_pktio_start() failed\n");
return -1;
}
+ pktio_started[0] = true;
/* Now wait until the link or links are up. */
rc = wait_linkup(pktios[0]);
@@ -1658,12 +1686,26 @@ static int create_tm_system(void)
return -1;
}
+ /* Update dynamic capability flags from created tm system */
+ dynamic_shaper_update = tm_capabilities.dynamic_shaper_update;
+ dynamic_sched_update = tm_capabilities.dynamic_sched_update;
+ dynamic_threshold_update = tm_capabilities.dynamic_threshold_update;
+ dynamic_wred_update = tm_capabilities.dynamic_wred_update;
+
found_odp_tm = odp_tm_find(tm_name, &requirements, &egress);
if ((found_odp_tm == ODP_TM_INVALID) || (found_odp_tm != odp_tm)) {
ODPH_ERR("odp_tm_find() failed\n");
return -1;
}
+ /* Start TM system */
+ CU_ASSERT((rc = odp_tm_start(odp_tm)) == 0);
+ if (rc != 0) {
+ ODPH_ERR("odp_tm_start() failed for tm: %" PRIx64 "\n",
+ odp_tm_to_u64(odp_tm));
+ return -1;
+ }
+
return 0;
}
@@ -2042,11 +2084,16 @@ static int destroy_tm_systems(void)
/* Close/free the TM systems. */
for (idx = 0; idx < num_odp_tm_systems; idx++) {
+ if (odp_tm_stop(odp_tm_systems[idx]) != 0)
+ return -1;
+
if (destroy_tm_subtree(root_node_descs[idx]) != 0)
return -1;
if (odp_tm_destroy(odp_tm_systems[idx]) != 0)
return -1;
+
+ odp_tm_systems[idx] = ODP_TM_INVALID;
}
/* Close/free the TM profiles. */
@@ -2058,7 +2105,9 @@ static int destroy_tm_systems(void)
static int traffic_mngr_suite_init(void)
{
+ odp_tm_capabilities_t capabilities_array[MAX_CAPABILITIES];
uint32_t payload_len, copy_len;
+ int ret, i;
/* Initialize some global variables. */
num_pkts_made = 0;
@@ -2094,9 +2143,45 @@ static int traffic_mngr_suite_init(void)
iface_name[0], iface_name[1]);
}
- if (open_pktios() != 0)
+ pktios[0] = ODP_PKTIO_INVALID;
+ pktios[1] = ODP_PKTIO_INVALID;
+
+ ret = open_pktios();
+ if (ret < 0)
+ return -1;
+
+ /* A positive return indicates that pktio open failed with out mode TM
+ * but succeeded with direct mode.
+ */
+ if (ret > 0)
+ goto skip_tests;
+
+ /* Fetch initial dynamic update capabilities; they will be updated
+ * later, after the TM system is created.
+ */
+ ret = odp_tm_capabilities(capabilities_array, MAX_CAPABILITIES);
+ if (ret <= 0)
return -1;
+ for (i = 0; i < ret; i++) {
+ if (!capabilities_array[i].dynamic_shaper_update)
+ dynamic_shaper_update = false;
+
+ if (!capabilities_array[i].dynamic_sched_update)
+ dynamic_sched_update = false;
+
+ if (!capabilities_array[i].dynamic_threshold_update)
+ dynamic_threshold_update = false;
+
+ if (!capabilities_array[i].dynamic_wred_update)
+ dynamic_wred_update = false;
+ }
+
+ return 0;
+skip_tests:
+ /* Mark all tests as inactive under this suite */
+ odp_cunit_set_inactive();
+ suite_inactive++;
return 0;
}
@@ -2107,14 +2192,22 @@ static int traffic_mngr_suite_term(void)
/* Close the pktios and associated packet pools. */
free_rcvd_pkts();
for (iface = 0; iface < num_ifaces; iface++) {
- if (odp_pktio_stop(pktios[iface]) != 0)
- return -1;
+ /* Skip pktios not initialized */
+ if (pktios[iface] != ODP_PKTIO_INVALID) {
+ if (pktio_started[iface] &&
+ odp_pktio_stop(pktios[iface]) != 0)
+ return -1;
- if (odp_pktio_close(pktios[iface]) != 0)
- return -1;
+ if (odp_pktio_close(pktios[iface]) != 0)
+ return -1;
+ pktios[iface] = ODP_PKTIO_INVALID;
+ pktio_started[iface] = false;
+ }
if (odp_pool_destroy(pools[iface]) != 0)
return -1;
+
+ pools[iface] = ODP_POOL_INVALID;
}
if (odp_cunit_print_inactive())
@@ -2138,9 +2231,9 @@ static void check_shaper_profile(char *shaper_name, uint32_t shaper_idx)
memset(&shaper_params, 0, sizeof(shaper_params));
rc = odp_tm_shaper_params_read(profile, &shaper_params);
CU_ASSERT(rc == 0);
- CU_ASSERT(approx_eq64(shaper_params.commit_bps,
+ CU_ASSERT(approx_eq64(shaper_params.commit_rate,
shaper_idx * MIN_COMMIT_BW));
- CU_ASSERT(approx_eq64(shaper_params.peak_bps,
+ CU_ASSERT(approx_eq64(shaper_params.peak_rate,
shaper_idx * MIN_PEAK_BW));
CU_ASSERT(approx_eq32(shaper_params.commit_burst,
shaper_idx * MIN_COMMIT_BURST));
@@ -2165,8 +2258,8 @@ static void traffic_mngr_test_shaper_profile(void)
for (idx = 1; idx <= NUM_SHAPER_TEST_PROFILES; idx++) {
snprintf(shaper_name, sizeof(shaper_name),
"shaper_profile_%" PRIu32, idx);
- shaper_params.commit_bps = idx * MIN_COMMIT_BW;
- shaper_params.peak_bps = idx * MIN_PEAK_BW;
+ shaper_params.commit_rate = idx * MIN_COMMIT_BW;
+ shaper_params.peak_rate = idx * MIN_PEAK_BW;
shaper_params.commit_burst = idx * MIN_COMMIT_BURST;
shaper_params.peak_burst = idx * MIN_PEAK_BURST;
@@ -2415,6 +2508,7 @@ static int set_shaper(const char *node_name,
odp_tm_shaper_params_t shaper_params;
odp_tm_shaper_t shaper_profile;
odp_tm_node_t tm_node;
+ int rc;
tm_node = find_tm_node(0, node_name);
if (tm_node == ODP_TM_INVALID) {
@@ -2424,13 +2518,20 @@ static int set_shaper(const char *node_name,
}
odp_tm_shaper_params_init(&shaper_params);
- shaper_params.commit_bps = commit_bps;
- shaper_params.peak_bps = 0;
+ shaper_params.commit_rate = commit_bps;
+ shaper_params.peak_rate = 0;
shaper_params.commit_burst = commit_burst_in_bits;
shaper_params.peak_burst = 0;
shaper_params.shaper_len_adjust = 0;
shaper_params.dual_rate = 0;
+ if (!dynamic_shaper_update) {
+ /* Stop TM system before update when dynamic update is not
+ * supported.
+ */
+ CU_ASSERT_FATAL(odp_tm_stop(odp_tm_systems[0]) == 0);
+ }
+
/* First see if a shaper profile already exists with this name, in
* which case we use that profile, else create a new one. */
shaper_profile = odp_tm_shaper_lookup(shaper_name);
@@ -2443,7 +2544,13 @@ static int set_shaper(const char *node_name,
num_shaper_profiles++;
}
- return odp_tm_node_shaper_config(tm_node, shaper_profile);
+ rc = odp_tm_node_shaper_config(tm_node, shaper_profile);
+
+ if (!dynamic_shaper_update) {
+		/* Restart TM system after the update */
+ CU_ASSERT_FATAL(odp_tm_start(odp_tm_systems[0]) == 0);
+ }
+ return rc;
}
static int traffic_mngr_check_shaper(void)
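
The stop/config/start sequence added to set_shaper() above is the general pattern this patch applies wherever a dynamic update capability is missing. A minimal generic sketch of the pattern follows; the helper name apply_tm_config() and its callback signature are illustrative, while odp_tm_stop()/odp_tm_start() are the real API calls used by the patch.

	#include <odp_api.h>

	/* Sketch: apply a TM configuration change, stopping and restarting
	 * the TM system when dynamic updates are not supported. */
	static int apply_tm_config(odp_tm_t tm, int dynamic_ok,
				   int (*apply)(void *arg), void *arg)
	{
		int rc;

		if (!dynamic_ok && odp_tm_stop(tm) != 0)
			return -1;

		rc = apply(arg);	/* e.g. shaper/sched/threshold config */

		if (!dynamic_ok && odp_tm_start(tm) != 0)
			return -1;

		return rc;
	}

Restarting on the error path as well (as the goto exit labels below do) keeps the TM system running for the remaining tests even when an individual configuration call fails.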
@@ -2617,6 +2724,13 @@ static int set_sched_fanin(const char *node_name,
if (node_desc == NULL)
return -1;
+ if (!dynamic_sched_update) {
+ /* Stop TM system before update when dynamic update is not
+ * supported.
+ */
+ CU_ASSERT_FATAL(odp_tm_stop(odp_tm_systems[0]) == 0);
+ }
+
fanin_cnt = MIN(node_desc->num_children, FANIN_RATIO);
for (fanin = 0; fanin < fanin_cnt; fanin++) {
odp_tm_sched_params_init(&sched_params);
@@ -2653,10 +2767,15 @@ static int set_sched_fanin(const char *node_name,
rc = odp_tm_node_sched_config(tm_node, fanin_node,
sched_profile);
if (rc != 0)
- return -1;
+ goto exit;
}
- return 0;
+exit:
+ if (!dynamic_sched_update) {
+		/* Restart TM system after the update */
+ CU_ASSERT_FATAL(odp_tm_start(odp_tm_systems[0]) == 0);
+ }
+ return rc;
}
static int test_sched_queue_priority(const char *shaper_name,
@@ -2711,8 +2830,13 @@ static int test_sched_queue_priority(const char *shaper_name,
busy_wait(100 * ODP_TIME_MSEC_IN_NS);
- /* Disable the shaper, so as to get the pkts out quicker. */
- set_shaper(node_name, shaper_name, 0, 0);
+ /* Disable the shaper, so as to get the pkts out quicker.
+ * We cannot do this if dynamic shaper update is not supported. Without
+ * dynamic update support set_shaper() can cause packet drops due to
+ * start/stop.
+ */
+ if (dynamic_shaper_update)
+ set_shaper(node_name, shaper_name, 0, 0);
num_rcv_pkts = receive_pkts(odp_tm_systems[0], rcv_pktin,
pkt_cnt + 4, 64 * 1000);
@@ -2730,6 +2854,8 @@ static int test_sched_queue_priority(const char *shaper_name,
CU_ASSERT(pkts_in_order == pkt_cnt);
+ /* Disable shaper in case it is still enabled */
+ set_shaper(node_name, shaper_name, 0, 0);
flush_leftover_pkts(odp_tm_systems[0], rcv_pktin);
CU_ASSERT(odp_tm_is_idle(odp_tm_systems[0]));
return 0;
@@ -2817,8 +2943,13 @@ static int test_sched_node_priority(const char *shaper_name,
busy_wait(100 * ODP_TIME_MSEC_IN_NS);
- /* Disable the shaper, so as to get the pkts out quicker. */
- set_shaper(node_name, shaper_name, 0, 0);
+ /* Disable the shaper, so as to get the pkts out quicker.
+ * We cannot do this if dynamic shaper update is not supported. Without
+ * dynamic update support set_shaper() can cause packet drops due to
+ * start/stop.
+ */
+ if (dynamic_shaper_update)
+ set_shaper(node_name, shaper_name, 0, 0);
num_rcv_pkts = receive_pkts(odp_tm_systems[0], rcv_pktin,
pkts_sent, 64 * 1000);
@@ -2830,6 +2961,8 @@ static int test_sched_node_priority(const char *shaper_name,
0, false, false);
CU_ASSERT(pkts_in_order == total_pkt_cnt);
+ /* Disable shaper in case it is still enabled */
+ set_shaper(node_name, shaper_name, 0, 0);
flush_leftover_pkts(odp_tm_systems[0], rcv_pktin);
CU_ASSERT(odp_tm_is_idle(odp_tm_systems[0]));
return 0;
@@ -2910,8 +3043,13 @@ static int test_sched_wfq(const char *sched_base_name,
busy_wait(1000000); /* wait 1 millisecond */
- /* Disable the shaper, so as to get the pkts out quicker. */
- set_shaper(node_name, shaper_name, 0, 0);
+ /* Disable the shaper, so as to get the pkts out quicker.
+ * We cannot do this if dynamic shaper update is not supported. Without
+ * dynamic update support set_shaper() can cause packet drops due to
+ * start/stop.
+ */
+ if (dynamic_shaper_update)
+ set_shaper(node_name, shaper_name, 0, 0);
num_rcv_pkts = receive_pkts(odp_tm_systems[0], rcv_pktin,
pkt_cnt + 4, 64 * 1000);
@@ -2923,6 +3061,8 @@ static int test_sched_wfq(const char *sched_base_name,
CU_ASSERT(rcv_rate_stats(&rcv_stats[fanin], pkt_class) == 0);
}
+ /* Disable shaper in case it is still enabled */
+ set_shaper(node_name, shaper_name, 0, 0);
flush_leftover_pkts(odp_tm_systems[0], rcv_pktin);
CU_ASSERT(odp_tm_is_idle(odp_tm_systems[0]));
return 0;
@@ -2935,6 +3075,13 @@ static int set_queue_thresholds(odp_tm_queue_t tm_queue,
odp_tm_threshold_t threshold_profile;
int ret;
+ if (!dynamic_threshold_update) {
+ /* Stop TM system before update when dynamic update is not
+ * supported.
+ */
+ CU_ASSERT_FATAL(odp_tm_stop(odp_tm_systems[0]) == 0);
+ }
+
/* First see if a threshold profile already exists with this name, in
* which case we use that profile, else create a new one. */
threshold_profile = odp_tm_thresholds_lookup(threshold_name);
@@ -2942,17 +3089,25 @@ static int set_queue_thresholds(odp_tm_queue_t tm_queue,
ret = odp_tm_thresholds_params_update(threshold_profile,
threshold_params);
if (ret)
- return ret;
+ goto exit;
} else {
threshold_profile = odp_tm_threshold_create(threshold_name,
threshold_params);
- if (threshold_profile == ODP_TM_INVALID)
- return -1;
+ if (threshold_profile == ODP_TM_INVALID) {
+ ret = -1;
+ goto exit;
+ }
threshold_profiles[num_threshold_profiles] = threshold_profile;
num_threshold_profiles++;
}
- return odp_tm_queue_threshold_config(tm_queue, threshold_profile);
+ ret = odp_tm_queue_threshold_config(tm_queue, threshold_profile);
+exit:
+ if (!dynamic_threshold_update) {
+		/* Restart TM system after the update */
+ CU_ASSERT_FATAL(odp_tm_start(odp_tm_systems[0]) == 0);
+ }
+ return ret;
}
static int test_threshold(const char *threshold_name,
@@ -3049,6 +3204,7 @@ static int set_queue_wred(odp_tm_queue_t tm_queue,
{
odp_tm_wred_params_t wred_params;
odp_tm_wred_t wred_profile;
+ int rc;
odp_tm_wred_params_init(&wred_params);
if (use_dual_slope) {
@@ -3066,6 +3222,13 @@ static int set_queue_wred(odp_tm_queue_t tm_queue,
wred_params.enable_wred = true;
wred_params.use_byte_fullness = use_byte_fullness;
+ if (!dynamic_wred_update) {
+ /* Stop TM system before update when dynamic update is not
+ * supported.
+ */
+ CU_ASSERT_FATAL(odp_tm_stop(odp_tm_systems[0]) == 0);
+ }
+
/* First see if a wred profile already exists with this name, in
* which case we use that profile, else create a new one. */
wred_profile = odp_tm_wred_lookup(wred_name);
@@ -3084,7 +3247,14 @@ static int set_queue_wred(odp_tm_queue_t tm_queue,
}
}
- return odp_tm_queue_wred_config(tm_queue, pkt_color, wred_profile);
+ rc = odp_tm_queue_wred_config(tm_queue, pkt_color, wred_profile);
+
+ if (!dynamic_wred_update) {
+		/* Restart TM system after the update */
+ CU_ASSERT_FATAL(odp_tm_start(odp_tm_systems[0]) == 0);
+ }
+ return rc;
}
static int test_byte_wred(const char *wred_name,
@@ -3149,8 +3319,14 @@ static int test_byte_wred(const char *wred_name,
pkts_sent = send_pkts(tm_queue, num_test_pkts);
- /* Disable the shaper, so as to get the pkts out quicker. */
- set_shaper(node_name, shaper_name, 0, 0);
+ /* Disable the shaper, so as to get the pkts out quicker.
+ * We cannot do this if dynamic shaper update is not supported. Without
+ * dynamic update support set_shaper() can cause packet drops due to
+ * start/stop.
+ */
+ if (dynamic_shaper_update)
+ set_shaper(node_name, shaper_name, 0, 0);
+
num_rcv_pkts = receive_pkts(odp_tm_systems[0], rcv_pktin,
num_fill_pkts + pkts_sent, 64 * 1000);
@@ -3160,6 +3336,8 @@ static int test_byte_wred(const char *wred_name,
if (wred_pkt_cnts == NULL)
return -1;
+ /* Disable shaper in case it is still enabled */
+ set_shaper(node_name, shaper_name, 0, 0);
flush_leftover_pkts(odp_tm_systems[0], rcv_pktin);
CU_ASSERT(odp_tm_is_idle(odp_tm_systems[0]));
@@ -3234,8 +3412,14 @@ static int test_pkt_wred(const char *wred_name,
pkts_sent = send_pkts(tm_queue, num_test_pkts);
- /* Disable the shaper, so as to get the pkts out quicker. */
- set_shaper(node_name, shaper_name, 0, 0);
+ /* Disable the shaper, so as to get the pkts out quicker.
+ * We cannot do this if dynamic shaper update is not supported. Without
+ * dynamic update support set_shaper() can cause packet drops due to
+ * start/stop.
+ */
+ if (dynamic_shaper_update)
+ set_shaper(node_name, shaper_name, 0, 0);
+
ret = receive_pkts(odp_tm_systems[0], rcv_pktin,
num_fill_pkts + pkts_sent, 64 * 1000);
if (ret < 0)
@@ -3249,6 +3433,8 @@ static int test_pkt_wred(const char *wred_name,
if (wred_pkt_cnts == NULL)
return -1;
+ /* Disable shaper in case it is still enabled */
+ set_shaper(node_name, shaper_name, 0, 0);
flush_leftover_pkts(odp_tm_systems[0], rcv_pktin);
CU_ASSERT(odp_tm_is_idle(odp_tm_systems[0]));
@@ -4099,5 +4285,8 @@ int main(int argc, char *argv[])
if (ret == 0)
ret = odp_cunit_run();
+	/* Exit with 77 to indicate that the whole test was skipped */
+ if (!ret && suite_inactive == (ARRAY_SIZE(traffic_mngr_suites) - 1))
+ return 77;
return ret;
}
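
Exit status 77 follows the common test-harness convention (used by the automake test driver, among others) of treating 77 as "skipped" rather than passed or failed. The small wrapper below is illustrative only and not part of the patch; the binary name is an assumption. It shows how a caller could map that status to a result.

	#include <stdio.h>
	#include <sys/types.h>
	#include <sys/wait.h>
	#include <unistd.h>

	int main(void)
	{
		int status;
		pid_t pid = fork();

		if (pid == 0) {
			/* Assumed binary name, for illustration only */
			execl("./traffic_mngr_main", "traffic_mngr_main",
			      (char *)NULL);
			_exit(127);	/* exec failed */
		}
		if (pid < 0 || waitpid(pid, &status, 0) < 0)
			return 1;

		if (WIFEXITED(status) && WEXITSTATUS(status) == 77)
			printf("SKIP\n");
		else if (WIFEXITED(status) && WEXITSTATUS(status) == 0)
			printf("PASS\n");
		else
			printf("FAIL\n");

		return 0;
	}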