aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMatias Elo <matias.elo@nokia.com>2020-06-09 08:09:33 +0300
committerGitHub <noreply@github.com>2020-06-09 08:09:33 +0300
commit11314a0b7585edec8f9956dd00d1bc12f4fe3210 (patch)
tree86c023292bb6614ee2219d3f4cb37fbd6336de88
parent102173c31bf1af979488af3d3d4175d162ac0454 (diff)
parent9d4a142edd7f960541a64b03b491674a4bf3ddea (diff)
Merge pull request #94
Merge and port odp-linux patches up to 0153b816e
-rw-r--r--.travis.yml8
-rw-r--r--config/odp-linux-generic.conf6
-rw-r--r--configure.ac11
-rw-r--r--example/hello/odp_hello.c39
-rw-r--r--include/odp/api/spec/classification.h10
-rw-r--r--include/odp/api/spec/cpu.h39
-rw-r--r--include/odp/api/spec/crypto.h2
-rw-r--r--include/odp/api/spec/pool.h62
-rw-r--r--include/odp/api/spec/queue.h46
-rw-r--r--include/odp/api/spec/queue_types.h6
-rw-r--r--include/odp/api/spec/schedule.h32
-rw-r--r--include/odp/api/spec/shared_memory.h19
-rw-r--r--include/odp/api/spec/timer.h6
-rw-r--r--include/odp/api/spec/traffic_mngr.h24
-rw-r--r--m4/odp_libconfig.m420
-rw-r--r--platform/linux-dpdk/include/odp_config_internal.h5
-rw-r--r--platform/linux-dpdk/m4/configure.m43
-rw-r--r--platform/linux-dpdk/m4/odp_libconfig.m428
-rw-r--r--platform/linux-dpdk/odp_crypto.c2
-rw-r--r--platform/linux-dpdk/odp_pool.c78
-rw-r--r--platform/linux-generic/include/odp_pool_internal.h7
-rw-r--r--platform/linux-generic/include/odp_schedule_scalable.h2
-rw-r--r--platform/linux-generic/m4/configure.m42
-rw-r--r--platform/linux-generic/m4/odp_libconfig.m428
-rw-r--r--platform/linux-generic/odp_classification.c2
-rw-r--r--platform/linux-generic/odp_crypto_null.c2
-rw-r--r--platform/linux-generic/odp_crypto_openssl.c2
-rw-r--r--platform/linux-generic/odp_packet_io.c5
-rw-r--r--platform/linux-generic/odp_pool.c137
-rw-r--r--platform/linux-generic/odp_schedule_basic.c10
-rw-r--r--platform/linux-generic/odp_schedule_scalable.c22
-rw-r--r--platform/linux-generic/odp_schedule_sp.c10
-rw-r--r--platform/linux-generic/odp_traffic_mngr.c63
-rw-r--r--platform/linux-generic/pktio/stats/ethtool_stats.c10
-rw-r--r--platform/linux-generic/test/pktio_ipc/ipc_common.c6
-rw-r--r--platform/linux-generic/test/pktio_ipc/ipc_common.h7
-rwxr-xr-xscripts/git-transplant.py3
-rw-r--r--test/performance/odp_l2fwd.c221
-rw-r--r--test/validation/api/classification/odp_classification_test_pmr.c2
-rw-r--r--test/validation/api/classification/odp_classification_tests.c2
-rw-r--r--test/validation/api/pool/pool.c174
-rw-r--r--test/validation/api/queue/queue.c3
-rw-r--r--test/validation/api/shmem/shmem.c96
-rw-r--r--test/validation/api/system/system.c40
-rw-r--r--test/validation/api/timer/timer.c17
45 files changed, 1000 insertions, 319 deletions
diff --git a/.travis.yml b/.travis.yml
index 5ff79bfd8..1aa85fc96 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -172,6 +172,14 @@ jobs:
-e CC="${CC}"
-e CONF=""
${DOCKER_NAMESPACE}/travis-odp-${OS}-${ARCH} /odp/scripts/ci/out_of_tree.sh
+ - stage: test
+ env: TEST=gcc-10
+ compiler: gcc-10
+ script:
+ - if [ -z "${DOCKER_NAMESPACE}" ] ; then export DOCKER_NAMESPACE="opendataplane"; fi
+ - docker run --privileged -i -t -v `pwd`:/odp --shm-size 8g
+ -e CC="${CC}"
+ ${DOCKER_NAMESPACE}/travis-odp-ubuntu_20.04-${ARCH} /odp/scripts/ci/build_${ARCH}.sh
- stage: "build only"
env: TEST=documentation
compiler: gcc
diff --git a/config/odp-linux-generic.conf b/config/odp-linux-generic.conf
index 6bc9753c3..ced837a29 100644
--- a/config/odp-linux-generic.conf
+++ b/config/odp-linux-generic.conf
@@ -58,14 +58,16 @@ shm: {
# Pool options
pool: {
- # Thread local cache size. Value must be a multiple of burst_size
+ # Default thread local cache size. Cache size in pool parameters is
+ # initialized to this value. Value must be a multiple of burst_size
# (min 2 x burst_size).
#
# The total maximum number of cached events is the number of threads
# using the pool multiplied with local_cache_size.
local_cache_size = 256
- # Transfer size between local cache and global pool.
+ # Transfer size between local cache and global pool. Must be larger
+ # than zero.
burst_size = 32
# Packet pool options
diff --git a/configure.ac b/configure.ac
index 538b3994b..b12001f25 100644
--- a/configure.ac
+++ b/configure.ac
@@ -4,7 +4,7 @@ AC_PREREQ([2.5])
##########################################################################
m4_define([odpapi_generation_version], [1])
m4_define([odpapi_major_version], [23])
-m4_define([odpapi_minor_version], [4])
+m4_define([odpapi_minor_version], [5])
m4_define([odpapi_point_version], [0])
m4_define([odpapi_version],
[odpapi_generation_version.odpapi_major_version.odpapi_minor_version.odpapi_point_version])
@@ -133,6 +133,15 @@ ODP_CHECK_CFLAG([-Wformat-overflow=0])
# header structures). Generate only warnings on those, not errors.
ODP_CHECK_CFLAG([-Wno-error=address-of-packed-member])
+# GCC 10 sometimes gets confused about object sizes and gives bogus warnings.
+# Make the affected warnings generate only warnings, not errors.
+AS_IF([test "$GCC" == yes],
+ AS_IF([test `$CC -dumpversion` -ge 10],
+ ODP_CHECK_CFLAG([-Wno-error=array-bounds])
+ ODP_CHECK_CFLAG([-Wno-error=stringop-overflow])
+ )
+)
+
ODP_CFLAGS="$ODP_CFLAGS -std=c99"
ODP_CXXFLAGS="$ODP_CXXFLAGS -std=c++11 -Wno-deprecated-register"
diff --git a/example/hello/odp_hello.c b/example/hello/odp_hello.c
index 4e762bb88..391406946 100644
--- a/example/hello/odp_hello.c
+++ b/example/hello/odp_hello.c
@@ -10,43 +10,29 @@
* anything else than the ODP API header file.
*/
-/* Linux CPU affinity */
-#define _GNU_SOURCE
-#include <sched.h>
-
-/* Linux PID */
-#include <sys/types.h>
-#include <unistd.h>
-
#include <stdio.h>
#include <string.h>
#include <odp_api.h>
typedef struct {
- int cpu;
int num;
} options_t;
static int parse_args(int argc, char *argv[], options_t *opt)
{
- static const char * const args[] = {"-c", "-n"};
+ static const char * const args[] = {"-n"};
int i, tmp;
for (i = 1; i < argc; i++) {
- if ((strcmp(argv[i], args[0]) == 0) &&
+ if ((strcmp(argv[i], args[0]) == 0) && argv[i + 1] &&
(sscanf(argv[i + 1], "%i", &tmp) == 1)) {
- opt->cpu = tmp;
- i++;
- } else if ((strcmp(argv[i], args[1]) == 0) &&
- (sscanf(argv[i + 1], "%i", &tmp) == 1)) {
opt->num = tmp;
i++;
} else {
printf("\nUsage:\n"
- " %s CPU number\n"
- " %s Number of iterations\n\n",
- args[0], args[1]);
+ " [%s Number of iterations]\n\n",
+ args[0]);
return -1;
}
}
@@ -58,36 +44,19 @@ int main(int argc, char *argv[])
{
odp_instance_t inst;
options_t opt;
- pid_t pid;
- cpu_set_t cpu_set;
int i;
- odp_cpumask_t mask;
memset(&opt, 0, sizeof(opt));
- opt.cpu = 0;
opt.num = 1;
if (parse_args(argc, argv, &opt))
return -1;
- pid = getpid();
-
if (odp_init_global(&inst, NULL, NULL)) {
printf("Global init failed.\n");
return -1;
}
- odp_cpumask_default_control(&mask, 0);
- opt.cpu = odp_cpumask_first(&mask);
-
- CPU_ZERO(&cpu_set);
- CPU_SET(opt.cpu, &cpu_set);
-
- if (sched_setaffinity(pid, sizeof(cpu_set_t), &cpu_set)) {
- printf("Set CPU affinity failed.\n");
- return -1;
- }
-
if (odp_init_local(inst, ODP_THREAD_CONTROL)) {
printf("Local init failed.\n");
return -1;
diff --git a/include/odp/api/spec/classification.h b/include/odp/api/spec/classification.h
index 748404dae..5fd546484 100644
--- a/include/odp/api/spec/classification.h
+++ b/include/odp/api/spec/classification.h
@@ -308,6 +308,8 @@ int odp_cls_capability(odp_cls_capability_t *capability);
* Create a class-of-service
*
* The use of class-of-service name is optional. Unique names are not required.
+ * Use odp_cls_cos_param_init() to initialize parameters into their default
+ * values.
*
* @param name Name of the class-of-service or NULL. Maximum string
* length is ODP_COS_NAME_LEN.
@@ -320,7 +322,8 @@ int odp_cls_capability(odp_cls_capability_t *capability);
* and pool associated with a class of service and when any one of these values
* are configured as INVALID then the packets assigned to the CoS gets dropped.
*/
-odp_cos_t odp_cls_cos_create(const char *name, odp_cls_cos_param_t *param);
+odp_cos_t odp_cls_cos_create(const char *name,
+ const odp_cls_cos_param_t *param);
/**
* Queue hash result
@@ -612,6 +615,8 @@ typedef struct odp_pmr_param_t {
void odp_cls_pmr_param_init(odp_pmr_param_t *param);
/**
+ * Create a packet matching rule
+ *
* Create a packet match rule between source and destination class of service.
* This packet matching rule is applied on all packets arriving at the source
* class of service and packets satisfying this PMR are sent to the destination
@@ -624,6 +629,9 @@ void odp_cls_pmr_param_init(odp_pmr_param_t *param);
* of inspecting the return value when installing such rules, and perform
* appropriate fallback action.
*
+ * Use odp_cls_pmr_param_init() to initialize parameters into their default
+ * values.
+ *
* @param terms Array of odp_pmr_param_t entries, one entry per term
* desired.
* @param num_terms Number of terms in the match rule.
diff --git a/include/odp/api/spec/cpu.h b/include/odp/api/spec/cpu.h
index e424742c6..bacd0fac5 100644
--- a/include/odp/api/spec/cpu.h
+++ b/include/odp/api/spec/cpu.h
@@ -68,44 +68,48 @@ const char *odp_cpu_model_str_id(int id);
/**
* Current CPU frequency in Hz
*
- * Returns current frequency of this CPU
+ * Returns current frequency of this CPU. Returns zero if the frequency
+ * request is not supported.
*
* @return CPU frequency in Hz
- * @retval 0 on failure
+ * @retval 0 Not supported or a failure
*/
uint64_t odp_cpu_hz(void);
/**
* Current CPU frequency of a CPU (in Hz)
*
- * Returns current frequency of specified CPU
+ * Returns current frequency of the specified CPU. Returns zero if the frequency
+ * request is not supported.
*
* @param id CPU ID
*
* @return CPU frequency in Hz
- * @retval 0 on failure
+ * @retval 0 Not supported or a failure
*/
uint64_t odp_cpu_hz_id(int id);
/**
* Maximum CPU frequency in Hz
*
- * Returns maximum frequency of this CPU
+ * Returns the maximum frequency of this CPU. Returns zero if the frequency
+ * request is not supported.
*
* @return CPU frequency in Hz
- * @retval 0 on failure
+ * @retval 0 Not supported or a failure
*/
uint64_t odp_cpu_hz_max(void);
/**
* Maximum CPU frequency of a CPU (in Hz)
*
- * Returns maximum frequency of specified CPU
+ * Returns the maximum frequency of the specified CPU. Returns zero if the
+ * frequency request is not supported.
*
* @param id CPU ID
*
* @return CPU frequency in Hz
- * @retval 0 on failure
+ * @retval 0 Not supported or a failure
*/
uint64_t odp_cpu_hz_max_id(int id);
@@ -115,14 +119,15 @@ uint64_t odp_cpu_hz_max_id(int id);
* Return current CPU cycle count. Cycle count may not be reset at ODP init
* and thus may wrap back to zero between two calls. Use odp_cpu_cycles_max()
* to read the maximum count value after which it wraps. Cycle count frequency
- * follows the CPU frequency and thus may change at any time. The count may
- * advance in steps larger than one. Use odp_cpu_cycles_resolution() to read
- * the step size.
+ * follows the CPU frequency and thus may change at any time. Cycle count should
+ * not be used for time measurements due to the possibility of frequency
+ * variation. The count may advance in steps larger than one. Use
+ * odp_cpu_cycles_resolution() to read the step size.
*
- * @note Do not use CPU count for time measurements since the frequency may
- * vary.
+ * Returns zero if CPU cycle counter is not supported.
*
* @return Current CPU cycle count
+ * @retval 0 Not supported
*/
uint64_t odp_cpu_cycles(void);
@@ -143,9 +148,11 @@ uint64_t odp_cpu_cycles_diff(uint64_t c2, uint64_t c1);
/**
* Maximum CPU cycle count
*
- * Maximum CPU cycle count value before it wraps back to zero.
+ * Maximum CPU cycle count value before it wraps back to zero. Returns zero
+ * if CPU cycle counter is not supported.
*
* @return Maximum CPU cycle count value
+ * @retval 0 Not supported
*/
uint64_t odp_cpu_cycles_max(void);
@@ -153,9 +160,11 @@ uint64_t odp_cpu_cycles_max(void);
* Resolution of CPU cycle count
*
* CPU cycle count may advance in steps larger than one. This function returns
- * resolution of odp_cpu_cycles() in CPU cycles.
+ * resolution of odp_cpu_cycles() in CPU cycles. Returns zero if CPU cycle
+ * counter is not supported.
*
* @return CPU cycle count resolution in CPU cycles
+ * @retval 0 Not supported
*/
uint64_t odp_cpu_cycles_resolution(void);
diff --git a/include/odp/api/spec/crypto.h b/include/odp/api/spec/crypto.h
index d26b76b3a..ae96214e2 100644
--- a/include/odp/api/spec/crypto.h
+++ b/include/odp/api/spec/crypto.h
@@ -1029,7 +1029,7 @@ int odp_crypto_auth_capability(odp_auth_alg_t auth,
* @retval 0 on success
* @retval <0 on failure
*/
-int odp_crypto_session_create(odp_crypto_session_param_t *param,
+int odp_crypto_session_create(const odp_crypto_session_param_t *param,
odp_crypto_session_t *session,
odp_crypto_ses_create_err_t *status);
diff --git a/include/odp/api/spec/pool.h b/include/odp/api/spec/pool.h
index 0fb9e9dc8..cdc7f5fef 100644
--- a/include/odp/api/spec/pool.h
+++ b/include/odp/api/spec/pool.h
@@ -1,4 +1,5 @@
/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2020, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -69,6 +70,12 @@ typedef struct odp_pool_capability_t {
* The value of zero means that limited only by the available
* memory size for the pool. */
uint32_t max_num;
+
+ /** Minimum size of thread local cache */
+ uint32_t min_cache_size;
+
+ /** Maximum size of thread local cache */
+ uint32_t max_cache_size;
} buf;
/** Packet pool capabilities */
@@ -148,6 +155,12 @@ typedef struct odp_pool_capability_t {
* Maximum number of packet pool subparameters. Valid range is
* 0 ... ODP_POOL_MAX_SUBPARAMS. */
uint8_t max_num_subparam;
+
+ /** Minimum size of thread local cache */
+ uint32_t min_cache_size;
+
+ /** Maximum size of thread local cache */
+ uint32_t max_cache_size;
} pkt;
/** Timeout pool capabilities */
@@ -160,6 +173,12 @@ typedef struct odp_pool_capability_t {
* The value of zero means that limited only by the available
* memory size for the pool. */
uint32_t max_num;
+
+ /** Minimum size of thread local cache */
+ uint32_t min_cache_size;
+
+ /** Maximum size of thread local cache */
+ uint32_t max_cache_size;
} tmo;
} odp_pool_capability_t;
@@ -190,11 +209,6 @@ typedef struct odp_pool_pkt_subparam_t {
/**
* Pool parameters
- *
- * A note for all pool types: a single thread may not be able to allocate all
- * 'num' elements from the pool at any particular time, as implementations are
- * allowed to store some elements (per thread and HW engine) for caching
- * purposes.
*/
typedef struct odp_pool_param_t {
/** Pool type */
@@ -215,6 +229,22 @@ typedef struct odp_pool_param_t {
* Default will always be a multiple of 8.
*/
uint32_t align;
+
+ /** Maximum number of buffers cached locally per thread
+ *
+ * A non-zero value allows implementation to cache buffers
+ * locally per each thread. Thread local caching may improve
+ * performance, but requires application to take account that
+ * some buffers may be stored locally per thread and thus are
+ * not available for allocation from other threads.
+ *
+ * This is the maximum number of buffers to be cached per
+ * thread. The actual cache size is implementation specific.
+ * The value must not be less than 'min_cache_size' or exceed
+ * 'max_cache_size' capability. The default value is
+ * implementation specific and set by odp_pool_param_init().
+ */
+ uint32_t cache_size;
} buf;
/** Parameters for packet pools */
@@ -250,7 +280,7 @@ typedef struct odp_pool_param_t {
/** Maximum packet length that will be allocated from
* the pool. The maximum value is defined by pool capability
- * pkt.max_len. Use 0 for default (the pool maximum).
+ * pkt.max_len. Use 0 for default.
*/
uint32_t max_len;
@@ -309,12 +339,24 @@ typedef struct odp_pool_param_t {
* simultaneously (e.g. due to subpool design).
*/
odp_pool_pkt_subparam_t sub[ODP_POOL_MAX_SUBPARAMS];
+
+ /** Maximum number of packets cached locally per thread
+ *
+ * See buf.cache_size documentation for details.
+ */
+ uint32_t cache_size;
} pkt;
/** Parameters for timeout pools */
struct {
/** Number of timeouts in the pool */
uint32_t num;
+
+ /** Maximum number of timeouts cached locally per thread
+ *
+ * See buf.cache_size documentation for details.
+ */
+ uint32_t cache_size;
} tmo;
} odp_pool_param_t;
@@ -331,17 +373,17 @@ typedef struct odp_pool_param_t {
*
* This routine is used to create a pool. The use of pool name is optional.
* Unique names are not required. However, odp_pool_lookup() returns only a
- * single matching pool.
+ * single matching pool. Use odp_pool_param_init() to initialize parameters
+ * into their default values.
*
* @param name Name of the pool or NULL. Maximum string length is
* ODP_POOL_NAME_LEN.
- * @param params Pool parameters.
+ * @param param Pool parameters.
*
* @return Handle of the created pool
* @retval ODP_POOL_INVALID Pool could not be created
*/
-
-odp_pool_t odp_pool_create(const char *name, odp_pool_param_t *params);
+odp_pool_t odp_pool_create(const char *name, const odp_pool_param_t *param);
/**
* Destroy a pool previously created by odp_pool_create()
diff --git a/include/odp/api/spec/queue.h b/include/odp/api/spec/queue.h
index f6ddf5d71..519b30a24 100644
--- a/include/odp/api/spec/queue.h
+++ b/include/odp/api/spec/queue.h
@@ -130,10 +130,16 @@ int odp_queue_context_set(odp_queue_t queue, void *context, uint32_t len);
void *odp_queue_context(odp_queue_t queue);
/**
- * Queue enqueue
+ * Enqueue an event to a queue
*
- * Enqueue the 'ev' on 'queue'. On failure the event is not consumed, the caller
- * has to take care of it.
+ * Enqueues the event into the queue. The caller loses ownership of the event on
+ * a successful call. The event is not enqueued on failure, and the caller
+ * maintains ownership of it.
+ *
+ * When successful, this function acts as a release memory barrier between
+ * the sender (the calling thread) and the receiver of the event. The receiver
+ * sees correctly the memory stores done by the sender before it enqueued
+ * the event.
*
* @param queue Queue handle
* @param ev Event handle
@@ -146,14 +152,15 @@ int odp_queue_enq(odp_queue_t queue, odp_event_t ev);
/**
* Enqueue multiple events to a queue
*
- * Enqueue the events from 'events[]' on 'queue'. A successful call returns the
- * actual number of events enqueued. If return value is less than 'num', the
- * remaining events at the end of events[] are not consumed, and the caller
- * has to take care of them.
+ * Like odp_queue_enq(), but enqueues multiple events into the queue. Events are
+ * stored into the queue in the order they are in the array. A successful
+ * call returns the actual number of events enqueued. If return value is less
+ * than 'num', the remaining events at the end of events[] are not enqueued,
+ * and the caller maintains ownership of those.
*
* @param queue Queue handle
* @param events Array of event handles
- * @param num Number of event handles to enqueue
+ * @param num Number of events to enqueue
*
* @return Number of events actually enqueued (0 ... num)
* @retval <0 on failure
@@ -161,27 +168,34 @@ int odp_queue_enq(odp_queue_t queue, odp_event_t ev);
int odp_queue_enq_multi(odp_queue_t queue, const odp_event_t events[], int num);
/**
- * Queue dequeue
+ * Dequeue an event from a queue
+ *
+ * Returns the next event from head of the queue, or ODP_EVENT_INVALID when the
+ * queue is empty. Cannot be used for ODP_QUEUE_TYPE_SCHED type queues
+ * (use odp_schedule() instead).
*
- * Dequeues next event from head of the queue. Cannot be used for
- * ODP_QUEUE_TYPE_SCHED type queues (use odp_schedule() instead).
+ * When successful, this function acts as an acquire memory barrier between
+ * the sender and the receiver (the calling thread) of the event. The receiver
+ * sees correctly the memory stores done by the sender before it enqueued
+ * the event.
*
* @param queue Queue handle
*
* @return Event handle
- * @retval ODP_EVENT_INVALID on failure (e.g. queue empty)
+ * @retval ODP_EVENT_INVALID on failure, or when the queue is empty
*/
odp_event_t odp_queue_deq(odp_queue_t queue);
/**
* Dequeue multiple events from a queue
*
- * Dequeues multiple events from head of the queue. Cannot be used for
- * ODP_QUEUE_TYPE_SCHED type queues (use odp_schedule() instead).
+ * Like odp_queue_deq(), but dequeues multiple events from head of the queue.
+ * Cannot be used for ODP_QUEUE_TYPE_SCHED type queues (use odp_schedule_multi()
+ * instead). A successful call returns the actual number of events dequeued.
*
- * @param queue Queue handle
+ * @param queue Queue handle
* @param[out] events Array of event handles for output
- * @param num Maximum number of events to dequeue
+ * @param num Maximum number of events to dequeue
* @return Number of events actually dequeued (0 ... num)
* @retval <0 on failure
diff --git a/include/odp/api/spec/queue_types.h b/include/odp/api/spec/queue_types.h
index 3ed9246dc..a6899c31c 100644
--- a/include/odp/api/spec/queue_types.h
+++ b/include/odp/api/spec/queue_types.h
@@ -194,7 +194,8 @@ typedef struct odp_queue_capability_t {
* supported when zero. */
uint32_t max_num;
- /** Maximum queue size */
+ /** Maximum queue size. The value of zero means that
+ * there is no size limit. */
uint32_t max_size;
} lockfree;
@@ -207,7 +208,8 @@ typedef struct odp_queue_capability_t {
* supported when zero. */
uint32_t max_num;
- /** Maximum queue size */
+ /** Maximum queue size. The value of zero means that
+ * there is no size limit. */
uint32_t max_size;
} waitfree;
diff --git a/include/odp/api/spec/schedule.h b/include/odp/api/spec/schedule.h
index 07d856fd9..716c09f51 100644
--- a/include/odp/api/spec/schedule.h
+++ b/include/odp/api/spec/schedule.h
@@ -56,20 +56,34 @@ extern "C" {
uint64_t odp_schedule_wait_time(uint64_t ns);
/**
- * Schedule
+ * Schedule an event
*
- * Schedules all queues created with ODP_QUEUE_TYPE_SCHED type. Returns
- * next highest priority event which is available for the calling thread.
- * Outputs the source queue of the event. If there's no event available, waits
+ * Run event scheduler to find the next highest priority event which is
+ * available for the calling thread. Only queues that have been created with
+ * ODP_QUEUE_TYPE_SCHED type are connected to the scheduler. Optionally,
+ * outputs the source queue of the event. If there's no event available, waits
* for an event according to the wait parameter setting. Returns
* ODP_EVENT_INVALID if reaches end of the wait period.
*
* When returns an event, the thread holds the queue synchronization context
- * (atomic or ordered) until the next odp_schedule() or odp_schedule_multi()
- * call. The next call implicitly releases the current context and potentially
- * returns with a new context. User can allow early context release (e.g., see
- * odp_schedule_release_atomic() and odp_schedule_release_ordered()) for
- * performance optimization.
+ * (atomic or ordered) until the next schedule call (e.g. odp_schedule() or
+ * odp_schedule_multi()). The next call implicitly releases the current context
+ * and potentially returns with a new context. User can allow early context
+ * release (e.g., see odp_schedule_release_atomic() and
+ * odp_schedule_release_ordered()) for performance optimization.
+ *
+ * When successful, this function acts as an acquire memory barrier between
+ * the sender and the receiver (the calling thread) of the event. The receiver
+ * sees correctly the memory stores done by the sender before it enqueued
+ * the event.
+ *
+ * When the event was scheduled from an atomic queue, this function acts as
+ * an acquire memory barrier between the previous holder of the same atomic
+ * synchronization context and the calling thread. When the context is released,
+ * a release memory barrier is performed towards the next holder of the context.
+ * This ensures that memory stores done when holding an atomic context are
+ * correctly visible to other threads that will subsequently hold the same
+ * atomic context.
*
* @param from Output parameter for the source queue (where the event was
* dequeued from). Ignored if NULL.
diff --git a/include/odp/api/spec/shared_memory.h b/include/odp/api/spec/shared_memory.h
index 58ecaa920..6d4066f15 100644
--- a/include/odp/api/spec/shared_memory.h
+++ b/include/odp/api/spec/shared_memory.h
@@ -44,6 +44,14 @@ extern "C" {
/**
* Application SW only, no HW access
+ *
+ * @deprecated When set, application will not share the reserved memory with HW
+ * accelerators. However, leaving this flag to zero does not guarantee that
+ * the reserved memory can be accessed from HW, and thus usage of this flag is
+ * considered deprecated. If HW accessible memory is required, set
+ * ODP_SHM_HW_ACCESS instead.
+ *
+ * This flag must not be combined with ODP_SHM_HW_ACCESS.
*/
#define ODP_SHM_SW_ONLY 0x1
@@ -79,6 +87,17 @@ extern "C" {
#define ODP_SHM_HP 0x10
/**
+ * Share memory with HW accelerators
+ *
+ * When set, this flag guarantees that the reserved memory is accessible
+ * by both CPUs and HW accelerators of the device. This may require e.g. that
+ * the odp_shm_reserve() call configures the memory to be accessible through
+ * an Input-Output Memory Management Unit (IOMMU). The reserve call will return
+ * failure if such configuration is not supported.
+ */
+#define ODP_SHM_HW_ACCESS 0x20
+
+/**
* Shared memory block info
*/
typedef struct odp_shm_info_t {
diff --git a/include/odp/api/spec/timer.h b/include/odp/api/spec/timer.h
index 3466e23a0..a48e79bf5 100644
--- a/include/odp/api/spec/timer.h
+++ b/include/odp/api/spec/timer.h
@@ -489,7 +489,11 @@ int odp_timeout_fresh(odp_timeout_t tmo);
odp_timer_t odp_timeout_timer(odp_timeout_t tmo);
/**
- * Return expiration tick for the timeout
+ * Timeout expiration tick
+ *
+ * Returns the absolute expiration time (in timer ticks) that was used to set
+ * (or reset) the timer. For timers set with absolute expiration time this
+ * equals the provided tick value.
*
* @param tmo Timeout handle
*
diff --git a/include/odp/api/spec/traffic_mngr.h b/include/odp/api/spec/traffic_mngr.h
index b09d2675f..03294c820 100644
--- a/include/odp/api/spec/traffic_mngr.h
+++ b/include/odp/api/spec/traffic_mngr.h
@@ -804,7 +804,7 @@ void odp_tm_shaper_params_init(odp_tm_shaper_params_t *params);
* profile object.
*/
odp_tm_shaper_t odp_tm_shaper_create(const char *name,
- odp_tm_shaper_params_t *params);
+ const odp_tm_shaper_params_t *params);
/** Destroy shaper profile object
*
@@ -845,7 +845,7 @@ int odp_tm_shaper_params_read(odp_tm_shaper_t shaper_profile,
* @return Returns < 0 upon failure or 0 upon success.
*/
int odp_tm_shaper_params_update(odp_tm_shaper_t shaper_profile,
- odp_tm_shaper_params_t *params);
+ const odp_tm_shaper_params_t *params);
/** odp_tm_shaper_lookup() can be used to find the shaper profile object
* created with the specified name.
@@ -914,7 +914,7 @@ void odp_tm_sched_params_init(odp_tm_sched_params_t *params);
* object.
*/
odp_tm_sched_t odp_tm_sched_create(const char *name,
- odp_tm_sched_params_t *params);
+ const odp_tm_sched_params_t *params);
/** Destroy scheduler profile object
*
@@ -955,7 +955,7 @@ int odp_tm_sched_params_read(odp_tm_sched_t sched_profile,
* @return Returns < 0 upon failure or 0 upon success.
*/
int odp_tm_sched_params_update(odp_tm_sched_t sched_profile,
- odp_tm_sched_params_t *params);
+ const odp_tm_sched_params_t *params);
/** odp_tm_sched_lookup() can be used to find the scheduler profile object
* created with the specified name.
@@ -1004,7 +1004,8 @@ void odp_tm_threshold_params_init(odp_tm_threshold_params_t *params);
* profile object.
*/
odp_tm_threshold_t odp_tm_threshold_create(const char *name,
- odp_tm_threshold_params_t *params);
+ const odp_tm_threshold_params_t
+ *params);
/** Destroy a queue threshold profile object
*
@@ -1046,7 +1047,7 @@ int odp_tm_thresholds_params_read(odp_tm_threshold_t threshold_profile,
* @return Returns < 0 upon failure or 0 upon success.
*/
int odp_tm_thresholds_params_update(odp_tm_threshold_t threshold_profile,
- odp_tm_threshold_params_t *params);
+ const odp_tm_threshold_params_t *params);
/** odp_tm_thresholds_lookup() can be used to find the queue thresholds
* profile object created with the specified name.
@@ -1140,7 +1141,7 @@ void odp_tm_wred_params_init(odp_tm_wred_params_t *params);
* object.
*/
odp_tm_wred_t odp_tm_wred_create(const char *name,
- odp_tm_wred_params_t *params);
+ const odp_tm_wred_params_t *params);
/** Destroy WRED profile object
*
@@ -1181,7 +1182,7 @@ int odp_tm_wred_params_read(odp_tm_wred_t wred_profile,
* @return Returns < 0 upon failure or 0 upon success.
*/
int odp_tm_wred_params_update(odp_tm_wred_t wred_profile,
- odp_tm_wred_params_t *params);
+ const odp_tm_wred_params_t *params);
/** odp_tm_wred_lookup() can be used to find the WRED profile object created
* with the specified name.
@@ -1259,9 +1260,8 @@ void odp_tm_node_params_init(odp_tm_node_params_t *params);
* @return Returns ODP_TM_INVALID upon failure, otherwise returns
* a valid odp_tm_node_t handle if successful.
*/
-odp_tm_node_t odp_tm_node_create(odp_tm_t odp_tm,
- const char *name,
- odp_tm_node_params_t *params);
+odp_tm_node_t odp_tm_node_create(odp_tm_t odp_tm, const char *name,
+ const odp_tm_node_params_t *params);
/** Destroy a tm_node object.
*
@@ -1426,7 +1426,7 @@ void odp_tm_queue_params_init(odp_tm_queue_params_t *params);
* odp_tm_queue_t handle.
*/
odp_tm_queue_t odp_tm_queue_create(odp_tm_t odp_tm,
- odp_tm_queue_params_t *params);
+ const odp_tm_queue_params_t *params);
/** Destroy a tm_queue object. The odp_tm_queue_destroy frees the resources
* used by a tm_queue_t object. The tm_queue to be destroyed MUST not be
diff --git a/m4/odp_libconfig.m4 b/m4/odp_libconfig.m4
index 6b5bca268..29cc8c85b 100644
--- a/m4/odp_libconfig.m4
+++ b/m4/odp_libconfig.m4
@@ -1,5 +1,5 @@
-# ODP_LIBCONFIG(PLATFORM)
-# -----------------------
+# ODP_LIBCONFIG(PLATFORM, CONFIG-FILE-PATH)
+# -----------------------------------------
AC_DEFUN([ODP_LIBCONFIG],
[dnl
##########################################################################
@@ -14,6 +14,18 @@ AC_CHECK_PROGS([OD], [od])
AC_PROG_SED
AS_IF([test -z "$OD"], [AC_MSG_ERROR([Could not find 'od'])])
+##########################################################################
+# Check default configuration file
+##########################################################################
+AS_IF([test -z "$2"] || [test ! -f $2],
+ [AC_MSG_ERROR([Default configuration file not found])], [])
+
+conf_ver=$_ODP_CONFIG_VERSION_GENERATION.$_ODP_CONFIG_VERSION_MAJOR.$_ODP_CONFIG_VERSION_MINOR
+file_ver=`$SED 's/ //g' $2 | $GREP -oP '(?<=config_file_version=").*?(?=")'`
+
+AS_IF([test "x$conf_ver" = "x$file_ver"], [],
+ [AC_MSG_ERROR([Configuration file version mismatch (_ODP_CONFIG_VERSION=$conf_ver config_file_version=$file_ver)])])
+
odp_use_config=true
##########################################################################
# Create a header file odp_libconfig_config.h which contains null
@@ -22,9 +34,9 @@ odp_use_config=true
AC_CONFIG_COMMANDS([platform/$1/include/odp_libconfig_config.h],
[mkdir -p platform/$1/include
(echo "static const char config_builtin[[]] = {"; \
- $OD -An -v -tx1 < ${srcdir}/config/odp-$1.conf | \
+ $OD -An -v -tx1 < $CONFIG_FILE | \
$SED -e 's/[[0-9a-f]]\+/0x\0,/g' ; \
echo "0x00 };") > \
platform/$1/include/odp_libconfig_config.h],
- [with_platform=$with_platform OD=$OD SED=$SED])
+ [with_platform=$1 OD=$OD SED=$SED CONFIG_FILE=$2])
]) # ODP_LIBCONFIG
diff --git a/platform/linux-dpdk/include/odp_config_internal.h b/platform/linux-dpdk/include/odp_config_internal.h
index a4af017d4..0813514e4 100644
--- a/platform/linux-dpdk/include/odp_config_internal.h
+++ b/platform/linux-dpdk/include/odp_config_internal.h
@@ -145,11 +145,6 @@ extern "C" {
*/
#define CONFIG_POOL_MAX_NUM ((1024 * 1024) - 1)
-/*
- * Maximum number of events in a thread local pool cache
- */
-#define CONFIG_POOL_CACHE_SIZE 256
-
#ifdef __cplusplus
}
#endif
diff --git a/platform/linux-dpdk/m4/configure.m4 b/platform/linux-dpdk/m4/configure.m4
index 2b75f0049..5172d9edd 100644
--- a/platform/linux-dpdk/m4/configure.m4
+++ b/platform/linux-dpdk/m4/configure.m4
@@ -17,7 +17,8 @@ AC_ARG_WITH([openssl],
AS_IF([test "$with_openssl" != "no"],
[ODP_OPENSSL])
AM_CONDITIONAL([WITH_OPENSSL], [test x$with_openssl != xno])
-ODP_LIBCONFIG([linux-dpdk])
+
+m4_include([platform/linux-dpdk/m4/odp_libconfig.m4])
m4_include([platform/linux-dpdk/m4/odp_pcapng.m4])
ODP_SCHEDULER
diff --git a/platform/linux-dpdk/m4/odp_libconfig.m4 b/platform/linux-dpdk/m4/odp_libconfig.m4
new file mode 100644
index 000000000..2ab6aa047
--- /dev/null
+++ b/platform/linux-dpdk/m4/odp_libconfig.m4
@@ -0,0 +1,28 @@
+##########################################################################
+# Configuration file version
+##########################################################################
+m4_define([_odp_config_version_generation], [0])
+m4_define([_odp_config_version_major], [1])
+m4_define([_odp_config_version_minor], [9])
+
+m4_define([_odp_config_version],
+ [_odp_config_version_generation._odp_config_version_major._odp_config_version_minor])
+
+_ODP_CONFIG_VERSION_GENERATION=_odp_config_version_generation
+AC_SUBST(_ODP_CONFIG_VERSION_GENERATION)
+_ODP_CONFIG_VERSION_MAJOR=_odp_config_version_major
+AC_SUBST(_ODP_CONFIG_VERSION_MAJOR)
+_ODP_CONFIG_VERSION_MINOR=_odp_config_version_minor
+AC_SUBST(_ODP_CONFIG_VERSION_MINOR)
+
+##########################################################################
+# Set optional path for the default configuration file
+##########################################################################
+default_config_path="${srcdir}/config/odp-linux-dpdk.conf"
+
+AC_ARG_WITH([config-file],
+AS_HELP_STRING([--with-config-file=FILE path to the default configuration file],
+ [(this file must include all configuration options).]),
+ [default_config_path=$withval], [])
+
+ODP_LIBCONFIG([linux-dpdk], [$default_config_path])
diff --git a/platform/linux-dpdk/odp_crypto.c b/platform/linux-dpdk/odp_crypto.c
index b0ff761cb..7dcc91c79 100644
--- a/platform/linux-dpdk/odp_crypto.c
+++ b/platform/linux-dpdk/odp_crypto.c
@@ -1339,7 +1339,7 @@ static int crypto_fill_aead_xform(struct rte_crypto_sym_xform *aead_xform,
return 0;
}
-int odp_crypto_session_create(odp_crypto_session_param_t *param,
+int odp_crypto_session_create(const odp_crypto_session_param_t *param,
odp_crypto_session_t *session_out,
odp_crypto_ses_create_err_t *status)
{
diff --git a/platform/linux-dpdk/odp_pool.c b/platform/linux-dpdk/odp_pool.c
index 1c5edf76d..2f0ab3004 100644
--- a/platform/linux-dpdk/odp_pool.c
+++ b/platform/linux-dpdk/odp_pool.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2013-2018, Linaro Limited
- * Copyright (c) 2019, Nokia
+ * Copyright (c) 2019-2020, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -170,6 +170,8 @@ int odp_pool_capability(odp_pool_capability_t *capa)
capa->buf.max_align = ODP_CONFIG_BUFFER_ALIGN_MAX;
capa->buf.max_size = MAX_SIZE;
capa->buf.max_num = CONFIG_POOL_MAX_NUM;
+ capa->buf.min_cache_size = 0;
+ capa->buf.max_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
/* Packet pools */
capa->pkt.max_align = ODP_CONFIG_BUFFER_ALIGN_MIN;
@@ -183,10 +185,14 @@ int odp_pool_capability(odp_pool_capability_t *capa)
capa->pkt.min_seg_len = CONFIG_PACKET_SEG_LEN_MIN;
capa->pkt.max_seg_len = CONFIG_PACKET_SEG_LEN_MAX;
capa->pkt.max_uarea_size = MAX_SIZE;
+ capa->pkt.min_cache_size = 0;
+ capa->pkt.max_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
/* Timeout pools */
capa->tmo.max_pools = max_pools;
capa->tmo.max_num = CONFIG_POOL_MAX_NUM;
+ capa->tmo.min_cache_size = 0;
+ capa->tmo.max_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
return 0;
}
@@ -266,7 +272,7 @@ odp_dpdk_mbuf_ctor(struct rte_mempool *mp,
} \
} while (0)
-static int check_params(odp_pool_param_t *params)
+static int check_params(const odp_pool_param_t *params)
{
odp_pool_capability_t capa;
@@ -290,6 +296,12 @@ static int check_params(odp_pool_param_t *params)
return -1;
}
+ if (params->buf.cache_size > capa.buf.max_cache_size) {
+ ODP_ERR("buf.cache_size too large %u\n",
+ params->buf.cache_size);
+ return -1;
+ }
+
break;
case ODP_POOL_PACKET:
@@ -326,7 +338,14 @@ static int check_params(odp_pool_param_t *params)
}
if (params->pkt.headroom > CONFIG_PACKET_HEADROOM) {
- ODP_ERR("Packet headroom size not supported\n");
+ ODP_ERR("pkt.headroom too large %u\n",
+ params->pkt.headroom);
+ return -1;
+ }
+
+ if (params->pkt.cache_size > capa.pkt.max_cache_size) {
+ ODP_ERR("pkt.cache_size too large %u\n",
+ params->pkt.cache_size);
return -1;
}
@@ -337,6 +356,13 @@ static int check_params(odp_pool_param_t *params)
ODP_ERR("tmo.num too large %u\n", params->tmo.num);
return -1;
}
+
+ if (params->tmo.cache_size > capa.tmo.max_cache_size) {
+ ODP_ERR("tmo.cache_size too large %u\n",
+ params->tmo.cache_size);
+ return -1;
+ }
+
break;
default:
@@ -347,35 +373,39 @@ static int check_params(odp_pool_param_t *params)
return 0;
}
-static unsigned int calc_cache_size(uint32_t num)
+static unsigned int calc_cache_size(uint32_t pool_size, uint32_t max_num)
{
- unsigned int cache_size = 0;
- unsigned int i;
+ unsigned int cache_size;
+ unsigned int max_supported = pool_size / 1.5;
int num_threads = odp_global_ro.init_param.num_control +
odp_global_ro.init_param.num_worker;
- if (RTE_MEMPOOL_CACHE_MAX_SIZE == 0)
+ if (max_num == 0)
return 0;
- i = ceil((double)num / RTE_MEMPOOL_CACHE_MAX_SIZE);
- i = RTE_MAX(i, 2UL);
- for (; i <= (num / 2); i++)
- if ((num % i) == 0) {
- cache_size = num / i;
+ cache_size = RTE_MIN(max_num, max_supported);
+
+ while (cache_size) {
+ if ((pool_size % cache_size) == 0)
break;
- }
- if (odp_unlikely(cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE ||
- (uint32_t)cache_size * 1.5 > num)) {
- ODP_ERR("Cache size calculation failed: %d\n", cache_size);
- cache_size = 0;
+ cache_size--;
+ }
+
+ if (odp_unlikely(cache_size == 0)) {
+ cache_size = RTE_MIN(max_num, max_supported);
+ ODP_DBG("Using nonoptimal cache size: %d\n", cache_size);
}
+ /* Cache size of one exposes DPDK implementation bug */
+ if (cache_size == 1)
+ cache_size = 0;
+
ODP_DBG("Cache_size: %d\n", cache_size);
if (num_threads && cache_size) {
unsigned int total_cache_size = num_threads * cache_size;
- if (total_cache_size >= num)
+ if (total_cache_size >= pool_size)
ODP_DBG("Entire pool fits into thread local caches. "
"Pool starvation may occur if the pool is used "
"by multiple threads.\n");
@@ -396,7 +426,7 @@ static void format_pool_name(const char *name, char *rte_name)
} while (rte_mempool_lookup(rte_name) != NULL);
}
-odp_pool_t odp_pool_create(const char *name, odp_pool_param_t *params)
+odp_pool_t odp_pool_create(const char *name, const odp_pool_param_t *params)
{
struct rte_pktmbuf_pool_private mbp_ctor_arg;
struct mbuf_ctor_arg mb_ctor_arg;
@@ -436,6 +466,7 @@ odp_pool_t odp_pool_create(const char *name, odp_pool_param_t *params)
case ODP_POOL_BUFFER:
buf_align = params->buf.align;
blk_size = params->buf.size;
+ cache_size = params->buf.cache_size;
/* Validate requested buffer alignment */
if (buf_align > ODP_CONFIG_BUFFER_ALIGN_MAX ||
@@ -468,6 +499,7 @@ odp_pool_t odp_pool_create(const char *name, odp_pool_param_t *params)
tailroom = CONFIG_PACKET_TAILROOM;
min_seg_len = CONFIG_PACKET_SEG_LEN_MIN;
min_align = ODP_CONFIG_BUFFER_ALIGN_MIN;
+ cache_size = params->pkt.cache_size;
blk_size = min_seg_len;
if (params->pkt.seg_len > blk_size)
@@ -510,6 +542,8 @@ odp_pool_t odp_pool_create(const char *name, odp_pool_param_t *params)
hdr_size = sizeof(odp_timeout_hdr_t);
mbp_ctor_arg.mbuf_data_room_size = 0;
num = params->tmo.num;
+ cache_size = params->tmo.cache_size;
+
ODP_DBG("type: tmo name: %s num: %u\n",
pool_name, num);
break;
@@ -532,7 +566,7 @@ odp_pool_t odp_pool_create(const char *name, odp_pool_param_t *params)
ODP_DBG("Metadata size: %u, mb_size %d\n",
mb_ctor_arg.seg_buf_offset, mb_size);
- cache_size = calc_cache_size(num);
+ cache_size = calc_cache_size(num, cache_size);
format_pool_name(pool_name, rte_name);
@@ -728,6 +762,10 @@ void odp_pool_param_init(odp_pool_param_t *params)
{
memset(params, 0, sizeof(odp_pool_param_t));
params->pkt.headroom = CONFIG_PACKET_HEADROOM;
+ params->buf.cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
+ params->pkt.cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
+ params->tmo.cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
+
}
uint64_t odp_pool_to_u64(odp_pool_t hdl)
diff --git a/platform/linux-generic/include/odp_pool_internal.h b/platform/linux-generic/include/odp_pool_internal.h
index a74f46606..a8947ddd9 100644
--- a/platform/linux-generic/include/odp_pool_internal.h
+++ b/platform/linux-generic/include/odp_pool_internal.h
@@ -27,9 +27,8 @@ extern "C" {
#include <odp/api/plat/strong_types.h>
typedef struct ODP_ALIGNED_CACHE pool_cache_t {
- uint32_t size; /* Size of cache */
- uint32_t burst_size; /* Cache burst size */
- uint32_t num; /* Number of buffers in cache */
+ /* Number of buffers in cache */
+ uint32_t cache_num;
/* Cached buffers */
odp_buffer_hdr_t *buf_hdr[CONFIG_POOL_CACHE_MAX_SIZE];
@@ -56,6 +55,8 @@ typedef struct pool_t {
odp_pool_t pool_hdl;
uint32_t pool_idx;
uint32_t ring_mask;
+ uint32_t cache_size;
+ uint32_t burst_size;
odp_shm_t shm;
odp_shm_t uarea_shm;
uint64_t shm_size;
diff --git a/platform/linux-generic/include/odp_schedule_scalable.h b/platform/linux-generic/include/odp_schedule_scalable.h
index 16052a6af..88986be5f 100644
--- a/platform/linux-generic/include/odp_schedule_scalable.h
+++ b/platform/linux-generic/include/odp_schedule_scalable.h
@@ -89,6 +89,7 @@ typedef struct ODP_ALIGNED_CACHE {
#define cons_type qschst_type
#endif
odp_schedule_group_t sched_grp;
+ uint32_t loop_check[CONFIG_NUM_CPU_IDS];
} sched_elem_t;
/* Number of scheduling groups */
@@ -138,6 +139,7 @@ typedef struct ODP_ALIGNED_CACHE {
bitset_t ODP_ALIGNED_CACHE rvec_free;
/* Reordering contexts to allocate from */
reorder_context_t ODP_ALIGNED_CACHE rvec[TS_RVEC_SIZE];
+ uint32_t loop_cnt; /*Counter to check pktio ingress queue dead loop */
} sched_scalable_thread_state_t;
void sched_update_enq(sched_elem_t *q, uint32_t actual);
diff --git a/platform/linux-generic/m4/configure.m4 b/platform/linux-generic/m4/configure.m4
index 82fe6fe9c..bcd55b7c5 100644
--- a/platform/linux-generic/m4/configure.m4
+++ b/platform/linux-generic/m4/configure.m4
@@ -23,7 +23,7 @@ AS_IF([test "x$with_pcap" != xno],
[ODP_PCAP([with_pcap=yes],[with_pcap=no])])
AM_CONDITIONAL([HAVE_PCAP], [test x$have_pcap = xyes])
-ODP_LIBCONFIG([linux-generic])
+m4_include([platform/linux-generic/m4/odp_libconfig.m4])
m4_include([platform/linux-generic/m4/odp_pcapng.m4])
m4_include([platform/linux-generic/m4/odp_netmap.m4])
m4_include([platform/linux-generic/m4/odp_dpdk.m4])
diff --git a/platform/linux-generic/m4/odp_libconfig.m4 b/platform/linux-generic/m4/odp_libconfig.m4
new file mode 100644
index 000000000..f042d65d7
--- /dev/null
+++ b/platform/linux-generic/m4/odp_libconfig.m4
@@ -0,0 +1,28 @@
+##########################################################################
+# Configuration file version
+##########################################################################
+m4_define([_odp_config_version_generation], [0])
+m4_define([_odp_config_version_major], [1])
+m4_define([_odp_config_version_minor], [13])
+
+m4_define([_odp_config_version],
+ [_odp_config_version_generation._odp_config_version_major._odp_config_version_minor])
+
+_ODP_CONFIG_VERSION_GENERATION=_odp_config_version_generation
+AC_SUBST(_ODP_CONFIG_VERSION_GENERATION)
+_ODP_CONFIG_VERSION_MAJOR=_odp_config_version_major
+AC_SUBST(_ODP_CONFIG_VERSION_MAJOR)
+_ODP_CONFIG_VERSION_MINOR=_odp_config_version_minor
+AC_SUBST(_ODP_CONFIG_VERSION_MINOR)
+
+##########################################################################
+# Set optional path for the default configuration file
+##########################################################################
+default_config_path="${srcdir}/config/odp-$with_platform.conf"
+
+AC_ARG_WITH([config-file],
+AS_HELP_STRING([--with-config-file=FILE path to the default configuration file],
+ [(this file must include all configuration options).]),
+ [default_config_path=$withval], [])
+
+ODP_LIBCONFIG([$with_platform], [$default_config_path])
diff --git a/platform/linux-generic/odp_classification.c b/platform/linux-generic/odp_classification.c
index 8c6588444..8637e611f 100644
--- a/platform/linux-generic/odp_classification.c
+++ b/platform/linux-generic/odp_classification.c
@@ -204,7 +204,7 @@ static inline void _cls_queue_unwind(uint32_t tbl_index, uint32_t j)
odp_queue_destroy(queue_grp_tbl->s.queue[tbl_index + --j]);
}
-odp_cos_t odp_cls_cos_create(const char *name, odp_cls_cos_param_t *param)
+odp_cos_t odp_cls_cos_create(const char *name, const odp_cls_cos_param_t *param)
{
uint32_t i, j;
odp_queue_t queue;
diff --git a/platform/linux-generic/odp_crypto_null.c b/platform/linux-generic/odp_crypto_null.c
index 633cf4751..8dfac007d 100644
--- a/platform/linux-generic/odp_crypto_null.c
+++ b/platform/linux-generic/odp_crypto_null.c
@@ -181,7 +181,7 @@ int odp_crypto_auth_capability(odp_auth_alg_t auth,
}
int
-odp_crypto_session_create(odp_crypto_session_param_t *param,
+odp_crypto_session_create(const odp_crypto_session_param_t *param,
odp_crypto_session_t *session_out,
odp_crypto_ses_create_err_t *status)
{
diff --git a/platform/linux-generic/odp_crypto_openssl.c b/platform/linux-generic/odp_crypto_openssl.c
index fb4a0ddad..98a13ce4a 100644
--- a/platform/linux-generic/odp_crypto_openssl.c
+++ b/platform/linux-generic/odp_crypto_openssl.c
@@ -2073,7 +2073,7 @@ int odp_crypto_auth_capability(odp_auth_alg_t auth,
}
int
-odp_crypto_session_create(odp_crypto_session_param_t *param,
+odp_crypto_session_create(const odp_crypto_session_param_t *param,
odp_crypto_session_t *session_out,
odp_crypto_ses_create_err_t *status)
{
diff --git a/platform/linux-generic/odp_packet_io.c b/platform/linux-generic/odp_packet_io.c
index 94f4d620e..1cd5745aa 100644
--- a/platform/linux-generic/odp_packet_io.c
+++ b/platform/linux-generic/odp_packet_io.c
@@ -1303,9 +1303,12 @@ void odp_pktio_print(odp_pktio_t hdl)
len += snprintf(&str[len], n - len,
" index %i\n", odp_pktio_index(hdl));
len += snprintf(&str[len], n - len,
- " handle (u64) %" PRIu64 "\n",
+ " handle 0x%" PRIx64 "\n",
odp_pktio_to_u64(hdl));
len += snprintf(&str[len], n - len,
+ " pool handle 0x%" PRIx64 "\n",
+ odp_pool_to_u64(entry->s.pool));
+ len += snprintf(&str[len], n - len,
" state %s\n",
entry->s.state == PKTIO_STATE_STARTED ? "start" :
(entry->s.state == PKTIO_STATE_STOPPED ? "stop" :
diff --git a/platform/linux-generic/odp_pool.c b/platform/linux-generic/odp_pool.c
index e1964f840..d31f210e7 100644
--- a/platform/linux-generic/odp_pool.c
+++ b/platform/linux-generic/odp_pool.c
@@ -54,6 +54,14 @@ ODP_STATIC_ASSERT(CONFIG_PACKET_SEG_SIZE < 0xffff,
typedef struct pool_local_t {
pool_cache_t *cache[ODP_CONFIG_POOLS];
int thr_id;
+
+ /* Number of event allocs and frees by this thread. */
+ struct {
+ uint64_t num_alloc;
+ uint64_t num_free;
+
+ } stat[ODP_CONFIG_POOLS];
+
} pool_local_t;
pool_global_t *_odp_pool_glb;
@@ -84,15 +92,12 @@ static inline pool_t *pool_from_buf(odp_buffer_t buf)
static inline void cache_init(pool_cache_t *cache)
{
memset(cache, 0, sizeof(pool_cache_t));
-
- cache->size = _odp_pool_glb->config.local_cache_size;
- cache->burst_size = _odp_pool_glb->config.burst_size;
}
static inline uint32_t cache_pop(pool_cache_t *cache,
odp_buffer_hdr_t *buf_hdr[], int max_num)
{
- uint32_t cache_num = cache->num;
+ uint32_t cache_num = cache->cache_num;
uint32_t num_ch = max_num;
uint32_t cache_begin;
uint32_t i;
@@ -106,7 +111,7 @@ static inline uint32_t cache_pop(pool_cache_t *cache,
for (i = 0; i < num_ch; i++)
buf_hdr[i] = cache->buf_hdr[cache_begin + i];
- cache->num = cache_num - num_ch;
+ cache->cache_num = cache_num - num_ch;
return num_ch;
}
@@ -114,13 +119,13 @@ static inline uint32_t cache_pop(pool_cache_t *cache,
static inline void cache_push(pool_cache_t *cache, odp_buffer_hdr_t *buf_hdr[],
uint32_t num)
{
- uint32_t cache_num = cache->num;
+ uint32_t cache_num = cache->cache_num;
uint32_t i;
for (i = 0; i < num; i++)
cache->buf_hdr[cache_num + i] = buf_hdr[i];
- cache->num = cache_num + num;
+ cache->cache_num = cache_num + num;
}
static void cache_flush(pool_cache_t *cache, pool_t *pool)
@@ -336,6 +341,18 @@ int _odp_pool_term_local(void)
pool_t *pool = pool_entry(i);
cache_flush(local.cache[i], pool);
+
+ if (ODP_DEBUG == 1) {
+ uint64_t num_alloc = local.stat[i].num_alloc;
+ uint64_t num_free = local.stat[i].num_free;
+
+ if (num_alloc || num_free) {
+ ODP_DBG("Pool[%i] stats: thr %i, "
+ "allocs % " PRIu64 ", "
+ "frees % " PRIu64 "\n",
+ i, local.thr_id, num_alloc, num_free);
+ }
+ }
}
return 0;
@@ -486,19 +503,20 @@ static bool shm_is_from_huge_pages(odp_shm_t shm)
return (info.page_size >= huge_page_size);
}
-static odp_pool_t pool_create(const char *name, odp_pool_param_t *params,
+static odp_pool_t pool_create(const char *name, const odp_pool_param_t *params,
uint32_t shmflags)
{
pool_t *pool;
uint32_t uarea_size, headroom, tailroom;
odp_shm_t shm;
uint32_t seg_len, align, num, hdr_size, block_size;
- uint32_t max_len;
+ uint32_t max_len, cache_size, burst_size;
uint32_t ring_size;
uint32_t num_extra = 0;
- int name_len;
- const char *postfix = "_uarea";
- char uarea_name[ODP_POOL_NAME_LEN + sizeof(postfix)];
+ const char *max_prefix = "pool_000_uarea_";
+ int max_prefix_len = strlen(max_prefix);
+ char shm_name[ODP_POOL_NAME_LEN + max_prefix_len];
+ char uarea_name[ODP_POOL_NAME_LEN + max_prefix_len];
align = 0;
@@ -533,11 +551,13 @@ static odp_pool_t pool_create(const char *name, odp_pool_param_t *params,
seg_len = 0;
max_len = 0;
uarea_size = 0;
+ cache_size = 0;
switch (params->type) {
case ODP_POOL_BUFFER:
num = params->buf.num;
seg_len = params->buf.size;
+ cache_size = params->buf.cache_size;
break;
case ODP_POOL_PACKET:
@@ -572,10 +592,12 @@ static odp_pool_t pool_create(const char *name, odp_pool_param_t *params,
tailroom = CONFIG_PACKET_TAILROOM;
num = params->pkt.num;
uarea_size = params->pkt.uarea_size;
+ cache_size = params->pkt.cache_size;
break;
case ODP_POOL_TIMEOUT:
num = params->tmo.num;
+ cache_size = params->tmo.cache_size;
break;
default:
@@ -601,9 +623,9 @@ static odp_pool_t pool_create(const char *name, odp_pool_param_t *params,
pool->name[ODP_POOL_NAME_LEN - 1] = 0;
}
- name_len = strlen(pool->name);
- memcpy(uarea_name, pool->name, name_len);
- strcpy(&uarea_name[name_len], postfix);
+ /* Format SHM names from prefix, pool index and pool name. */
+ sprintf(shm_name, "pool_%03i_%s", pool->pool_idx, pool->name);
+ sprintf(uarea_name, "pool_%03i_uarea_%s", pool->pool_idx, pool->name);
pool->params = *params;
pool->block_offset = 0;
@@ -671,7 +693,21 @@ static odp_pool_t pool_create(const char *name, odp_pool_param_t *params,
pool->ext_desc = NULL;
pool->ext_destroy = NULL;
- shm = odp_shm_reserve(pool->name, pool->shm_size, ODP_PAGE_SIZE,
+ pool->cache_size = 0;
+ pool->burst_size = 1;
+
+ if (cache_size > 1) {
+ cache_size = (cache_size / 2) * 2;
+ burst_size = _odp_pool_glb->config.burst_size;
+
+ if ((cache_size / burst_size) < 2)
+ burst_size = cache_size / 2;
+
+ pool->cache_size = cache_size;
+ pool->burst_size = burst_size;
+ }
+
+ shm = odp_shm_reserve(shm_name, pool->shm_size, ODP_PAGE_SIZE,
shmflags);
pool->shm = shm;
@@ -724,24 +760,23 @@ error:
return ODP_POOL_INVALID;
}
-static int check_params(odp_pool_param_t *params)
+static int check_params(const odp_pool_param_t *params)
{
odp_pool_capability_t capa;
- odp_bool_t cache_warning = false;
- uint32_t cache_size = _odp_pool_glb->config.local_cache_size;
+ uint32_t cache_size, num;
int num_threads = odp_global_ro.init_param.num_control +
odp_global_ro.init_param.num_worker;
if (!params || odp_pool_capability(&capa) < 0)
return -1;
- if (num_threads)
- cache_size = num_threads * cache_size;
+ num = 0;
+ cache_size = 0;
switch (params->type) {
case ODP_POOL_BUFFER:
- if (params->buf.num <= cache_size)
- cache_warning = true;
+ num = params->buf.num;
+ cache_size = params->buf.cache_size;
if (params->buf.num > capa.buf.max_num) {
ODP_ERR("buf.num too large %u\n", params->buf.num);
@@ -761,8 +796,8 @@ static int check_params(odp_pool_param_t *params)
break;
case ODP_POOL_PACKET:
- if (params->pkt.num <= cache_size)
- cache_warning = true;
+ num = params->pkt.num;
+ cache_size = params->pkt.cache_size;
if (params->pkt.num > capa.pkt.max_num) {
ODP_ERR("pkt.num too large %u\n", params->pkt.num);
@@ -807,8 +842,8 @@ static int check_params(odp_pool_param_t *params)
break;
case ODP_POOL_TIMEOUT:
- if (params->tmo.num <= cache_size)
- cache_warning = true;
+ num = params->tmo.num;
+ cache_size = params->tmo.cache_size;
if (params->tmo.num > capa.tmo.max_num) {
ODP_ERR("tmo.num too large %u\n", params->tmo.num);
@@ -821,7 +856,12 @@ static int check_params(odp_pool_param_t *params)
return -1;
}
- if (cache_warning)
+ if (cache_size > CONFIG_POOL_CACHE_MAX_SIZE) {
+ ODP_ERR("Too large cache size %u\n", cache_size);
+ return -1;
+ }
+
+ if (num <= (num_threads * cache_size))
ODP_DBG("Entire pool fits into thread local caches. Pool "
"starvation may occur if the pool is used by multiple "
"threads.\n");
@@ -829,7 +869,7 @@ static int check_params(odp_pool_param_t *params)
return 0;
}
-odp_pool_t odp_pool_create(const char *name, odp_pool_param_t *params)
+odp_pool_t odp_pool_create(const char *name, const odp_pool_param_t *params)
{
uint32_t shm_flags = 0;
@@ -935,12 +975,13 @@ int odp_pool_info(odp_pool_t pool_hdl, odp_pool_info_t *info)
int buffer_alloc_multi(pool_t *pool, odp_buffer_hdr_t *buf_hdr[], int max_num)
{
- pool_cache_t *cache = local.cache[pool->pool_idx];
+ uint32_t pool_idx = pool->pool_idx;
+ pool_cache_t *cache = local.cache[pool_idx];
ring_ptr_t *ring;
odp_buffer_hdr_t *hdr;
- uint32_t mask, num_ch, i;
+ uint32_t mask, num_ch, num_alloc, i;
uint32_t num_deq = 0;
- uint32_t burst_size = cache->burst_size;
+ uint32_t burst_size = pool->burst_size;
/* First pull packets from local cache */
num_ch = cache_pop(cache, buf_hdr, max_num);
@@ -980,16 +1021,25 @@ int buffer_alloc_multi(pool_t *pool, odp_buffer_hdr_t *buf_hdr[], int max_num)
cache_push(cache, &hdr_tmp[num_deq], cache_num);
}
- return num_ch + num_deq;
+ num_alloc = num_ch + num_deq;
+
+ if (ODP_DEBUG == 1)
+ local.stat[pool_idx].num_alloc += num_alloc;
+
+ return num_alloc;
}
static inline void buffer_free_to_pool(pool_t *pool,
odp_buffer_hdr_t *buf_hdr[], int num)
{
- pool_cache_t *cache = local.cache[pool->pool_idx];
+ uint32_t pool_idx = pool->pool_idx;
+ pool_cache_t *cache = local.cache[pool_idx];
ring_ptr_t *ring;
uint32_t cache_num, mask;
- uint32_t cache_size = cache->size;
+ uint32_t cache_size = pool->cache_size;
+
+ if (ODP_DEBUG == 1)
+ local.stat[pool_idx].num_free += num;
/* Special case of a very large free. Move directly to
* the global pool. */
@@ -1004,10 +1054,10 @@ static inline void buffer_free_to_pool(pool_t *pool,
/* Make room into local cache if needed. Do at least burst size
* transfer. */
- cache_num = cache->num;
+ cache_num = cache->cache_num;
if (odp_unlikely((int)(cache_size - cache_num) < num)) {
- int burst = cache->burst_size;
+ int burst = pool->burst_size;
ring = &pool->ring->hdr;
mask = pool->ring_mask;
@@ -1110,6 +1160,8 @@ int odp_pool_capability(odp_pool_capability_t *capa)
capa->buf.max_align = ODP_CONFIG_BUFFER_ALIGN_MAX;
capa->buf.max_size = MAX_SIZE;
capa->buf.max_num = CONFIG_POOL_MAX_NUM;
+ capa->buf.min_cache_size = 0;
+ capa->buf.max_cache_size = CONFIG_POOL_CACHE_MAX_SIZE;
/* Packet pools */
capa->pkt.max_pools = max_pools;
@@ -1123,10 +1175,14 @@ int odp_pool_capability(odp_pool_capability_t *capa)
capa->pkt.min_seg_len = CONFIG_PACKET_SEG_LEN_MIN;
capa->pkt.max_seg_len = max_seg_len;
capa->pkt.max_uarea_size = MAX_SIZE;
+ capa->pkt.min_cache_size = 0;
+ capa->pkt.max_cache_size = CONFIG_POOL_CACHE_MAX_SIZE;
/* Timeout pools */
capa->tmo.max_pools = max_pools;
capa->tmo.max_num = CONFIG_POOL_MAX_NUM;
+ capa->tmo.min_cache_size = 0;
+ capa->tmo.max_cache_size = CONFIG_POOL_CACHE_MAX_SIZE;
return 0;
}
@@ -1163,6 +1219,8 @@ void odp_pool_print(odp_pool_t pool_hdl)
ODP_PRINT(" base addr %p\n", pool->base_addr);
ODP_PRINT(" uarea shm size %" PRIu64 "\n", pool->uarea_shm_size);
ODP_PRINT(" uarea base addr %p\n", pool->uarea_base_addr);
+ ODP_PRINT(" cache size %u\n", pool->cache_size);
+ ODP_PRINT(" burst size %u\n", pool->burst_size);
ODP_PRINT("\n");
}
@@ -1175,8 +1233,13 @@ odp_pool_t odp_buffer_pool(odp_buffer_t buf)
void odp_pool_param_init(odp_pool_param_t *params)
{
+ uint32_t default_cache_size = _odp_pool_glb->config.local_cache_size;
+
memset(params, 0, sizeof(odp_pool_param_t));
params->pkt.headroom = CONFIG_PACKET_HEADROOM;
+ params->buf.cache_size = default_cache_size;
+ params->pkt.cache_size = default_cache_size;
+ params->tmo.cache_size = default_cache_size;
}
uint64_t odp_pool_to_u64(odp_pool_t hdl)
diff --git a/platform/linux-generic/odp_schedule_basic.c b/platform/linux-generic/odp_schedule_basic.c
index 9908dd1c7..d222cca1d 100644
--- a/platform/linux-generic/odp_schedule_basic.c
+++ b/platform/linux-generic/odp_schedule_basic.c
@@ -5,6 +5,14 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
+/*
+ * Suppress bounds warnings about interior zero length arrays. Such an array
+ * is used intentionally in prio_queue_t.
+ */
+#if __GNUC__ >= 10
+#pragma GCC diagnostic ignored "-Wzero-length-bounds"
+#endif
+
#include <odp/api/schedule.h>
#include <odp_schedule_if.h>
#include <odp/api/align.h>
@@ -154,7 +162,7 @@ typedef struct ODP_ALIGNED_CACHE {
ring_u32_t ring;
/* Ring data: queue indexes */
- uint32_t queue_index[MAX_RING_SIZE];
+ uint32_t queue_index[MAX_RING_SIZE]; /* overlaps with ring.data[] */
} prio_queue_t;
diff --git a/platform/linux-generic/odp_schedule_scalable.c b/platform/linux-generic/odp_schedule_scalable.c
index e993f2963..39fa498e8 100644
--- a/platform/linux-generic/odp_schedule_scalable.c
+++ b/platform/linux-generic/odp_schedule_scalable.c
@@ -87,6 +87,7 @@ static int thread_state_init(int tidx)
ts->rvec_free = (1ULL << TS_RVEC_SIZE) - 1;
ts->num_schedq = 0;
ts->sg_sem = 1; /* Start with sched group semaphore changed */
+ ts->loop_cnt = 0;
memset(ts->sg_actual, 0, sizeof(ts->sg_actual));
for (i = 0; i < TS_RVEC_SIZE; i++) {
ts->rvec[i].rvec_free = &ts->rvec_free;
@@ -731,7 +732,6 @@ static void pktio_start(int pktio_idx,
static void pktio_stop(sched_elem_t *elem)
{
- elem->cons_type &= ~FLAG_PKTIN; /* Clear pktin queue flag */
sched_pktin_rem(elem->sched_grp);
if (__atomic_sub_fetch(&global->poll_count[elem->pktio_idx],
1, __ATOMIC_RELAXED) == 0) {
@@ -874,9 +874,9 @@ events_dequeued:
static int _schedule(odp_queue_t *from, odp_event_t ev[], int num_evts)
{
sched_scalable_thread_state_t *ts;
- sched_elem_t *first;
sched_elem_t *atomq;
int num;
+ int cpu_id;
uint32_t i;
ts = sched_ts;
@@ -946,10 +946,13 @@ dequeue_atomic:
update_sg_membership(ts);
}
+ cpu_id = odp_cpu_id();
/* Scan our schedq list from beginning to end */
- for (i = 0, first = NULL; i < ts->num_schedq; i++, first = NULL) {
+ for (i = 0; i < ts->num_schedq; i++) {
sched_queue_t *schedq = ts->schedq_list[i];
sched_elem_t *elem;
+
+ ts->loop_cnt++;
restart_same:
elem = schedq_peek(schedq);
if (odp_unlikely(elem == NULL)) {
@@ -958,10 +961,12 @@ restart_same:
}
if (is_pktin(elem)) {
/* Pktio ingress queue */
- if (first == NULL)
- first = elem;
- else if (elem == first) /* Wrapped around */
- continue; /* Go to next schedq */
+ if (elem->schedq != schedq) { /* Low priority schedq*/
+ if (elem->loop_check[cpu_id] != ts->loop_cnt)
+ elem->loop_check[cpu_id] = ts->loop_cnt;
+ else /* Wrapped around */
+ continue; /* Go to next schedq */
+ }
if (odp_unlikely(!schedq_cond_pop(schedq, elem)))
goto restart_same;
@@ -1822,8 +1827,7 @@ static int schedule_init_global(void)
sizeof(sched_queue_t);
max_alloc = min_alloc;
pool = _odp_ishm_pool_create("sched_shm_pool", pool_size,
- min_alloc, max_alloc,
- _ODP_ISHM_SINGLE_VA);
+ min_alloc, max_alloc, 0);
if (pool == NULL) {
ODP_ERR("Failed to allocate shared memory pool "
"for sched\n");
diff --git a/platform/linux-generic/odp_schedule_sp.c b/platform/linux-generic/odp_schedule_sp.c
index b65dc6cb8..c0eb4a419 100644
--- a/platform/linux-generic/odp_schedule_sp.c
+++ b/platform/linux-generic/odp_schedule_sp.c
@@ -5,6 +5,14 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
+/*
+ * Suppress bounds warnings about interior zero length arrays. Such an array
+ * is used intentionally in prio_queue_t.
+ */
+#if __GNUC__ >= 10
+#pragma GCC diagnostic ignored "-Wzero-length-bounds"
+#endif
+
#include <odp/api/ticketlock.h>
#include <odp/api/thread.h>
#include <odp/api/plat/thread_inlines.h>
@@ -77,7 +85,7 @@ typedef struct ODP_ALIGNED_CACHE {
ring_u32_t ring;
/* Ring data: queue indexes */
- uint32_t ring_idx[RING_SIZE];
+ uint32_t ring_idx[RING_SIZE]; /* overlaps with ring.data[] */
} prio_queue_t;
diff --git a/platform/linux-generic/odp_traffic_mngr.c b/platform/linux-generic/odp_traffic_mngr.c
index 0acfa9c76..de9cbfb73 100644
--- a/platform/linux-generic/odp_traffic_mngr.c
+++ b/platform/linux-generic/odp_traffic_mngr.c
@@ -604,7 +604,7 @@ static uint64_t tm_max_time_delta(uint64_t rate)
return (1ULL << (26 + 30)) / rate;
}
-static void tm_shaper_params_cvt_to(odp_tm_shaper_params_t *odp_shaper_params,
+static void tm_shaper_params_cvt_to(const odp_tm_shaper_params_t *shaper_params,
tm_shaper_params_t *tm_shaper_params)
{
uint64_t commit_rate, peak_rate, max_commit_time_delta, highest_rate;
@@ -612,8 +612,8 @@ static void tm_shaper_params_cvt_to(odp_tm_shaper_params_t *odp_shaper_params,
uint32_t min_time_delta;
int64_t commit_burst, peak_burst;
- commit_rate = tm_bps_to_rate(odp_shaper_params->commit_bps);
- if ((odp_shaper_params->commit_bps == 0) || (commit_rate == 0)) {
+ commit_rate = tm_bps_to_rate(shaper_params->commit_bps);
+ if ((shaper_params->commit_bps == 0) || (commit_rate == 0)) {
tm_shaper_params->max_commit_time_delta = 0;
tm_shaper_params->max_peak_time_delta = 0;
tm_shaper_params->commit_rate = 0;
@@ -628,17 +628,17 @@ static void tm_shaper_params_cvt_to(odp_tm_shaper_params_t *odp_shaper_params,
}
max_commit_time_delta = tm_max_time_delta(commit_rate);
- commit_burst = (int64_t)odp_shaper_params->commit_burst;
+ commit_burst = (int64_t)shaper_params->commit_burst;
- peak_rate = tm_bps_to_rate(odp_shaper_params->peak_bps);
- if ((odp_shaper_params->peak_bps == 0) || (peak_rate == 0)) {
+ peak_rate = tm_bps_to_rate(shaper_params->peak_bps);
+ if ((shaper_params->peak_bps == 0) || (peak_rate == 0)) {
peak_rate = 0;
max_peak_time_delta = 0;
peak_burst = 0;
min_time_delta = (uint32_t)((1 << 26) / commit_rate);
} else {
max_peak_time_delta = tm_max_time_delta(peak_rate);
- peak_burst = (int64_t)odp_shaper_params->peak_burst;
+ peak_burst = (int64_t)shaper_params->peak_burst;
highest_rate = MAX(commit_rate, peak_rate);
min_time_delta = (uint32_t)((1 << 26) / highest_rate);
}
@@ -651,8 +651,8 @@ static void tm_shaper_params_cvt_to(odp_tm_shaper_params_t *odp_shaper_params,
tm_shaper_params->max_commit = commit_burst << (26 - 3);
tm_shaper_params->max_peak = peak_burst << (26 - 3);
tm_shaper_params->min_time_delta = min_time_delta;
- tm_shaper_params->len_adjust = odp_shaper_params->shaper_len_adjust;
- tm_shaper_params->dual_rate = odp_shaper_params->dual_rate;
+ tm_shaper_params->len_adjust = shaper_params->shaper_len_adjust;
+ tm_shaper_params->dual_rate = shaper_params->dual_rate;
tm_shaper_params->enabled = 1;
}
@@ -3212,7 +3212,7 @@ void odp_tm_shaper_params_init(odp_tm_shaper_params_t *params)
}
odp_tm_shaper_t odp_tm_shaper_create(const char *name,
- odp_tm_shaper_params_t *params)
+ const odp_tm_shaper_params_t *params)
{
tm_shaper_params_t *profile_obj;
odp_tm_shaper_t shaper_handle;
@@ -3264,7 +3264,7 @@ int odp_tm_shaper_params_read(odp_tm_shaper_t shaper_profile,
}
int odp_tm_shaper_params_update(odp_tm_shaper_t shaper_profile,
- odp_tm_shaper_params_t *params)
+ const odp_tm_shaper_params_t *params)
{
tm_shaper_params_t *profile_obj;
@@ -3301,15 +3301,15 @@ void odp_tm_sched_params_init(odp_tm_sched_params_t *params)
memset(params, 0, sizeof(odp_tm_sched_params_t));
}
-static void tm_sched_params_cvt_to(odp_tm_sched_params_t *odp_sched_params,
+static void tm_sched_params_cvt_to(const odp_tm_sched_params_t *sched_params,
tm_sched_params_t *tm_sched_params)
{
odp_tm_sched_mode_t sched_mode;
uint32_t priority, weight, inv_weight;
for (priority = 0; priority < ODP_TM_MAX_PRIORITIES; priority++) {
- sched_mode = odp_sched_params->sched_modes[priority];
- weight = odp_sched_params->sched_weights[priority];
+ sched_mode = sched_params->sched_modes[priority];
+ weight = sched_params->sched_weights[priority];
if (weight == 0)
inv_weight = 0;
else
@@ -3337,7 +3337,7 @@ static void tm_sched_params_cvt_from(tm_sched_params_t *tm_sched_params,
}
odp_tm_sched_t odp_tm_sched_create(const char *name,
- odp_tm_sched_params_t *params)
+ const odp_tm_sched_params_t *params)
{
tm_sched_params_t *profile_obj;
_odp_int_name_t name_tbl_id;
@@ -3389,7 +3389,7 @@ int odp_tm_sched_params_read(odp_tm_sched_t sched_profile,
}
int odp_tm_sched_params_update(odp_tm_sched_t sched_profile,
- odp_tm_sched_params_t *params)
+ const odp_tm_sched_params_t *params)
{
tm_sched_params_t *profile_obj;
@@ -3427,7 +3427,8 @@ void odp_tm_threshold_params_init(odp_tm_threshold_params_t *params)
}
odp_tm_threshold_t odp_tm_threshold_create(const char *name,
- odp_tm_threshold_params_t *params)
+ const odp_tm_threshold_params_t
+ *params)
{
tm_queue_thresholds_t *profile_obj;
odp_tm_threshold_t threshold_handle;
@@ -3486,7 +3487,7 @@ int odp_tm_thresholds_params_read(odp_tm_threshold_t threshold_profile,
}
int odp_tm_thresholds_params_update(odp_tm_threshold_t threshold_profile,
- odp_tm_threshold_params_t *params)
+ const odp_tm_threshold_params_t *params)
{
tm_queue_thresholds_t *profile_obj;
@@ -3530,15 +3531,15 @@ void odp_tm_wred_params_init(odp_tm_wred_params_t *params)
memset(params, 0, sizeof(odp_tm_wred_params_t));
}
-static void tm_wred_params_cvt_to(odp_tm_wred_params_t *odp_tm_wred_params,
+static void tm_wred_params_cvt_to(const odp_tm_wred_params_t *params,
tm_wred_params_t *wred_params)
{
- wred_params->min_threshold = odp_tm_wred_params->min_threshold;
- wred_params->med_threshold = odp_tm_wred_params->med_threshold;
- wred_params->med_drop_prob = odp_tm_wred_params->med_drop_prob;
- wred_params->max_drop_prob = odp_tm_wred_params->max_drop_prob;
- wred_params->enable_wred = odp_tm_wred_params->enable_wred;
- wred_params->use_byte_fullness = odp_tm_wred_params->use_byte_fullness;
+ wred_params->min_threshold = params->min_threshold;
+ wred_params->med_threshold = params->med_threshold;
+ wred_params->med_drop_prob = params->med_drop_prob;
+ wred_params->max_drop_prob = params->max_drop_prob;
+ wred_params->enable_wred = params->enable_wred;
+ wred_params->use_byte_fullness = params->use_byte_fullness;
}
static void tm_wred_params_cvt_from(tm_wred_params_t *wred_params,
@@ -3552,7 +3553,8 @@ static void tm_wred_params_cvt_from(tm_wred_params_t *wred_params,
odp_tm_wred_params->use_byte_fullness = wred_params->use_byte_fullness;
}
-odp_tm_wred_t odp_tm_wred_create(const char *name, odp_tm_wred_params_t *params)
+odp_tm_wred_t odp_tm_wred_create(const char *name,
+ const odp_tm_wred_params_t *params)
{
tm_wred_params_t *profile_obj;
odp_tm_wred_t wred_handle;
@@ -3605,7 +3607,7 @@ int odp_tm_wred_params_read(odp_tm_wred_t wred_profile,
}
int odp_tm_wred_params_update(odp_tm_wred_t wred_profile,
- odp_tm_wred_params_t *params)
+ const odp_tm_wred_params_t *params)
{
tm_wred_params_t *wred_params;
@@ -3642,9 +3644,8 @@ void odp_tm_node_params_init(odp_tm_node_params_t *params)
memset(params, 0, sizeof(odp_tm_node_params_t));
}
-odp_tm_node_t odp_tm_node_create(odp_tm_t odp_tm,
- const char *name,
- odp_tm_node_params_t *params)
+odp_tm_node_t odp_tm_node_create(odp_tm_t odp_tm, const char *name,
+ const odp_tm_node_params_t *params)
{
odp_tm_level_requirements_t *requirements;
_odp_int_sorted_list_t sorted_list;
@@ -3938,7 +3939,7 @@ void odp_tm_queue_params_init(odp_tm_queue_params_t *params)
}
odp_tm_queue_t odp_tm_queue_create(odp_tm_t odp_tm,
- odp_tm_queue_params_t *params)
+ const odp_tm_queue_params_t *params)
{
_odp_int_pkt_queue_t _odp_int_pkt_queue;
tm_queue_obj_t *queue_obj;
diff --git a/platform/linux-generic/pktio/stats/ethtool_stats.c b/platform/linux-generic/pktio/stats/ethtool_stats.c
index 1ac3f6db2..f4f03c162 100644
--- a/platform/linux-generic/pktio/stats/ethtool_stats.c
+++ b/platform/linux-generic/pktio/stats/ethtool_stats.c
@@ -18,11 +18,19 @@
#include <odp_debug_internal.h>
#include <odp_errno_define.h>
+/*
+ * Suppress bounds warnings about interior zero length arrays. Such an array
+ * is used intentionally in sset_info.
+ */
+#if __GNUC__ >= 10
+#pragma GCC diagnostic ignored "-Wzero-length-bounds"
+#endif
+
static struct ethtool_gstrings *get_stringset(int fd, struct ifreq *ifr)
{
struct {
struct ethtool_sset_info hdr;
- uint32_t buf[1];
+ uint32_t buf[1]; /* overlaps with hdr.data[] */
} sset_info;
struct ethtool_drvinfo drvinfo;
uint32_t len;
diff --git a/platform/linux-generic/test/pktio_ipc/ipc_common.c b/platform/linux-generic/test/pktio_ipc/ipc_common.c
index f2f73a5a1..b064177bc 100644
--- a/platform/linux-generic/test/pktio_ipc/ipc_common.c
+++ b/platform/linux-generic/test/pktio_ipc/ipc_common.c
@@ -139,10 +139,8 @@ void print_info(char *progname)
{
odp_sys_info_print();
- printf("Running ODP appl: \"%s\"\n"
- "-----------------\n"
- "Using IF: %s\n",
- progname, pktio_name);
+ printf("Running ODP appl: \"%s\"\n",
+ progname);
printf("\n\n");
fflush(NULL);
}
diff --git a/platform/linux-generic/test/pktio_ipc/ipc_common.h b/platform/linux-generic/test/pktio_ipc/ipc_common.h
index d8e7a2f4e..64ce64325 100644
--- a/platform/linux-generic/test/pktio_ipc/ipc_common.h
+++ b/platform/linux-generic/test/pktio_ipc/ipc_common.h
@@ -64,14 +64,11 @@ typedef struct ODP_PACKED {
odp_u32be_t magic;
} pkt_tail_t;
-/** Application argument */
-char *pktio_name;
-
/** Run time in seconds */
-int run_time_sec;
+extern int run_time_sec;
/** PID of the master process */
-int master_pid;
+extern int master_pid;
/* helper funcs */
void parse_args(int argc, char *argv[]);
diff --git a/scripts/git-transplant.py b/scripts/git-transplant.py
index 4de62c3c5..69bcf72b5 100755
--- a/scripts/git-transplant.py
+++ b/scripts/git-transplant.py
@@ -78,7 +78,8 @@ for dirname, dirnames, filenames in os.walk(overlay_dir):
wholefilechanges = "git log --oneline --ancestry-path --name-status " + \
interval + " " + orig_dir + " |grep \"^A\""
try:
- output = subprocess.check_output([wholefilechanges], shell=True)
+ output = subprocess.check_output([wholefilechanges], shell=True,
+ encoding='utf8')
except subprocess.CalledProcessError:
output = ""
pass
diff --git a/test/performance/odp_l2fwd.c b/test/performance/odp_l2fwd.c
index 4bacc5309..d899aa743 100644
--- a/test/performance/odp_l2fwd.c
+++ b/test/performance/odp_l2fwd.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2014-2018, Linaro Limited
- * Copyright (c) 2019, Nokia
+ * Copyright (c) 2019-2020, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -23,10 +23,10 @@
/* Maximum number of worker threads */
#define MAX_WORKERS (ODP_THREAD_COUNT_MAX - 1)
-/* Size of the shared memory block */
-#define POOL_PKT_NUM (16 * 1024)
+/* Default number of packets per pool */
+#define DEFAULT_NUM_PKT (16 * 1024)
-/* Buffer size of the packet pool buffer */
+/* Packet length to pool create */
#define POOL_PKT_LEN 1536
/* Maximum number of packet in a burst */
@@ -70,7 +70,8 @@ static inline int sched_mode(pktin_mode_t in_mode)
* Parsed command line application arguments
*/
typedef struct {
- int extra_check; /* Some extra checks have been enabled */
+ /* Some extra features (e.g. error checks) have been enabled */
+ int extra_feat;
unsigned int cpu_count;
int if_count; /* Number of interfaces to be used */
int addr_count; /* Number of dst addresses to be used */
@@ -85,10 +86,13 @@ typedef struct {
int dst_change; /* Change destination eth addresses */
int src_change; /* Change source eth addresses */
int error_check; /* Check packet errors */
+ int packet_copy; /* Packet copy */
int chksum; /* Checksum offload */
int sched_mode; /* Scheduler mode */
int num_groups; /* Number of scheduling groups */
int burst_rx; /* Receive burst size */
+ int pool_per_if; /* Create pool per interface */
+ uint32_t num_pkt; /* Number of packets per pool */
int verbose; /* Verbose output */
} appl_args_t;
@@ -101,6 +105,8 @@ typedef union ODP_ALIGNED_CACHE {
uint64_t rx_drops;
/* Packets dropped due to transmit error */
uint64_t tx_drops;
+ /* Number of failed packet copies */
+ uint64_t copy_fails;
} s;
uint8_t padding[ODP_CACHE_LINE_SIZE];
@@ -280,6 +286,28 @@ static inline void chksum_insert(odp_packet_t *pkt_tbl, int pkts)
}
}
+static inline int copy_packets(odp_packet_t *pkt_tbl, int pkts)
+{
+ odp_packet_t old_pkt, new_pkt;
+ odp_pool_t pool;
+ int i;
+ int copy_fails = 0;
+
+ for (i = 0; i < pkts; i++) {
+ old_pkt = pkt_tbl[i];
+ pool = odp_packet_pool(old_pkt);
+ new_pkt = odp_packet_copy(old_pkt, pool);
+ if (odp_likely(new_pkt != ODP_PACKET_INVALID)) {
+ pkt_tbl[i] = new_pkt;
+ odp_packet_free(old_pkt);
+ } else {
+ copy_fails++;
+ }
+ }
+
+ return copy_fails;
+}
+
/*
* Packet IO worker thread using scheduled queues
*
@@ -353,7 +381,14 @@ static int run_worker_sched_mode(void *arg)
odp_packet_from_event_multi(pkt_tbl, ev_tbl, pkts);
- if (odp_unlikely(gbl_args->appl.extra_check)) {
+ if (odp_unlikely(gbl_args->appl.extra_feat)) {
+ if (gbl_args->appl.packet_copy) {
+ int fails;
+
+ fails = copy_packets(pkt_tbl, pkts);
+ stats->s.copy_fails += fails;
+ }
+
if (gbl_args->appl.chksum)
chksum_insert(pkt_tbl, pkts);
@@ -480,7 +515,14 @@ static int run_worker_plain_queue_mode(void *arg)
odp_packet_from_event_multi(pkt_tbl, event, pkts);
- if (odp_unlikely(gbl_args->appl.extra_check)) {
+ if (odp_unlikely(gbl_args->appl.extra_feat)) {
+ if (gbl_args->appl.packet_copy) {
+ int fails;
+
+ fails = copy_packets(pkt_tbl, pkts);
+ stats->s.copy_fails += fails;
+ }
+
if (gbl_args->appl.chksum)
chksum_insert(pkt_tbl, pkts);
@@ -605,7 +647,14 @@ static int run_worker_direct_mode(void *arg)
if (odp_unlikely(pkts <= 0))
continue;
- if (odp_unlikely(gbl_args->appl.extra_check)) {
+ if (odp_unlikely(gbl_args->appl.extra_feat)) {
+ if (gbl_args->appl.packet_copy) {
+ int fails;
+
+ fails = copy_packets(pkt_tbl, pkts);
+ stats->s.copy_fails += fails;
+ }
+
if (gbl_args->appl.chksum)
chksum_insert(pkt_tbl, pkts);
@@ -711,9 +760,10 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx,
}
odp_pktio_config_init(&config);
- config.parser.layer = gbl_args->appl.extra_check ?
- ODP_PROTO_LAYER_ALL :
- ODP_PROTO_LAYER_NONE;
+
+ config.parser.layer = ODP_PROTO_LAYER_NONE;
+ if (gbl_args->appl.error_check || gbl_args->appl.chksum)
+ config.parser.layer = ODP_PROTO_LAYER_ALL;
if (gbl_args->appl.chksum) {
printf("Checksum offload enabled\n");
@@ -833,7 +883,7 @@ static int print_speed_stats(int num_workers, stats_t **thr_stats,
uint64_t pkts = 0;
uint64_t pkts_prev = 0;
uint64_t pps;
- uint64_t rx_drops, tx_drops;
+ uint64_t rx_drops, tx_drops, copy_fails;
uint64_t maximum_pps = 0;
int i;
int elapsed = 0;
@@ -851,6 +901,7 @@ static int print_speed_stats(int num_workers, stats_t **thr_stats,
pkts = 0;
rx_drops = 0;
tx_drops = 0;
+ copy_fails = 0;
sleep(timeout);
@@ -858,6 +909,7 @@ static int print_speed_stats(int num_workers, stats_t **thr_stats,
pkts += thr_stats[i]->s.packets;
rx_drops += thr_stats[i]->s.rx_drops;
tx_drops += thr_stats[i]->s.tx_drops;
+ copy_fails += thr_stats[i]->s.copy_fails;
}
if (stats_enabled) {
pps = (pkts - pkts_prev) / timeout;
@@ -866,7 +918,10 @@ static int print_speed_stats(int num_workers, stats_t **thr_stats,
printf("%" PRIu64 " pps, %" PRIu64 " max pps, ", pps,
maximum_pps);
- printf(" %" PRIu64 " rx drops, %" PRIu64 " tx drops\n",
+ if (gbl_args->appl.packet_copy)
+ printf("%" PRIu64 " copy fails, ", copy_fails);
+
+ printf("%" PRIu64 " rx drops, %" PRIu64 " tx drops\n",
rx_drops, tx_drops);
pkts_prev = pkts;
@@ -1150,6 +1205,13 @@ static void usage(char *progname)
" num: must not exceed number of interfaces or workers\n"
" -b, --burst_rx <num> 0: Use max burst size (default)\n"
" num: Max number of packets per receive call\n"
+ " -p, --packet_copy 0: Don't copy packet (default)\n"
+ " 1: Create and send copy of the received packet.\n"
+ " Free the original packet.\n"
+ " -y, --pool_per_if 0: Share a single pool between all interfaces (default)\n"
+ " 1: Create a pool per interface\n"
+ " -n, --num_pkt <num> Number of packets per pool. Default is 16k or\n"
+ " the maximum capability. Use 0 for the default.\n"
" -v, --verbose Verbose output.\n"
" -h, --help Display help and exit.\n\n"
"\n", NO_PATH(progname), NO_PATH(progname), MAX_PKTIOS
@@ -1185,12 +1247,15 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
{"chksum", required_argument, NULL, 'k'},
{"groups", required_argument, NULL, 'g'},
{"burst_rx", required_argument, NULL, 'b'},
+ {"packet_copy", required_argument, NULL, 'p'},
+ {"pool_per_if", required_argument, NULL, 'y'},
+ {"num_pkt", required_argument, NULL, 'n'},
{"verbose", no_argument, NULL, 'v'},
{"help", no_argument, NULL, 'h'},
{NULL, 0, NULL, 0}
};
- static const char *shortopts = "+c:t:a:i:m:o:r:d:s:e:k:g:b:vh";
+ static const char *shortopts = "+c:t:a:i:m:o:r:d:s:e:k:g:b:p:y:n:vh";
appl_args->time = 0; /* loop forever if time to run is 0 */
appl_args->accuracy = 1; /* get and print pps stats second */
@@ -1199,9 +1264,12 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
appl_args->src_change = 1; /* change eth src address by default */
appl_args->num_groups = 0; /* use default group */
appl_args->error_check = 0; /* don't check packet errors by default */
+ appl_args->packet_copy = 0;
appl_args->burst_rx = 0;
appl_args->verbose = 0;
appl_args->chksum = 0; /* don't use checksum offload by default */
+ appl_args->pool_per_if = 0;
+ appl_args->num_pkt = 0;
while (1) {
opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
@@ -1333,6 +1401,15 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
case 'b':
appl_args->burst_rx = atoi(optarg);
break;
+ case 'p':
+ appl_args->packet_copy = atoi(optarg);
+ break;
+ case 'y':
+ appl_args->pool_per_if = atoi(optarg);
+ break;
+ case 'n':
+ appl_args->num_pkt = atoi(optarg);
+ break;
case 'v':
appl_args->verbose = 1;
break;
@@ -1366,7 +1443,10 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
if (appl_args->burst_rx == 0)
appl_args->burst_rx = MAX_PKT_BURST;
- appl_args->extra_check = appl_args->error_check || appl_args->chksum;
+ appl_args->extra_feat = 0;
+ if (appl_args->error_check || appl_args->chksum ||
+ appl_args->packet_copy)
+ appl_args->extra_feat = 1;
optind = 1; /* reset 'extern optind' from the getopt lib */
}
@@ -1374,21 +1454,22 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
/*
* Print system and application info
*/
-static void print_info(char *progname, appl_args_t *appl_args)
+static void print_info(appl_args_t *appl_args)
{
int i;
odp_sys_info_print();
- printf("Running ODP appl: \"%s\"\n"
+ printf("\n"
+ "odp_l2fwd options\n"
"-----------------\n"
- "IF-count: %i\n"
- "Using IFs: ",
- progname, appl_args->if_count);
+ "IF-count: %i\n"
+ "Using IFs: ", appl_args->if_count);
+
for (i = 0; i < appl_args->if_count; ++i)
printf(" %s", appl_args->if_names[i]);
printf("\n"
- "Mode: ");
+ "Mode: ");
if (appl_args->in_mode == DIRECT_RECV)
printf("PKTIN_DIRECT, ");
else if (appl_args->in_mode == PLAIN_QUEUE)
@@ -1405,10 +1486,16 @@ static void print_info(char *progname, appl_args_t *appl_args)
else
printf("PKTOUT_DIRECT\n");
- printf("Burst size: %i\n", appl_args->burst_rx);
+ printf("Burst size: %i\n", appl_args->burst_rx);
+ printf("Number of pools: %i\n", appl_args->pool_per_if ?
+ appl_args->if_count : 1);
- printf("\n");
- fflush(NULL);
+ if (appl_args->extra_feat) {
+ printf("Extra features: %s%s%s\n",
+ appl_args->error_check ? "error_check " : "",
+ appl_args->chksum ? "chksum " : "",
+ appl_args->packet_copy ? "packet_copy" : "");
+ }
}
static void gbl_args_init(args_t *args)
@@ -1451,7 +1538,6 @@ int main(int argc, char *argv[])
odph_helper_options_t helper_options;
odph_thread_param_t thr_param[MAX_WORKERS];
odph_thread_common_param_t thr_common;
- odp_pool_t pool;
int i;
int num_workers, num_thr;
odp_shm_t shm;
@@ -1461,14 +1547,16 @@ int main(int argc, char *argv[])
odp_pool_param_t params;
int ret;
stats_t *stats[MAX_WORKERS];
- int if_count;
+ int if_count, num_pools;
int (*thr_run_func)(void *);
odp_instance_t instance;
int num_groups;
odp_schedule_group_t group[MAX_PKTIOS];
+ odp_pool_t pool_tbl[MAX_PKTIOS];
+ odp_pool_t pool;
odp_init_t init;
odp_pool_capability_t pool_capa;
- uint32_t pkt_len, pkt_num;
+ uint32_t pkt_len, num_pkt;
/* Let helper collect its own arguments (e.g. --odph_proc) */
argc = odph_parse_options(argc, argv);
@@ -1529,7 +1617,7 @@ int main(int argc, char *argv[])
gbl_args->appl.sched_mode = 1;
/* Print both system and application information */
- print_info(NO_PATH(argv[0]), &gbl_args->appl);
+ print_info(&gbl_args->appl);
num_workers = MAX_WORKERS;
if (gbl_args->appl.cpu_count && gbl_args->appl.cpu_count < MAX_WORKERS)
@@ -1548,49 +1636,80 @@ int main(int argc, char *argv[])
num_groups = gbl_args->appl.num_groups;
- printf("num worker threads: %i\n", num_workers);
- printf("first CPU: %i\n", odp_cpumask_first(&cpumask));
- printf("cpu mask: %s\n", cpumaskstr);
+ printf("Num worker threads: %i\n", num_workers);
+ printf("First CPU: %i\n", odp_cpumask_first(&cpumask));
+ printf("CPU mask: %s\n", cpumaskstr);
if (num_groups)
printf("num groups: %i\n", num_groups);
- printf("\n");
-
if (num_groups > if_count || num_groups > num_workers) {
ODPH_ERR("Too many groups. Number of groups may not exceed "
"number of interfaces or workers.\n");
exit(EXIT_FAILURE);
}
+ num_pools = 1;
+ if (gbl_args->appl.pool_per_if)
+ num_pools = if_count;
+
if (odp_pool_capability(&pool_capa)) {
ODPH_ERR("Error: pool capability failed\n");
return -1;
}
+ if (num_pools > (int)pool_capa.pkt.max_pools) {
+ ODPH_ERR("Error: Too many pools %i\n", num_pools);
+ return -1;
+ }
+
pkt_len = POOL_PKT_LEN;
- pkt_num = POOL_PKT_NUM;
- if (pool_capa.pkt.max_len && pkt_len > pool_capa.pkt.max_len)
+ if (pool_capa.pkt.max_len && pkt_len > pool_capa.pkt.max_len) {
pkt_len = pool_capa.pkt.max_len;
+ printf("\nWarning: packet length reduced to %u\n\n", pkt_len);
+ }
+
+ /* zero means default number of packets */
+ if (gbl_args->appl.num_pkt == 0)
+ num_pkt = DEFAULT_NUM_PKT;
+ else
+ num_pkt = gbl_args->appl.num_pkt;
+
+ if (pool_capa.pkt.max_num && num_pkt > pool_capa.pkt.max_num) {
+ if (gbl_args->appl.num_pkt == 0) {
+ num_pkt = pool_capa.pkt.max_num;
+ printf("\nWarning: number of packets reduced to %u\n\n",
+ num_pkt);
+ } else {
+ ODPH_ERR("Error: Too many packets %u. Maximum is %u.\n",
+ num_pkt, pool_capa.pkt.max_num);
+ return -1;
+ }
+ }
- if (pool_capa.pkt.max_num && pkt_num > pool_capa.pkt.max_num)
- pkt_num = pool_capa.pkt.max_num;
+ printf("Packets per pool: %u\n", num_pkt);
+ printf("Packet length: %u\n", pkt_len);
+ printf("\n\n");
/* Create packet pool */
odp_pool_param_init(&params);
params.pkt.seg_len = pkt_len;
params.pkt.len = pkt_len;
- params.pkt.num = pkt_num;
+ params.pkt.num = num_pkt;
params.type = ODP_POOL_PACKET;
- pool = odp_pool_create("packet pool", &params);
+ for (i = 0; i < num_pools; i++) {
+ pool_tbl[i] = odp_pool_create("packet pool", &params);
- if (pool == ODP_POOL_INVALID) {
- ODPH_ERR("Error: packet pool create failed.\n");
- exit(EXIT_FAILURE);
+ if (pool_tbl[i] == ODP_POOL_INVALID) {
+ ODPH_ERR("Error: pool create failed %i\n", i);
+ exit(EXIT_FAILURE);
+ }
+
+ if (gbl_args->appl.verbose)
+ odp_pool_print(pool_tbl[i]);
}
- odp_pool_print(pool);
if (odp_pktio_max_index() >= MAX_PKTIO_INDEXES)
ODPH_DBG("Warning: max pktio index (%u) is too large\n",
@@ -1608,6 +1727,8 @@ int main(int argc, char *argv[])
create_groups(num_groups, group);
}
+ pool = pool_tbl[0];
+
for (i = 0; i < if_count; ++i) {
const char *dev = gbl_args->appl.if_names[i];
int num_rx, num_tx;
@@ -1626,6 +1747,9 @@ int main(int argc, char *argv[])
/* Round robin pktios to groups */
grp = group[i % num_groups];
+ if (gbl_args->appl.pool_per_if)
+ pool = pool_tbl[i];
+
if (create_pktio(dev, i, num_rx, num_tx, pool, grp))
exit(EXIT_FAILURE);
@@ -1701,6 +1825,9 @@ int main(int argc, char *argv[])
exit(EXIT_FAILURE);
}
+ if (gbl_args->appl.verbose)
+ odp_shm_print_all();
+
/* Start packet receive and transmit */
for (i = 0; i < if_count; ++i) {
odp_pktio_t pktio;
@@ -1749,9 +1876,11 @@ int main(int argc, char *argv[])
gbl_args = NULL;
odp_mb_full();
- if (odp_pool_destroy(pool)) {
- ODPH_ERR("Error: pool destroy\n");
- exit(EXIT_FAILURE);
+ for (i = 0; i < num_pools; i++) {
+ if (odp_pool_destroy(pool_tbl[i])) {
+ ODPH_ERR("Error: pool destroy failed %i\n", i);
+ exit(EXIT_FAILURE);
+ }
}
if (odp_shm_free(shm)) {
diff --git a/test/validation/api/classification/odp_classification_test_pmr.c b/test/validation/api/classification/odp_classification_test_pmr.c
index 7cbeca586..97d1084b7 100644
--- a/test/validation/api/classification/odp_classification_test_pmr.c
+++ b/test/validation/api/classification/odp_classification_test_pmr.c
@@ -11,7 +11,7 @@
static odp_pool_t pkt_pool;
/** sequence number of IP packets */
-odp_atomic_u32_t seq;
+static odp_atomic_u32_t seq;
static cls_packet_info_t default_pkt_info;
static odp_cls_capability_t cls_capa;
diff --git a/test/validation/api/classification/odp_classification_tests.c b/test/validation/api/classification/odp_classification_tests.c
index b86bfe272..378e48c46 100644
--- a/test/validation/api/classification/odp_classification_tests.c
+++ b/test/validation/api/classification/odp_classification_tests.c
@@ -25,7 +25,7 @@ static int global_num_l2_qos;
#define NUM_COS_PMR 1
#define NUM_COS_COMPOSITE 1
/** sequence number of IP packets */
-odp_atomic_u32_t seq;
+static odp_atomic_u32_t seq;
/* default packet info */
static cls_packet_info_t default_pkt_info;
diff --git a/test/validation/api/pool/pool.c b/test/validation/api/pool/pool.c
index c4e937295..24b47d4fb 100644
--- a/test/validation/api/pool/pool.c
+++ b/test/validation/api/pool/pool.c
@@ -7,8 +7,11 @@
#include <odp_api.h>
#include "odp_cunit_common.h"
-#define PKT_LEN 400
-#define PKT_NUM 500
+#define BUF_SIZE 1500
+#define BUF_NUM 1000
+#define TMO_NUM 1000
+#define PKT_LEN 400
+#define PKT_NUM 500
#define MAX_NUM_DEFAULT (10 * 1024 * 1024)
typedef struct {
@@ -20,8 +23,8 @@ typedef struct {
static global_shared_mem_t *global_mem;
-static const int default_buffer_size = 1500;
-static const int default_buffer_num = 1000;
+static odp_pool_capability_t global_pool_capa;
+static odp_pool_param_t default_pool_param;
static void pool_create_destroy(odp_pool_param_t *param)
{
@@ -41,9 +44,9 @@ static void pool_test_create_destroy_buffer(void)
odp_pool_param_init(&param);
param.type = ODP_POOL_BUFFER;
- param.buf.size = default_buffer_size;
+ param.buf.size = BUF_SIZE;
param.buf.align = ODP_CACHE_LINE_SIZE;
- param.buf.num = default_buffer_num;
+ param.buf.num = BUF_NUM;
pool_create_destroy(&param);
}
@@ -55,8 +58,8 @@ static void pool_test_create_destroy_packet(void)
odp_pool_param_init(&param);
param.type = ODP_POOL_PACKET;
- param.pkt.len = default_buffer_size;
- param.pkt.num = default_buffer_num;
+ param.pkt.len = PKT_LEN;
+ param.pkt.num = PKT_NUM;
pool_create_destroy(&param);
}
@@ -68,7 +71,7 @@ static void pool_test_create_destroy_timeout(void)
odp_pool_param_init(&param);
param.type = ODP_POOL_TIMEOUT;
- param.tmo.num = default_buffer_num;
+ param.tmo.num = TMO_NUM;
pool_create_destroy(&param);
}
@@ -83,9 +86,9 @@ static void pool_test_lookup_info_print(void)
odp_pool_param_init(&param);
param.type = ODP_POOL_BUFFER;
- param.buf.size = default_buffer_size;
+ param.buf.size = BUF_SIZE;
param.buf.align = ODP_CACHE_LINE_SIZE;
- param.buf.num = default_buffer_num;
+ param.buf.num = BUF_NUM;
pool = odp_pool_create(pool_name, &param);
CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
@@ -105,7 +108,56 @@ static void pool_test_lookup_info_print(void)
CU_ASSERT(odp_pool_destroy(pool) == 0);
}
-static void pool_test_alloc_packet(void)
+static void alloc_buffer(uint32_t cache_size)
+{
+ odp_pool_t pool;
+ odp_pool_param_t param;
+ uint32_t i, num;
+ odp_buffer_t buf[BUF_NUM];
+
+ odp_pool_param_init(&param);
+
+ param.type = ODP_POOL_BUFFER;
+ param.buf.num = BUF_NUM;
+ param.buf.size = BUF_SIZE;
+ param.pkt.cache_size = cache_size;
+
+ pool = odp_pool_create(NULL, &param);
+
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ num = 0;
+
+ for (i = 0; i < PKT_NUM; i++) {
+ buf[num] = odp_buffer_alloc(pool);
+ CU_ASSERT(buf[num] != ODP_BUFFER_INVALID);
+
+ if (buf[num] != ODP_BUFFER_INVALID)
+ num++;
+ }
+
+ for (i = 0; i < num; i++)
+ odp_buffer_free(buf[i]);
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+static void pool_test_alloc_buffer(void)
+{
+ alloc_buffer(default_pool_param.buf.cache_size);
+}
+
+static void pool_test_alloc_buffer_min_cache(void)
+{
+ alloc_buffer(global_pool_capa.buf.min_cache_size);
+}
+
+static void pool_test_alloc_buffer_max_cache(void)
+{
+ alloc_buffer(global_pool_capa.buf.max_cache_size);
+}
+
+static void alloc_packet(uint32_t cache_size)
{
odp_pool_t pool;
odp_pool_param_t param;
@@ -117,6 +169,7 @@ static void pool_test_alloc_packet(void)
param.type = ODP_POOL_PACKET;
param.pkt.num = PKT_NUM;
param.pkt.len = PKT_LEN;
+ param.pkt.cache_size = cache_size;
pool = odp_pool_create(NULL, &param);
@@ -138,6 +191,21 @@ static void pool_test_alloc_packet(void)
CU_ASSERT(odp_pool_destroy(pool) == 0);
}
+static void pool_test_alloc_packet(void)
+{
+ alloc_packet(default_pool_param.pkt.cache_size);
+}
+
+static void pool_test_alloc_packet_min_cache(void)
+{
+ alloc_packet(global_pool_capa.pkt.min_cache_size);
+}
+
+static void pool_test_alloc_packet_max_cache(void)
+{
+ alloc_packet(global_pool_capa.pkt.max_cache_size);
+}
+
static void pool_test_alloc_packet_subparam(void)
{
odp_pool_t pool;
@@ -198,6 +266,54 @@ static void pool_test_alloc_packet_subparam(void)
CU_ASSERT(odp_pool_destroy(pool) == 0);
}
+static void alloc_timeout(uint32_t cache_size)
+{
+ odp_pool_t pool;
+ odp_pool_param_t param;
+ uint32_t i, num;
+ odp_timeout_t tmo[TMO_NUM];
+
+ odp_pool_param_init(&param);
+
+ param.type = ODP_POOL_TIMEOUT;
+ param.tmo.num = TMO_NUM;
+ param.tmo.cache_size = cache_size;
+
+ pool = odp_pool_create(NULL, &param);
+
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ num = 0;
+
+ for (i = 0; i < PKT_NUM; i++) {
+ tmo[num] = odp_timeout_alloc(pool);
+ CU_ASSERT(tmo[num] != ODP_TIMEOUT_INVALID);
+
+ if (tmo[num] != ODP_TIMEOUT_INVALID)
+ num++;
+ }
+
+ for (i = 0; i < num; i++)
+ odp_timeout_free(tmo[i]);
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+static void pool_test_alloc_timeout(void)
+{
+ alloc_timeout(default_pool_param.tmo.cache_size);
+}
+
+static void pool_test_alloc_timeout_min_cache(void)
+{
+ alloc_timeout(global_pool_capa.tmo.min_cache_size);
+}
+
+static void pool_test_alloc_timeout_max_cache(void)
+{
+ alloc_timeout(global_pool_capa.tmo.max_cache_size);
+}
+
static void pool_test_info_packet(void)
{
odp_pool_t pool;
@@ -510,9 +626,9 @@ static int run_pool_test_create_after_fork(void *arg ODP_UNUSED)
odp_pool_param_init(&param);
param.type = ODP_POOL_BUFFER;
- param.buf.size = default_buffer_size;
+ param.buf.size = BUF_SIZE;
param.buf.align = ODP_CACHE_LINE_SIZE;
- param.buf.num = default_buffer_num;
+ param.buf.num = BUF_NUM;
pool = odp_pool_create(NULL, &param);
CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
@@ -521,8 +637,7 @@ static int run_pool_test_create_after_fork(void *arg ODP_UNUSED)
odp_barrier_wait(&global_mem->init_barrier);
- buffer_alloc_loop(global_mem->pool, default_buffer_num,
- default_buffer_size);
+ buffer_alloc_loop(global_mem->pool, BUF_NUM, BUF_SIZE);
return CU_get_number_of_failures();
}
@@ -554,8 +669,7 @@ static void pool_test_create_after_fork(void)
/* Wait until thread 0 has created the test pool */
odp_barrier_wait(&global_mem->init_barrier);
- buffer_alloc_loop(global_mem->pool, default_buffer_num,
- default_buffer_size);
+ buffer_alloc_loop(global_mem->pool, BUF_NUM, BUF_SIZE);
/* Wait for all thread endings */
CU_ASSERT(odp_cunit_thread_exit(&thrdarg) >= 0);
@@ -565,12 +679,35 @@ static void pool_test_create_after_fork(void)
CU_ASSERT(!odp_shm_free(shm));
}
+static int pool_suite_init(void)
+{
+ memset(&global_pool_capa, 0, sizeof(odp_pool_capability_t));
+ memset(&default_pool_param, 0, sizeof(odp_pool_param_t));
+
+ if (odp_pool_capability(&global_pool_capa) < 0) {
+ printf("pool_capability failed in suite init\n");
+ return -1;
+ }
+
+ odp_pool_param_init(&default_pool_param);
+
+ return 0;
+}
+
odp_testinfo_t pool_suite[] = {
ODP_TEST_INFO(pool_test_create_destroy_buffer),
ODP_TEST_INFO(pool_test_create_destroy_packet),
ODP_TEST_INFO(pool_test_create_destroy_timeout),
+ ODP_TEST_INFO(pool_test_alloc_buffer),
+ ODP_TEST_INFO(pool_test_alloc_buffer_min_cache),
+ ODP_TEST_INFO(pool_test_alloc_buffer_max_cache),
ODP_TEST_INFO(pool_test_alloc_packet),
+ ODP_TEST_INFO(pool_test_alloc_packet_min_cache),
+ ODP_TEST_INFO(pool_test_alloc_packet_max_cache),
ODP_TEST_INFO(pool_test_alloc_packet_subparam),
+ ODP_TEST_INFO(pool_test_alloc_timeout),
+ ODP_TEST_INFO(pool_test_alloc_timeout_min_cache),
+ ODP_TEST_INFO(pool_test_alloc_timeout_max_cache),
ODP_TEST_INFO(pool_test_info_packet),
ODP_TEST_INFO(pool_test_lookup_info_print),
ODP_TEST_INFO(pool_test_info_data_range),
@@ -585,6 +722,7 @@ odp_testinfo_t pool_suite[] = {
odp_suiteinfo_t pool_suites[] = {
{ .name = "Pool tests",
.testinfo_tbl = pool_suite,
+ .init_func = pool_suite_init,
},
ODP_SUITE_INFO_NULL,
};
diff --git a/test/validation/api/queue/queue.c b/test/validation/api/queue/queue.c
index 807ca5291..9b4ebd44c 100644
--- a/test/validation/api/queue/queue.c
+++ b/test/validation/api/queue/queue.c
@@ -468,7 +468,8 @@ static void test_pair(odp_nonblocking_t nonblocking,
return;
}
- if (capa.plain.lockfree.max_size < max_burst)
+ if (capa.plain.lockfree.max_size &&
+ capa.plain.lockfree.max_size < max_burst)
max_burst = capa.plain.lockfree.max_size;
} else {
if (capa.plain.max_size && capa.plain.max_size < max_burst)
diff --git a/test/validation/api/shmem/shmem.c b/test/validation/api/shmem/shmem.c
index 50b12e736..5d7900eb6 100644
--- a/test/validation/api/shmem/shmem.c
+++ b/test/validation/api/shmem/shmem.c
@@ -129,7 +129,7 @@ static int run_test_basic_thread(void *arg ODP_UNUSED)
/*
* test basic things: shmem creation, info, share, and free
*/
-static void shmem_test_basic(void)
+static void shmem_test_multi_thread(void)
{
pthrd_arg thrdarg;
odp_shm_t shm;
@@ -212,10 +212,27 @@ static void shmem_test_basic(void)
CU_ASSERT(0 == odp_shm_free(shm));
}
+static void shmem_test_reserve(void)
+{
+ odp_shm_t shm;
+ void *addr;
+
+ shm = odp_shm_reserve(MEM_NAME, MEDIUM_MEM, ALIGN_SIZE, 0);
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+
+ addr = odp_shm_addr(shm);
+ CU_ASSERT(addr != NULL);
+
+ if (addr)
+ memset(addr, 0, MEDIUM_MEM);
+
+ CU_ASSERT(odp_shm_free(shm) == 0);
+}
+
/*
* test reserving memory from huge pages
*/
-static void shmem_test_hp(void)
+static void shmem_test_flag_hp(void)
{
odp_shm_t shm;
odp_shm_info_t info;
@@ -251,6 +268,73 @@ static void shmem_test_hp(void)
CU_ASSERT(odp_shm_free(shm) == 0);
}
+static void shmem_test_flag_proc(void)
+{
+ odp_shm_t shm;
+ void *addr;
+
+ shm = odp_shm_reserve(MEM_NAME, MEDIUM_MEM, ALIGN_SIZE, ODP_SHM_PROC);
+
+ if (shm == ODP_SHM_INVALID) {
+ printf(" ODP_SHM_PROC flag not supported\n");
+ return;
+ }
+
+ addr = odp_shm_addr(shm);
+
+ CU_ASSERT(addr != NULL);
+
+ if (addr)
+ memset(addr, 0, MEDIUM_MEM);
+
+ CU_ASSERT(odp_shm_free(shm) == 0);
+}
+
+static void shmem_test_flag_export(void)
+{
+ odp_shm_t shm;
+ void *addr;
+
+ shm = odp_shm_reserve(MEM_NAME, MEDIUM_MEM, ALIGN_SIZE, ODP_SHM_EXPORT);
+
+ if (shm == ODP_SHM_INVALID) {
+ printf(" ODP_SHM_EXPORT flag not supported\n");
+ return;
+ }
+
+ addr = odp_shm_addr(shm);
+
+ CU_ASSERT(addr != NULL);
+
+ if (addr)
+ memset(addr, 0, MEDIUM_MEM);
+
+ CU_ASSERT(odp_shm_free(shm) == 0);
+}
+
+static void shmem_test_flag_hw_access(void)
+{
+ odp_shm_t shm;
+ void *addr;
+
+ shm = odp_shm_reserve(MEM_NAME, MEDIUM_MEM, ALIGN_SIZE,
+ ODP_SHM_HW_ACCESS);
+
+ if (shm == ODP_SHM_INVALID) {
+ printf(" ODP_SHM_HW_ACCESS flag not supported\n");
+ return;
+ }
+
+ addr = odp_shm_addr(shm);
+
+ CU_ASSERT(addr != NULL);
+
+ if (addr)
+ memset(addr, 0, MEDIUM_MEM);
+
+ CU_ASSERT(odp_shm_free(shm) == 0);
+}
+
/*
* maximum size reservation
*/
@@ -855,9 +939,13 @@ static void shmem_test_stress(void)
}
odp_testinfo_t shmem_suite[] = {
- ODP_TEST_INFO(shmem_test_basic),
- ODP_TEST_INFO(shmem_test_hp),
+ ODP_TEST_INFO(shmem_test_reserve),
+ ODP_TEST_INFO(shmem_test_flag_hp),
+ ODP_TEST_INFO(shmem_test_flag_proc),
+ ODP_TEST_INFO(shmem_test_flag_export),
+ ODP_TEST_INFO(shmem_test_flag_hw_access),
ODP_TEST_INFO(shmem_test_max_reserve),
+ ODP_TEST_INFO(shmem_test_multi_thread),
ODP_TEST_INFO(shmem_test_reserve_after_fork),
ODP_TEST_INFO(shmem_test_singleva_after_fork),
ODP_TEST_INFO(shmem_test_stress),
diff --git a/test/validation/api/system/system.c b/test/validation/api/system/system.c
index 2069a99b2..cc5d007d1 100644
--- a/test/validation/api/system/system.c
+++ b/test/validation/api/system/system.c
@@ -17,6 +17,9 @@
#define GIGA_HZ 1000000000ULL
#define KILO_HZ 1000ULL
+/* 10 usec wait time assumes >100kHz resolution on CPU cycles counter */
+#define CPU_CYCLES_WAIT_NS 10000
+
static void test_version_api_str(void)
{
int char_ok = 0;
@@ -81,37 +84,37 @@ static void system_test_odp_cpu_count(void)
CU_ASSERT(0 < cpus);
}
-static void system_test_odp_cpu_cycles(void)
+static void system_test_cpu_cycles(void)
{
uint64_t c2, c1;
c1 = odp_cpu_cycles();
- odp_time_wait_ns(100);
+ odp_time_wait_ns(CPU_CYCLES_WAIT_NS);
c2 = odp_cpu_cycles();
CU_ASSERT(c2 != c1);
}
-static void system_test_odp_cpu_cycles_max(void)
+static void system_test_cpu_cycles_max(void)
{
uint64_t c2, c1;
uint64_t max1, max2;
max1 = odp_cpu_cycles_max();
- odp_time_wait_ns(100);
+ odp_time_wait_ns(CPU_CYCLES_WAIT_NS);
max2 = odp_cpu_cycles_max();
CU_ASSERT(max1 >= UINT32_MAX / 2);
CU_ASSERT(max1 == max2);
c1 = odp_cpu_cycles();
- odp_time_wait_ns(1000);
+ odp_time_wait_ns(CPU_CYCLES_WAIT_NS);
c2 = odp_cpu_cycles();
CU_ASSERT(c1 <= max1 && c2 <= max1);
}
-static void system_test_odp_cpu_cycles_resolution(void)
+static void system_test_cpu_cycles_resolution(void)
{
int i;
uint64_t res;
@@ -133,7 +136,7 @@ static void system_test_odp_cpu_cycles_resolution(void)
}
}
-static void system_test_odp_cpu_cycles_diff(void)
+static void system_test_cpu_cycles_diff(void)
{
int i;
uint64_t c2, c1, c3, max;
@@ -281,6 +284,17 @@ static void system_test_odp_sys_huge_page_size_all(void)
}
}
+static int system_check_cycle_counter(void)
+{
+ if (odp_cpu_cycles_max() == 0) {
+ fprintf(stderr, "Cycle counter is not supported, skipping "
+ "test\n");
+ return ODP_TEST_INACTIVE;
+ }
+
+ return ODP_TEST_ACTIVE;
+}
+
static int system_check_odp_cpu_hz(void)
{
if (odp_cpu_hz() == 0) {
@@ -429,10 +443,14 @@ odp_testinfo_t system_suite[] = {
system_check_odp_cpu_hz_max),
ODP_TEST_INFO_CONDITIONAL(system_test_odp_cpu_hz_max_id,
system_check_odp_cpu_hz_max_id),
- ODP_TEST_INFO(system_test_odp_cpu_cycles),
- ODP_TEST_INFO(system_test_odp_cpu_cycles_max),
- ODP_TEST_INFO(system_test_odp_cpu_cycles_resolution),
- ODP_TEST_INFO(system_test_odp_cpu_cycles_diff),
+ ODP_TEST_INFO_CONDITIONAL(system_test_cpu_cycles,
+ system_check_cycle_counter),
+ ODP_TEST_INFO_CONDITIONAL(system_test_cpu_cycles_max,
+ system_check_cycle_counter),
+ ODP_TEST_INFO_CONDITIONAL(system_test_cpu_cycles_resolution,
+ system_check_cycle_counter),
+ ODP_TEST_INFO_CONDITIONAL(system_test_cpu_cycles_diff,
+ system_check_cycle_counter),
ODP_TEST_INFO(system_test_info_print),
ODP_TEST_INFO_NULL,
};
diff --git a/test/validation/api/timer/timer.c b/test/validation/api/timer/timer.c
index c9a70205e..b2c4b8964 100644
--- a/test/validation/api/timer/timer.c
+++ b/test/validation/api/timer/timer.c
@@ -553,6 +553,9 @@ static void timer_test_queue_type(odp_queue_type_t queue_type, int priv)
uint64_t diff_period, diff_test;
odp_pool_param_t params;
odp_time_t t0, t1, t2;
+ odp_timer_t timer[num];
+ uint64_t target_tick[num];
+ void *user_ptr[num];
odp_pool_param_init(&params);
params.type = ODP_POOL_TIMEOUT;
@@ -610,11 +613,14 @@ static void timer_test_queue_type(odp_queue_type_t queue_type, int priv)
ev = odp_timeout_to_event(tmo);
CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
- tim = odp_timer_alloc(tp, queue, USER_PTR);
+ user_ptr[i] = (void *)(uintptr_t)i;
+ tim = odp_timer_alloc(tp, queue, user_ptr[i]);
CU_ASSERT_FATAL(tim != ODP_TIMER_INVALID);
+ timer[i] = tim;
tick = tick_base + ((i + 1) * period_tick);
ret = odp_timer_set_abs(tim, tick, &ev);
+ target_tick[i] = tick;
ODPH_DBG("abs timer tick %" PRIu64 "\n", tick);
if (ret == ODP_TIMER_TOOEARLY)
@@ -645,11 +651,16 @@ static void timer_test_queue_type(odp_queue_type_t queue_type, int priv)
tim = odp_timeout_timer(tmo);
tick = odp_timeout_tick(tmo);
+ CU_ASSERT(timer[num_tmo] == tim);
+ CU_ASSERT(target_tick[num_tmo] == tick);
+ CU_ASSERT(user_ptr[num_tmo] ==
+ odp_timeout_user_ptr(tmo));
CU_ASSERT(diff_period > (period_ns - (5 * res_ns)));
CU_ASSERT(diff_period < (period_ns + (5 * res_ns)));
- ODPH_DBG("timeout tick %" PRIu64 ", timeout period "
- "%" PRIu64 "\n", tick, diff_period);
+ ODPH_DBG("timeout tick %" PRIu64 ", target tick "
+ "%" PRIu64 ", timeout period %" PRIu64 "\n",
+ tick, target_tick[num_tmo], diff_period);
odp_timeout_free(tmo);
CU_ASSERT(odp_timer_free(tim) == ODP_EVENT_INVALID);