aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.travis.yml6
-rw-r--r--CHANGELOG178
-rw-r--r--configure.ac10
-rw-r--r--doc/Makefile.am6
-rw-r--r--doc/driver-api-guide/.gitignore1
-rw-r--r--doc/driver-api-guide/Doxyfile14
-rw-r--r--doc/driver-api-guide/Makefile.am5
-rw-r--r--doc/driver-api-guide/odp.dox20
-rw-r--r--doc/m4/configure.m43
-rw-r--r--doc/users-guide/Makefile.am1
-rw-r--r--doc/users-guide/users-guide-crypto.adoc122
-rwxr-xr-xdoc/users-guide/users-guide.adoc148
-rw-r--r--example/generator/odp_generator.c4
-rw-r--r--example/ipsec/odp_ipsec.c32
-rw-r--r--example/ipsec/odp_ipsec_cache.c4
-rw-r--r--example/l3fwd/odp_l3fwd.c17
-rw-r--r--example/switch/odp_switch.c5
-rw-r--r--helper/Makefile.am8
-rw-r--r--helper/cuckootable.c757
-rw-r--r--helper/include/odp/helper/ip.h11
-rw-r--r--helper/include/odp/helper/tcp.h4
-rw-r--r--helper/iplookuptable.c937
-rw-r--r--helper/linux.c4
-rw-r--r--helper/odph_cuckootable.h82
-rw-r--r--helper/odph_iplookuptable.h58
-rw-r--r--helper/test/.gitignore2
-rw-r--r--helper/test/Makefile.am8
-rw-r--r--helper/test/cuckootable.c573
-rw-r--r--helper/test/iplookuptable.c174
-rw-r--r--include/odp/api/spec/align.h4
-rw-r--r--include/odp/api/spec/atomic.h5
-rw-r--r--include/odp/api/spec/barrier.h6
-rw-r--r--include/odp/api/spec/buffer.h4
-rw-r--r--include/odp/api/spec/byteorder.h7
-rw-r--r--include/odp/api/spec/classification.h16
-rw-r--r--include/odp/api/spec/compiler.h4
-rw-r--r--include/odp/api/spec/cpu.h4
-rw-r--r--include/odp/api/spec/cpumask.h4
-rw-r--r--include/odp/api/spec/crypto.h445
-rw-r--r--include/odp/api/spec/debug.h4
-rw-r--r--include/odp/api/spec/errno.h4
-rw-r--r--include/odp/api/spec/event.h4
-rw-r--r--include/odp/api/spec/hash.h4
-rw-r--r--include/odp/api/spec/hints.h4
-rw-r--r--include/odp/api/spec/init.h4
-rw-r--r--include/odp/api/spec/packet.h19
-rw-r--r--include/odp/api/spec/packet_flags.h4
-rw-r--r--include/odp/api/spec/packet_io.h21
-rw-r--r--include/odp/api/spec/packet_io_stats.h4
-rw-r--r--include/odp/api/spec/pool.h33
-rw-r--r--include/odp/api/spec/queue.h15
-rw-r--r--include/odp/api/spec/random.h76
-rw-r--r--include/odp/api/spec/rwlock.h4
-rw-r--r--include/odp/api/spec/rwlock_recursive.h4
-rw-r--r--include/odp/api/spec/schedule.h18
-rw-r--r--include/odp/api/spec/schedule_types.h4
-rw-r--r--include/odp/api/spec/shared_memory.h56
-rw-r--r--include/odp/api/spec/spinlock.h11
-rw-r--r--include/odp/api/spec/spinlock_recursive.h4
-rw-r--r--include/odp/api/spec/std_clib.h4
-rw-r--r--include/odp/api/spec/std_types.h4
-rw-r--r--include/odp/api/spec/sync.h5
-rw-r--r--include/odp/api/spec/system_info.h4
-rw-r--r--include/odp/api/spec/thread.h4
-rw-r--r--include/odp/api/spec/thrmask.h4
-rw-r--r--include/odp/api/spec/ticketlock.h4
-rw-r--r--include/odp/api/spec/time.h4
-rw-r--r--include/odp/api/spec/timer.h15
-rw-r--r--include/odp/api/spec/traffic_mngr.h4
-rw-r--r--include/odp/api/spec/version.h.in4
-rw-r--r--platform/linux-generic/Makefile.am21
-rw-r--r--platform/linux-generic/_fdserver.c673
-rw-r--r--platform/linux-generic/_ishm.c1716
-rw-r--r--platform/linux-generic/_ishmphy.c185
-rw-r--r--[l---------]platform/linux-generic/arch/arm/odp/api/cpu_arch.h25
-rw-r--r--[l---------]platform/linux-generic/arch/arm/odp_cpu_arch.c49
-rw-r--r--[l---------]platform/linux-generic/arch/arm/odp_sysinfo_parse.c28
-rw-r--r--[l---------]platform/linux-generic/arch/powerpc/odp_cpu_arch.c49
-rw-r--r--platform/linux-generic/include/_fdserver_internal.h39
-rw-r--r--platform/linux-generic/include/_ishm_internal.h52
-rw-r--r--platform/linux-generic/include/_ishmphy_internal.h25
-rw-r--r--platform/linux-generic/include/ishmphy_internal.h24
-rw-r--r--platform/linux-generic/include/odp/api/plat/atomic_types.h1
-rw-r--r--platform/linux-generic/include/odp/api/plat/barrier_types.h1
-rw-r--r--platform/linux-generic/include/odp/api/plat/byteorder_types.h8
-rw-r--r--platform/linux-generic/include/odp/api/plat/packet_types.h6
-rw-r--r--platform/linux-generic/include/odp/api/plat/pool_types.h8
-rw-r--r--platform/linux-generic/include/odp/api/plat/shared_memory_types.h2
-rw-r--r--platform/linux-generic/include/odp/api/plat/timer_types.h2
-rw-r--r--platform/linux-generic/include/odp/visibility_begin.h (renamed from platform/linux-generic/include/odp/api/visibility_begin.h)0
-rw-r--r--platform/linux-generic/include/odp/visibility_end.h (renamed from platform/linux-generic/include/odp/api/visibility_end.h)0
-rw-r--r--platform/linux-generic/include/odp_align_internal.h34
-rw-r--r--platform/linux-generic/include/odp_buffer_inlines.h167
-rw-r--r--platform/linux-generic/include/odp_buffer_internal.h189
-rw-r--r--platform/linux-generic/include/odp_classification_datamodel.h2
-rw-r--r--platform/linux-generic/include/odp_config_internal.h70
-rw-r--r--platform/linux-generic/include/odp_crypto_internal.h21
-rw-r--r--platform/linux-generic/include/odp_internal.h23
-rw-r--r--platform/linux-generic/include/odp_packet_internal.h121
-rw-r--r--platform/linux-generic/include/odp_packet_io_internal.h2
-rw-r--r--platform/linux-generic/include/odp_packet_io_ipc_internal.h27
-rw-r--r--platform/linux-generic/include/odp_packet_io_queue.h5
-rw-r--r--platform/linux-generic/include/odp_pool_internal.h291
-rw-r--r--platform/linux-generic/include/odp_queue_internal.h33
-rw-r--r--platform/linux-generic/include/odp_ring_internal.h176
-rw-r--r--platform/linux-generic/include/odp_schedule_if.h15
-rw-r--r--platform/linux-generic/include/odp_schedule_internal.h50
-rw-r--r--platform/linux-generic/include/odp_schedule_ordered_internal.h25
-rw-r--r--platform/linux-generic/include/odp_shm_internal.h4
-rw-r--r--platform/linux-generic/include/odp_timer_internal.h4
-rw-r--r--platform/linux-generic/include/protocols/tcp.h4
-rw-r--r--platform/linux-generic/odp_barrier.c2
-rw-r--r--platform/linux-generic/odp_buffer.c22
-rw-r--r--platform/linux-generic/odp_classification.c36
-rw-r--r--platform/linux-generic/odp_crypto.c582
-rw-r--r--platform/linux-generic/odp_init.c102
-rw-r--r--platform/linux-generic/odp_packet.c1120
-rw-r--r--platform/linux-generic/odp_packet_io.c19
-rw-r--r--platform/linux-generic/odp_pool.c1499
-rw-r--r--platform/linux-generic/odp_queue.c91
-rw-r--r--platform/linux-generic/odp_schedule.c404
-rw-r--r--platform/linux-generic/odp_schedule_ordered.c818
-rw-r--r--platform/linux-generic/odp_schedule_sp.c301
-rw-r--r--platform/linux-generic/odp_shared_memory.c411
-rw-r--r--platform/linux-generic/odp_spinlock.c4
-rw-r--r--platform/linux-generic/odp_system_info.c181
-rw-r--r--platform/linux-generic/odp_timer.c17
-rw-r--r--platform/linux-generic/odp_traffic_mngr.c28
-rw-r--r--platform/linux-generic/pktio/dpdk.c10
-rw-r--r--platform/linux-generic/pktio/ipc.c541
-rw-r--r--platform/linux-generic/pktio/loop.c4
-rw-r--r--platform/linux-generic/pktio/netmap.c14
-rw-r--r--platform/linux-generic/pktio/ring.c2
-rw-r--r--platform/linux-generic/pktio/socket.c42
-rw-r--r--platform/linux-generic/pktio/socket_mmap.c10
-rwxr-xr-xscripts/build-pktio-dpdk4
-rw-r--r--test/common_plat/performance/odp_crypto.c56
-rw-r--r--test/common_plat/performance/odp_l2fwd.c7
-rw-r--r--test/common_plat/performance/odp_pktio_perf.c10
-rw-r--r--test/common_plat/performance/odp_scheduling.c8
-rw-r--r--test/common_plat/validation/api/atomic/atomic.c24
-rw-r--r--test/common_plat/validation/api/atomic/atomic.h1
-rw-r--r--test/common_plat/validation/api/barrier/barrier.c24
-rw-r--r--test/common_plat/validation/api/barrier/barrier.h1
-rw-r--r--test/common_plat/validation/api/buffer/buffer.c113
-rw-r--r--test/common_plat/validation/api/classification/odp_classification_basic.c4
-rw-r--r--test/common_plat/validation/api/crypto/crypto.c26
-rw-r--r--test/common_plat/validation/api/crypto/odp_crypto_test_inp.c120
-rw-r--r--test/common_plat/validation/api/lock/lock.c24
-rw-r--r--test/common_plat/validation/api/lock/lock.h1
-rw-r--r--test/common_plat/validation/api/packet/packet.c716
-rw-r--r--test/common_plat/validation/api/packet/packet.h5
-rw-r--r--test/common_plat/validation/api/pktio/pktio.c56
-rw-r--r--test/common_plat/validation/api/pool/pool.c7
-rw-r--r--test/common_plat/validation/api/queue/queue.c10
-rw-r--r--test/common_plat/validation/api/random/random.c48
-rw-r--r--test/common_plat/validation/api/random/random.h2
-rw-r--r--test/common_plat/validation/api/scheduler/scheduler.c18
-rw-r--r--test/common_plat/validation/api/shmem/shmem.c696
-rw-r--r--test/common_plat/validation/api/shmem/shmem.h5
-rw-r--r--test/common_plat/validation/api/timer/timer.c2
-rw-r--r--test/common_plat/validation/api/traffic_mngr/traffic_mngr.c4
-rw-r--r--test/linux-generic/pktio_ipc/ipc_common.c41
-rw-r--r--test/linux-generic/pktio_ipc/ipc_common.h15
-rw-r--r--test/linux-generic/pktio_ipc/pktio_ipc1.c57
-rw-r--r--test/linux-generic/pktio_ipc/pktio_ipc2.c59
-rwxr-xr-xtest/linux-generic/pktio_ipc/pktio_ipc_run.sh48
-rw-r--r--test/linux-generic/validation/api/shmem/.gitignore3
-rw-r--r--test/linux-generic/validation/api/shmem/Makefile.am22
-rw-r--r--test/linux-generic/validation/api/shmem/shmem_linux.c230
-rw-r--r--test/linux-generic/validation/api/shmem/shmem_odp1.c (renamed from test/linux-generic/validation/api/shmem/shmem_odp.c)10
-rw-r--r--test/linux-generic/validation/api/shmem/shmem_odp1.h (renamed from test/linux-generic/validation/api/shmem/shmem_odp.h)0
-rw-r--r--test/linux-generic/validation/api/shmem/shmem_odp2.c103
-rw-r--r--test/linux-generic/validation/api/shmem/shmem_odp2.h7
174 files changed, 12471 insertions, 4599 deletions
diff --git a/.travis.yml b/.travis.yml
index adf33076b..03e61b185 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -36,14 +36,14 @@ before_install:
- export LD_LIBRARY_PATH="/usr/local/lib:$LD_LIBRARY_PATH"
# DPDK pktio
- TARGET=${TARGET:-"x86_64-native-linuxapp-gcc"}
- - git clone http://dpdk.org/git/dpdk dpdk
+ - git -c advice.detachedHead=false clone -q --depth=1 --single-branch --branch=v16.07 http://dpdk.org/git/dpdk dpdk
- pushd dpdk
- - git checkout -b bv16.07 v16.07
+ - git log --oneline --decorate
- make config T=${TARGET} O=${TARGET}
- pushd ${TARGET}
- sed -ri 's,(CONFIG_RTE_LIBRTE_PMD_PCAP=).*,\1y,' .config
- popd
- - make install T=${TARGET} EXTRA_CFLAGS="-fPIC"
+ - make install T=${TARGET} EXTRA_CFLAGS="-fPIC" > /dev/null
- popd
script:
diff --git a/CHANGELOG b/CHANGELOG
index 17afe44f9..72bf22533 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,3 +1,181 @@
+== OpenDataPlane (1.13.0.0)
+
+=== New Features
+
+==== APIs
+ODP v1.13.0.0 represents the initial preview of the Tiger Moth release series
+and as such introduces new APIs and extensions that will be built on as this
+release matures.
+
+===== Crypto Parameter Normalization
+Many ODP APIs take parameter structs of type `odp_xxx_param_t`. The crypto APIs,
+for historical reasons, failed to follow this convention, using instead structs
+of type `odp_crypto_params_t`, etc. These pluralized names are now deprecated
+and their singular forms are introduced as the preferred names for these
+structs. The old names are retained (for now) to ease migration, however
+applications should convert to the new forms at their convenience as these
+deprecated forms will be removed in the final Tiger Moth release.
+
+The changes introduced for this include:
+
+* `odp_crypto_op_params_t` => `odp_crypto_op_param_t`
+* `odp_crypto_session_params_t` => `odp_crypto_session_param_t`
+
+===== Crypto Decouple Key Length from Algorithm Specification
+To provide a more flexible programming for handling all possible
+key/digest/iv length combinations, the enums used for crypto specification
+are split to decouple lengths from algorithm names. The only exception
+is the SHA-2 family of hash routines, which have well-established naming
+conventions that incorporate digest lengths (SHA-224, SHA-256, etc.)
+
+Changes included with this restructure include:
+
+* The `odp_crypto_capability_t` structure returned by the
+`odp_crypto_capability()` API contains streamlined `odp_crypto_cipher_algos_t`
+and `odp_crypto_auth_algos_t` substructures.
+* A new `odp_crypto_cipher_capability()` API is added to return detailed
+information about available cipher capabilities.
+* A new `odp_crypto_auth_capability()` API is added to return detailed
+information about available authentication capabilities.
+
+===== `odp_crypto_session_param_init()` API
+For completeness the `odp_crypto_session_param_init()` API is provided to
+enable platform-independent initialization of the `odp_crypto_session_param_t`
+structure used as input to `odp_crypto_session_create()`.
+
+===== Bitfield and Byteorder Cleanup
+The `ODP_BITFIELD_ORDER` define is added to the ODP specification to parallel
+the existing `ODP_BYTEFIELD_ORDER` define. This will be set to the value
+`ODP_BIG_ENDIAN_BITFIELD` or `ODP_LITTLE_ENDIAN_BITFIELD`. This also addresses
+https://bugs.linaro.org/show_bug.cgi?id=2402[Bug 2402], however since fixing
+this required a small API change this was deferred until an API release
+boundary.
+
+===== Improved Name Argument Definitions in `odp_xxx_create()` Functions
+The use of name arguments to the various resource creation APIs has
+been standardized and documentation improved to clarify that unique
+names are not required and that these may be specified as NULL for
+anonymous resource creation. When non-unique resource names are used, it is
+unspecified which of these are returned by a corresponding lookup API.
+
+===== Pool Parameters for Packet Pools
+The `odp_pool_param_t` structure adds the new field `max_len` to be used in
+packet pools to advise the implementation of the maximum sized packet that
+the application will allocate with a single `odp_packet_alloc()` call. This
+may enable storage pool optimization.
+
+===== Packet Clarifications
+API documentation for `odp_packet_concat()` and `odp_packet_copy_from_pkt()`
+is clarified to specify that the source and destination packets supplied to
+these APIs should not refer to the same packet.
+
+===== Packet Allocation Length Clarification
+API documentation for `odp_packet_alloc()` is clarified to specify that
+the supplied length for requested packet allocation should be greater
+than zero.
+
+===== Random API Changes
+The APIs provided for working with random data have been revised. The third
+parameter to `odp_random_data()` is now of type `odp_random_kind_t`, which is
+an `enum` that is used to specify the quality of random data required. The
+kinds of random data defined are:
+
+`ODP_RANDOM_BASIC`::
+No specific quality guarantees. This is assumed to be pseudo-random data
+generated by software where speed of generation is more important than the
+quality of the results. This is the lowest kind of random.
+
+`ODP_RANDOM_CRYPTO`::
+Random data suitable for use in cryptographic operations.
+
+`ODP_RANDOM_TRUE`::
+True random data generated from a hardware entropy source. This is the
+highest kind of random.
+
+The `odp_random_max_kind()` API is provided that returns the highest kind of
+data available on this implementation. Requests for higher kinds than can be
+supplied will fail.
+
+For testing purposes it is often desirable to generate "random" sequences that
+are repeatable. To support this use case, the `odp_random_test_data()` API is
+introduced. This always returns BASIC random data but uses a user-supplied
+64-byte seed parameter that is updated for each call and can be used to
+repeat the same sequence as needed.
+
+===== Shared Memory Improvements
+The `odp_shm_reserve()` API adds two new additional flags to support external
+memory.
+
+* `ODP_SHM_SINGLE_VA` guarantees that all ODP threads sharing this memory
+will see the block at the same virtual address regardless of whether threads
+are implemented as pthreads or processes and when `fork()` calls are made to
+create them.
+
+* `ODP_SHM_EXPORT` allows the memory block to be visible to other ODP
+instances. Other ODP instances can retrieve this block via the new
+`odp_shm_import()` API.
+
+===== Classification Clarifications
+The relationship between classification and queue configuration in the
+`odp_pktin_queue_param_t` structure is clarified to emphasize that
+classification subsumes queue-based I/O processing. This is an API
+documentation change only.
+
+=== Helpers
+New helper APIs are introduced for IP table lookup support for longest prefix
+matching as well as cuckoo hash tables. These are designed to provide analogs
+to functions available in DPDK to assist applications being ported to ODP.
+
+=== Performance Improvements
+The odp-linux reference implementation includes a number of improvements to
+make it more suitable for production use on platforms that rely on software
+implementations of key ODP APIs.
+
+==== Ring-based Pool Implementation
+Storage pools used for buffers and packets are now implemented via lockless
+ring structures that support burst mode read/writes to local caches for
+significantly improved multi-core scalability.
+
+==== Buffer/Packet Header Optimizations
+The layout of the structs used to support buffers and packets has been
+optimized to reduce cache footprint and minimize path lengths in packet
+manipulation.
+
+==== Ordered Queue Improvements
+The implementation of ordered queues has been streamlined and made more
+scalable in multi-core systems.
+
+==== Packet Segmentation Improvements
+The more advanced segmentation capabilities present in the new ODP packet
+implementation are used to streamline the implementation of APIs like
+`odp_packet_concat()` and the head/tail extend/trunc APIs.
+
+=== Bug Fixes
+
+==== https://bugs.linaro.org/show_bug.cgi?id=2405[Bug 2405]
+A number of "todos" were removed from the packet validation test suite.
+
+==== https://bugs.linaro.org/show_bug.cgi?id=2472[Bug 2472]
+The CPU affinity is now correctly read from the cpuset.
+
+==== https://bugs.linaro.org/show_bug.cgi?id=2496[Bug 2496]
+The PktIO validation test no longer uses invalid MAC addresses.
+
+==== https://bugs.linaro.org/show_bug.cgi?id=2512[Bug 2512]
+The TCP checksum is now properly calculated for generated packets.
+
+==== https://bugs.linaro.org/show_bug.cgi?id=2798[Bug 2798]
+The odp-linux reference implementation makes use of the OpenSSL library to
+support the `odp_random_xxx()` APIs and some crypto operations. To support
+OpenSSL versions prior to 1.1.0, which are not thread safe, support is added
+for OpenSSL locking callbacks that use ODP ticketlocks to provide OpenSSL thread
+safety.
+
+=== Known Issues
+
+==== https://bugs.linaro.org/show_bug.cgi?id=2812[Bug 2812]
+Make check fails on a single core VM in the process mode helper test.
+
== OpenDataPlane (1.12.0.0)
=== New Features
diff --git a/configure.ac b/configure.ac
index 0cf4384fe..9bd6aa1b0 100644
--- a/configure.ac
+++ b/configure.ac
@@ -3,7 +3,7 @@ AC_PREREQ([2.5])
# Set correct API version
##########################################################################
m4_define([odpapi_generation_version], [1])
-m4_define([odpapi_major_version], [12])
+m4_define([odpapi_major_version], [13])
m4_define([odpapi_minor_version], [0])
m4_define([odpapi_point_version], [0])
m4_define([odpapi_version],
@@ -38,10 +38,10 @@ AM_SILENT_RULES([yes])
# 3. if interfaces were removed, then use C+1:0:0
##########################################################################
-ODP_LIBSO_VERSION=112:0:0
+ODP_LIBSO_VERSION=113:0:0
AC_SUBST(ODP_LIBSO_VERSION)
-ODPHELPER_LIBSO_VERSION=110:1:1
+ODPHELPER_LIBSO_VERSION=111:0:2
AC_SUBST(ODPHELPER_LIBSO_VERSION)
# Checks for programs.
@@ -221,7 +221,9 @@ DX_INIT_DOXYGEN($PACKAGE_NAME,
${srcdir}/doc/helper-guide/Doxyfile,
${builddir}/doc/helper-guide/output,
${srcdir}/doc/platform-api-guide/Doxyfile,
- ${builddir}/doc/platform-api-guide/output)
+ ${builddir}/doc/platform-api-guide/output,
+ ${srcdir}/doc/driver-api-guide/Doxyfile,
+ ${builddir}/doc/driver-api-guide/output)
##########################################################################
# Enable/disable ODP_DEBUG_PRINT
diff --git a/doc/Makefile.am b/doc/Makefile.am
index d49d84b6a..59d6a6c64 100644
--- a/doc/Makefile.am
+++ b/doc/Makefile.am
@@ -1,4 +1,8 @@
-SUBDIRS = application-api-guide helper-guide platform-api-guide
+SUBDIRS = \
+ application-api-guide \
+ helper-guide \
+ platform-api-guide \
+ driver-api-guide
if user_guide
SUBDIRS += implementers-guide users-guide process-guide
diff --git a/doc/driver-api-guide/.gitignore b/doc/driver-api-guide/.gitignore
new file mode 100644
index 000000000..53752db25
--- /dev/null
+++ b/doc/driver-api-guide/.gitignore
@@ -0,0 +1 @@
+output
diff --git a/doc/driver-api-guide/Doxyfile b/doc/driver-api-guide/Doxyfile
new file mode 100644
index 000000000..680d1d428
--- /dev/null
+++ b/doc/driver-api-guide/Doxyfile
@@ -0,0 +1,14 @@
+@INCLUDE = $(SRCDIR)/doc/Doxyfile_common
+
+PROJECT_NAME = "Driver Interface (drv) Reference Manual"
+PROJECT_NUMBER = $(VERSION)
+PROJECT_LOGO = $(SRCDIR)/doc/images/ODP-Logo-HQ.svg
+INPUT = $(SRCDIR)/doc/driver-api-guide \
+ $(SRCDIR)/include/odp/drv \
+ $(SRCDIR)/include/odp_drv.h
+EXCLUDE_PATTERNS = drv* odp_drv.h
+EXAMPLE_PATH = $(SRCDIR)/example $(SRCDIR)
+PREDEFINED = __GNUC__ \
+ "ODP_HANDLE_T(type)=odp_handle_t type" \
+ odpdrv_bool_t=int
+WARNINGS = NO
diff --git a/doc/driver-api-guide/Makefile.am b/doc/driver-api-guide/Makefile.am
new file mode 100644
index 000000000..4fc4755d1
--- /dev/null
+++ b/doc/driver-api-guide/Makefile.am
@@ -0,0 +1,5 @@
+EXTRA_DIST = \
+ odp.dox
+
+clean-local:
+ rm -rf output
diff --git a/doc/driver-api-guide/odp.dox b/doc/driver-api-guide/odp.dox
new file mode 100644
index 000000000..687a79e04
--- /dev/null
+++ b/doc/driver-api-guide/odp.dox
@@ -0,0 +1,20 @@
+/* Copyright (c) 2016, Linaro Limited
+ * All rights reserved
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @mainpage
+ *
+ * @section sec_1 Introduction
+ *
+ * OpenDataPlane (ODP) provides a driver interface
+
+ *
+ * @section contact Contact Details
+ * - The main web site is http://www.opendataplane.org/
+ * - The git repo is https://git.linaro.org/lng/odp.git
+ * - Bug tracking is https://bugs.linaro.org/buglist.cgi?product=OpenDataPlane
+ *
+ */
diff --git a/doc/m4/configure.m4 b/doc/m4/configure.m4
index ed9451dff..6e02f7617 100644
--- a/doc/m4/configure.m4
+++ b/doc/m4/configure.m4
@@ -42,4 +42,5 @@ AC_CONFIG_FILES([doc/application-api-guide/Makefile
doc/Makefile
doc/platform-api-guide/Makefile
doc/process-guide/Makefile
- doc/users-guide/Makefile])
+ doc/users-guide/Makefile
+ doc/driver-api-guide/Makefile])
diff --git a/doc/users-guide/Makefile.am b/doc/users-guide/Makefile.am
index a01c717c5..01b4df363 100644
--- a/doc/users-guide/Makefile.am
+++ b/doc/users-guide/Makefile.am
@@ -2,6 +2,7 @@ include ../Makefile.inc
SRC = $(top_srcdir)/doc/users-guide/users-guide.adoc \
$(top_srcdir)/doc/users-guide/users-guide-cls.adoc \
+ $(top_srcdir)/doc/users-guide/users-guide-crypto.adoc \
$(top_srcdir)/doc/users-guide/users-guide-packet.adoc \
$(top_srcdir)/doc/users-guide/users-guide-pktio.adoc \
$(top_srcdir)/doc/users-guide/users-guide-timer.adoc \
diff --git a/doc/users-guide/users-guide-crypto.adoc b/doc/users-guide/users-guide-crypto.adoc
new file mode 100644
index 000000000..c18e369bb
--- /dev/null
+++ b/doc/users-guide/users-guide-crypto.adoc
@@ -0,0 +1,122 @@
+== Cryptographic services
+
+ODP provides APIs to perform cryptographic operations required by various
+communication protocols (_e.g.,_ IPsec). ODP cryptographic APIs are session
+based.
+
+ODP provides APIs for following cryptographic services:
+
+* Ciphering
+* Authentication/data integrity via Keyed-Hashing (HMAC)
+* Random number generation
+* Crypto capability inquiries
+
+=== Crypto Sessions
+
+To apply a cryptographic operation to a packet a session must be created. All
+packets processed by a session share the parameters that define the session.
+
+ODP supports synchronous and asynchronous crypto sessions. For asynchronous
+sessions, the output of crypto operation is posted in a queue defined as
+the completion queue in its session parameters.
+
+ODP crypto APIs support chained operation sessions in which hashing and
+ciphering both can be achieved using a single session and operation call. The
+order of cipher and hashing can be controlled by the `auth_cipher_text`
+session parameter.
+
+Other Session parameters include algorithms, keys, initialization vector
+(optional), encode or decode, output queue for async mode and output packet
+pool for allocation of an output packet if required.
+
+=== Crypto operations
+
+After session creation, a cryptographic operation can be applied to a packet
+using the `odp_crypto_operation()` API. Applications may indicate a preference
+for synchronous or asynchronous processing in the session's `pref_mode`
+parameter. However crypto operations may complete synchronously even if an
+asynchronous preference is indicated, and applications must examine the
+`posted` output parameter from `odp_crypto_operation()` to determine whether
+the operation has completed or if an `ODP_EVENT_CRYPTO_COMPL` notification is
+expected. In the case of an async operation, the `posted` output parameter
+will be set to true.
+
+
+The operation arguments specify for each packet the areas that are to be
+encrypted or decrypted and authenticated. Also, there is an option of overriding
+the initialization vector specified in session parameters.
+
+An operation can be executed in in-place, out-of-place or new buffer mode.
+In in-place mode output packet is same as the input packet.
+In case of out-of-place mode output packet is different from input packet as
+specified by the application, while in new buffer mode implementation allocates
+a new output buffer from the session’s output pool.
+
+The application can also specify a context associated with a given operation
+that will be retained during async operation and can be retrieved via the
+completion event.
+
+Results of an asynchronous session will be posted as completion events to the
+session’s completion queue, which can be accessed directly or via the ODP
+scheduler. The completion event contains the status of the operation and the
+result. The application has the responsibility to free the completion event.
+
+=== Random number Generation
+
+ODP provides two APIs to generate various kinds of random data bytes. Random
+data is characterized by _kind_, which specifies the "quality" of the
+randomness required. ODP supports three kinds of random data:
+
+ODP_RANDOM_BASIC:: No specific requirement other than the data appear to be
+uniformly distributed. Suitable for load-balancing or other non-cryptographic
+use.
+
+ODP_RANDOM_CRYPTO:: Data suitable for cryptographic use. This is a more
+stringent requirement that the data pass tests for statistical randomness.
+
+ODP_RANDOM_TRUE:: Data generated from a hardware entropy source rather than
+any software generated pseudo-random data. May not be available on all
+platforms.
+
+These form a hierarchy with BASIC being the lowest kind of random and TRUE
+being the highest. The main API for accessing random data is:
+
+[source,c]
+-----
+int32_t odp_random_data(uint8_t *buf, uint32_t len, odp_random_kind_t kind);
+-----
+
+The expectation is that lesser-quality random is easier and faster to generate
+while higher-quality random may take more time. Implementations are always free
+to substitute a higher kind of random than the one requested if they are able
+to do so more efficiently, however calls must return a failure indicator
+(rc < 0) if a higher kind of data is requested than the implementation can
+provide. This is most likely the case for ODP_RANDOM_TRUE since not all
+platforms have access to a true hardware random number generator.
+
+The `odp_random_max_kind()` API returns the highest kind of random data
+available on this implementation.
+
+For testing purposes it is often desirable to generate repeatable sequences
+of "random" data. To address this need ODP provides the additional API:
+
+[source,c]
+-----
+int32_t odp_random_test_data(uint8_t *buf, uint32_t len, uint64_t *seed);
+-----
+
+This operates the same as `odp_random_data()` except that it always returns
+data of kind `ODP_RANDOM_BASIC` and an additional thread-local `seed`
+parameter is provided that specifies a seed value to use in generating the
+data. This value is updated on each call, so repeated calls with the same
+variable will generate a sequence of random data starting from the initial
+specified seed. If another sequence of calls is made starting with the same
+initial seed value, then `odp_random_test_data()` will return the same
+sequence of data bytes.
+
+=== Capability inquiries
+
+ODP provides the API `odp_crypto_capability()` to inquire the implementation’s
+crypto capabilities. This interface returns the maximum number of crypto
+sessions supported as well as bitmasks for supported algorithms and hardware
+backed algorithms. \ No newline at end of file
diff --git a/doc/users-guide/users-guide.adoc b/doc/users-guide/users-guide.adoc
index 62f58336d..41c57d1ca 100755
--- a/doc/users-guide/users-guide.adoc
+++ b/doc/users-guide/users-guide.adoc
@@ -594,7 +594,9 @@ resource.
Blocks of shared memory can be created using the `odp_shm_reserve()` API
call. The call expects a shared memory block name, a block size, an alignment
requirement, and optional flags as parameters. It returns a `odp_shm_t`
-handle. The size and alignment requirement are given in bytes.
+handle. The size and alignment requirement are given in bytes. The provided
+name does not have to be unique, i.e. a given name can be used multiple times,
+when reserving different blocks.
.creating a block of shared memory
[source,c]
@@ -649,13 +651,19 @@ mapping the shared memory block. There is no fragmentation.
By default ODP threads are assumed to behave as cache coherent systems:
Any change performed on a shared memory block is guaranteed to eventually
become visible to other ODP threads sharing this memory block.
-(this behaviour may be altered by flags to `odp_shm_reserve()` in the future).
Nevertheless, there is no implicit memory barrier associated with any action
on shared memories: *When* a change performed by an ODP thread becomes visible
to another ODP thread is not known: An application using shared memory
blocks has to use some memory barrier provided by ODP to guarantee shared data
validity between ODP threads.
+The virtual address at which a given memory block is mapped in different ODP
+threads may differ from ODP thread to ODP thread, if ODP threads have separate
+virtual spaces (for instance if ODP threads are implemented as processes).
+However, the ODP_SHM_SINGLE_VA flag can be used at `odp_shm_reserve()` time
+to guarantee address uniqueness in all ODP threads, regardless of their
+implementation or creation time.
+
=== Lookup by name
As mentioned, shared memory handles can be sent from ODP threads to ODP
threads using any IPC mechanism, and then the block address retrieved.
@@ -664,7 +672,9 @@ block is to use the `odp_shm_lookup()` API function call.
This nevertheless requires the calling ODP thread to provide the name of the
shared memory block:
`odp_shm_lookup()` will return `ODP_SHM_INVALID` if no shared memory block
-with the provided name is known by ODP.
+with the provided name is known by ODP. When multiple blocks were reserved
+using the same name, the lookup function will return the handle of any
+of these blocks.
.retrieving a block handle and address from another ODP task
[source,c]
@@ -698,9 +708,49 @@ if (odp_shm_free(shm) != 0) {
}
----
+=== Sharing memory with the external world
+ODP provides ways of sharing memory with entities located outside
+ODP instances:
+
+Sharing a block of memory with an external (non ODP) thread is achieved
+by setting the ODP_SHM_PROC flag at `odp_shm_reserve()` time.
+How the memory block is retrieved on the Operating System side is
+implementation and Operating System dependent.
+
+Sharing a block of memory with an external ODP instance (running
+on the same Operating System) is achieved
+by setting the ODP_SHM_EXPORT flag at `odp_shm_reserve()` time.
+A block of memory created with this flag in an ODP instance A, can be "mapped"
+into a remote ODP instance B (on the same OS) by using the
+`odp_shm_import()`, on ODP instance B:
+
+.sharing memory between ODP instances: instance A
+[source,c]
+----
+odp_shm_t shmA;
+shmA = odp_shm_reserve("memoryA", size, 0, ODP_SHM_EXPORT);
+----
+
+.sharing memory between ODP instances: instance B
+[source,c]
+----
+odp_shm_t shmB;
+odp_instance_t odpA;
+
+/* get ODP A instance handle by some OS method */
+odpA = ...
+
+/* get the shared memory exported by A: */
+shmB = odp_shm_import("memoryA", odpA, "memoryB", 0, 0);
+----
+
+Note that the handles shmA and shmB are scoped by each ODP instance
+(you cannot use them outside the ODP instance they belong to).
+Also note that both ODP instances have to call `odp_shm_free()` when done.
+
=== Memory creation flags
The last argument to odp_shm_reserve() is a set of ORed flags.
-Two flags are supported:
+The following flags are supported:
==== ODP_SHM_PROC
When this flag is given, the allocated shared memory will become visible
@@ -710,6 +760,12 @@ will be able to access the memory using native (non ODP) OS calls such as
Each ODP implementation should provide a description on exactly how
this mapping should be done on that specific platform.
+==== ODP_SHM_EXPORT
+When this flag is given, the allocated shared memory will become visible
+to other ODP instances running on the same OS.
+Other ODP instances willing to see this exported memory should use the
+`odp_shm_import()` ODP function.
+
==== ODP_SHM_SW_ONLY
This flag tells ODP that the shared memory will be used by the ODP application
software only: no HW (such as DMA, or other accelerator) will ever
@@ -719,6 +775,18 @@ implementation), except for `odp_shm_lookup()` and `odp_shm_free()`.
ODP implementations may use this flag as a hint for performance optimization,
or may as well ignore this flag.
+==== ODP_SHM_SINGLE_VA
+This flag is used to guarantee the uniqueness of the address at which
+the shared memory is mapped: without this flag, a given memory block may be
+mapped at different virtual addresses (assuming the target has virtual
+addresses) by different ODP threads. This means that the value returned by
+`odp_shm_addr()` would be different in different threads, in this case.
+Setting this flag guarantees that all ODP threads sharing this memory
+block will see it at the same address (`odp_shm_addr()` would return the
+same value on all ODP threads, for a given memory block, in this case).
+Note that ODP implementations may have restrictions on the amount of memory
+which can be allocated with this flag.
+
== Queues
Queues are the fundamental event sequencing mechanism provided by ODP and all
ODP applications make use of them either explicitly or implicitly. Queues are
@@ -950,77 +1018,7 @@ include::users-guide-pktio.adoc[]
include::users-guide-timer.adoc[]
-== Cryptographic services
-
-ODP provides APIs to perform cryptographic operations required by various
-communication protocols (e.g. IPSec). ODP cryptographic APIs are session based.
-
-ODP provides APIs for following cryptographic services:
-
-* Ciphering
-* Authentication/data integrity via Keyed-Hashing (HMAC)
-* Random number generation
-* Crypto capability inquiries
-
-=== Crypto Sessions
-
-To apply a cryptographic operation to a packet a session must be created. All
-packets processed by a session share the parameters that define the session.
-
-ODP supports synchronous and asynchronous crypto sessions. For asynchronous
-sessions, the output of crypto operation is posted in a queue defined as
-the completion queue in its session parameters.
-
-ODP crypto APIs support chained operation sessions in which hashing and ciphering
-both can be achieved using a single session and operation call. The order of
-cipher and hashing can be controlled by the `auth_cipher_text` session parameter.
-
-Other Session parameters include algorithms, keys, initialization vector
-(optional), encode or decode, output queue for async mode and output packet pool
-for allocation of an output packet if required.
-
-=== Crypto operations
-
-After session creation, a cryptographic operation can be applied to a packet
-using the `odp_crypto_operation()` API. Applications may indicate a preference
-for synchronous or asynchronous processing in the session's `pref_mode` parameter.
-However crypto operations may complete synchronously even if an asynchronous
-preference is indicated, and applications must examine the `posted` output
-parameter from `odp_crypto_operation()` to determine whether the operation has
-completed or if an `ODP_EVENT_CRYPTO_COMPL` notification is expected. In the case
-of an async operation, the `posted` output parameter will be set to true.
-
-
-The operation arguments specify for each packet the areas that are to be
-encrypted or decrypted and authenticated. Also, there is an option of overriding
-the initialization vector specified in session parameters.
-
-An operation can be executed in in-place, out-of-place or new buffer mode.
-In in-place mode output packet is same as the input packet.
-In case of out-of-place mode output packet is different from input packet as
-specified by the application, while in new buffer mode implementation allocates
-a new output buffer from the session’s output pool.
-
-The application can also specify a context associated with a given operation that
-will be retained during async operation and can be retrieved via the completion
-event.
-
-Results of an asynchronous session will be posted as completion events to the
-session’s completion queue, which can be accessed directly or via the ODP
-scheduler. The completion event contains the status of the operation and the
-result. The application has the responsibility to free the completion event.
-
-=== Random number Generation
-
-ODP provides an API `odp_random_data()` to generate random data bytes. It has
-an argument to specify whether to use system entropy source for random number
-generation or not.
-
-=== Capability inquiries
-
-ODP provides an API interface `odp_crypto_capability()` to inquire implementation’s
-crypto capabilities. This interface returns a bitmask for supported algorithms
-and hardware backed algorithms.
+include::users-guide-crypto.adoc[]
include::users-guide-tm.adoc[]
diff --git a/example/generator/odp_generator.c b/example/generator/odp_generator.c
index 48d7f5f00..ccd47f628 100644
--- a/example/generator/odp_generator.c
+++ b/example/generator/odp_generator.c
@@ -732,7 +732,7 @@ int main(int argc, char *argv[])
odp_timer_pool_start();
/* Create timeout pool */
- memset(&params, 0, sizeof(params));
+ odp_pool_param_init(&params);
params.tmo.num = tparams.num_timers; /* One timeout per timer */
params.type = ODP_POOL_TIMEOUT;
@@ -897,6 +897,8 @@ int main(int argc, char *argv[])
odp_timer_pool_destroy(tp);
if (0 != odp_pool_destroy(tmop))
fprintf(stderr, "unable to destroy pool \"tmop\"\n");
+ if (0 != odp_shm_free(shm))
+ fprintf(stderr, "unable to free \"shm\"\n");
odp_term_local();
odp_term_global(instance);
printf("Exit\n\n");
diff --git a/example/ipsec/odp_ipsec.c b/example/ipsec/odp_ipsec.c
index 6a9a9febc..7e34d062d 100644
--- a/example/ipsec/odp_ipsec.c
+++ b/example/ipsec/odp_ipsec.c
@@ -148,7 +148,7 @@ typedef struct {
uint32_t dst_ip; /**< SA dest IP address */
/* Output only */
- odp_crypto_op_params_t params; /**< Parameters for crypto call */
+ odp_crypto_op_param_t params; /**< Parameters for crypto call */
uint32_t *ah_seq; /**< AH sequence number location */
uint32_t *esp_seq; /**< ESP sequence number location */
uint16_t *tun_hdr_id; /**< Tunnel header ID > */
@@ -644,7 +644,7 @@ pkt_disposition_e do_ipsec_in_classify(odp_packet_t pkt,
odph_ahhdr_t *ah = NULL;
odph_esphdr_t *esp = NULL;
ipsec_cache_entry_t *entry;
- odp_crypto_op_params_t params;
+ odp_crypto_op_param_t params;
odp_bool_t posted = 0;
/* Default to skip IPsec */
@@ -823,7 +823,7 @@ pkt_disposition_e do_ipsec_out_classify(odp_packet_t pkt,
uint16_t ip_data_len = ipv4_data_len(ip);
uint8_t *ip_data = ipv4_data_p(ip);
ipsec_cache_entry_t *entry;
- odp_crypto_op_params_t params;
+ odp_crypto_op_param_t params;
int hdr_len = 0;
int trl_len = 0;
odph_ahhdr_t *ah = NULL;
@@ -1361,6 +1361,32 @@ main(int argc, char *argv[])
free(args->appl.if_names);
free(args->appl.if_str);
+
+ shm = odp_shm_lookup("shm_args");
+ if (odp_shm_free(shm) != 0)
+ EXAMPLE_ERR("Error: shm free shm_args failed\n");
+ shm = odp_shm_lookup("shm_ipsec_cache");
+ if (odp_shm_free(shm) != 0)
+ EXAMPLE_ERR("Error: shm free shm_ipsec_cache failed\n");
+ shm = odp_shm_lookup("shm_fwd_db");
+ if (odp_shm_free(shm) != 0)
+ EXAMPLE_ERR("Error: shm free shm_fwd_db failed\n");
+ shm = odp_shm_lookup("loopback_db");
+ if (odp_shm_free(shm) != 0)
+ EXAMPLE_ERR("Error: shm free loopback_db failed\n");
+ shm = odp_shm_lookup("shm_sa_db");
+ if (odp_shm_free(shm) != 0)
+ EXAMPLE_ERR("Error: shm free shm_sa_db failed\n");
+ shm = odp_shm_lookup("shm_tun_db");
+ if (odp_shm_free(shm) != 0)
+ EXAMPLE_ERR("Error: shm free shm_tun_db failed\n");
+ shm = odp_shm_lookup("shm_sp_db");
+ if (odp_shm_free(shm) != 0)
+ EXAMPLE_ERR("Error: shm free shm_sp_db failed\n");
+ shm = odp_shm_lookup("stream_db");
+ if (odp_shm_free(shm) != 0)
+ EXAMPLE_ERR("Error: shm free stream_db failed\n");
+
printf("Exit\n\n");
return 0;
diff --git a/example/ipsec/odp_ipsec_cache.c b/example/ipsec/odp_ipsec_cache.c
index 2bd44cf3e..b2a91c242 100644
--- a/example/ipsec/odp_ipsec_cache.c
+++ b/example/ipsec/odp_ipsec_cache.c
@@ -44,7 +44,7 @@ int create_ipsec_cache_entry(sa_db_entry_t *cipher_sa,
odp_queue_t completionq,
odp_pool_t out_pool)
{
- odp_crypto_session_params_t params;
+ odp_crypto_session_param_t params;
ipsec_cache_entry_t *entry;
odp_crypto_ses_create_err_t ses_create_rc;
odp_crypto_session_t session;
@@ -60,6 +60,8 @@ int create_ipsec_cache_entry(sa_db_entry_t *cipher_sa,
(cipher_sa->mode != auth_sa->mode))
return -1;
+ odp_crypto_session_param_init(&params);
+
/* Setup parameters and call crypto library to create session */
params.op = (in) ? ODP_CRYPTO_OP_DECODE : ODP_CRYPTO_OP_ENCODE;
params.auth_cipher_text = TRUE;
diff --git a/example/l3fwd/odp_l3fwd.c b/example/l3fwd/odp_l3fwd.c
index 8919bd387..441e812a7 100644
--- a/example/l3fwd/odp_l3fwd.c
+++ b/example/l3fwd/odp_l3fwd.c
@@ -942,6 +942,7 @@ int main(int argc, char **argv)
odph_odpthread_t thread_tbl[MAX_NB_WORKER];
odp_pool_t pool;
odp_pool_param_t params;
+ odp_shm_t shm;
odp_instance_t instance;
odph_odpthread_params_t thr_params;
odp_cpumask_t cpumask;
@@ -1119,6 +1120,22 @@ int main(int argc, char **argv)
for (i = 0; i < MAX_NB_ROUTE; i++)
free(args->route_str[i]);
+ shm = odp_shm_lookup("flow_table");
+ if (shm != ODP_SHM_INVALID && odp_shm_free(shm) != 0) {
+ printf("Error: shm free flow_table\n");
+ exit(EXIT_FAILURE);
+ }
+ shm = odp_shm_lookup("shm_fwd_db");
+ if (shm != ODP_SHM_INVALID && odp_shm_free(shm) != 0) {
+ printf("Error: shm free shm_fwd_db\n");
+ exit(EXIT_FAILURE);
+ }
+ shm = odp_shm_lookup("fib_lpm_sub");
+ if (shm != ODP_SHM_INVALID && odp_shm_free(shm) != 0) {
+ printf("Error: shm free fib_lpm_sub\n");
+ exit(EXIT_FAILURE);
+ }
+
if (odp_pool_destroy(pool)) {
printf("Error: pool destroy\n");
exit(EXIT_FAILURE);
diff --git a/example/switch/odp_switch.c b/example/switch/odp_switch.c
index 4b944fe3b..f9c717601 100644
--- a/example/switch/odp_switch.c
+++ b/example/switch/odp_switch.c
@@ -1039,6 +1039,11 @@ int main(int argc, char **argv)
exit(EXIT_FAILURE);
}
+ if (odp_shm_free(shm)) {
+ printf("Error: shm free\n");
+ exit(EXIT_FAILURE);
+ }
+
if (odp_term_local()) {
printf("Error: term local\n");
exit(EXIT_FAILURE);
diff --git a/helper/Makefile.am b/helper/Makefile.am
index d09d9008e..9d0036de0 100644
--- a/helper/Makefile.am
+++ b/helper/Makefile.am
@@ -29,7 +29,9 @@ noinst_HEADERS = \
$(srcdir)/odph_debug.h \
$(srcdir)/odph_hashtable.h \
$(srcdir)/odph_lineartable.h \
- $(srcdir)/odph_list_internal.h
+ $(srcdir)/odph_cuckootable.h \
+ $(srcdir)/odph_list_internal.h \
+ $(srcdir)/odph_iplookuptable.h
__LIB__libodphelper_linux_la_SOURCES = \
eth.c \
@@ -37,6 +39,8 @@ __LIB__libodphelper_linux_la_SOURCES = \
chksum.c \
linux.c \
hashtable.c \
- lineartable.c
+ lineartable.c \
+ cuckootable.c \
+ iplookuptable.c
lib_LTLIBRARIES = $(LIB)/libodphelper-linux.la
diff --git a/helper/cuckootable.c b/helper/cuckootable.c
new file mode 100644
index 000000000..b4fce6c5b
--- /dev/null
+++ b/helper/cuckootable.c
@@ -0,0 +1,757 @@
+/* Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <stdint.h>
+#include <errno.h>
+#include <stdio.h>
+
+#include "odph_cuckootable.h"
+#include "odph_debug.h"
+#include <odp_api.h>
+
+/* More efficient access to a map of single ullong */
+#define ULLONG_FOR_EACH_1(IDX, MAP) \
+ for (; MAP && (((IDX) = __builtin_ctzll(MAP)), true); \
+ MAP = (MAP & (MAP - 1)))
+
+/** @magic word, write to the first byte of the memory block
+ * to indicate this block is used by a cuckoo hash table
+ */
+#define ODPH_CUCKOO_TABLE_MAGIC_WORD 0xDFDFFDFD
+
+/** Number of items per bucket. */
+#define HASH_BUCKET_ENTRIES 4
+
+#define NULL_SIGNATURE 0
+#define KEY_ALIGNMENT 16
+
+/** Maximum size of hash table that can be created. */
+#define HASH_ENTRIES_MAX 1048576
+
+/** @internal signature struct
+ * Structure storing both primary and secondary hashes
+ */
+struct cuckoo_table_signatures {
+ union {
+ struct {
+ uint32_t current;
+ uint32_t alt;
+ };
+ uint64_t sig;
+ };
+};
+
+/** @internal key-value struct
+ * Structure that stores key-value pair
+ */
+struct cuckoo_table_key_value {
+ uint8_t *key;
+ uint8_t *value;
+};
+
+/** @internal bucket structure
+ * Put the elements with different keys but a same signature
+ * into a bucket, and each bucket has at most HASH_BUCKET_ENTRIES
+ * elements.
+ */
+struct cuckoo_table_bucket {
+ struct cuckoo_table_signatures signatures[HASH_BUCKET_ENTRIES];
+ /* Includes dummy key index that always contains index 0 */
+ odp_buffer_t key_buf[HASH_BUCKET_ENTRIES + 1];
+ uint8_t flag[HASH_BUCKET_ENTRIES];
+} ODP_ALIGNED_CACHE;
+
+/* More efficient access to a map of single ullong */
+#define ULLONG_FOR_EACH_1(IDX, MAP) \
+ for (; MAP && (((IDX) = __builtin_ctzll(MAP)), true); \
+ MAP = (MAP & (MAP - 1)))
+
+/** A hash table structure. */
+typedef struct {
+ /**< for check */
+ uint32_t magicword;
+ /**< Name of the hash. */
+ char name[ODPH_TABLE_NAME_LEN];
+ /**< Total table entries. */
+ uint32_t entries;
+ /**< Number of buckets in table. */
+ uint32_t num_buckets;
+ /**< Length of hash key. */
+ uint32_t key_len;
+ /**< Length of value. */
+ uint32_t value_len;
+ /**< Bitmask for getting bucket index from hash signature. */
+ uint32_t bucket_bitmask;
+ /**< Queue that stores all free key-value slots*/
+ odp_queue_t free_slots;
+ /** Table with buckets storing all the hash values and key indexes
+ to the key table*/
+ struct cuckoo_table_bucket *buckets;
+} odph_cuckoo_table_impl ODP_ALIGNED_CACHE;
+
+/**
+ * Aligns input parameter to the next power of 2
+ *
+ * @param x
+ *   The integer value to align
+ *
+ * @return
+ * Input parameter aligned to the next power of 2
+ */
+static inline uint32_t
+align32pow2(uint32_t x)
+{
+ x--;
+ x |= x >> 1;
+ x |= x >> 2;
+ x |= x >> 4;
+ x |= x >> 8;
+ x |= x >> 16;
+
+ return x + 1;
+}
+
+/**
+ * Returns true if n is a power of 2
+ * @param n
+ * Number to check
+ * @return 1 if true, 0 otherwise
+ */
+static inline int
+is_power_of_2(uint32_t n)
+{
+ return n && !(n & (n - 1));
+}
+
+odph_table_t
+odph_cuckoo_table_lookup(const char *name)
+{
+ odph_cuckoo_table_impl *tbl;
+
+ if (name == NULL || strlen(name) >= ODPH_TABLE_NAME_LEN)
+ return NULL;
+
+ tbl = (odph_cuckoo_table_impl *)odp_shm_addr(odp_shm_lookup(name));
+ if (!tbl || tbl->magicword != ODPH_CUCKOO_TABLE_MAGIC_WORD)
+ return NULL;
+
+ if (strcmp(tbl->name, name))
+ return NULL;
+
+ return (odph_table_t)tbl;
+}
+
+odph_table_t
+odph_cuckoo_table_create(
+ const char *name, uint32_t capacity, uint32_t key_size,
+ uint32_t value_size)
+{
+ odph_cuckoo_table_impl *tbl;
+ odp_shm_t shm_tbl;
+
+ odp_pool_t pool;
+ odp_pool_param_t param;
+
+ odp_queue_t queue;
+ odp_queue_param_t qparam;
+
+ char pool_name[ODPH_TABLE_NAME_LEN + 3],
+ queue_name[ODPH_TABLE_NAME_LEN + 3];
+ unsigned i;
+ uint32_t impl_size, kv_entry_size,
+ bucket_num, bucket_size;
+
+ /* Check for valid parameters */
+ if (
+ (capacity > HASH_ENTRIES_MAX) ||
+ (capacity < HASH_BUCKET_ENTRIES) ||
+ (key_size == 0) ||
+ (strlen(name) == 0)) {
+ ODPH_DBG("invalid parameters\n");
+ return NULL;
+ }
+
+ /* Guarantee there's no existing */
+ tbl = (odph_cuckoo_table_impl *)odph_cuckoo_table_lookup(name);
+ if (tbl != NULL) {
+ ODPH_DBG("cuckoo hash table %s already exists\n", name);
+ return NULL;
+ }
+
+ /* Calculate the sizes of different parts of cuckoo hash table */
+ impl_size = sizeof(odph_cuckoo_table_impl);
+ kv_entry_size = sizeof(struct cuckoo_table_key_value)
+ + key_size + value_size;
+
+ bucket_num = align32pow2(capacity) / HASH_BUCKET_ENTRIES;
+ bucket_size = bucket_num * sizeof(struct cuckoo_table_bucket);
+
+ shm_tbl = odp_shm_reserve(
+ name, impl_size + bucket_size,
+ ODP_CACHE_LINE_SIZE, ODP_SHM_SW_ONLY);
+
+ if (shm_tbl == ODP_SHM_INVALID) {
+ ODPH_DBG(
+ "shm allocation failed for odph_cuckoo_table_impl %s\n",
+ name);
+ return NULL;
+ }
+
+ tbl = (odph_cuckoo_table_impl *)odp_shm_addr(shm_tbl);
+ memset(tbl, 0, impl_size + bucket_size);
+
+ /* header of this mem block is the table impl struct,
+ * then the bucket pool.
+ */
+ tbl->buckets = (struct cuckoo_table_bucket *)(
+ (char *)tbl + impl_size);
+
+ /* initialize key-value buffer pool */
+ snprintf(pool_name, sizeof(pool_name), "kv_%s", name);
+ pool = odp_pool_lookup(pool_name);
+
+ if (pool != ODP_POOL_INVALID)
+ odp_pool_destroy(pool);
+
+ param.type = ODP_POOL_BUFFER;
+ param.buf.size = kv_entry_size;
+ param.buf.align = ODP_CACHE_LINE_SIZE;
+ param.buf.num = capacity;
+
+ pool = odp_pool_create(pool_name, &param);
+
+ if (pool == ODP_POOL_INVALID) {
+ ODPH_DBG("failed to create key-value pool\n");
+ odp_shm_free(shm_tbl);
+ return NULL;
+ }
+
+ /* initialize free_slots queue */
+ odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_PLAIN;
+
+ snprintf(queue_name, sizeof(queue_name), "fs_%s", name);
+ queue = odp_queue_create(queue_name, &qparam);
+ if (queue == ODP_QUEUE_INVALID) {
+ ODPH_DBG("failed to create free_slots queue\n");
+ odp_pool_destroy(pool);
+ odp_shm_free(shm_tbl);
+ return NULL;
+ }
+
+ /* Setup hash context */
+ snprintf(tbl->name, sizeof(tbl->name), "%s", name);
+ tbl->magicword = ODPH_CUCKOO_TABLE_MAGIC_WORD;
+ tbl->entries = capacity;
+ tbl->key_len = key_size;
+ tbl->value_len = value_size;
+ tbl->num_buckets = bucket_num;
+ tbl->bucket_bitmask = bucket_num - 1;
+ tbl->free_slots = queue;
+
+ /* generate all free buffers, and put into queue */
+ for (i = 0; i < capacity; i++) {
+ odp_event_t ev = odp_buffer_to_event(
+ odp_buffer_alloc(pool));
+ if (ev == ODP_EVENT_INVALID) {
+ ODPH_DBG("failed to generate free slots\n");
+ odph_cuckoo_table_destroy((odph_table_t)tbl);
+ return NULL;
+ }
+
+ if (odp_queue_enq(queue, ev) < 0) {
+ ODPH_DBG("failed to enqueue free slots\n");
+ odph_cuckoo_table_destroy((odph_table_t)tbl);
+ return NULL;
+ }
+ }
+
+ return (odph_table_t)tbl;
+}
+
+int
+odph_cuckoo_table_destroy(odph_table_t tbl)
+{
+ int ret, i, j;
+ odph_cuckoo_table_impl *impl = NULL;
+ char pool_name[ODPH_TABLE_NAME_LEN + 3];
+ odp_event_t ev;
+ odp_shm_t shm;
+ odp_pool_t pool;
+
+ if (tbl == NULL)
+ return -1;
+
+ impl = (odph_cuckoo_table_impl *)tbl;
+
+ /* check magic word */
+ if (impl->magicword != ODPH_CUCKOO_TABLE_MAGIC_WORD) {
+ ODPH_DBG("wrong magicword for cuckoo table\n");
+ return -1;
+ }
+
+ /* free all used buffers*/
+ for (i = 0; i < impl->num_buckets; i++) {
+ for (j = 0; j < HASH_BUCKET_ENTRIES; j++) {
+ if (impl->buckets[i].signatures[j].current
+ != NULL_SIGNATURE)
+ odp_buffer_free(impl->buckets[i].key_buf[j]);
+ }
+ }
+
+ /* free all free buffers */
+ while ((ev = odp_queue_deq(impl->free_slots))
+ != ODP_EVENT_INVALID) {
+ odp_buffer_free(odp_buffer_from_event(ev));
+ }
+
+ /* destroy free_slots queue */
+ ret = odp_queue_destroy(impl->free_slots);
+ if (ret < 0)
+ ODPH_DBG("failed to destroy free_slots queue\n");
+
+ /* destroy key-value pool */
+ snprintf(pool_name, sizeof(pool_name), "kv_%s", impl->name);
+ pool = odp_pool_lookup(pool_name);
+ if (pool == ODP_POOL_INVALID) {
+ ODPH_DBG("invalid pool\n");
+ return -1;
+ }
+
+ ret = odp_pool_destroy(pool);
+ if (ret != 0) {
+ ODPH_DBG("failed to destroy key-value buffer pool\n");
+ return -1;
+ }
+
+ /* free impl */
+ shm = odp_shm_lookup(impl->name);
+ if (shm == ODP_SHM_INVALID) {
+ ODPH_DBG("unable look up shm\n");
+ return -1;
+ }
+
+ return odp_shm_free(shm);
+}
+
+static uint32_t hash(const odph_cuckoo_table_impl *h, const void *key)
+{
+ /* calc hash result by key */
+ return odp_hash_crc32c(key, h->key_len, 0);
+}
+
+/* Calc the secondary hash value from the primary hash value of a given key */
+static inline uint32_t
+hash_secondary(const uint32_t primary_hash)
+{
+ static const unsigned all_bits_shift = 12;
+ static const unsigned alt_bits_xor = 0x5bd1e995;
+
+ uint32_t tag = primary_hash >> all_bits_shift;
+
+ return (primary_hash ^ ((tag + 1) * alt_bits_xor));
+}
+
+/* Search for an entry that can be pushed to its alternative location */
+static inline int
+make_space_bucket(
+ const odph_cuckoo_table_impl *impl,
+ struct cuckoo_table_bucket *bkt)
+{
+ unsigned i, j;
+ int ret;
+ uint32_t next_bucket_idx;
+ struct cuckoo_table_bucket *next_bkt[HASH_BUCKET_ENTRIES];
+
+ /*
+ * Push existing item (search for bucket with space in
+ * alternative locations) to its alternative location
+ */
+ for (i = 0; i < HASH_BUCKET_ENTRIES; i++) {
+ /* Search for space in alternative locations */
+ next_bucket_idx = bkt->signatures[i].alt & impl->bucket_bitmask;
+ next_bkt[i] = &impl->buckets[next_bucket_idx];
+ for (j = 0; j < HASH_BUCKET_ENTRIES; j++) {
+ if (next_bkt[i]->signatures[j].sig == NULL_SIGNATURE)
+ break;
+ }
+
+ if (j != HASH_BUCKET_ENTRIES)
+ break;
+ }
+
+ /* Alternative location has spare room (end of recursive function) */
+ if (i != HASH_BUCKET_ENTRIES) {
+ next_bkt[i]->signatures[j].alt = bkt->signatures[i].current;
+ next_bkt[i]->signatures[j].current = bkt->signatures[i].alt;
+ next_bkt[i]->key_buf[j] = bkt->key_buf[i];
+ return i;
+ }
+
+ /* Pick entry that has not been pushed yet */
+ for (i = 0; i < HASH_BUCKET_ENTRIES; i++)
+ if (bkt->flag[i] == 0)
+ break;
+
+ /* All entries have been pushed, so entry cannot be added */
+ if (i == HASH_BUCKET_ENTRIES)
+ return -ENOSPC;
+
+ /* Set flag to indicate that this entry is going to be pushed */
+ bkt->flag[i] = 1;
+ /* Need room in alternative bucket to insert the pushed entry */
+ ret = make_space_bucket(impl, next_bkt[i]);
+ /*
+ * After recursive function.
+ * Clear flags and insert the pushed entry
+ * in its alternative location if successful,
+ * or return error
+ */
+ bkt->flag[i] = 0;
+ if (ret >= 0) {
+ next_bkt[i]->signatures[ret].alt = bkt->signatures[i].current;
+ next_bkt[i]->signatures[ret].current = bkt->signatures[i].alt;
+ next_bkt[i]->key_buf[ret] = bkt->key_buf[i];
+ return i;
+ }
+
+ return ret;
+}
+
+static inline int32_t
+cuckoo_table_add_key_with_hash(
+ const odph_cuckoo_table_impl *h, const void *key,
+ uint32_t sig, void *data)
+{
+ uint32_t alt_hash;
+ uint32_t prim_bucket_idx, sec_bucket_idx;
+ unsigned i;
+ struct cuckoo_table_bucket *prim_bkt, *sec_bkt;
+ struct cuckoo_table_key_value *new_kv, *kv;
+
+ odp_buffer_t new_buf;
+ int ret;
+
+ prim_bucket_idx = sig & h->bucket_bitmask;
+ prim_bkt = &h->buckets[prim_bucket_idx];
+ __builtin_prefetch((const void *)(uintptr_t)prim_bkt, 0, 3);
+
+ alt_hash = hash_secondary(sig);
+ sec_bucket_idx = alt_hash & h->bucket_bitmask;
+ sec_bkt = &h->buckets[sec_bucket_idx];
+ __builtin_prefetch((const void *)(uintptr_t)sec_bkt, 0, 3);
+
+ /* Get a new slot for storing the new key */
+ new_buf = odp_buffer_from_event(odp_queue_deq(h->free_slots));
+ if (new_buf == ODP_BUFFER_INVALID)
+ return -ENOSPC;
+
+ /* Check if key is already inserted in primary location */
+ for (i = 0; i < HASH_BUCKET_ENTRIES; i++) {
+ if (
+ prim_bkt->signatures[i].current == sig &&
+ prim_bkt->signatures[i].alt == alt_hash) {
+ kv = (struct cuckoo_table_key_value *)odp_buffer_addr(
+ prim_bkt->key_buf[i]);
+ if (memcmp(key, kv->key, h->key_len) == 0) {
+ odp_queue_enq(
+ h->free_slots,
+ odp_buffer_to_event(new_buf));
+ /* Update data */
+ if (kv->value != NULL)
+ memcpy(kv->value, data, h->value_len);
+
+ /* Return bucket index */
+ return prim_bucket_idx;
+ }
+ }
+ }
+
+ /* Check if key is already inserted in secondary location */
+ for (i = 0; i < HASH_BUCKET_ENTRIES; i++) {
+ if (
+ sec_bkt->signatures[i].alt == sig &&
+ sec_bkt->signatures[i].current == alt_hash) {
+ kv = (struct cuckoo_table_key_value *)odp_buffer_addr(
+ sec_bkt->key_buf[i]);
+ if (memcmp(key, kv->key, h->key_len) == 0) {
+ odp_queue_enq(
+ h->free_slots,
+ odp_buffer_to_event(new_buf));
+ /* Update data */
+ if (kv->value != NULL)
+ memcpy(kv->value, data, h->value_len);
+
+ /* Return bucket index */
+ return sec_bucket_idx;
+ }
+ }
+ }
+
+ new_kv = (struct cuckoo_table_key_value *)odp_buffer_addr(new_buf);
+ __builtin_prefetch((const void *)(uintptr_t)new_kv, 0, 3);
+
+ /* Copy key and value.
+ * key-value mem block : struct cuckoo_table_key_value
+ * + key (key_len) + value (value_len)
+ */
+ new_kv->key = (uint8_t *)new_kv
+ + sizeof(struct cuckoo_table_key_value);
+ memcpy(new_kv->key, key, h->key_len);
+
+ if (h->value_len > 0) {
+ new_kv->value = new_kv->key + h->key_len;
+ memcpy(new_kv->value, data, h->value_len);
+ } else {
+ new_kv->value = NULL;
+ }
+
+	/* Insert new entry if there is room in the primary bucket */
+ for (i = 0; i < HASH_BUCKET_ENTRIES; i++) {
+ /* Check if slot is available */
+ if (odp_likely(prim_bkt->signatures[i].sig == NULL_SIGNATURE)) {
+ prim_bkt->signatures[i].current = sig;
+ prim_bkt->signatures[i].alt = alt_hash;
+ prim_bkt->key_buf[i] = new_buf;
+ return prim_bucket_idx;
+ }
+ }
+
+ /* Primary bucket is full, so we need to make space for new entry */
+ ret = make_space_bucket(h, prim_bkt);
+
+ /*
+ * After recursive function.
+ * Insert the new entry in the position of the pushed entry
+ * if successful or return error and
+ * store the new slot back in the pool
+ */
+ if (ret >= 0) {
+ prim_bkt->signatures[ret].current = sig;
+ prim_bkt->signatures[ret].alt = alt_hash;
+ prim_bkt->key_buf[ret] = new_buf;
+ return prim_bucket_idx;
+ }
+
+ /* Error in addition, store new slot back in the free_slots */
+ odp_queue_enq(h->free_slots, odp_buffer_to_event(new_buf));
+ return ret;
+}
+
+int
+odph_cuckoo_table_put_value(odph_table_t tbl, void *key, void *value)
+{
+ if ((tbl == NULL) || (key == NULL))
+ return -EINVAL;
+
+ odph_cuckoo_table_impl *impl = (odph_cuckoo_table_impl *)tbl;
+ int ret = cuckoo_table_add_key_with_hash(
+ impl, key, hash(impl, key), value);
+
+ if (ret < 0)
+ return -1;
+
+ return 0;
+}
+
+static inline int32_t
+cuckoo_table_lookup_with_hash(
+ const odph_cuckoo_table_impl *h, const void *key,
+ uint32_t sig, void **data_ptr)
+{
+ uint32_t bucket_idx;
+ uint32_t alt_hash;
+ unsigned i;
+ struct cuckoo_table_bucket *bkt;
+ struct cuckoo_table_key_value *kv;
+
+ bucket_idx = sig & h->bucket_bitmask;
+ bkt = &h->buckets[bucket_idx];
+
+ /* Check if key is in primary location */
+ for (i = 0; i < HASH_BUCKET_ENTRIES; i++) {
+ if (
+ bkt->signatures[i].current == sig &&
+ bkt->signatures[i].sig != NULL_SIGNATURE) {
+ kv = (struct cuckoo_table_key_value *)odp_buffer_addr(
+ bkt->key_buf[i]);
+ if (memcmp(key, kv->key, h->key_len) == 0) {
+ if (data_ptr != NULL)
+ *data_ptr = kv->value;
+ /*
+ * Return index where key is stored,
+ * subtracting the first dummy index
+ */
+ return bucket_idx;
+ }
+ }
+ }
+
+ /* Calculate secondary hash */
+ alt_hash = hash_secondary(sig);
+ bucket_idx = alt_hash & h->bucket_bitmask;
+ bkt = &h->buckets[bucket_idx];
+
+ /* Check if key is in secondary location */
+ for (i = 0; i < HASH_BUCKET_ENTRIES; i++) {
+ if (
+ bkt->signatures[i].current == alt_hash &&
+ bkt->signatures[i].alt == sig) {
+ kv = (struct cuckoo_table_key_value *)odp_buffer_addr(
+ bkt->key_buf[i]);
+ if (memcmp(key, kv->key, h->key_len) == 0) {
+ if (data_ptr != NULL)
+ *data_ptr = kv->value;
+ /*
+ * Return index where key is stored,
+ * subtracting the first dummy index
+ */
+ return bucket_idx;
+ }
+ }
+ }
+
+ return -ENOENT;
+}
+
+int
+odph_cuckoo_table_get_value(
+ odph_table_t tbl, void *key, void *buffer, uint32_t buffer_size)
+{
+ if ((tbl == NULL) || (key == NULL))
+ return -EINVAL;
+
+ odph_cuckoo_table_impl *impl = (odph_cuckoo_table_impl *)tbl;
+ void *tmp = NULL;
+ int ret;
+
+ ret = cuckoo_table_lookup_with_hash(impl, key, hash(impl, key), &tmp);
+
+ if (ret < 0)
+ return -1;
+
+ if (impl->value_len > 0)
+ memcpy(buffer, tmp, impl->value_len);
+
+ return 0;
+}
+
+static inline int32_t
+cuckoo_table_del_key_with_hash(
+ const odph_cuckoo_table_impl *h,
+ const void *key, uint32_t sig)
+{
+ uint32_t bucket_idx;
+ uint32_t alt_hash;
+ unsigned i;
+ struct cuckoo_table_bucket *bkt;
+ struct cuckoo_table_key_value *kv;
+
+ bucket_idx = sig & h->bucket_bitmask;
+ bkt = &h->buckets[bucket_idx];
+
+ /* Check if key is in primary location */
+ for (i = 0; i < HASH_BUCKET_ENTRIES; i++) {
+ if (
+ bkt->signatures[i].current == sig &&
+ bkt->signatures[i].sig != NULL_SIGNATURE) {
+ kv = (struct cuckoo_table_key_value *)odp_buffer_addr(
+ bkt->key_buf[i]);
+ if (memcmp(key, kv->key, h->key_len) == 0) {
+ bkt->signatures[i].sig = NULL_SIGNATURE;
+ odp_queue_enq(
+ h->free_slots,
+ odp_buffer_to_event(
+ bkt->key_buf[i]));
+ return bucket_idx;
+ }
+ }
+ }
+
+ /* Calculate secondary hash */
+ alt_hash = hash_secondary(sig);
+ bucket_idx = alt_hash & h->bucket_bitmask;
+ bkt = &h->buckets[bucket_idx];
+
+ /* Check if key is in secondary location */
+ for (i = 0; i < HASH_BUCKET_ENTRIES; i++) {
+ if (
+ bkt->signatures[i].current == alt_hash &&
+ bkt->signatures[i].sig != NULL_SIGNATURE) {
+ kv = (struct cuckoo_table_key_value *)odp_buffer_addr(
+ bkt->key_buf[i]);
+ if (memcmp(key, kv->key, h->key_len) == 0) {
+ bkt->signatures[i].sig = NULL_SIGNATURE;
+ odp_queue_enq(
+ h->free_slots,
+ odp_buffer_to_event(
+ bkt->key_buf[i]));
+ return bucket_idx;
+ }
+ }
+ }
+
+ return -ENOENT;
+}
+
+int
+odph_cuckoo_table_remove_value(odph_table_t tbl, void *key)
+{
+ if ((tbl == NULL) || (key == NULL))
+ return -EINVAL;
+
+ odph_cuckoo_table_impl *impl = (odph_cuckoo_table_impl *)tbl;
+ int ret = cuckoo_table_del_key_with_hash(
+ impl, key, hash(impl, key));
+
+ if (ret < 0)
+ return -1;
+
+ return 0;
+}
+
+odph_table_ops_t odph_cuckoo_table_ops = {
+ odph_cuckoo_table_create,
+ odph_cuckoo_table_lookup,
+ odph_cuckoo_table_destroy,
+ odph_cuckoo_table_put_value,
+ odph_cuckoo_table_get_value,
+ odph_cuckoo_table_remove_value
+};
diff --git a/helper/include/odp/helper/ip.h b/helper/include/odp/helper/ip.h
index 4cfc00f97..ba6e675f4 100644
--- a/helper/include/odp/helper/ip.h
+++ b/helper/include/odp/helper/ip.h
@@ -101,17 +101,18 @@ ODP_STATIC_ASSERT(sizeof(odph_ipv4hdr_t) == ODPH_IPV4HDR_LEN,
*/
static inline int odph_ipv4_csum_valid(odp_packet_t pkt)
{
+ uint32_t offset;
odp_u16be_t res = 0;
uint16_t *w;
int nleft = sizeof(odph_ipv4hdr_t);
odph_ipv4hdr_t ip;
odp_u16be_t chksum;
- if (!odp_packet_l3_offset(pkt))
+ offset = odp_packet_l3_offset(pkt);
+ if (offset == ODP_PACKET_OFFSET_INVALID)
return 0;
- odp_packet_copy_to_mem(pkt, odp_packet_l3_offset(pkt),
- sizeof(odph_ipv4hdr_t), &ip);
+ odp_packet_copy_to_mem(pkt, offset, sizeof(odph_ipv4hdr_t), &ip);
w = (uint16_t *)(void *)&ip;
chksum = ip.chksum;
@@ -137,10 +138,10 @@ static inline odp_u16sum_t odph_ipv4_csum_update(odp_packet_t pkt)
odph_ipv4hdr_t *ip;
int nleft = sizeof(odph_ipv4hdr_t);
- if (!odp_packet_l3_offset(pkt))
+ ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, NULL);
+ if (ip == NULL)
return 0;
- ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, NULL);
w = (uint16_t *)(void *)ip;
ip->chksum = odph_chksum(w, nleft);
return ip->chksum;
diff --git a/helper/include/odp/helper/tcp.h b/helper/include/odp/helper/tcp.h
index cabef9072..fd234e583 100644
--- a/helper/include/odp/helper/tcp.h
+++ b/helper/include/odp/helper/tcp.h
@@ -34,7 +34,7 @@ typedef struct ODP_PACKED {
odp_u32be_t ack_no; /**< Acknowledgment number */
union {
odp_u16be_t doffset_flags;
-#if defined(ODP_BIG_ENDIAN_BITFIELD)
+#if ODP_BIG_ENDIAN_BITFIELD
struct {
odp_u16be_t rsvd1:8;
odp_u16be_t flags:8; /**< TCP flags as a byte */
@@ -51,7 +51,7 @@ typedef struct ODP_PACKED {
odp_u16be_t syn:1;
odp_u16be_t fin:1;
};
-#elif defined(ODP_LITTLE_ENDIAN_BITFIELD)
+#elif ODP_LITTLE_ENDIAN_BITFIELD
struct {
odp_u16be_t flags:8;
odp_u16be_t rsvd1:8; /**< TCP flags as a byte */
diff --git a/helper/iplookuptable.c b/helper/iplookuptable.c
new file mode 100644
index 000000000..5f80743b3
--- /dev/null
+++ b/helper/iplookuptable.c
@@ -0,0 +1,937 @@
+/* Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <string.h>
+#include <stdint.h>
+#include <errno.h>
+#include <stdio.h>
+
+#include "odph_iplookuptable.h"
+#include "odph_list_internal.h"
+#include "odph_debug.h"
+#include <odp_api.h>
+
+/** Magic word written to the start of the memory block
+ * to indicate the block is used by an IP lookup table.
+ */
+#define ODPH_IP_LOOKUP_TABLE_MAGIC_WORD 0xCFCFFCFC
+
+/* The length(bit) of the IPv4 address */
+#define IP_LENGTH 32
+
+/* The number of L1 entries */
+#define ENTRY_NUM_L1 (1 << 16)
+/* The number of entries in one L2/L3 subtree */
+#define ENTRY_NUM_SUBTREE (1 << 8)
+
+#define WHICH_CHILD(ip, cidr) ((ip >> (IP_LENGTH - cidr)) & 0x00000001)
+
+/** @internal entry struct
+ * Structure storing one entry of the IP prefix table.
+ * Because of leaf pushing, each entry of the table has
+ * either a child subtree or nexthop info:
+ * - If child == 0 and nexthop != ODP_BUFFER_INVALID, this entry
+ *   holds a nexthop: 'nexthop' is the buffer storing the
+ *   nexthop value.
+ * - If child == 1, this entry has a subtree: 'ptr' points to the
+ *   address of the subtree buffer.
+ * 'cidr' is the prefix length that produced this entry's value.
+ */
+typedef struct {
+ union {
+ uint8_t u8;
+ struct {
+#if ODP_BYTE_ORDER == ODP_BIG_ENDIAN
+ uint8_t child : 1;
+ uint8_t cidr : 7;
+#else
+ uint8_t cidr : 7;
+ uint8_t child : 1;
+#endif
+ };
+ };
+ union {
+ odp_buffer_t nexthop;
+ void *ptr;
+ };
+} prefix_entry_t;
+
+#define ENTRY_SIZE (sizeof(prefix_entry_t) + sizeof(odp_buffer_t))
+#define ENTRY_BUFF_ARR(x) ((odp_buffer_t *)((char *)x \
+ + sizeof(prefix_entry_t) * ENTRY_NUM_SUBTREE))
+
+/** @internal trie node struct
+ * In this IP lookup algorithm, a binary trie is kept alongside
+ * the multibit table to detect overlapping prefixes
+ * (needed when deleting a prefix that shadows a shorter one).
+ */
+typedef struct trie_node {
+ /* tree structure */
+ struct trie_node *parent;
+ struct trie_node *left;
+ struct trie_node *right;
+ /* IP prefix length */
+ uint8_t cidr;
+ /* Nexthop buffer index */
+ odp_buffer_t nexthop;
+ /* Buffer that stores this node */
+ odp_buffer_t buffer;
+} trie_node_t;
+
+/** Number of L2/L3 entries (subtrees) per cache cube. */
+#define CACHE_NUM_SUBTREE (1 << 13)
+/** Number of trie nodes per cache cube. */
+#define CACHE_NUM_TRIE (1 << 20)
+
+/** @typedef cache_type_t
+ * Cache node type
+ */
+typedef enum {
+ CACHE_TYPE_SUBTREE = 0,
+ CACHE_TYPE_TRIE
+} cache_type_t;
+
+/** An IP lookup table structure.
+ * Lives at the start of the shm block; the L1 entry array
+ * (and its parallel buffer array) immediately follows it.
+ */
+typedef struct {
+ /**< magic word for validity check */
+ uint32_t magicword;
+ /** Name of the table (also the shm name). */
+ char name[ODPH_TABLE_NAME_LEN];
+ /** Total L1 entries. */
+ prefix_entry_t *l1e;
+ /** Root node of the binary trie */
+ trie_node_t *trie;
+ /** Length of value. */
+ uint32_t nexthop_len;
+ /** Queues of free slots (caches)
+ * There are two queues:
+ * - free_slots[CACHE_TYPE_SUBTREE] is used for L2 and
+ * L3 entries (subtrees). Each entry stores an 8-bit
+ * subtree.
+ * - free_slots[CACHE_TYPE_TRIE] is used for the binary
+ * trie. Each entry contains a trie node.
+ */
+ odp_queue_t free_slots[2];
+ /** The number of pools used by each queue. */
+ uint32_t cache_count[2];
+} odph_iplookup_table_impl ODP_ALIGNED_CACHE;
+
+/***********************************************************
+ ***************** Cache management ********************
+ ***********************************************************/
+
+/** Destroy all caches: drain and destroy both free-slot queues,
+ * then destroy every pool that was created for them (pool names
+ * are reconstructed from the table name, queue index and pool count).
+ */
+static void
+cache_destroy(odph_iplookup_table_impl *impl)
+{
+ odp_queue_t queue;
+ odp_event_t ev;
+ uint32_t i = 0, count = 0;
+ char pool_name[ODPH_TABLE_NAME_LEN + 8];
+
+ /* free all buffers in the queue */
+ for (; i < 2; i++) {
+ queue = impl->free_slots[i];
+ if (queue == ODP_QUEUE_INVALID)
+ continue;
+
+ while ((ev = odp_queue_deq(queue))
+ != ODP_EVENT_INVALID) {
+ odp_buffer_free(odp_buffer_from_event(ev));
+ }
+ odp_queue_destroy(queue);
+ }
+
+ /* destroy all cache pools */
+ for (i = 0; i < 2; i++) {
+ for (count = 0; count < impl->cache_count[i]; count++) {
+ sprintf(
+ pool_name, "%s_%d_%d",
+ impl->name, i, count);
+ odp_pool_destroy(odp_pool_lookup(pool_name));
+ }
+ }
+}
+
+/** According to the type of cache, reset the contents of
+ * a buffer to its initial value: zero everything, then
+ * - SUBTREE: mark every entry's nexthop invalid;
+ * - TRIE: record the owning buffer handle and invalidate nexthop.
+ */
+static void
+cache_init_buffer(odp_buffer_t buffer, cache_type_t type, uint32_t size)
+{
+ int i = 0;
+ void *addr = odp_buffer_addr(buffer);
+
+ memset(addr, 0, size);
+ if (type == CACHE_TYPE_SUBTREE) {
+ prefix_entry_t *entry = (prefix_entry_t *)addr;
+
+ for (i = 0; i < ENTRY_NUM_SUBTREE; i++, entry++)
+ entry->nexthop = ODP_BUFFER_INVALID;
+ } else if (type == CACHE_TYPE_TRIE) {
+ trie_node_t *node = (trie_node_t *)addr;
+
+ node->buffer = buffer;
+ node->nexthop = ODP_BUFFER_INVALID;
+ }
+}
+
+/** Create a new buffer pool, and insert its buffers into the queue.
+ * Returns 0 on success, -1 on bad type or pool-creation failure.
+ * NOTE(review): 'param' is not initialized with odp_pool_param_init()
+ * before fields are set, so unset fields hold stack garbage — confirm
+ * against the ODP pool API contract.
+ */
+static int
+cache_alloc_new_pool(
+ odph_iplookup_table_impl *tbl, cache_type_t type)
+{
+ odp_pool_t pool;
+ odp_pool_param_t param;
+ odp_queue_t queue = tbl->free_slots[type];
+
+ odp_buffer_t buffer;
+ char pool_name[ODPH_TABLE_NAME_LEN + 8];
+ uint32_t size = 0, num = 0;
+
+ /* Create new pool (new free buffers). */
+ param.type = ODP_POOL_BUFFER;
+ param.buf.align = ODP_CACHE_LINE_SIZE;
+ if (type == CACHE_TYPE_SUBTREE) {
+ num = CACHE_NUM_SUBTREE;
+ size = ENTRY_SIZE * ENTRY_NUM_SUBTREE;
+ } else if (type == CACHE_TYPE_TRIE) {
+ num = CACHE_NUM_TRIE;
+ size = sizeof(trie_node_t);
+ } else {
+ ODPH_DBG("wrong cache_type_t.\n");
+ return -1;
+ }
+ param.buf.size = size;
+ param.buf.num = num;
+
+ sprintf(
+ pool_name, "%s_%d_%d",
+ tbl->name, type, tbl->cache_count[type]);
+ pool = odp_pool_create(pool_name, &param);
+ if (pool == ODP_POOL_INVALID) {
+ ODPH_DBG("failed to create a new pool.\n");
+ return -1;
+ }
+
+ /* insert new free buffers into queue */
+ while ((buffer = odp_buffer_alloc(pool))
+ != ODP_BUFFER_INVALID) {
+ cache_init_buffer(buffer, type, size);
+ odp_queue_enq(queue, odp_buffer_to_event(buffer));
+ }
+
+ tbl->cache_count[type]++;
+ return 0;
+}
+
+/** Get a free buffer from a cache queue. If there is no
+ * available buffer, allocate a new pool and retry once.
+ * May still return ODP_BUFFER_INVALID if pool allocation fails;
+ * callers check for that.
+ */
+static odp_buffer_t
+cache_get_buffer(odph_iplookup_table_impl *tbl, cache_type_t type)
+{
+ odp_buffer_t buffer = ODP_BUFFER_INVALID;
+ odp_queue_t queue = tbl->free_slots[type];
+
+ /* get free buffer from queue */
+ buffer = odp_buffer_from_event(
+ odp_queue_deq(queue));
+
+ /* If there is no free buffer available, allocate new pool */
+ if (buffer == ODP_BUFFER_INVALID) {
+ cache_alloc_new_pool(tbl, type);
+ buffer = odp_buffer_from_event(odp_queue_deq(queue));
+ }
+
+ return buffer;
+}
+
+/***********************************************************
+ ****************** Binary trie ********************
+ ***********************************************************/
+
+/* Initialize the root node of the trie (cidr 0 = default prefix).
+ * Returns 0 on success, -1 if no trie-node buffer is available.
+ */
+static int
+trie_init(odph_iplookup_table_impl *tbl)
+{
+ trie_node_t *root = NULL;
+ odp_buffer_t buffer = cache_get_buffer(tbl, CACHE_TYPE_TRIE);
+
+ if (buffer != ODP_BUFFER_INVALID) {
+ root = (trie_node_t *)odp_buffer_addr(buffer);
+ root->cidr = 0;
+ tbl->trie = root;
+ return 0;
+ }
+
+ return -1;
+}
+
+/* Destroy the whole trie (post-order recursion): children first,
+ * then return this node's buffer to the trie free-slot queue.
+ */
+static void
+trie_destroy(odph_iplookup_table_impl *tbl, trie_node_t *trie)
+{
+ if (trie->left != NULL)
+ trie_destroy(tbl, trie->left);
+ if (trie->right != NULL)
+ trie_destroy(tbl, trie->right);
+
+ /* destroy this node */
+ odp_queue_enq(
+ tbl->free_slots[CACHE_TYPE_TRIE],
+ odp_buffer_to_event(trie->buffer));
+}
+
+/* Insert a new prefix node into the trie.
+ * If the target node already exists, its nexthop info is updated.
+ * If it does not exist, the target node and all missing nodes
+ * along the path from root to it are created.
+ * Returns 0 on success, -1 when a trie-node buffer cannot be
+ * obtained from the cache.
+ */
+static int
+trie_insert_node(
+ odph_iplookup_table_impl *tbl, trie_node_t *root,
+ uint32_t ip, uint8_t cidr, odp_buffer_t nexthop)
+{
+ uint8_t level = 0, child;
+ odp_buffer_t buf;
+ trie_node_t *node = root, *prev = root;
+
+ /* create/update all nodes along the path
+ * from root to the new node. */
+ for (level = 1; level <= cidr; level++) {
+ child = WHICH_CHILD(ip, level);
+
+ node = child == 0 ? prev->left : prev->right;
+ /* If the child node doesn't exist, create it. */
+ if (node == NULL) {
+ buf = cache_get_buffer(tbl, CACHE_TYPE_TRIE);
+ if (buf == ODP_BUFFER_INVALID)
+ return -1;
+
+ node = (trie_node_t *)odp_buffer_addr(buf);
+ node->cidr = level;
+ node->parent = prev;
+
+ if (child == 0)
+ prev->left = node;
+ else
+ prev->right = node;
+ }
+ prev = node;
+ }
+
+ /* The final one is the target. */
+ node->nexthop = nexthop;
+ return 0;
+}
+
+/* Delete the trie node for (ip, cidr), then walk back toward the
+ * root freeing any node that became redundant (no children and no
+ * nexthop). Returns 0 on success, -1 when root is NULL, cidr is 0
+ * (default prefix cannot be deleted) or the node does not exist.
+ */
+static int
+trie_delete_node(
+ odph_iplookup_table_impl *tbl,
+ trie_node_t *root, uint32_t ip, uint8_t cidr)
+{
+ if (root == NULL)
+ return -1;
+
+ /* The default prefix (root node) cannot be deleted. */
+ if (cidr == 0)
+ return -1;
+
+ trie_node_t *node = root, *prev = NULL;
+ uint8_t level = 1, child = 0;
+ odp_buffer_t tmp;
+
+ /* Find the target node. */
+ for (level = 1; level <= cidr; level++) {
+ child = WHICH_CHILD(ip, level);
+ node = (child == 0) ? node->left : node->right;
+ if (node == NULL) {
+ ODPH_DBG("Trie node does not exist\n");
+ return -1;
+ }
+ }
+
+ node->nexthop = ODP_BUFFER_INVALID;
+
+ /* Delete all redundant nodes along the path. */
+ for (level = cidr; level > 0; level--) {
+ if (
+ node->left != NULL || node->right != NULL ||
+ node->nexthop != ODP_BUFFER_INVALID)
+ break;
+
+ child = WHICH_CHILD(ip, level);
+ prev = node->parent;
+
+ /* free trie node */
+ tmp = node->buffer;
+ cache_init_buffer(
+ tmp, CACHE_TYPE_TRIE, sizeof(trie_node_t));
+ odp_queue_enq(
+ tbl->free_slots[CACHE_TYPE_TRIE],
+ odp_buffer_to_event(tmp));
+
+ if (child == 0)
+ prev->left = NULL;
+ else
+ prev->right = NULL;
+ node = prev;
+ }
+ return 0;
+}
+
+/* Detect the longest overlapping (shorter, covering) prefix of
+ * (ip, cidr) by walking the trie down to 'limit' levels and
+ * remembering the deepest node that carries a nexthop.
+ * Outputs that node's cidr and nexthop; always returns 0.
+ * NOTE(review): assumes the path nodes exist ('node' is
+ * dereferenced without a NULL check) — the trie must contain
+ * the prefix being queried.
+ */
+static int
+trie_detect_overlap(
+ trie_node_t *trie, uint32_t ip, uint8_t cidr,
+ uint8_t leaf_push, uint8_t *over_cidr,
+ odp_buffer_t *over_nexthop)
+{
+ uint8_t child = 0;
+ uint32_t level, limit = cidr > leaf_push ? leaf_push + 1 : cidr;
+ trie_node_t *node = trie, *longest = trie;
+
+ for (level = 1; level < limit; level++) {
+ child = WHICH_CHILD(ip, level);
+ node = (child == 0) ? node->left : node->right;
+ if (node->nexthop != ODP_BUFFER_INVALID)
+ longest = node;
+ }
+
+ *over_cidr = longest->cidr;
+ *over_nexthop = longest->nexthop;
+ return 0;
+}
+
+/***********************************************************
+ *************** IP prefix lookup table ****************
+ ***********************************************************/
+
+/* Look up an existing IP lookup table by name via its shm block.
+ * Returns the table handle, or NULL when the name is invalid,
+ * the shm does not exist, or the block fails the magic-word /
+ * name validation.
+ */
+odph_table_t
+odph_iplookup_table_lookup(const char *name)
+{
+ odph_iplookup_table_impl *tbl = NULL;
+
+ if (name == NULL || strlen(name) >= ODPH_TABLE_NAME_LEN)
+ return NULL;
+
+ tbl = (odph_iplookup_table_impl *)odp_shm_addr(odp_shm_lookup(name));
+
+ if (
+ tbl != NULL &&
+ tbl->magicword == ODPH_IP_LOOKUP_TABLE_MAGIC_WORD &&
+ strcmp(tbl->name, name) == 0)
+ return (odph_table_t)tbl;
+
+ return NULL;
+}
+
+/* Create a new IP lookup table in shared memory.
+ * Capacity/key-size parameters of the generic table API are ignored
+ * (the key is always an IPv4 prefix); value_size is recorded as the
+ * nexthop length. Returns the table handle or NULL on failure.
+ * NOTE(review): on queue-creation failure the shm block is not freed,
+ * and on trie_init failure the caches/queues are not destroyed —
+ * both failure paths appear to leak; confirm intended cleanup.
+ */
+odph_table_t
+odph_iplookup_table_create(
+ const char *name, uint32_t ODP_IGNORED_1,
+ uint32_t ODP_IGNORED_2, uint32_t value_size)
+{
+ odph_iplookup_table_impl *tbl;
+ odp_shm_t shm_tbl;
+ odp_queue_t queue;
+ odp_queue_param_t qparam;
+
+ unsigned i;
+ uint32_t impl_size, l1_size;
+ char queue_name[ODPH_TABLE_NAME_LEN + 2];
+
+ /* Check for valid parameters */
+ if (strlen(name) == 0) {
+ ODPH_DBG("invalid parameters\n");
+ return NULL;
+ }
+
+ /* Guarantee there's no existing */
+ tbl = (odph_iplookup_table_impl *)odph_iplookup_table_lookup(name);
+ if (tbl != NULL) {
+ ODPH_DBG("IP prefix table %s already exists\n", name);
+ return NULL;
+ }
+
+ /* Calculate the sizes of different parts of IP prefix table */
+ impl_size = sizeof(odph_iplookup_table_impl);
+ l1_size = ENTRY_SIZE * ENTRY_NUM_L1;
+
+ shm_tbl = odp_shm_reserve(
+ name, impl_size + l1_size,
+ ODP_CACHE_LINE_SIZE, ODP_SHM_SW_ONLY);
+
+ if (shm_tbl == ODP_SHM_INVALID) {
+ ODPH_DBG(
+ "shm allocation failed for odph_iplookup_table_impl %s\n",
+ name);
+ return NULL;
+ }
+
+ tbl = (odph_iplookup_table_impl *)odp_shm_addr(shm_tbl);
+ memset(tbl, 0, impl_size + l1_size);
+
+ /* header of this mem block is the table impl struct,
+ * then the l1 entries array.
+ */
+ tbl->l1e = (prefix_entry_t *)((char *)tbl + impl_size);
+ for (i = 0; i < ENTRY_NUM_L1; i++)
+ tbl->l1e[i].nexthop = ODP_BUFFER_INVALID;
+
+ /* Setup table context. */
+ snprintf(tbl->name, sizeof(tbl->name), "%s", name);
+ tbl->magicword = ODPH_IP_LOOKUP_TABLE_MAGIC_WORD;
+ tbl->nexthop_len = value_size;
+
+ /* Initialize cache */
+ for (i = 0; i < 2; i++) {
+ tbl->cache_count[i] = 0;
+
+ odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_PLAIN;
+ sprintf(queue_name, "%s_%d", name, i);
+ queue = odp_queue_create(queue_name, &qparam);
+ if (queue == ODP_QUEUE_INVALID) {
+ ODPH_DBG("failed to create queue");
+ cache_destroy(tbl);
+ return NULL;
+ }
+ tbl->free_slots[i] = queue;
+ cache_alloc_new_pool(tbl, i);
+ }
+
+ /* Initialize trie */
+ if (trie_init(tbl) < 0) {
+ odp_shm_free(shm_tbl);
+ return NULL;
+ }
+
+ return (odph_table_t)tbl;
+}
+
+/* Destroy an IP lookup table: free the trie, recycle every L2/L3
+ * subtree buffer, destroy the caches and release the shm block.
+ * Returns 0 on success, -1 on NULL handle or magic-word mismatch.
+ * NOTE(review): subtree buffers are enqueued to
+ * free_slots[CACHE_TYPE_TRIE]; they were allocated from the
+ * CACHE_TYPE_SUBTREE cache, so this looks like the wrong queue —
+ * confirm.
+ * NOTE(review): ENTRY_BUFF_ARR offsets by ENTRY_NUM_SUBTREE (256)
+ * entries, but the L1 array has ENTRY_NUM_L1 (65536) entries, so
+ * buff1 appears to point inside the L1 entry array rather than at
+ * its buffer array — confirm.
+ */
+int
+odph_iplookup_table_destroy(odph_table_t tbl)
+{
+ int i, j;
+ odph_iplookup_table_impl *impl = NULL;
+ prefix_entry_t *subtree = NULL;
+ odp_buffer_t *buff1 = NULL, *buff2 = NULL;
+
+ if (tbl == NULL)
+ return -1;
+
+ impl = (odph_iplookup_table_impl *)tbl;
+
+ /* check magic word */
+ if (impl->magicword != ODPH_IP_LOOKUP_TABLE_MAGIC_WORD) {
+ ODPH_DBG("wrong magicword for IP prefix table\n");
+ return -1;
+ }
+
+ /* destroy trie */
+ trie_destroy(impl, impl->trie);
+
+ /* free all L2 and L3 entries */
+ buff1 = ENTRY_BUFF_ARR(impl->l1e);
+ for (i = 0; i < ENTRY_NUM_L1; i++) {
+ if ((impl->l1e[i]).child == 0)
+ continue;
+
+ subtree = (prefix_entry_t *)impl->l1e[i].ptr;
+ buff2 = ENTRY_BUFF_ARR(subtree);
+ /* destroy all l3 subtrees of this l2 subtree */
+ for (j = 0; j < ENTRY_NUM_SUBTREE; j++) {
+ if (subtree[j].child == 0)
+ continue;
+ odp_queue_enq(
+ impl->free_slots[CACHE_TYPE_TRIE],
+ odp_buffer_to_event(buff2[j]));
+ }
+ /* destroy this l2 subtree */
+ odp_queue_enq(
+ impl->free_slots[CACHE_TYPE_TRIE],
+ odp_buffer_to_event(buff1[i]));
+ }
+
+ /* destroy all cache */
+ cache_destroy(impl);
+
+ /* free impl */
+ odp_shm_free(odp_shm_lookup(impl->name));
+ return 0;
+}
+
+/* Insert the prefix into level x: fill the 2^(level-cidr) entries
+ * covered by the prefix, overwriting only entries whose stored cidr
+ * is not longer (longest-prefix wins). Entries that already hold a
+ * child subtree are pushed down recursively (leaf pushing).
+ * Return:
+ * -1 error
+ * 0 the table is unmodified
+ * 1 the table is modified
+ */
+static int
+prefix_insert_into_lx(
+ odph_iplookup_table_impl *tbl, prefix_entry_t *entry,
+ uint8_t cidr, odp_buffer_t nexthop, uint8_t level)
+{
+ uint8_t ret = 0;
+ uint32_t i = 0, limit = (1 << (level - cidr));
+ prefix_entry_t *e = entry, *ne = NULL;
+
+ for (i = 0; i < limit; i++, e++) {
+ if (e->child == 1) {
+ if (e->cidr > cidr)
+ continue;
+
+ e->cidr = cidr;
+ /* push to next level */
+ ne = (prefix_entry_t *)e->ptr;
+ ret = prefix_insert_into_lx(
+ tbl, ne, cidr, nexthop, cidr + 8);
+ } else {
+ if (e->cidr > cidr)
+ continue;
+
+ e->child = 0;
+ e->cidr = cidr;
+ e->nexthop = nexthop;
+ ret = 1;
+ }
+ }
+ return ret;
+}
+
+/* Recursive step of prefix insertion below L1: descend (creating a
+ * subtree if needed, pushing any existing shorter prefix down to it),
+ * then either insert at this level (cidr <= 8) or recurse with the
+ * next 8 address bits. Returns the prefix_insert_into_lx status
+ * (-1 error, 0 unmodified, 1 modified).
+ */
+static int
+prefix_insert_iter(
+ odph_iplookup_table_impl *tbl, prefix_entry_t *entry,
+ odp_buffer_t *buff, uint32_t ip, uint8_t cidr,
+ odp_buffer_t nexthop, uint8_t level, uint8_t depth)
+{
+ uint8_t state = 0;
+ prefix_entry_t *ne = NULL;
+ odp_buffer_t *nbuff = NULL;
+
+ /* If the child subtree exists, get it. */
+ if (entry->child) {
+ ne = (prefix_entry_t *)entry->ptr;
+ nbuff = ENTRY_BUFF_ARR(ne);
+ } else {
+ /* If the child does not exist, create a new subtree. */
+ odp_buffer_t buf, push = entry->nexthop;
+
+ buf = cache_get_buffer(tbl, CACHE_TYPE_SUBTREE);
+ if (buf == ODP_BUFFER_INVALID) {
+ ODPH_DBG("failed to get subtree buffer from cache.\n");
+ return -1;
+ }
+ ne = (prefix_entry_t *)odp_buffer_addr(buf);
+ nbuff = ENTRY_BUFF_ARR(ne);
+
+ entry->child = 1;
+ entry->ptr = ne;
+ *buff = buf;
+
+ /* If this entry contains a nexthop and a small cidr,
+ * push it to the next level.
+ */
+ if (entry->cidr > 0) {
+ state = prefix_insert_into_lx(
+ tbl, ne, entry->cidr,
+ push, entry->cidr + 8);
+ }
+ }
+
+ /* top 8 bits of the shifted ip select the entry in this subtree */
+ ne += (ip >> 24);
+ nbuff += (ip >> 24);
+ if (cidr <= 8) {
+ state = prefix_insert_into_lx(
+ tbl, ne, cidr + depth * 8, nexthop, level);
+ } else {
+ state = prefix_insert_iter(
+ tbl, ne, nbuff, ip << 8, cidr - 8,
+ nexthop, level + 8, depth + 1);
+ }
+
+ return state;
+}
+
+/* Insert a prefix/nexthop pair. 'key' is an odph_iplookup_prefix_t,
+ * 'value' an odp_buffer_t holding the nexthop. The prefix is first
+ * recorded in the binary trie (for overlap detection on delete),
+ * then leaf-pushed into the 16/8/8 multibit table.
+ * Returns the insert status (-1 error, 0 unmodified, 1 modified).
+ */
+int
+odph_iplookup_table_put_value(odph_table_t tbl, void *key, void *value)
+{
+ if ((tbl == NULL) || (key == NULL) || (value == NULL))
+ return -1;
+
+ odph_iplookup_table_impl *impl = (odph_iplookup_table_impl *)tbl;
+ odph_iplookup_prefix_t *prefix = (odph_iplookup_prefix_t *)key;
+ prefix_entry_t *l1e = NULL;
+ odp_buffer_t nexthop = *((odp_buffer_t *)value);
+ int ret = 0;
+
+ if (prefix->cidr == 0)
+ return -1;
+ /* mask off host bits below the prefix length */
+ prefix->ip = prefix->ip & (0xffffffff << (IP_LENGTH - prefix->cidr));
+
+ /* insert into trie */
+ ret = trie_insert_node(
+ impl, impl->trie,
+ prefix->ip, prefix->cidr, nexthop);
+
+ if (ret < 0) {
+ ODPH_DBG("failed to insert into trie\n");
+ return -1;
+ }
+
+ /* get L1 entry */
+ l1e = &impl->l1e[prefix->ip >> 16];
+ odp_buffer_t *buff = ENTRY_BUFF_ARR(impl->l1e) + (prefix->ip >> 16);
+
+ if (prefix->cidr <= 16) {
+ ret = prefix_insert_into_lx(
+ impl, l1e, prefix->cidr, nexthop, 16);
+ } else {
+ ret = prefix_insert_iter(
+ impl, l1e, buff,
+ ((prefix->ip) << 16), prefix->cidr - 16,
+ nexthop, 24, 2);
+ }
+
+ return ret;
+}
+
+/* Longest-prefix lookup. 'key' is a 32-bit IPv4 address; the matched
+ * nexthop buffer handle is written to 'buffer' (ODP_BUFFER_INVALID
+ * when only the default prefix matches). Returns 0, or -EINVAL for
+ * NULL arguments.
+ * NOTE(review): the entry == NULL check is dead code (address of an
+ * array element is never NULL), and the printf on the default-prefix
+ * path looks like leftover debug output.
+ */
+int
+odph_iplookup_table_get_value(
+ odph_table_t tbl, void *key, void *buffer, uint32_t buffer_size)
+{
+ if ((tbl == NULL) || (key == NULL) || (buffer == NULL))
+ return -EINVAL;
+
+ odph_iplookup_table_impl *impl = (odph_iplookup_table_impl *)tbl;
+ uint32_t ip = *((uint32_t *)key);
+ prefix_entry_t *entry = &impl->l1e[ip >> 16];
+ odp_buffer_t *buff = (odp_buffer_t *)buffer;
+
+ if (entry == NULL) {
+ ODPH_DBG("failed to get L1 entry.\n");
+ return -1;
+ }
+
+ ip <<= 16;
+ while (entry->child) {
+ entry = (prefix_entry_t *)entry->ptr;
+ entry += ip >> 24;
+ ip <<= 8;
+ }
+
+ /* copy data */
+ if (entry->nexthop == ODP_BUFFER_INVALID) {
+ /* ONLY match the default prefix */
+ printf("only match the default prefix\n");
+ *buff = ODP_BUFFER_INVALID;
+ } else {
+ *buff = entry->nexthop;
+ }
+
+ return 0;
+}
+
+/* Delete a prefix at level x: re-cover the 2^(level-cidr) entries of
+ * the deleted prefix with the overlapping (shorter) prefix
+ * (over_cidr, over_nexthop), recursing into child subtrees and
+ * recycling any subtree that collapses to a uniform value.
+ * Returns 1 when every covered entry now equals the overlap value
+ * (so the caller may recycle this whole subtree), 0 otherwise.
+ */
+static int
+prefix_delete_lx(
+ odph_iplookup_table_impl *tbl, prefix_entry_t *l1e,
+ odp_buffer_t *buff, uint8_t cidr, uint8_t over_cidr,
+ odp_buffer_t over_nexthop, uint8_t level)
+{
+ uint8_t ret, flag = 1;
+ prefix_entry_t *e = l1e;
+ odp_buffer_t *b = buff;
+ uint32_t i = 0, limit = 1 << (level - cidr);
+
+ for (i = 0; i < limit; i++, e++, b++) {
+ if (e->child == 1) {
+ if (e->cidr > cidr) {
+ flag = 0;
+ continue;
+ }
+
+ prefix_entry_t *ne = (prefix_entry_t *)e->ptr;
+ odp_buffer_t *nbuff = ENTRY_BUFF_ARR(ne);
+
+ e->cidr = over_cidr;
+ ret = prefix_delete_lx(
+ tbl, ne, nbuff, cidr, over_cidr,
+ over_nexthop, cidr + 8);
+
+ /* If ret == 1, the next 2^8 entries equal to
+ * (over_cidr, over_nexthop). In this case, we
+ * should not push the (over_cidr, over_nexthop)
+ * to the next level. In fact, we should recycle
+ * the next 2^8 entries.
+ */
+ if (ret) {
+ /* destroy subtree */
+ cache_init_buffer(
+ *b, CACHE_TYPE_SUBTREE,
+ ENTRY_SIZE * ENTRY_NUM_SUBTREE);
+ odp_queue_enq(
+ tbl->free_slots[CACHE_TYPE_SUBTREE],
+ odp_buffer_to_event(*b));
+ e->child = 0;
+ e->nexthop = over_nexthop;
+ } else {
+ flag = 0;
+ }
+ } else {
+ if (e->cidr > cidr) {
+ flag = 0;
+ continue;
+ } else {
+ e->cidr = over_cidr;
+ e->nexthop = over_nexthop;
+ }
+ }
+ }
+ return flag;
+}
+
+/* Check if the entry's subtree can be recycled.
+ * An entry can be recycled due to two reasons:
+ * - all children of the entry are the same,
+ * - all children of the entry have a cidr smaller than the level
+ *   bottom bound.
+ * Returns 1 if recyclable, 0 otherwise.
+ */
+static uint8_t
+can_recycle(prefix_entry_t *e, uint32_t level)
+{
+ uint8_t recycle = 1;
+ int i = 1;
+ prefix_entry_t *ne = (prefix_entry_t *)e->ptr;
+
+ if (ne->child)
+ return 0;
+
+ uint8_t cidr = ne->cidr;
+ odp_buffer_t index = ne->nexthop;
+
+ if (cidr > level)
+ return 0;
+
+ ne++;
+ /* all 256 entries must match the first one exactly */
+ for (; i < 256; i++, ne++) {
+ if (
+ ne->child != 0 || ne->cidr != cidr ||
+ ne->nexthop != index) {
+ recycle = 0;
+ break;
+ }
+ }
+ return recycle;
+}
+
+/* Recursive step of prefix deletion below L1: find the covering
+ * prefix via the trie, descend while more than 8 prefix bits remain,
+ * recycle any subtree that becomes uniform on the way back up, and
+ * finally rewrite the affected entries at the target level.
+ * Returns 1 when the caller's subtree became uniform, 0 otherwise.
+ */
+static uint8_t
+prefix_delete_iter(
+ odph_iplookup_table_impl *tbl, prefix_entry_t *e,
+ odp_buffer_t *buff, uint32_t ip, uint8_t cidr,
+ uint8_t level, uint8_t depth)
+{
+ uint8_t ret = 0, over_cidr;
+ odp_buffer_t over_nexthop;
+
+ trie_detect_overlap(
+ tbl->trie, ip, cidr + 8 * depth, level,
+ &over_cidr, &over_nexthop);
+ if (cidr > 8) {
+ prefix_entry_t *ne =
+ (prefix_entry_t *)e->ptr;
+ odp_buffer_t *nbuff = ENTRY_BUFF_ARR(ne);
+
+ ne += ((uint32_t)(ip << level) >> 24);
+ nbuff += ((uint32_t)(ip << level) >> 24);
+ ret = prefix_delete_iter(
+ tbl, ne, nbuff, ip, cidr - 8,
+ level + 8, depth + 1);
+
+ if (ret && can_recycle(e, level)) {
+ /* destroy subtree */
+ cache_init_buffer(
+ *buff, CACHE_TYPE_SUBTREE,
+ ENTRY_SIZE * ENTRY_NUM_SUBTREE);
+ odp_queue_enq(
+ tbl->free_slots[CACHE_TYPE_SUBTREE],
+ odp_buffer_to_event(*buff));
+ e->child = 0;
+ e->nexthop = over_nexthop;
+ e->cidr = over_cidr;
+ return 1;
+ }
+ return 0;
+ }
+
+ ret = prefix_delete_lx(
+ tbl, e, buff, cidr + 8 * depth,
+ over_cidr, over_nexthop, level);
+ return ret;
+}
+
+/* Remove a prefix from the table: re-cover its entries with the
+ * longest overlapping shorter prefix (found via the trie), recycle
+ * collapsed subtrees, then delete the prefix from the trie itself.
+ * Returns the trie_delete_node status (0/-1), or -EINVAL for NULL
+ * arguments.
+ * NOTE(review): 'prefix->cidr < 0' is always false because cidr is
+ * uint8_t — dead check; probably meant == 0 or a range check.
+ */
+int
+odph_iplookup_table_remove_value(odph_table_t tbl, void *key)
+{
+ if ((tbl == NULL) || (key == NULL))
+ return -EINVAL;
+
+ odph_iplookup_table_impl *impl = (odph_iplookup_table_impl *)tbl;
+ odph_iplookup_prefix_t *prefix = (odph_iplookup_prefix_t *)key;
+ uint32_t ip = prefix->ip;
+ uint8_t cidr = prefix->cidr;
+
+ if (prefix->cidr < 0)
+ return -EINVAL;
+
+ prefix_entry_t *entry = &impl->l1e[ip >> 16];
+ odp_buffer_t *buff = ENTRY_BUFF_ARR(impl->l1e) + (ip >> 16);
+ uint8_t over_cidr, ret;
+ odp_buffer_t over_nexthop;
+
+ trie_detect_overlap(
+ impl->trie, ip, cidr, 16, &over_cidr, &over_nexthop);
+
+ if (cidr <= 16) {
+ prefix_delete_lx(
+ impl, entry, buff, cidr, over_cidr, over_nexthop, 16);
+ } else {
+ prefix_entry_t *ne = (prefix_entry_t *)entry->ptr;
+ odp_buffer_t *nbuff = ENTRY_BUFF_ARR(ne);
+
+ ne += ((uint32_t)(ip << 16) >> 24);
+ nbuff += ((uint32_t)(ip << 16) >> 24);
+ ret = prefix_delete_iter(impl, ne, nbuff, ip, cidr - 16, 24, 2);
+
+ if (ret && can_recycle(entry, 16)) {
+ /* destroy subtree */
+ cache_init_buffer(
+ *buff, CACHE_TYPE_SUBTREE,
+ sizeof(prefix_entry_t) * ENTRY_NUM_SUBTREE);
+ odp_queue_enq(
+ impl->free_slots[CACHE_TYPE_SUBTREE],
+ odp_buffer_to_event(*buff));
+ entry->child = 0;
+ entry->cidr = over_cidr;
+ entry->nexthop = over_nexthop;
+ }
+ }
+
+ return trie_delete_node(impl, impl->trie, ip, cidr);
+}
+
+/* Generic table-ops vtable binding the IP lookup implementation
+ * to the odph_table_ops_t interface (create/lookup/destroy/put/get/remove).
+ */
+odph_table_ops_t odph_iplookup_table_ops = {
+ odph_iplookup_table_create,
+ odph_iplookup_table_lookup,
+ odph_iplookup_table_destroy,
+ odph_iplookup_table_put_value,
+ odph_iplookup_table_get_value,
+ odph_iplookup_table_remove_value
+};
diff --git a/helper/linux.c b/helper/linux.c
index 7bd0b07ad..1f009cdf3 100644
--- a/helper/linux.c
+++ b/helper/linux.c
@@ -42,8 +42,6 @@ static void *odp_run_start_routine(void *arg)
if (ret < 0)
ODPH_ERR("Local term failed\n");
- else if (ret == 0 && odp_term_global(thr_params->instance))
- ODPH_ERR("Global term failed\n");
return ret_ptr;
}
@@ -277,8 +275,6 @@ static void *odpthread_run_start_routine(void *arg)
if (ret < 0)
ODPH_ERR("Local term failed\n");
- else if (ret == 0 && odp_term_global(thr_params->instance))
- ODPH_ERR("Global term failed\n");
/* for process implementation of odp threads, just return status... */
if (start_args->linuxtype == ODPTHREAD_PROCESS)
diff --git a/helper/odph_cuckootable.h b/helper/odph_cuckootable.h
new file mode 100644
index 000000000..d56998078
--- /dev/null
+++ b/helper/odph_cuckootable.h
@@ -0,0 +1,82 @@
+/* Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ODPH_CUCKOO_TABLE_H_
+#define ODPH_CUCKOO_TABLE_H_
+
+#include <odp/helper/table.h>
+
+/**
+ * @file
+ *
+ * ODP Cuckoo Hash Table
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+odph_table_t odph_cuckoo_table_create(
+ const char *name,
+ uint32_t capacity,
+ uint32_t key_size,
+ uint32_t value_size);
+
+odph_table_t odph_cuckoo_table_lookup(const char *name);
+
+int odph_cuckoo_table_destroy(odph_table_t table);
+
+int odph_cuckoo_table_put_value(
+ odph_table_t table,
+ void *key, void *value);
+
+int odph_cuckoo_table_get_value(
+ odph_table_t table,
+ void *key, void *buffer,
+ uint32_t buffer_size);
+
+int odph_cuckoo_table_remove_value(odph_table_t table, void *key);
+
+extern odph_table_ops_t odph_cuckoo_table_ops;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ODPH_CUCKOO_TABLE_H_ */
diff --git a/helper/odph_iplookuptable.h b/helper/odph_iplookuptable.h
new file mode 100644
index 000000000..0ae6b3762
--- /dev/null
+++ b/helper/odph_iplookuptable.h
@@ -0,0 +1,58 @@
+/* Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP IP Lookup Table
+ *
+ * This is an implementation of the IP lookup table. The key of
+ * this table is IPv4 address (32 bits), and the value can be
+ * defined by user. This table uses the 16,8,8 ip lookup (longest
+ * prefix matching) algorithm.
+ */
+
+#ifndef ODPH_IPLOOKUP_TABLE_H_
+#define ODPH_IPLOOKUP_TABLE_H_
+
+#include <odp/helper/table.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** IPv4 prefix used as the lookup-table key. */
+typedef struct {
+ uint32_t ip; /**< IPv4 address */
+ uint8_t cidr; /**< prefix length in bits */
+} odph_iplookup_prefix_t;
+
+odph_table_t odph_iplookup_table_create(
+ const char *name,
+ uint32_t ODP_IGNORED_1,
+ uint32_t ODP_IGNORED_2,
+ uint32_t value_size);
+
+odph_table_t odph_iplookup_table_lookup(const char *name);
+
+int odph_iplookup_table_destroy(odph_table_t table);
+
+int odph_iplookup_table_put_value(
+ odph_table_t table, void *key, void *value);
+
+int odph_iplookup_table_get_value(
+ odph_table_t table, void *key,
+ void *buffer, uint32_t buffer_size);
+
+int odph_iplookup_table_remove_value(
+ odph_table_t table, void *key);
+
+extern odph_table_ops_t odph_iplookup_table_ops;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ODPH_IPLOOKUP_TABLE_H_ */
diff --git a/helper/test/.gitignore b/helper/test/.gitignore
index 5ce3c3baf..e5b6a0f21 100644
--- a/helper/test/.gitignore
+++ b/helper/test/.gitignore
@@ -1,6 +1,8 @@
*.trs
*.log
chksum
+cuckootable
+iplookuptable
odpthreads
parse
process
diff --git a/helper/test/Makefile.am b/helper/test/Makefile.am
index e68dabecf..8b1aa765e 100644
--- a/helper/test/Makefile.am
+++ b/helper/test/Makefile.am
@@ -6,10 +6,12 @@ AM_LDFLAGS += -static
TESTS_ENVIRONMENT += TEST_DIR=${builddir}
EXECUTABLES = chksum$(EXEEXT) \
+ cuckootable$(EXEEXT) \
+ table$(EXEEXT) \
thread$(EXEEXT) \
parse$(EXEEXT)\
- process$(EXEEXT)\
- table$(EXEEXT)
+ process$(EXEEXT) \
+ iplookuptable$(EXEEXT)
COMPILE_ONLY = odpthreads
@@ -27,6 +29,7 @@ test_PROGRAMS = $(EXECUTABLES) $(COMPILE_ONLY)
EXTRA_DIST = odpthreads_as_processes odpthreads_as_pthreads
dist_chksum_SOURCES = chksum.c
+dist_cuckootable_SOURCES = cuckootable.c
dist_odpthreads_SOURCES = odpthreads.c
odpthreads_LDADD = $(LIB)/libodphelper-linux.la $(LIB)/libodp-dpdk.la
dist_thread_SOURCES = thread.c
@@ -35,3 +38,4 @@ dist_process_SOURCES = process.c
dist_parse_SOURCES = parse.c
process_LDADD = $(LIB)/libodphelper-linux.la $(LIB)/libodp-dpdk.la
dist_table_SOURCES = table.c
+dist_iplookuptable_SOURCES = iplookuptable.c
diff --git a/helper/test/cuckootable.c b/helper/test/cuckootable.c
new file mode 100644
index 000000000..5b4333b56
--- /dev/null
+++ b/helper/test/cuckootable.c
@@ -0,0 +1,573 @@
+/* Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <sys/queue.h>
+#include <sys/time.h>
+#include <time.h>
+
+#include <odp_api.h>
+#include <test_debug.h>
+#include <../odph_cuckootable.h>
+
+/*******************************************************************************
+ * Cuckoo hash table unit and performance tests.
+ *
+ * The functions below exercise put/get/remove operations, table lookup,
+ * creation with bad parameters, and insertion/lookup performance.
+ */
+/******************************************************************************/
+
+/* 5-tuple key type */
+struct flow_key {
+ uint32_t ip_src;
+ uint32_t ip_dst;
+ uint16_t port_src;
+ uint16_t port_dst;
+ uint8_t proto;
+} __packed;
+
+/*
+ * Print out result of unit test hash operation.
+ */
+static void print_key_info(
+ const char *msg, const struct flow_key *key)
+{
+ const uint8_t *p = (const uint8_t *)key;
+ unsigned i;
+
+ printf("%s key:0x", msg);
+ for (i = 0; i < sizeof(struct flow_key); i++)
+ printf("%02X", p[i]);
+ printf("\n");
+}
+
+static double get_time_diff(struct timeval *start, struct timeval *end)
+{
+ int sec = end->tv_sec - start->tv_sec;
+ int usec = end->tv_usec - start->tv_usec;
+
+ if (usec < 0) {
+ sec--;
+ usec += 1000000;
+ }
+ double diff = sec + (double)usec / 1000000;
+
+ return diff;
+}
+
+/** Create IPv4 address */
+#define IPv4(a, b, c, d) ((uint32_t)(((a) & 0xff) << 24) | \
+ (((b) & 0xff) << 16) | \
+ (((c) & 0xff) << 8) | \
+ ((d) & 0xff))
+
+/* Keys used by unit test functions */
+static struct flow_key keys[5] = { {
+ .ip_src = IPv4(0x03, 0x02, 0x01, 0x00),
+ .ip_dst = IPv4(0x07, 0x06, 0x05, 0x04),
+ .port_src = 0x0908,
+ .port_dst = 0x0b0a,
+ .proto = 0x0c,
+}, {
+ .ip_src = IPv4(0x13, 0x12, 0x11, 0x10),
+ .ip_dst = IPv4(0x17, 0x16, 0x15, 0x14),
+ .port_src = 0x1918,
+ .port_dst = 0x1b1a,
+ .proto = 0x1c,
+}, {
+ .ip_src = IPv4(0x23, 0x22, 0x21, 0x20),
+ .ip_dst = IPv4(0x27, 0x26, 0x25, 0x24),
+ .port_src = 0x2928,
+ .port_dst = 0x2b2a,
+ .proto = 0x2c,
+}, {
+ .ip_src = IPv4(0x33, 0x32, 0x31, 0x30),
+ .ip_dst = IPv4(0x37, 0x36, 0x35, 0x34),
+ .port_src = 0x3938,
+ .port_dst = 0x3b3a,
+ .proto = 0x3c,
+}, {
+ .ip_src = IPv4(0x43, 0x42, 0x41, 0x40),
+ .ip_dst = IPv4(0x47, 0x46, 0x45, 0x44),
+ .port_src = 0x4948,
+ .port_dst = 0x4b4a,
+ .proto = 0x4c,
+} };
+
+/*
+ * Basic sequence of operations for a single key:
+ * - put
+ * - get (hit)
+ * - remove
+ * - get (miss)
+ */
+static int test_put_remove(void)
+{
+ odph_table_t table;
+ odph_table_ops_t *ops;
+
+ ops = &odph_cuckoo_table_ops;
+
+ /* test with standard put/get/remove functions */
+ int ret;
+
+ table = ops->f_create("put_remove", 10, sizeof(struct flow_key), 0);
+ if (table == NULL) {
+ printf("cuckoo hash table creation failed\n");
+ return -1;
+ }
+
+ ret = odph_cuckoo_table_put_value(table, &keys[0], NULL);
+ print_key_info("Add", &keys[0]);
+ if (ret < 0) {
+ printf("failed to add key\n");
+ odph_cuckoo_table_destroy(table);
+ return -1;
+ }
+
+ ret = odph_cuckoo_table_get_value(table, &keys[0], NULL, 0);
+ print_key_info("Lkp", &keys[0]);
+ if (ret < 0) {
+ printf("failed to find key\n");
+ odph_cuckoo_table_destroy(table);
+ return -1;
+ }
+
+ ret = odph_cuckoo_table_remove_value(table, &keys[0]);
+ print_key_info("Del", &keys[0]);
+ if (ret < 0) {
+ printf("failed to delete key\n");
+ odph_cuckoo_table_destroy(table);
+ return -1;
+ }
+
+ ret = odph_cuckoo_table_get_value(table, &keys[0], NULL, 0);
+ print_key_info("Lkp", &keys[0]);
+ if (ret >= 0) {
+ printf("error: found key after deleting!\n");
+ odph_cuckoo_table_destroy(table);
+ return -1;
+ }
+
+ odph_cuckoo_table_destroy(table);
+ return 0;
+}
+
+/*
+ * Sequence of operations for a single key:
+ * key type : struct flow_key
+ * value type: uint8_t
+ * - remove: miss
+ * - put
+ * - get: hit
+ * - put: update
+ * - get: hit (updated data)
+ * - remove: hit
+ * - remove: miss
+ */
+static int test_put_update_remove(void)
+{
+ odph_table_t table;
+ int ret;
+ uint8_t val1 = 1, val2 = 2, val = 0;
+
+ table = odph_cuckoo_table_create(
+ "put_update_remove",
+ 10, sizeof(struct flow_key), sizeof(uint8_t));
+ if (table == NULL) {
+ printf("failed to create table\n");
+ return -1;
+ }
+
+ ret = odph_cuckoo_table_remove_value(table, &keys[0]);
+ print_key_info("Del", &keys[0]);
+ if (ret >= 0) {
+ printf("error: found non-existent key\n");
+ odph_cuckoo_table_destroy(table);
+ return -1;
+ }
+
+ ret = odph_cuckoo_table_put_value(table, &keys[0], &val1);
+ print_key_info("Add", &keys[0]);
+ if (ret < 0) {
+ printf("failed to add key\n");
+ odph_cuckoo_table_destroy(table);
+ return -1;
+ }
+
+ ret = odph_cuckoo_table_get_value(
+ table, &keys[0], &val, sizeof(uint8_t));
+ print_key_info("Lkp", &keys[0]);
+ if (ret < 0) {
+ printf("failed to find key\n");
+ odph_cuckoo_table_destroy(table);
+ return -1;
+ }
+
+ ret = odph_cuckoo_table_put_value(table, &keys[0], &val2);
+ if (ret < 0) {
+ printf("failed to re-add key\n");
+ odph_cuckoo_table_destroy(table);
+ return -1;
+ }
+
+ ret = odph_cuckoo_table_get_value(
+ table, &keys[0], &val, sizeof(uint8_t));
+ print_key_info("Lkp", &keys[0]);
+ if (ret < 0) {
+ printf("failed to find key\n");
+ odph_cuckoo_table_destroy(table);
+ return -1;
+ }
+
+ ret = odph_cuckoo_table_remove_value(table, &keys[0]);
+ print_key_info("Del", &keys[0]);
+ if (ret < 0) {
+ printf("failed to delete key\n");
+ odph_cuckoo_table_destroy(table);
+ return -1;
+ }
+
+ ret = odph_cuckoo_table_remove_value(table, &keys[0]);
+ print_key_info("Del", &keys[0]);
+ if (ret >= 0) {
+ printf("error: deleted already deleted key\n");
+ odph_cuckoo_table_destroy(table);
+ return -1;
+ }
+
+ odph_cuckoo_table_destroy(table);
+ return 0;
+}
+
+/*
+ * Sequence of operations for find existing hash table
+ *
+ * - create table
+ * - find existing table: hit
+ * - find non-existing table: miss
+ *
+ */
+static int test_table_lookup(void)
+{
+ odph_table_t table, result;
+
+ /* Create cuckoo hash table. */
+ table = odph_cuckoo_table_create("table_lookup", 10, 4, 0);
+ if (table == NULL) {
+ printf("failed to create table\n");
+ return -1;
+ }
+
+ /* Try to find existing hash table */
+ result = odph_cuckoo_table_lookup("table_lookup");
+ if (result != table) {
+ printf("error: could not find existing table\n");
+ odph_cuckoo_table_destroy(table);
+ return -1;
+ }
+
+ /* Try to find non-existing hash table */
+ result = odph_cuckoo_table_lookup("non_existing");
+ if (result != NULL) {
+ printf("error: found table that shouldn't exist.\n");
+ odph_cuckoo_table_destroy(table);
+ return -1;
+ }
+
+ /* Cleanup. */
+ odph_cuckoo_table_destroy(table);
+ return 0;
+}
+
+/*
+ * Sequence of operations for 5 keys
+ * - put keys
+ * - get keys: hit
+ * - remove keys : hit
+ * - get keys: miss
+ */
+static int test_five_keys(void)
+{
+ odph_table_t table;
+ unsigned i;
+ int ret;
+
+ table = odph_cuckoo_table_create(
+ "five_keys", 10, sizeof(struct flow_key), 0);
+ if (table == NULL) {
+ printf("failed to create table\n");
+ return -1;
+ }
+
+ /* put */
+ for (i = 0; i < 5; i++) {
+ ret = odph_cuckoo_table_put_value(table, &keys[i], NULL);
+ print_key_info("Add", &keys[i]);
+ if (ret < 0) {
+ printf("failed to add key %d\n", i);
+ odph_cuckoo_table_destroy(table);
+ return -1;
+ }
+ }
+
+ /* get */
+ for (i = 0; i < 5; i++) {
+ ret = odph_cuckoo_table_get_value(table, &keys[i], NULL, 0);
+ print_key_info("Lkp", &keys[i]);
+ if (ret < 0) {
+ printf("failed to find key %d\n", i);
+ odph_cuckoo_table_destroy(table);
+ return -1;
+ }
+ }
+
+ /* remove */
+ for (i = 0; i < 5; i++) {
+ ret = odph_cuckoo_table_remove_value(table, &keys[i]);
+ print_key_info("Del", &keys[i]);
+ if (ret < 0) {
+ printf("failed to delete key %d\n", i);
+ odph_cuckoo_table_destroy(table);
+ return -1;
+ }
+ }
+
+ /* get */
+ for (i = 0; i < 5; i++) {
+ ret = odph_cuckoo_table_get_value(table, &keys[i], NULL, 0);
+ print_key_info("Lkp", &keys[i]);
+ if (ret >= 0) {
+ printf("found non-existing key %d\n", i);
+ odph_cuckoo_table_destroy(table);
+ return -1;
+ }
+ }
+
+ odph_cuckoo_table_destroy(table);
+ return 0;
+}
+
+#define BUCKET_ENTRIES 4
+#define HASH_ENTRIES_MAX 1048576
+/*
+ * Do tests for cuckoo table creation with bad parameters.
+ */
+static int test_creation_with_bad_parameters(void)
+{
+ odph_table_t table;
+
+ table = odph_cuckoo_table_create(
+ "bad_param_0", HASH_ENTRIES_MAX + 1, 4, 0);
+ if (table != NULL) {
+ odph_cuckoo_table_destroy(table);
+ printf("Impossible creating table successfully with entries in parameter exceeded\n");
+ return -1;
+ }
+
+ table = odph_cuckoo_table_create(
+ "bad_param_1", BUCKET_ENTRIES - 1, 4, 0);
+ if (table != NULL) {
+ odph_cuckoo_table_destroy(table);
+ printf("Impossible creating hash successfully if entries less than bucket_entries in parameter\n");
+ return -1;
+ }
+
+ table = odph_cuckoo_table_create("bad_param_2", 10, 0, 0);
+ if (table != NULL) {
+ odph_cuckoo_table_destroy(table);
+ printf("Impossible creating hash successfully if key_len in parameter is zero\n");
+ return -1;
+ }
+
+ printf("# Test successful. No more errors expected\n");
+
+ return 0;
+}
+
+#define PERFORMANCE_CAPACITY 1000000
+
+/*
+ * Test the performance of cuckoo hash table.
+ * table capacity : 1,000,000
+ * key size : 4 bytes
+ * value size : 0
+ * Insert at most 'number' random keys into the table. If one
+ * insertion fails, the remaining insertions are cancelled.
+ * The reported table utilization shows the actual number
+ * of items inserted.
+ * Then search all inserted items.
+ */
+static int test_performance(int number)
+{
+ odph_table_t table;
+
+ /* generate random keys */
+ uint8_t *key_space = NULL;
+ const void **key_ptr = NULL;
+ unsigned key_len = 4, j;
+ unsigned elem_num = (number > PERFORMANCE_CAPACITY) ?
+ PERFORMANCE_CAPACITY : number;
+ unsigned key_num = key_len * elem_num;
+
+ key_space = (uint8_t *)malloc(key_num);
+ key_ptr = (const void **)malloc(sizeof(void *) * elem_num);
+ if (key_space == NULL)
+ return -ENOENT;
+
+ for (j = 0; j < key_num; j++) {
+ key_space[j] = rand() % 255;
+ if (j % key_len == 0)
+ key_ptr[j / key_len] = &key_space[j];
+ }
+
+ unsigned num;
+ int ret = 0;
+ struct timeval start, end;
+ double add_time = 0;
+
+ fflush(stdout);
+ table = odph_cuckoo_table_create(
+ "performance_test", PERFORMANCE_CAPACITY, key_len, 0);
+ if (table == NULL) {
+ printf("cuckoo table creation failed\n");
+ return -ENOENT;
+ }
+
+ /* insert (put) */
+ gettimeofday(&start, 0);
+ for (j = 0; j < elem_num; j++) {
+ ret = odph_cuckoo_table_put_value(
+ table, &key_space[j * key_len], NULL);
+ if (ret < 0)
+ break;
+ }
+ gettimeofday(&end, 0);
+ num = j;
+ add_time = get_time_diff(&start, &end);
+ printf(
+ "add %u/%u (%.2f) items, time = %.9lfs\n",
+ num, PERFORMANCE_CAPACITY,
+ (double)num / PERFORMANCE_CAPACITY, add_time);
+
+ /* search (get) */
+ gettimeofday(&start, 0);
+ for (j = 0; j < num; j++) {
+ ret = odph_cuckoo_table_get_value(
+ table, &key_space[j * key_len], NULL, 0);
+
+ if (ret < 0)
+ printf("lookup error\n");
+ }
+ gettimeofday(&end, 0);
+ printf(
+ "lookup %u items, time = %.9lfs\n",
+ num, get_time_diff(&start, &end));
+
+ odph_cuckoo_table_destroy(table);
+ free(key_ptr);
+ free(key_space);
+ return ret;
+}
+
+/*
+ * Do all unit and performance tests.
+ */
+static int
+test_cuckoo_hash_table(void)
+{
+ if (test_put_remove() < 0)
+ return -1;
+ if (test_table_lookup() < 0)
+ return -1;
+ if (test_put_update_remove() < 0)
+ return -1;
+ if (test_five_keys() < 0)
+ return -1;
+ if (test_creation_with_bad_parameters() < 0)
+ return -1;
+ if (test_performance(950000) < 0)
+ return -1;
+
+ return 0;
+}
+
+int main(int argc TEST_UNUSED, char *argv[] TEST_UNUSED)
+{
+ odp_instance_t instance;
+ int ret = 0;
+
+ ret = odp_init_global(&instance, NULL, NULL);
+ if (ret != 0) {
+ fprintf(stderr, "Error: ODP global init failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ ret = odp_init_local(instance, ODP_THREAD_WORKER);
+ if (ret != 0) {
+ fprintf(stderr, "Error: ODP local init failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ srand(time(0));
+ ret = test_cuckoo_hash_table();
+
+ if (ret < 0)
+ printf("cuckoo hash table test fail!!\n");
+ else
+ printf("All Tests pass!!\n");
+
+ if (odp_term_local()) {
+ fprintf(stderr, "Error: ODP local term failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (odp_term_global(instance)) {
+ fprintf(stderr, "Error: ODP global term failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ return ret;
+}
diff --git a/helper/test/iplookuptable.c b/helper/test/iplookuptable.c
new file mode 100644
index 000000000..e1d28207b
--- /dev/null
+++ b/helper/test/iplookuptable.c
@@ -0,0 +1,174 @@
+/* Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+
+#include <odp_api.h>
+#include <test_debug.h>
+#include <../odph_iplookuptable.h>
+#include <odp/helper/ip.h>
+
+static void print_prefix_info(
+ const char *msg, uint32_t ip, uint8_t cidr)
+{
+ int i = 0;
+ uint8_t *ptr = (uint8_t *)(&ip);
+
+ printf("%s IP prefix: ", msg);
+ for (i = 3; i >= 0; i--) {
+ if (i != 3)
+ printf(".");
+ printf("%d", ptr[i]);
+ }
+ printf("/%d\n", cidr);
+}
+
+/*
+ * Basic sequence of operations for a single key:
+ * - put short prefix
+ * - put long prefix
+ * - get (hit long prefix)
+ * - remove long prefix
+ * - get (hit short prefix)
+ */
+static int test_ip_lookup_table(void)
+{
+ odph_iplookup_prefix_t prefix1, prefix2;
+ odph_table_t table;
+ int ret;
+ uint64_t value1 = 1, value2 = 2, result = 0;
+ uint32_t lkp_ip = 0;
+
+ table = odph_iplookup_table_create(
+ "prefix_test", 0, 0, sizeof(uint32_t));
+ if (table == NULL) {
+ printf("IP prefix lookup table creation failed\n");
+ return -1;
+ }
+
+ ret = odph_ipv4_addr_parse(&prefix1.ip, "192.168.0.0");
+ if (ret < 0) {
+ printf("Failed to get IP addr from str\n");
+ odph_iplookup_table_destroy(table);
+ return -1;
+ }
+ prefix1.cidr = 11;
+
+ ret = odph_ipv4_addr_parse(&prefix2.ip, "192.168.0.0");
+ if (ret < 0) {
+ printf("Failed to get IP addr from str\n");
+ odph_iplookup_table_destroy(table);
+ return -1;
+ }
+ prefix2.cidr = 24;
+
+ ret = odph_ipv4_addr_parse(&lkp_ip, "192.168.0.1");
+ if (ret < 0) {
+ printf("Failed to get IP addr from str\n");
+ odph_iplookup_table_destroy(table);
+ return -1;
+ }
+
+ /* test with standard put/get/remove functions */
+ ret = odph_iplookup_table_put_value(table, &prefix1, &value1);
+ print_prefix_info("Add", prefix1.ip, prefix1.cidr);
+ if (ret < 0) {
+ printf("Failed to add ip prefix\n");
+ odph_iplookup_table_destroy(table);
+ return -1;
+ }
+
+ ret = odph_iplookup_table_get_value(table, &lkp_ip, &result, 0);
+ print_prefix_info("Lkp", lkp_ip, 32);
+ if (ret < 0 || result != 1) {
+ printf("Failed to find longest prefix\n");
+ odph_iplookup_table_destroy(table);
+ return -1;
+ }
+
+ /* add a longer prefix */
+ ret = odph_iplookup_table_put_value(table, &prefix2, &value2);
+ print_prefix_info("Add", prefix2.ip, prefix2.cidr);
+ if (ret < 0) {
+ printf("Failed to add ip prefix\n");
+ odph_iplookup_table_destroy(table);
+ return -1;
+ }
+
+ ret = odph_iplookup_table_get_value(table, &lkp_ip, &result, 0);
+ print_prefix_info("Lkp", lkp_ip, 32);
+ if (ret < 0 || result != 2) {
+ printf("Failed to find longest prefix\n");
+ odph_iplookup_table_destroy(table);
+ return -1;
+ }
+
+ ret = odph_iplookup_table_remove_value(table, &prefix2);
+ print_prefix_info("Del", prefix2.ip, prefix2.cidr);
+ if (ret < 0) {
+ printf("Failed to delete ip prefix\n");
+ odph_iplookup_table_destroy(table);
+ return -1;
+ }
+
+ ret = odph_iplookup_table_get_value(table, &lkp_ip, &result, 0);
+ print_prefix_info("Lkp", lkp_ip, 32);
+ if (ret < 0 || result != 1) {
+ printf("Error: found result ater deleting\n");
+ odph_iplookup_table_destroy(table);
+ return -1;
+ }
+
+ ret = odph_iplookup_table_remove_value(table, &prefix1);
+ print_prefix_info("Del", prefix1.ip, prefix1.cidr);
+ if (ret < 0) {
+ printf("Failed to delete prefix\n");
+ odph_iplookup_table_destroy(table);
+ return -1;
+ }
+
+ odph_iplookup_table_destroy(table);
+ return 0;
+}
+
+int main(int argc TEST_UNUSED, char *argv[] TEST_UNUSED)
+{
+ odp_instance_t instance;
+ int ret = 0;
+
+ ret = odp_init_global(&instance, NULL, NULL);
+ if (ret != 0) {
+ fprintf(stderr, "Error: ODP global init failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ ret = odp_init_local(instance, ODP_THREAD_WORKER);
+ if (ret != 0) {
+ fprintf(stderr, "Error: ODP local init failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (test_ip_lookup_table() < 0)
+ printf("Test failed\n");
+ else
+ printf("All tests passed\n");
+
+ if (odp_term_local()) {
+ fprintf(stderr, "Error: ODP local term failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (odp_term_global(instance)) {
+ fprintf(stderr, "Error: ODP global term failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ return ret;
+}
diff --git a/include/odp/api/spec/align.h b/include/odp/api/spec/align.h
index cbe7d674d..fdf8c29e1 100644
--- a/include/odp/api/spec/align.h
+++ b/include/odp/api/spec/align.h
@@ -13,7 +13,7 @@
#ifndef ODP_API_ALIGN_H_
#define ODP_API_ALIGN_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -75,5 +75,5 @@ extern "C" {
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/atomic.h b/include/odp/api/spec/atomic.h
index 36c50cb11..408829df2 100644
--- a/include/odp/api/spec/atomic.h
+++ b/include/odp/api/spec/atomic.h
@@ -4,7 +4,6 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-
/**
* @file
*
@@ -13,7 +12,7 @@
#ifndef ODP_API_ATOMIC_H_
#define ODP_API_ATOMIC_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -629,5 +628,5 @@ int odp_atomic_lock_free_u64(odp_atomic_op_t *atomic_op);
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/barrier.h b/include/odp/api/spec/barrier.h
index fbd107248..6de683c73 100644
--- a/include/odp/api/spec/barrier.h
+++ b/include/odp/api/spec/barrier.h
@@ -4,7 +4,6 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-
/**
* @file
*
@@ -13,7 +12,7 @@
#ifndef ODP_API_BARRIER_H_
#define ODP_API_BARRIER_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -44,7 +43,6 @@ extern "C" {
*/
void odp_barrier_init(odp_barrier_t *barr, int count);
-
/**
* Synchronize thread execution on barrier.
* Wait for all threads to arrive at the barrier until they are let loose again.
@@ -64,5 +62,5 @@ void odp_barrier_wait(odp_barrier_t *barr);
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/buffer.h b/include/odp/api/spec/buffer.h
index 5c632b51f..94829b324 100644
--- a/include/odp/api/spec/buffer.h
+++ b/include/odp/api/spec/buffer.h
@@ -13,7 +13,7 @@
#ifndef ODP_API_BUFFER_H_
#define ODP_API_BUFFER_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -169,5 +169,5 @@ uint64_t odp_buffer_to_u64(odp_buffer_t hdl);
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/byteorder.h b/include/odp/api/spec/byteorder.h
index 101899714..2899adbea 100644
--- a/include/odp/api/spec/byteorder.h
+++ b/include/odp/api/spec/byteorder.h
@@ -13,7 +13,7 @@
#ifndef ODP_API_BYTEORDER_H_
#define ODP_API_BYTEORDER_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -39,6 +39,9 @@ extern "C" {
*
* @def ODP_BYTE_ORDER
* Selected byte order
+ *
+ * @def ODP_BITFIELD_ORDER
+ * Selected bitfield order
*/
/**
@@ -178,5 +181,5 @@ odp_u64le_t odp_cpu_to_le_64(uint64_t cpu64);
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/classification.h b/include/odp/api/spec/classification.h
index 523a8c415..0e1addd6e 100644
--- a/include/odp/api/spec/classification.h
+++ b/include/odp/api/spec/classification.h
@@ -13,7 +13,7 @@
#ifndef ODP_API_CLASSIFY_H_
#define ODP_API_CLASSIFY_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -44,7 +44,7 @@ extern "C" {
/**
* @def ODP_COS_NAME_LEN
- * Maximum ClassOfService name length in chars
+ * Maximum ClassOfService name length in chars including null char
*/
/**
@@ -193,12 +193,14 @@ int odp_cls_capability(odp_cls_capability_t *capability);
/**
* Create a class-of-service
*
- * @param name String intended for debugging purposes.
+ * The use of class-of-service name is optional. Unique names are not required.
*
- * @param param class of service parameters
+ * @param name Name of the class-of-service or NULL. Maximum string
+ * length is ODP_COS_NAME_LEN.
+ * @param param Class-of-service parameters
*
- * @retval class of service handle
- * @retval ODP_COS_INVALID on failure.
+ * @retval Class-of-service handle
+ * @retval ODP_COS_INVALID on failure.
*
* @note ODP_QUEUE_INVALID and ODP_POOL_INVALID are valid values for queue
* and pool associated with a class of service and when any one of these values
@@ -499,5 +501,5 @@ uint64_t odp_pmr_to_u64(odp_pmr_t hdl);
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/compiler.h b/include/odp/api/spec/compiler.h
index d271e909e..c88350e2c 100644
--- a/include/odp/api/spec/compiler.h
+++ b/include/odp/api/spec/compiler.h
@@ -13,7 +13,7 @@
#ifndef ODP_API_COMPILER_H_
#define ODP_API_COMPILER_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -49,5 +49,5 @@ extern "C" {
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/cpu.h b/include/odp/api/spec/cpu.h
index 27895119c..0f47e4798 100644
--- a/include/odp/api/spec/cpu.h
+++ b/include/odp/api/spec/cpu.h
@@ -13,7 +13,7 @@
#ifndef ODP_CPU_H_
#define ODP_CPU_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -177,5 +177,5 @@ void odp_cpu_pause(void);
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/cpumask.h b/include/odp/api/spec/cpumask.h
index 6e16fd0c0..22d8e8f24 100644
--- a/include/odp/api/spec/cpumask.h
+++ b/include/odp/api/spec/cpumask.h
@@ -13,7 +13,7 @@
#ifndef ODP_API_CPUMASK_H_
#define ODP_API_CPUMASK_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -250,5 +250,5 @@ int odp_cpumask_all_available(odp_cpumask_t *mask);
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/crypto.h b/include/odp/api/spec/crypto.h
index dea1fe938..9855bf989 100644
--- a/include/odp/api/spec/crypto.h
+++ b/include/odp/api/spec/crypto.h
@@ -13,7 +13,7 @@
#ifndef ODP_API_CRYPTO_H_
#define ODP_API_CRYPTO_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -65,14 +65,28 @@ typedef enum {
typedef enum {
/** No cipher algorithm specified */
ODP_CIPHER_ALG_NULL,
+
/** DES */
ODP_CIPHER_ALG_DES,
+
/** Triple DES with cipher block chaining */
ODP_CIPHER_ALG_3DES_CBC,
- /** AES128 with cipher block chaining */
+
+ /** AES with cipher block chaining */
+ ODP_CIPHER_ALG_AES_CBC,
+
+ /** AES in Galois/Counter Mode
+ *
+ * @note Must be paired with cipher ODP_AUTH_ALG_AES_GCM
+ */
+ ODP_CIPHER_ALG_AES_GCM,
+
+ /** @deprecated Use ODP_CIPHER_ALG_AES_CBC instead */
ODP_CIPHER_ALG_AES128_CBC,
- /** AES128 in Galois/Counter Mode */
- ODP_CIPHER_ALG_AES128_GCM,
+
+ /** @deprecated Use ODP_CIPHER_ALG_AES_GCM instead */
+ ODP_CIPHER_ALG_AES128_GCM
+
} odp_cipher_alg_t;
/**
@@ -81,12 +95,33 @@ typedef enum {
typedef enum {
/** No authentication algorithm specified */
ODP_AUTH_ALG_NULL,
- /** HMAC-MD5 with 96 bit key */
+
+ /** HMAC-MD5
+ *
+ * MD5 algorithm in HMAC mode
+ */
+ ODP_AUTH_ALG_MD5_HMAC,
+
+ /** HMAC-SHA-256
+ *
+ * SHA-256 algorithm in HMAC mode
+ */
+ ODP_AUTH_ALG_SHA256_HMAC,
+
+ /** AES in Galois/Counter Mode
+ *
+ * @note Must be paired with cipher ODP_CIPHER_ALG_AES_GCM
+ */
+ ODP_AUTH_ALG_AES_GCM,
+
+ /** @deprecated Use ODP_AUTH_ALG_MD5_HMAC instead */
ODP_AUTH_ALG_MD5_96,
- /** SHA256 with 128 bit key */
+
+ /** @deprecated Use ODP_AUTH_ALG_SHA256_HMAC instead */
ODP_AUTH_ALG_SHA256_128,
- /** AES128 in Galois/Counter Mode */
- ODP_AUTH_ALG_AES128_GCM,
+
+ /** @deprecated Use ODP_AUTH_ALG_AES_GCM instead */
+ ODP_AUTH_ALG_AES128_GCM
} odp_auth_alg_t;
/**
@@ -96,19 +131,25 @@ typedef union odp_crypto_cipher_algos_t {
/** Cipher algorithms */
struct {
/** ODP_CIPHER_ALG_NULL */
- uint32_t null : 1;
+ uint32_t null : 1;
/** ODP_CIPHER_ALG_DES */
- uint32_t des : 1;
+ uint32_t des : 1;
/** ODP_CIPHER_ALG_3DES_CBC */
- uint32_t trides_cbc : 1;
+ uint32_t trides_cbc : 1;
- /** ODP_CIPHER_ALG_AES128_CBC */
- uint32_t aes128_cbc : 1;
+ /** ODP_CIPHER_ALG_AES_CBC */
+ uint32_t aes_cbc : 1;
- /** ODP_CIPHER_ALG_AES128_GCM */
- uint32_t aes128_gcm : 1;
+ /** ODP_CIPHER_ALG_AES_GCM */
+ uint32_t aes_gcm : 1;
+
+ /** @deprecated Use aes_cbc instead */
+ uint32_t aes128_cbc : 1;
+
+ /** @deprecated Use aes_gcm instead */
+ uint32_t aes128_gcm : 1;
} bit;
/** All bits of the bit field structure
@@ -125,16 +166,25 @@ typedef union odp_crypto_auth_algos_t {
/** Authentication algorithms */
struct {
/** ODP_AUTH_ALG_NULL */
- uint32_t null : 1;
+ uint32_t null : 1;
+
+ /** ODP_AUTH_ALG_MD5_HMAC */
+ uint32_t md5_hmac : 1;
+
+ /** ODP_AUTH_ALG_SHA256_HMAC */
+ uint32_t sha256_hmac : 1;
+
+ /** ODP_AUTH_ALG_AES_GCM */
+ uint32_t aes_gcm : 1;
- /** ODP_AUTH_ALG_MD5_96 */
- uint32_t md5_96 : 1;
+ /** @deprecated Use md5_hmac instead */
+ uint32_t md5_96 : 1;
- /** ODP_AUTH_ALG_SHA256_128 */
- uint32_t sha256_128 : 1;
+ /** @deprecated Use sha256_hmac instead */
+ uint32_t sha256_128 : 1;
- /** ODP_AUTH_ALG_AES128_GCM */
- uint32_t aes128_gcm : 1;
+ /** @deprecated Use aes_gcm instead */
+ uint32_t aes128_gcm : 1;
} bit;
/** All bits of the bit field structure
@@ -148,109 +198,164 @@ typedef union odp_crypto_auth_algos_t {
* Crypto API key structure
*/
typedef struct odp_crypto_key {
- uint8_t *data; /**< Key data */
- uint32_t length; /**< Key length in bytes */
+ /** Key data */
+ uint8_t *data;
+
+ /** Key length in bytes */
+ uint32_t length;
+
} odp_crypto_key_t;
/**
* Crypto API IV structure
*/
typedef struct odp_crypto_iv {
- uint8_t *data; /**< IV data */
- uint32_t length; /**< IV length in bytes */
+ /** IV data */
+ uint8_t *data;
+
+ /** IV length in bytes */
+ uint32_t length;
+
} odp_crypto_iv_t;
/**
* Crypto API data range specifier
*/
typedef struct odp_crypto_data_range {
- uint32_t offset; /**< Offset from beginning of buffer (chain) */
- uint32_t length; /**< Length of data to operate on */
+ /** Offset from beginning of packet */
+ uint32_t offset;
+
+ /** Length of data to operate on */
+ uint32_t length;
+
} odp_crypto_data_range_t;
/**
* Crypto API session creation parameters
*/
-typedef struct odp_crypto_session_params {
- odp_crypto_op_t op; /**< Encode versus decode */
- odp_bool_t auth_cipher_text; /**< Authenticate/cipher ordering */
- odp_crypto_op_mode_t pref_mode; /**< Preferred sync vs async */
- odp_cipher_alg_t cipher_alg; /**< Cipher algorithm */
- odp_crypto_key_t cipher_key; /**< Cipher key */
- odp_crypto_iv_t iv; /**< Cipher Initialization Vector (IV) */
- odp_auth_alg_t auth_alg; /**< Authentication algorithm */
- odp_crypto_key_t auth_key; /**< Authentication key */
- odp_queue_t compl_queue; /**< Async mode completion event queue */
- odp_pool_t output_pool; /**< Output buffer pool */
-} odp_crypto_session_params_t;
-
-/**
- * @var odp_crypto_session_params_t::auth_cipher_text
- *
- * Controls ordering of authentication and cipher operations,
- * and is relative to the operation (encode vs decode).
- * When encoding, @c TRUE indicates the authentication operation
- * should be performed @b after the cipher operation else before.
- * When decoding, @c TRUE indicates the reverse order of operation.
- *
- * @var odp_crypto_session_params_t::compl_queue
- *
- * When the API operates asynchronously, the completion queue is
- * used to return the completion status of the operation to the
- * application.
- *
- * @var odp_crypto_session_params_t::output_pool
- *
- * When the output packet is not specified during the call to
- * odp_crypto_operation, the output packet buffer will be allocated
- * from this pool.
- */
+typedef struct odp_crypto_session_param_t {
+ /** Encode vs. decode operation */
+ odp_crypto_op_t op;
+
+ /** Authenticate cipher vs. plain text
+ *
+ * Controls ordering of authentication and cipher operations,
+ * and is relative to the operation (encode vs decode). When encoding,
+ * TRUE indicates the authentication operation should be performed
+ * after the cipher operation else before. When decoding, TRUE
+ * indicates the reverse order of operation.
+ *
+ * true: Authenticate cipher text
+ * false: Authenticate plain text
+ */
+ odp_bool_t auth_cipher_text;
+
+ /** Preferred sync vs. async */
+ odp_crypto_op_mode_t pref_mode;
+
+ /** Cipher algorithm
+ *
+ * Use odp_crypto_capability() for supported algorithms.
+ */
+ odp_cipher_alg_t cipher_alg;
+
+ /** Cipher key
+ *
+ * Use odp_crypto_cipher_capa() for supported key and IV lengths.
+ */
+ odp_crypto_key_t cipher_key;
+
+ /** Cipher Initialization Vector (IV) */
+ odp_crypto_iv_t iv;
+
+ /** Authentication algorithm
+ *
+ * Use odp_crypto_capability() for supported algorithms.
+ */
+ odp_auth_alg_t auth_alg;
+
+ /** Authentication key
+ *
+ * Use odp_crypto_auth_capa() for supported digest and key lengths.
+ */
+ odp_crypto_key_t auth_key;
+
+ /** Async mode completion event queue
+ *
+ * When odp_crypto_operation() is asynchronous, the completion queue is
+ * used to return the completion status of the operation to the
+ * application.
+ */
+ odp_queue_t compl_queue;
+
+ /** Output pool
+ *
+ * When the output packet is not specified during the call to
+ * odp_crypto_operation(), the output packet will be allocated
+ * from this pool.
+ */
+ odp_pool_t output_pool;
+
+} odp_crypto_session_param_t;
+
+/** @deprecated Use odp_crypto_session_param_t instead */
+typedef odp_crypto_session_param_t odp_crypto_session_params_t;
/**
* Crypto API per packet operation parameters
*/
-typedef struct odp_crypto_op_params {
- odp_crypto_session_t session; /**< Session handle from creation */
- void *ctx; /**< User context */
- odp_packet_t pkt; /**< Input packet buffer */
- odp_packet_t out_pkt; /**< Output packet buffer */
- uint8_t *override_iv_ptr; /**< Override session IV pointer */
- uint32_t hash_result_offset; /**< Offset from start of packet buffer for hash result */
- odp_crypto_data_range_t cipher_range; /**< Data range to apply cipher */
- odp_crypto_data_range_t auth_range; /**< Data range to authenticate */
-} odp_crypto_op_params_t;
-
-/**
- * @var odp_crypto_op_params_t::pkt
- * Specifies the input packet buffer for the crypto operation. When the
- * @c out_pkt variable is set to @c ODP_PACKET_INVALID (indicating a new
- * buffer should be allocated for the resulting packet), the \#define TBD
- * indicates whether the implementation will free the input packet buffer
- * or if it becomes the responsibility of the caller.
- *
- * @var odp_crypto_op_params_t::out_pkt
- *
- * The API supports both "in place" (the original packet "pkt" is
- * modified) and "copy" (the packet is replicated to a new buffer
- * which contains the modified data).
- *
- * The "in place" mode of operation is indicated by setting @c out_pkt
- * equal to @c pkt. For the copy mode of operation, setting @c out_pkt
- * to a valid packet buffer value indicates the caller wishes to specify
- * the destination buffer. Setting @c out_pkt to @c ODP_PACKET_INVALID
- * indicates the caller wishes the destination packet buffer be allocated
- * from the output pool specified during session creation.
- *
- * @var odp_crypto_op_params_t::hash_result_offset
- *
- * Specifies the offset where the hash result is to be stored. In case of
- * decode sessions, input hash values will be read from this offset, and
- * overwritten with hash results. If this offset lies within specified
- * auth_range, implementation will mute this field before calculating the hash
- * result.
- *
- * @sa odp_crypto_session_params_t::output_pool.
- */
+typedef struct odp_crypto_op_param_t {
+ /** Session handle from creation */
+ odp_crypto_session_t session;
+
+ /** User context */
+ void *ctx;
+
+ /** Input packet
+ *
+ * Specifies the input packet for the crypto operation. When the
+ * 'out_pkt' variable is set to ODP_PACKET_INVALID (indicating a new
+ * packet should be allocated for the resulting packet).
+ */
+ odp_packet_t pkt;
+
+ /** Output packet
+ *
+ * Both "in place" (the original packet 'pkt' is modified) and
+ * "copy" (the packet is replicated to a new packet which contains
+ * the modified data) modes are supported. The "in place" mode of
+ * operation is indicated by setting 'out_pkt' equal to 'pkt'.
+ * For the copy mode of operation, setting 'out_pkt' to a valid packet
+ * value indicates the caller wishes to specify the destination packet.
+ * Setting 'out_pkt' to ODP_PACKET_INVALID indicates the caller wishes
+ * the destination packet be allocated from the output pool specified
+ * during session creation.
+ */
+ odp_packet_t out_pkt;
+
+ /** Override session IV pointer */
+ uint8_t *override_iv_ptr;
+
+ /** Offset from start of packet for hash result
+ *
+ * Specifies the offset where the hash result is to be stored. In case
+ * of decode sessions, input hash values will be read from this offset,
+ * and overwritten with hash results. If this offset lies within
+ * specified 'auth_range', implementation will mute this field before
+ * calculating the hash result.
+ */
+ uint32_t hash_result_offset;
+
+ /** Data range to apply cipher */
+ odp_crypto_data_range_t cipher_range;
+
+ /** Data range to authenticate */
+ odp_crypto_data_range_t auth_range;
+
+} odp_crypto_op_param_t;
+
+/** @deprecated Use odp_crypto_op_param_t instead */
+typedef odp_crypto_op_param_t odp_crypto_op_params_t;
/**
* Crypto API session creation return code
@@ -290,7 +395,7 @@ typedef enum {
ODP_CRYPTO_HW_ERR_NONE,
/** Error detected during DMA of data */
ODP_CRYPTO_HW_ERR_DMA,
- /** Operation failed due to buffer pool depletion */
+ /** Operation failed due to pool depletion */
ODP_CRYPTO_HW_ERR_BP_DEPLETED,
} odp_crypto_hw_err_t;
@@ -298,19 +403,33 @@ typedef enum {
* Cryto API per packet operation completion status
*/
typedef struct odp_crypto_compl_status {
- odp_crypto_alg_err_t alg_err; /**< Algorithm specific return code */
- odp_crypto_hw_err_t hw_err; /**< Hardware specific return code */
+ /** Algorithm specific return code */
+ odp_crypto_alg_err_t alg_err;
+
+ /** Hardware specific return code */
+ odp_crypto_hw_err_t hw_err;
+
} odp_crypto_compl_status_t;
/**
* Crypto API operation result
*/
typedef struct odp_crypto_op_result {
- odp_bool_t ok; /**< Request completed successfully */
- void *ctx; /**< User context from request */
- odp_packet_t pkt; /**< Output packet */
- odp_crypto_compl_status_t cipher_status; /**< Cipher status */
- odp_crypto_compl_status_t auth_status; /**< Authentication status */
+ /** Request completed successfully */
+ odp_bool_t ok;
+
+ /** User context from request */
+ void *ctx;
+
+ /** Output packet */
+ odp_packet_t pkt;
+
+ /** Cipher status */
+ odp_crypto_compl_status_t cipher_status;
+
+ /** Authentication status */
+ odp_crypto_compl_status_t auth_status;
+
} odp_crypto_op_result_t;
/**
@@ -335,6 +454,43 @@ typedef struct odp_crypto_capability_t {
} odp_crypto_capability_t;
/**
+ * Cipher algorithm capabilities
+ */
+typedef struct odp_crypto_cipher_capability_t {
+ /** Key length in bytes */
+ uint32_t key_len;
+
+ /** IV length in bytes */
+ uint32_t iv_len;
+
+} odp_crypto_cipher_capability_t;
+
+/**
+ * Authentication algorithm capabilities
+ */
+typedef struct odp_crypto_auth_capability_t {
+ /** Digest length in bytes */
+ uint32_t digest_len;
+
+ /** Key length in bytes */
+ uint32_t key_len;
+
+ /** Additional Authenticated Data (AAD) lengths */
+ struct {
+ /** Minimum AAD length in bytes */
+ uint32_t min;
+
+ /** Maximum AAD length in bytes */
+ uint32_t max;
+
+ /** Increment of supported lengths between min and max
+ * (in bytes) */
+ uint32_t inc;
+ } aad_len;
+
+} odp_crypto_auth_capability_t;
+
+/**
* Query crypto capabilities
*
* Outputs crypto capabilities on success.
@@ -347,16 +503,59 @@ typedef struct odp_crypto_capability_t {
int odp_crypto_capability(odp_crypto_capability_t *capa);
/**
- * Crypto session creation (synchronous)
+ * Query supported cipher algorithm capabilities
+ *
+ * Outputs all supported configuration options for the algorithm. Output is
+ * sorted (from the smallest to the largest) first by key length, then by IV
+ * length.
*
- * @param params Session parameters
+ * @param cipher Cipher algorithm
+ * @param[out] capa Array of capability structures for output
+ * @param num Maximum number of capability structures to output
+ *
+ * @return Number of capability structures for the algorithm. If this is larger
+ * than 'num', only 'num' first structures were output and application
+ * may call the function again with a larger value of 'num'.
+ * @retval <0 on failure
+ */
+int odp_crypto_cipher_capability(odp_cipher_alg_t cipher,
+ odp_crypto_cipher_capability_t capa[],
+ int num);
+
+/**
+ * Query supported authentication algorithm capabilities
+ *
+ * Outputs all supported configuration options for the algorithm. Output is
+ * sorted (from the smallest to the largest) first by digest length, then by key
+ * length.
+ *
+ * @param auth Authentication algorithm
+ * @param[out] capa Array of capability structures for output
+ * @param num Maximum number of capability structures to output
+ *
+ * @return Number of capability structures for the algorithm. If this is larger
+ * than 'num', only 'num' first structures were output and application
+ * may call the function again with a larger value of 'num'.
+ * @retval <0 on failure
+ */
+int odp_crypto_auth_capability(odp_auth_alg_t auth,
+ odp_crypto_auth_capability_t capa[], int num);
+
+/**
+ * Crypto session creation
+ *
+ * Create a crypto session according to the session parameters. Use
+ * odp_crypto_session_param_init() to initialize parameters into their
+ * default values.
+ *
+ * @param param Session parameters
* @param session Created session else ODP_CRYPTO_SESSION_INVALID
* @param status Failure code if unsuccessful
*
* @retval 0 on success
* @retval <0 on failure
*/
-int odp_crypto_session_create(odp_crypto_session_params_t *params,
+int odp_crypto_session_create(odp_crypto_session_param_t *param,
odp_crypto_session_t *session,
odp_crypto_ses_create_err_t *status);
@@ -410,14 +609,14 @@ void odp_crypto_compl_free(odp_crypto_compl_t completion_event);
* If "posted" returns TRUE the result will be delivered via the completion
* queue specified when the session was created.
*
- * @param params Operation parameters
+ * @param param Operation parameters
* @param posted Pointer to return posted, TRUE for async operation
* @param result Results of operation (when posted returns FALSE)
*
* @retval 0 on success
* @retval <0 on failure
*/
-int odp_crypto_operation(odp_crypto_op_params_t *params,
+int odp_crypto_operation(odp_crypto_op_param_t *param,
odp_bool_t *posted,
odp_crypto_op_result_t *result);
@@ -457,6 +656,16 @@ uint64_t odp_crypto_session_to_u64(odp_crypto_session_t hdl);
uint64_t odp_crypto_compl_to_u64(odp_crypto_compl_t hdl);
/**
+ * Initialize crypto session parameters
+ *
+ * Initialize an odp_crypto_session_param_t to its default values for
+ * all fields.
+ *
+ * @param param Pointer to odp_crypto_session_param_t to be initialized
+ */
+void odp_crypto_session_param_init(odp_crypto_session_param_t *param);
+
+/**
* @}
*/
@@ -464,5 +673,5 @@ uint64_t odp_crypto_compl_to_u64(odp_crypto_compl_t hdl);
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/debug.h b/include/odp/api/spec/debug.h
index a49dff398..b3b170f3e 100644
--- a/include/odp/api/spec/debug.h
+++ b/include/odp/api/spec/debug.h
@@ -11,7 +11,7 @@
#ifndef ODP_API_DEBUG_H_
#define ODP_API_DEBUG_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -32,5 +32,5 @@ extern "C" {
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/errno.h b/include/odp/api/spec/errno.h
index a1e76429b..9b60a98ba 100644
--- a/include/odp/api/spec/errno.h
+++ b/include/odp/api/spec/errno.h
@@ -12,7 +12,7 @@
#ifndef ODP_ERRNO_H_
#define ODP_ERRNO_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -83,5 +83,5 @@ const char *odp_errno_str(int errnum);
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/event.h b/include/odp/api/spec/event.h
index 082768f8d..fdfa52d1c 100644
--- a/include/odp/api/spec/event.h
+++ b/include/odp/api/spec/event.h
@@ -13,7 +13,7 @@
#ifndef ODP_API_EVENT_H_
#define ODP_API_EVENT_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -83,5 +83,5 @@ void odp_event_free(odp_event_t event);
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/hash.h b/include/odp/api/spec/hash.h
index 07a01569d..66b740e2c 100644
--- a/include/odp/api/spec/hash.h
+++ b/include/odp/api/spec/hash.h
@@ -12,7 +12,7 @@
#ifndef ODP_API_HASH_H_
#define ODP_API_HASH_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -96,5 +96,5 @@ int odp_hash_crc_gen64(const void *data, uint32_t data_len,
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/hints.h b/include/odp/api/spec/hints.h
index ff5099c2f..82400f073 100644
--- a/include/odp/api/spec/hints.h
+++ b/include/odp/api/spec/hints.h
@@ -13,7 +13,7 @@
#ifndef ODP_API_HINTS_H_
#define ODP_API_HINTS_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -114,5 +114,5 @@ extern "C" {
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/init.h b/include/odp/api/spec/init.h
index fec677421..154cdf8f3 100644
--- a/include/odp/api/spec/init.h
+++ b/include/odp/api/spec/init.h
@@ -21,7 +21,7 @@
#ifndef ODP_API_INIT_H_
#define ODP_API_INIT_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -277,5 +277,5 @@ int odp_term_local(void);
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/packet.h b/include/odp/api/spec/packet.h
index 522adb2d3..4a86ebad0 100644
--- a/include/odp/api/spec/packet.h
+++ b/include/odp/api/spec/packet.h
@@ -13,7 +13,7 @@
#ifndef ODP_API_PACKET_H_
#define ODP_API_PACKET_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -82,13 +82,14 @@ extern "C" {
* Allocate a packet from a packet pool
*
* Allocates a packet of the requested length from the specified packet pool.
- * Pool must have been created with ODP_POOL_PACKET type. The
+ * The pool must have been created with ODP_POOL_PACKET type. The
* packet is initialized with data pointers and lengths set according to the
* specified len, and the default headroom and tailroom length settings. All
- * other packet metadata are set to their default values.
+ * other packet metadata are set to their default values. Packet length must
+ * be greater than zero and not exceed packet pool parameter 'max_len' value.
*
* @param pool Pool handle
- * @param len Packet data length
+ * @param len Packet data length (1 ... pool max_len)
*
* @return Handle of allocated packet
* @retval ODP_PACKET_INVALID Packet could not be allocated
@@ -105,7 +106,7 @@ odp_packet_t odp_packet_alloc(odp_pool_t pool, uint32_t len);
* packets from a pool.
*
* @param pool Pool handle
- * @param len Packet data length
+ * @param len Packet data length (1 ... pool max_len)
* @param[out] pkt Array of packet handles for output
* @param num Maximum number of packets to allocate
*
@@ -780,7 +781,8 @@ uint32_t odp_packet_seg_data_len(odp_packet_t pkt, odp_packet_seg_t seg);
* Concatenate all packet data from 'src' packet into tail of 'dst' packet.
* Operation preserves 'dst' packet metadata in the resulting packet,
* while 'src' packet handle, metadata and old segment handles for both packets
- * become invalid.
+ * become invalid. Source and destination packet handles must not refer to
+ * the same packet.
*
* A successful operation overwrites 'dst' packet handle with a new handle,
* which application must use as the reference to the resulting packet
@@ -927,6 +929,9 @@ int odp_packet_copy_from_mem(odp_packet_t pkt, uint32_t offset,
* Copy 'len' bytes of data from 'src' packet to 'dst' packet. Copy starts from
* the specified source and destination packet offsets. Copied areas
* (offset ... offset + len) must not exceed their packet data lengths.
+ * Source and destination packet handles must not refer to the same packet (use
+ * odp_packet_copy_data() or odp_packet_move_data() for a single packet).
+ *
* Packet is not modified on an error.
*
* @param dst Destination packet handle
@@ -1402,5 +1407,5 @@ uint64_t odp_packet_seg_to_u64(odp_packet_seg_t hdl);
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/packet_flags.h b/include/odp/api/spec/packet_flags.h
index c2998c1f5..377b75ba0 100644
--- a/include/odp/api/spec/packet_flags.h
+++ b/include/odp/api/spec/packet_flags.h
@@ -13,7 +13,7 @@
#ifndef ODP_API_PACKET_FLAGS_H_
#define ODP_API_PACKET_FLAGS_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -494,5 +494,5 @@ void odp_packet_has_ts_clr(odp_packet_t pkt);
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/packet_io.h b/include/odp/api/spec/packet_io.h
index c7373fdca..85cd6d184 100644
--- a/include/odp/api/spec/packet_io.h
+++ b/include/odp/api/spec/packet_io.h
@@ -13,7 +13,7 @@
#ifndef ODP_API_PACKET_IO_H_
#define ODP_API_PACKET_IO_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -189,12 +189,11 @@ typedef struct odp_pktin_queue_param_t {
/** Number of input queues to be created
*
- * When classifier is enabled the number of queues may be zero
- * (in odp_pktin_queue_config() step), otherwise at least one
- * queue is required. More than one input queues require either flow
- * hashing or classifier enabled. The maximum value is defined by
- * pktio capability 'max_input_queues'. Queue type is defined by the
- * input mode. The default value is 1. */
+ * When classifier is enabled in odp_pktin_queue_config() this
+ * value is ignored, otherwise at least one queue is required.
+ * More than one input queues require flow hashing configured.
+ * The maximum value is defined by pktio capability 'max_input_queues'.
+ * Queue type is defined by the input mode. The default value is 1. */
unsigned num_queues;
/** Queue parameters
@@ -202,7 +201,9 @@ typedef struct odp_pktin_queue_param_t {
* These are used for input queue creation in ODP_PKTIN_MODE_QUEUE
* or ODP_PKTIN_MODE_SCHED modes. Scheduler parameters are considered
* only in ODP_PKTIN_MODE_SCHED mode. Default values are defined in
- * odp_queue_param_t documentation. */
+ * odp_queue_param_t documentation.
+ * When classifier is enabled in odp_pktin_queue_config() this
+ * value is ignored. */
odp_queue_param_t queue_param;
} odp_pktin_queue_param_t;
@@ -887,6 +888,8 @@ int odp_pktio_mac_addr(odp_pktio_t pktio, void *mac_addr, int size);
*
* @retval 0 on success
* @retval <0 on failure
+ *
+ * @note The default_cos has to be unique per odp_pktio_t instance.
*/
int odp_pktio_default_cos_set(odp_pktio_t pktio, odp_cos_t default_cos);
@@ -1074,5 +1077,5 @@ odp_time_t odp_pktin_ts_from_ns(odp_pktio_t pktio, uint64_t ns);
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/packet_io_stats.h b/include/odp/api/spec/packet_io_stats.h
index 73cf704e3..299ecd0e1 100644
--- a/include/odp/api/spec/packet_io_stats.h
+++ b/include/odp/api/spec/packet_io_stats.h
@@ -12,7 +12,7 @@
#ifndef ODP_API_PACKET_IO_STATS_H_
#define ODP_API_PACKET_IO_STATS_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -139,5 +139,5 @@ int odp_pktio_stats_reset(odp_pktio_t pktio);
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/pool.h b/include/odp/api/spec/pool.h
index b31b6aa8d..c0de195a7 100644
--- a/include/odp/api/spec/pool.h
+++ b/include/odp/api/spec/pool.h
@@ -13,7 +13,7 @@
#ifndef ODP_API_POOL_H_
#define ODP_API_POOL_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -36,8 +36,10 @@ extern "C" {
* Invalid pool
*/
-/** Maximum queue name length in chars */
-#define ODP_POOL_NAME_LEN 32
+/**
+ * @def ODP_POOL_NAME_LEN
+ * Maximum pool name length in chars including null char
+ */
/**
* Pool capabilities
@@ -192,6 +194,12 @@ typedef struct odp_pool_param_t {
pkt.max_len. Use 0 for default. */
uint32_t len;
+ /** Maximum packet length that will be allocated from
+ the pool. The maximum value is defined by pool
+ capability pkt.max_len. Use 0 for default (the
+ pool maximum). */
+ uint32_t max_len;
+
/** Minimum number of packet data bytes that are stored
in the first segment of a packet. The maximum value
is defined by pool capability pkt.max_seg_len.
@@ -220,14 +228,12 @@ typedef struct odp_pool_param_t {
/**
* Create a pool
*
- * This routine is used to create a pool. It take two arguments: the optional
- * name of the pool to be created and a parameter struct that describes the
- * pool to be created. If a name is not specified the result is an anonymous
- * pool that cannot be referenced by odp_pool_lookup().
- *
- * @param name Name of the pool, max ODP_POOL_NAME_LEN-1 chars.
- * May be specified as NULL for anonymous pools.
+ * This routine is used to create a pool. The use of pool name is optional.
+ * Unique names are not required. However, odp_pool_lookup() returns only a
+ * single matching pool.
*
+ * @param name Name of the pool or NULL. Maximum string length is
+ * ODP_POOL_NAME_LEN.
* @param params Pool parameters.
*
* @return Handle of the created pool
@@ -256,11 +262,8 @@ int odp_pool_destroy(odp_pool_t pool);
*
* @param name Name of the pool
*
- * @return Handle of found pool
+ * @return Handle of the first matching pool
* @retval ODP_POOL_INVALID Pool could not be found
- *
- * @note This routine cannot be used to look up an anonymous pool (one created
- * with no name).
*/
odp_pool_t odp_pool_lookup(const char *name);
@@ -327,5 +330,5 @@ void odp_pool_param_init(odp_pool_param_t *param);
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/queue.h b/include/odp/api/spec/queue.h
index 92822da77..7972feacb 100644
--- a/include/odp/api/spec/queue.h
+++ b/include/odp/api/spec/queue.h
@@ -13,7 +13,7 @@
#ifndef ODP_API_QUEUE_H_
#define ODP_API_QUEUE_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -44,7 +44,7 @@ extern "C" {
/**
* @def ODP_QUEUE_NAME_LEN
- * Maximum queue name length in chars
+ * Maximum queue name length in chars including null char
*/
/**
@@ -173,9 +173,12 @@ typedef struct odp_queue_param_t {
* Create a queue according to the queue parameters. Queue type is specified by
* queue parameter 'type'. Use odp_queue_param_init() to initialize parameters
* into their default values. Default values are also used when 'param' pointer
- * is NULL. The default queue type is ODP_QUEUE_TYPE_PLAIN.
+ * is NULL. The default queue type is ODP_QUEUE_TYPE_PLAIN. The use of queue
+ * name is optional. Unique names are not required. However, odp_queue_lookup()
+ * returns only a single matching queue.
*
- * @param name Queue name
+ * @param name Name of the queue or NULL. Maximum string length is
+ * ODP_QUEUE_NAME_LEN.
* @param param Queue parameters. Uses defaults if NULL.
*
* @return Queue handle
@@ -203,7 +206,7 @@ int odp_queue_destroy(odp_queue_t queue);
*
* @param name Queue name
*
- * @return Queue handle
+ * @return Handle of the first matching queue
* @retval ODP_QUEUE_INVALID on failure
*/
odp_queue_t odp_queue_lookup(const char *name);
@@ -413,5 +416,5 @@ int odp_queue_info(odp_queue_t queue, odp_queue_info_t *info);
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/random.h b/include/odp/api/spec/random.h
index db776309a..4765475c2 100644
--- a/include/odp/api/spec/random.h
+++ b/include/odp/api/spec/random.h
@@ -13,7 +13,7 @@
#ifndef ODP_API_RANDOM_H_
#define ODP_API_RANDOM_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -24,18 +24,82 @@ extern "C" {
*/
/**
+ * Random kind selector
+ *
+ * The kind of random denotes the statistical quality of the random data
+ * returned. Basic random simply appears uniformly distributed, Cryptographic
+ * random is statistically random and suitable for use by cryptographic
+ * functions. True random is generated from a hardware entropy source rather
+ * than an algorithm and is thus completely unpredictable. These form a
+ * hierarchy where higher quality data is presumably more costly to generate
+ * than lower quality data.
+ */
+typedef enum {
+ /** Basic random, presumably pseudo-random generated by SW. This
+ * is the lowest kind of random */
+ ODP_RANDOM_BASIC,
+ /** Cryptographic quality random */
+ ODP_RANDOM_CRYPTO,
+ /** True random, generated from a HW entropy source. This is the
+ * highest kind of random */
+ ODP_RANDOM_TRUE,
+} odp_random_kind_t;
+
+/**
+ * Query random max kind
+ *
+ * Implementations support the returned max kind and all kinds weaker than it.
+ *
+ * @return kind The maximum odp_random_kind_t supported by this implementation
+ */
+odp_random_kind_t odp_random_max_kind(void);
+
+/**
* Generate random byte data
*
+ * The intent in supporting different kinds of random data is to allow
+ * tradeoffs between performance and the quality of random data needed. The
+ * assumption is that basic random is cheap while true random is relatively
+ * expensive in terms of time to generate, with cryptographic random being
+ * something in between. Implementations that support highly efficient true
+ * random are free to use this for all requested kinds. So it is always
+ * permissible to "upgrade" a random data request, but never to "downgrade"
+ * such requests.
+ *
* @param[out] buf Output buffer
- * @param size Size of output buffer
- * @param use_entropy Use entropy
+ * @param len Length of output buffer in bytes
+ * @param kind Specifies the type of random data required. Request
+ * is expected to fail if the implementation is unable to
+ * provide the requested type.
+ *
+ * @return Number of bytes written
+ * @retval <0 on failure
+ */
+int32_t odp_random_data(uint8_t *buf, uint32_t len, odp_random_kind_t kind);
+
+/**
+ * Generate repeatable random data for testing purposes
+ *
+ * For testing purposes it is often useful to generate "random" sequences that
+ * are repeatable. This is accomplished by supplying a seed value that is used
+ * for pseudo-random data generation. The caller-provided seed value is
+ * updated for each call to continue the sequence. Restarting a series of
+ * calls with the same initial seed value will generate the same sequence of
+ * random test data.
+ *
+ * This function returns data of ODP_RANDOM_BASIC quality and should be used
+ * only for testing purposes. Use odp_random_data() for production.
*
- * @todo Define the implication of the use_entropy parameter
+ * @param[out] buf Output buffer
+ * @param len Length of output buffer in bytes
+ * @param[in,out] seed Seed value to use. This must be a thread-local
+ * variable. Results are undefined if multiple threads
+ * call this routine with the same seed variable.
*
* @return Number of bytes written
* @retval <0 on failure
*/
-int32_t odp_random_data(uint8_t *buf, int32_t size, odp_bool_t use_entropy);
+int32_t odp_random_test_data(uint8_t *buf, uint32_t len, uint64_t *seed);
/**
* @}
@@ -45,5 +109,5 @@ int32_t odp_random_data(uint8_t *buf, int32_t size, odp_bool_t use_entropy);
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/rwlock.h b/include/odp/api/spec/rwlock.h
index 2624b5623..ff8a3f278 100644
--- a/include/odp/api/spec/rwlock.h
+++ b/include/odp/api/spec/rwlock.h
@@ -6,7 +6,7 @@
#ifndef ODP_API_RWLOCK_H_
#define ODP_API_RWLOCK_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
/**
* @file
@@ -100,5 +100,5 @@ void odp_rwlock_write_unlock(odp_rwlock_t *rwlock);
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif /* ODP_RWLOCK_H_ */
diff --git a/include/odp/api/spec/rwlock_recursive.h b/include/odp/api/spec/rwlock_recursive.h
index 9d50f2020..1c19c7217 100644
--- a/include/odp/api/spec/rwlock_recursive.h
+++ b/include/odp/api/spec/rwlock_recursive.h
@@ -12,7 +12,7 @@
#ifndef ODP_API_RWLOCK_RECURSIVE_H_
#define ODP_API_RWLOCK_RECURSIVE_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -118,5 +118,5 @@ void odp_rwlock_recursive_write_unlock(odp_rwlock_recursive_t *lock);
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/schedule.h b/include/odp/api/spec/schedule.h
index f8fed176a..8244746d7 100644
--- a/include/odp/api/spec/schedule.h
+++ b/include/odp/api/spec/schedule.h
@@ -13,7 +13,7 @@
#ifndef ODP_API_SCHEDULE_H_
#define ODP_API_SCHEDULE_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -42,7 +42,7 @@ extern "C" {
/**
* @def ODP_SCHED_GROUP_NAME_LEN
- * Maximum schedule group name length in chars
+ * Maximum schedule group name length in chars including null char
*/
/**
@@ -214,10 +214,12 @@ int odp_schedule_num_prio(void);
* mask will receive events from a queue that belongs to the schedule group.
* Thread masks of various schedule groups may overlap. There are predefined
* groups such as ODP_SCHED_GROUP_ALL and ODP_SCHED_GROUP_WORKER, which are
- * always present and automatically updated. Group name is optional
- * (may be NULL) and can have ODP_SCHED_GROUP_NAME_LEN characters in maximum.
+ * always present and automatically updated. The use of group name is optional.
+ * Unique names are not required. However, odp_schedule_group_lookup() returns
+ * only a single matching group.
*
- * @param name Schedule group name
+ * @param name Name of the schedule group or NULL. Maximum string length is
+ * ODP_SCHED_GROUP_NAME_LEN.
* @param mask Thread mask
*
* @return Schedule group handle
@@ -245,11 +247,9 @@ int odp_schedule_group_destroy(odp_schedule_group_t group);
/**
* Look up a schedule group by name
*
- * Return the handle of a schedule group from its name
- *
* @param name Name of schedule group
*
- * @return Handle of schedule group for specified name
+ * @return Handle of the first matching schedule group
* @retval ODP_SCHEDULE_GROUP_INVALID No matching schedule group found
*/
odp_schedule_group_t odp_schedule_group_lookup(const char *name);
@@ -375,5 +375,5 @@ void odp_schedule_order_unlock(unsigned lock_index);
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/schedule_types.h b/include/odp/api/spec/schedule_types.h
index b7c198071..8a4e42c64 100644
--- a/include/odp/api/spec/schedule_types.h
+++ b/include/odp/api/spec/schedule_types.h
@@ -12,7 +12,7 @@
#ifndef ODP_API_SCHEDULE_TYPES_H_
#define ODP_API_SCHEDULE_TYPES_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -157,5 +157,5 @@ typedef struct odp_schedule_param_t {
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/shared_memory.h b/include/odp/api/spec/shared_memory.h
index fbe0fdee7..1a9c1299e 100644
--- a/include/odp/api/spec/shared_memory.h
+++ b/include/odp/api/spec/shared_memory.h
@@ -13,7 +13,8 @@
#ifndef ODP_API_SHARED_MEMORY_H_
#define ODP_API_SHARED_MEMORY_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
+#include <odp/api/init.h>
#ifdef __cplusplus
extern "C" {
@@ -39,16 +40,31 @@ extern "C" {
* Synonym for buffer pool use
*/
-/** Maximum shared memory block name length in chars */
-#define ODP_SHM_NAME_LEN 32
+/**
+ * @def ODP_SHM_NAME_LEN
+ * Maximum shared memory block name length in chars including null char
+ */
/*
- * Shared memory flags
+ * Shared memory flags:
*/
-
-/* Share level */
-#define ODP_SHM_SW_ONLY 0x1 /**< Application SW only, no HW access */
-#define ODP_SHM_PROC 0x2 /**< Share with external processes */
+#define ODP_SHM_SW_ONLY 0x1 /**< Application SW only, no HW access */
+#define ODP_SHM_PROC 0x2 /**< Share with external processes */
+/**
+ * Single virtual address
+ *
+ * When set, this flag guarantees that all ODP threads sharing this
+ * memory block will see the block at the same address - regardless
+ * of ODP thread type (e.g. pthread vs. process (or fork process time)).
+ */
+#define ODP_SHM_SINGLE_VA 0x4
+/**
+ * Export memory
+ *
+ * When set, the memory block becomes visible to other ODP instances
+ * through odp_shm_import().
+ */
+#define ODP_SHM_EXPORT 0x08
/**
* Shared memory block info
@@ -135,6 +151,28 @@ int odp_shm_free(odp_shm_t shm);
*/
odp_shm_t odp_shm_lookup(const char *name);
+/**
+ * Import a block of shared memory, exported by another ODP instance
+ *
+ * This call creates a new handle for accessing a shared memory block created
+ * (with ODP_SHM_EXPORT flag) by another ODP instance. An instance may have
+ * only a single handle to the same block. Application must not access the
+ * block after freeing the handle. When an imported handle is freed, only
+ * the calling instance is affected. The exported block may be freed only
+ * after all other instances have stopped accessing the block.
+ *
+ * @param remote_name Name of the block, in the remote ODP instance
+ * @param odp_inst Remote ODP instance, as returned by odp_init_global()
+ * @param local_name Name given to the block, in the local ODP instance
+ * May be NULL, if the application doesn't need a name
+ * (for a lookup).
+ *
+ * @return A handle to access a block exported by another ODP instance.
+ * @retval ODP_SHM_INVALID on failure
+ */
+odp_shm_t odp_shm_import(const char *remote_name,
+ odp_instance_t odp_inst,
+ const char *local_name);
/**
* Shared memory block address
@@ -187,5 +225,5 @@ uint64_t odp_shm_to_u64(odp_shm_t hdl);
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/spinlock.h b/include/odp/api/spec/spinlock.h
index 8263171ca..11b7339b1 100644
--- a/include/odp/api/spec/spinlock.h
+++ b/include/odp/api/spec/spinlock.h
@@ -4,7 +4,6 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-
/**
* @file
*
@@ -13,7 +12,7 @@
#ifndef ODP_API_SPINLOCK_H_
#define ODP_API_SPINLOCK_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -41,7 +40,6 @@ extern "C" {
*/
void odp_spinlock_init(odp_spinlock_t *splock);
-
/**
* Acquire spin lock.
*
@@ -49,7 +47,6 @@ void odp_spinlock_init(odp_spinlock_t *splock);
*/
void odp_spinlock_lock(odp_spinlock_t *splock);
-
/**
* Try to acquire spin lock.
*
@@ -60,7 +57,6 @@ void odp_spinlock_lock(odp_spinlock_t *splock);
*/
int odp_spinlock_trylock(odp_spinlock_t *splock);
-
/**
* Release spin lock.
*
@@ -68,7 +64,6 @@ int odp_spinlock_trylock(odp_spinlock_t *splock);
*/
void odp_spinlock_unlock(odp_spinlock_t *splock);
-
/**
* Check if spin lock is busy (locked).
*
@@ -79,8 +74,6 @@ void odp_spinlock_unlock(odp_spinlock_t *splock);
*/
int odp_spinlock_is_locked(odp_spinlock_t *splock);
-
-
/**
* @}
*/
@@ -89,5 +82,5 @@ int odp_spinlock_is_locked(odp_spinlock_t *splock);
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/spinlock_recursive.h b/include/odp/api/spec/spinlock_recursive.h
index 07829fd15..c9c7ddb02 100644
--- a/include/odp/api/spec/spinlock_recursive.h
+++ b/include/odp/api/spec/spinlock_recursive.h
@@ -12,7 +12,7 @@
#ifndef ODP_API_SPINLOCK_RECURSIVE_H_
#define ODP_API_SPINLOCK_RECURSIVE_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -83,5 +83,5 @@ int odp_spinlock_recursive_is_locked(odp_spinlock_recursive_t *lock);
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/std_clib.h b/include/odp/api/spec/std_clib.h
index 772732c2f..33e9db536 100644
--- a/include/odp/api/spec/std_clib.h
+++ b/include/odp/api/spec/std_clib.h
@@ -12,7 +12,7 @@
#ifndef ODP_API_STD_CLIB_H_
#define ODP_API_STD_CLIB_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -80,5 +80,5 @@ int odp_memcmp(const void *ptr1, const void *ptr2, size_t num);
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/std_types.h b/include/odp/api/spec/std_types.h
index 47018d50c..ec6a6df6d 100644
--- a/include/odp/api/spec/std_types.h
+++ b/include/odp/api/spec/std_types.h
@@ -14,7 +14,7 @@
#ifndef ODP_API_STD_TYPES_H_
#define ODP_API_STD_TYPES_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -39,5 +39,5 @@ extern "C" {
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/sync.h b/include/odp/api/spec/sync.h
index 84b7cb912..6f87db559 100644
--- a/include/odp/api/spec/sync.h
+++ b/include/odp/api/spec/sync.h
@@ -4,7 +4,6 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-
/**
* @file
*
@@ -13,7 +12,7 @@
#ifndef ODP_API_SYNC_H_
#define ODP_API_SYNC_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -88,5 +87,5 @@ void odp_mb_full(void);
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/system_info.h b/include/odp/api/spec/system_info.h
index c5a5fd0ef..0bb4f1f12 100644
--- a/include/odp/api/spec/system_info.h
+++ b/include/odp/api/spec/system_info.h
@@ -13,7 +13,7 @@
#ifndef ODP_API_SYSTEM_INFO_H_
#define ODP_API_SYSTEM_INFO_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -52,5 +52,5 @@ int odp_sys_cache_line_size(void);
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/thread.h b/include/odp/api/spec/thread.h
index 6e2a817a1..689ba59b5 100644
--- a/include/odp/api/spec/thread.h
+++ b/include/odp/api/spec/thread.h
@@ -13,7 +13,7 @@
#ifndef ODP_API_THREAD_H_
#define ODP_API_THREAD_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -110,5 +110,5 @@ odp_thread_type_t odp_thread_type(void);
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/thrmask.h b/include/odp/api/spec/thrmask.h
index 73f386608..3986769ac 100644
--- a/include/odp/api/spec/thrmask.h
+++ b/include/odp/api/spec/thrmask.h
@@ -12,7 +12,7 @@
#ifndef ODP_API_THRMASK_H_
#define ODP_API_THRMASK_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -237,5 +237,5 @@ int odp_thrmask_control(odp_thrmask_t *mask);
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/ticketlock.h b/include/odp/api/spec/ticketlock.h
index d485565f7..b23253b55 100644
--- a/include/odp/api/spec/ticketlock.h
+++ b/include/odp/api/spec/ticketlock.h
@@ -13,7 +13,7 @@
#ifndef ODP_API_TICKETLOCK_H_
#define ODP_API_TICKETLOCK_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -88,5 +88,5 @@ int odp_ticketlock_is_locked(odp_ticketlock_t *tklock);
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/time.h b/include/odp/api/spec/time.h
index a78fc2ce7..fcc94c98e 100644
--- a/include/odp/api/spec/time.h
+++ b/include/odp/api/spec/time.h
@@ -13,7 +13,7 @@
#ifndef ODP_API_TIME_H_
#define ODP_API_TIME_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -178,5 +178,5 @@ uint64_t odp_time_to_u64(odp_time_t time);
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/timer.h b/include/odp/api/spec/timer.h
index 3f8fdd4cc..75f9db98e 100644
--- a/include/odp/api/spec/timer.h
+++ b/include/odp/api/spec/timer.h
@@ -13,7 +13,7 @@
#ifndef ODP_API_TIMER_H_
#define ODP_API_TIMER_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -90,8 +90,10 @@ typedef enum {
ODP_TIMER_NOEVENT = -3
} odp_timer_set_t;
-/** Maximum timer pool name length in chars (including null char) */
-#define ODP_TIMER_POOL_NAME_LEN 32
+/**
+ * @def ODP_TIMER_POOL_NAME_LEN
+ * Maximum timer pool name length in chars including null char
+ */
/** Timer pool parameters
* Timer pool parameters are used when creating and querying timer pools.
@@ -108,7 +110,10 @@ typedef struct {
/**
* Create a timer pool
*
- * @param name Name of the timer pool. The string will be copied.
+ * The use of pool name is optional. Unique names are not required.
+ *
+ * @param name Name of the timer pool or NULL. Maximum string length is
+ * ODP_TIMER_POOL_NAME_LEN.
* @param params Timer pool parameters. The content will be copied.
*
* @return Timer pool handle on success
@@ -413,5 +418,5 @@ uint64_t odp_timeout_to_u64(odp_timeout_t hdl);
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/traffic_mngr.h b/include/odp/api/spec/traffic_mngr.h
index 347364845..71198bbdd 100644
--- a/include/odp/api/spec/traffic_mngr.h
+++ b/include/odp/api/spec/traffic_mngr.h
@@ -6,7 +6,7 @@
#ifndef ODP_TRAFFIC_MNGR_H_
#define ODP_TRAFFIC_MNGR_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -1961,5 +1961,5 @@ void odp_tm_stats_print(odp_tm_t odp_tm);
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/include/odp/api/spec/version.h.in b/include/odp/api/spec/version.h.in
index 4b16dcc5c..f5e9e9c8b 100644
--- a/include/odp/api/spec/version.h.in
+++ b/include/odp/api/spec/version.h.in
@@ -13,7 +13,7 @@
#ifndef ODP_API_VERSION_H_
#define ODP_API_VERSION_H_
-#include <odp/api/visibility_begin.h>
+#include <odp/visibility_begin.h>
#ifdef __cplusplus
extern "C" {
@@ -103,5 +103,5 @@ const char *odp_version_impl_str(void);
}
#endif
-#include <odp/api/visibility_end.h>
+#include <odp/visibility_end.h>
#endif
diff --git a/platform/linux-generic/Makefile.am b/platform/linux-generic/Makefile.am
index 22cf6f39d..0bc98427c 100644
--- a/platform/linux-generic/Makefile.am
+++ b/platform/linux-generic/Makefile.am
@@ -13,6 +13,11 @@ include_HEADERS = \
$(top_srcdir)/include/odp.h \
$(top_srcdir)/include/odp_api.h
+odpincludedir= $(includedir)/odp
+odpinclude_HEADERS = \
+ $(srcdir)/include/odp/visibility_begin.h \
+ $(srcdir)/include/odp/visibility_end.h
+
odpapiincludedir= $(includedir)/odp/api
odpapiinclude_HEADERS = \
$(srcdir)/include/odp/api/align.h \
@@ -56,8 +61,6 @@ odpapiinclude_HEADERS = \
$(srcdir)/include/odp/api/timer.h \
$(srcdir)/include/odp/api/traffic_mngr.h \
$(srcdir)/include/odp/api/version.h \
- $(srcdir)/include/odp/api/visibility_begin.h \
- $(srcdir)/include/odp/api/visibility_end.h \
$(srcdir)/arch/@ARCH_DIR@/odp/api/cpu_arch.h
odpapiplatincludedir= $(includedir)/odp/api/plat
@@ -96,7 +99,14 @@ odpapiplatinclude_HEADERS = \
$(srcdir)/include/odp/api/plat/traffic_mngr_types.h \
$(srcdir)/include/odp/api/plat/version_types.h
+odpdrvincludedir = $(includedir)/odp/drv
+odpdrvinclude_HEADERS = \
+ $(srcdir)/include/odp/drv/compiler.h
+
noinst_HEADERS = \
+ ${srcdir}/include/_fdserver_internal.h \
+ ${srcdir}/include/_ishm_internal.h \
+ ${srcdir}/include/_ishmphy_internal.h \
${srcdir}/include/odp_align_internal.h \
${srcdir}/include/odp_atomic_internal.h \
${srcdir}/include/odp_buffer_inlines.h \
@@ -123,9 +133,8 @@ noinst_HEADERS = \
${srcdir}/include/odp_pool_internal.h \
${srcdir}/include/odp_posix_extensions.h \
${srcdir}/include/odp_queue_internal.h \
+ ${srcdir}/include/odp_ring_internal.h \
${srcdir}/include/odp_schedule_if.h \
- ${srcdir}/include/odp_schedule_internal.h \
- ${srcdir}/include/odp_schedule_ordered_internal.h \
${srcdir}/include/odp_sorted_list_internal.h \
${srcdir}/include/odp_shm_internal.h \
${srcdir}/include/odp_timer_internal.h \
@@ -139,6 +148,9 @@ noinst_HEADERS = \
${srcdir}/Makefile.inc
__LIB__libodp_linux_la_SOURCES = \
+ _fdserver.c \
+ _ishm.c \
+ _ishmphy.c \
odp_atomic.c \
odp_barrier.c \
odp_buffer.c \
@@ -176,7 +188,6 @@ __LIB__libodp_linux_la_SOURCES = \
odp_rwlock_recursive.c \
odp_schedule.c \
odp_schedule_if.c \
- odp_schedule_ordered.c \
odp_schedule_sp.c \
odp_shared_memory.c \
odp_sorted_list.c \
diff --git a/platform/linux-generic/_fdserver.c b/platform/linux-generic/_fdserver.c
new file mode 100644
index 000000000..9aed7a9ff
--- /dev/null
+++ b/platform/linux-generic/_fdserver.c
@@ -0,0 +1,673 @@
+/* Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * This file implements a file descriptor sharing server enabling
+ * sharing of file descriptors between processes, regardless of fork time.
+ *
+ * File descriptors are process scoped, but they can be "sent and converted
+ * on the fly" between processes using special unix domain socket ancillary
+ * data.
+ * The receiving process gets a file descriptor "pointing" to the same thing
+ * as the one sent (but the value of the file descriptor itself may be different
+ * from the one sent).
+ * Because ODP applications are responsible for creating ODP threads (i.e.
+ * pthreads or linux processes), ODP has no control on the order things happen:
+ * Nothing prevents thread A from forking B and C, and then C creating a pktio
+ * which will be used by A and B to send/receive packets.
+ * Assuming this pktio uses a file descriptor, the latter will need to be
+ * shared between the processes, despite the "non convenient" fork time.
+ * The shared memory allocator is likely to use this as well to be able to
+ * share memory regardless of fork() time.
+ * This server handles a table of {(context,key)<-> fd} pair, and is
+ * interfaced by the following functions:
+ *
+ * _odp_fdserver_register_fd(context, key, fd_to_send);
+ * _odp_fdserver_deregister_fd(context, key);
+ * _odp_fdserver_lookup_fd(context, key);
+ *
+ * which are used to register/deregister or query for a file descriptor based
+ * on a context and key value couple, which has to be unique.
+ *
+ * Note again that the file descriptors stored here are local to this server
+ * process and get converted both when registered or looked up.
+ */
+
+#include <odp_posix_extensions.h>
+#include <odp/api/spinlock.h>
+#include <odp_internal.h>
+#include <odp_debug_internal.h>
+#include <_fdserver_internal.h>
+#include <sys/prctl.h>
+#include <signal.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <string.h>
+#include <sys/types.h>
+#include <signal.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <sys/mman.h>
+#include <sys/wait.h>
+
+#define FDSERVER_SOCKPATH_MAXLEN 32
+#define FDSERVER_SOCKPATH_FORMAT "/tmp/odp-%d-fdserver"
+#define FDSERVER_BACKLOG 5
+
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+/* when accessing the client functions, clients should be mutexed: */
+odp_spinlock_t *client_lock;
+
+/* define the tables of file descriptors handled by this server: */
+#define FDSERVER_MAX_ENTRIES 256
+typedef struct fdentry_s {
+ fd_server_context_e context;
+ uint64_t key;
+ int fd;
+} fdentry_t;
+static fdentry_t *fd_table;
+static int fd_table_nb_entries;
+
+/*
+ * define the message struct used for communication between client and server
+ * (this single message is used in both direction)
+ * The file descriptors are sent out of band as ancillary data for conversion.
+ */
+typedef struct fd_server_msg {
+ int command;
+ fd_server_context_e context;
+ uint64_t key;
+} fdserver_msg_t;
+/* possible commands are: */
+#define FD_REGISTER_REQ 1 /* client -> server */
+#define FD_REGISTER_ACK 2 /* server -> client */
+#define FD_REGISTER_NACK 3 /* server -> client */
+#define FD_LOOKUP_REQ 4 /* client -> server */
+#define FD_LOOKUP_ACK 5 /* server -> client */
+#define FD_LOOKUP_NACK 6 /* server -> client */
+#define FD_DEREGISTER_REQ 7 /* client -> server */
+#define FD_DEREGISTER_ACK 8 /* server -> client */
+#define FD_DEREGISTER_NACK 9 /* server -> client */
+#define FD_SERVERSTOP_REQ 10 /* client -> server (stops) */
+
+/*
+ * Client and server function:
+ * Send a fdserver_msg, possibly including a file descriptor, on the socket
+ * This function is used both by:
+ * -the client (sending a FD_REGISTER_REQ with a file descriptor to be shared,
+ * or FD_LOOKUP_REQ/FD_DEREGISTER_REQ without a file descriptor)
+ * -the server (sending FD_REGISTER_ACK/NACK, FD_LOOKUP_NACK,
+ * FD_DEREGISTER_ACK/NACK... without a fd or a
+ * FD_LOOKUP_ACK with a fd)
+ * This function makes use of the ancillary data (control data) to pass and
+ * convert file descriptors over UNIX sockets
+ * Return -1 on error, 0 on success.
+ */
+static int send_fdserver_msg(int sock, int command,
+ fd_server_context_e context, uint64_t key,
+ int fd_to_send)
+{
+ struct msghdr socket_message;
+	struct iovec io_vector[1]; /* one msg fragment only */
+ struct cmsghdr *control_message = NULL;
+ int *fd_location;
+ fdserver_msg_t msg;
+ int res;
+
+ char ancillary_data[CMSG_SPACE(sizeof(int))];
+
+	/* prepare the register request body (single fragment): */
+ msg.command = command;
+ msg.context = context;
+ msg.key = key;
+ io_vector[0].iov_base = &msg;
+ io_vector[0].iov_len = sizeof(fdserver_msg_t);
+
+ /* initialize socket message */
+ memset(&socket_message, 0, sizeof(struct msghdr));
+ socket_message.msg_iov = io_vector;
+ socket_message.msg_iovlen = 1;
+
+ if (fd_to_send >= 0) {
+ /* provide space for the ancillary data */
+ memset(ancillary_data, 0, CMSG_SPACE(sizeof(int)));
+ socket_message.msg_control = ancillary_data;
+ socket_message.msg_controllen = CMSG_SPACE(sizeof(int));
+
+ /* initialize a single ancillary data element for fd passing */
+ control_message = CMSG_FIRSTHDR(&socket_message);
+ control_message->cmsg_level = SOL_SOCKET;
+ control_message->cmsg_type = SCM_RIGHTS;
+ control_message->cmsg_len = CMSG_LEN(sizeof(int));
+ fd_location = (int *)(void *)CMSG_DATA(control_message);
+ *fd_location = fd_to_send;
+ }
+ res = sendmsg(sock, &socket_message, 0);
+ if (res < 0) {
+ ODP_ERR("send_fdserver_msg: %s\n", strerror(errno));
+ return(-1);
+ }
+
+ return 0;
+}
+
+/*
+ * Client and server function
+ * Receive a fdserver_msg, possibly including a file descriptor, on the
+ * given socket.
+ * This function is used both by:
+ * -the server (receiving a FD_REGISTER_REQ with a file descriptor to be shared,
+ * or FD_LOOKUP_REQ, FD_DEREGISTER_REQ without a file descriptor)
+ * -the client (receiving FD_REGISTER_ACK...without a fd or a FD_LOOKUP_ACK with
+ * a fd)
+ * This function makes use of the ancillary data (control data) to pass and
+ * convert file descriptors over UNIX sockets.
+ * Return -1 on error, 0 on success.
+ */
+static int recv_fdserver_msg(int sock, int *command,
+ fd_server_context_e *context, uint64_t *key,
+ int *recvd_fd)
+{
+ struct msghdr socket_message;
+	struct iovec io_vector[1]; /* one msg fragment only */
+ struct cmsghdr *control_message = NULL;
+ int *fd_location;
+ fdserver_msg_t msg;
+ char ancillary_data[CMSG_SPACE(sizeof(int))];
+
+ memset(&socket_message, 0, sizeof(struct msghdr));
+ memset(ancillary_data, 0, CMSG_SPACE(sizeof(int)));
+
+ /* setup a place to fill in message contents */
+ io_vector[0].iov_base = &msg;
+ io_vector[0].iov_len = sizeof(fdserver_msg_t);
+ socket_message.msg_iov = io_vector;
+ socket_message.msg_iovlen = 1;
+
+ /* provide space for the ancillary data */
+ socket_message.msg_control = ancillary_data;
+ socket_message.msg_controllen = CMSG_SPACE(sizeof(int));
+
+ /* receive the message */
+ if (recvmsg(sock, &socket_message, MSG_CMSG_CLOEXEC) < 0) {
+ ODP_ERR("recv_fdserver_msg: %s\n", strerror(errno));
+ return(-1);
+ }
+
+ *command = msg.command;
+ *context = msg.context;
+ *key = msg.key;
+
+ /* grab the converted file descriptor (if any) */
+ *recvd_fd = -1;
+
+ if ((socket_message.msg_flags & MSG_CTRUNC) == MSG_CTRUNC)
+ return 0;
+
+ /* iterate ancillary elements to find the file descriptor: */
+ for (control_message = CMSG_FIRSTHDR(&socket_message);
+ control_message != NULL;
+ control_message = CMSG_NXTHDR(&socket_message, control_message)) {
+ if ((control_message->cmsg_level == SOL_SOCKET) &&
+ (control_message->cmsg_type == SCM_RIGHTS)) {
+ fd_location = (int *)(void *)CMSG_DATA(control_message);
+ *recvd_fd = *fd_location;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/* opens and returns a connected socket to the server */
+static int get_socket(void)
+{
+ char sockpath[FDSERVER_SOCKPATH_MAXLEN];
+ int s_sock; /* server socket */
+ struct sockaddr_un remote;
+ int len;
+
+ /* construct the named socket path: */
+ snprintf(sockpath, FDSERVER_SOCKPATH_MAXLEN, FDSERVER_SOCKPATH_FORMAT,
+ odp_global_data.main_pid);
+
+ s_sock = socket(AF_UNIX, SOCK_STREAM, 0);
+ if (s_sock == -1) {
+ ODP_ERR("cannot connect to server: %s\n", strerror(errno));
+ return(-1);
+ }
+
+ remote.sun_family = AF_UNIX;
+ strcpy(remote.sun_path, sockpath);
+ len = strlen(remote.sun_path) + sizeof(remote.sun_family);
+ if (connect(s_sock, (struct sockaddr *)&remote, len) == -1) {
+ ODP_ERR("cannot connect to server: %s\n", strerror(errno));
+ close(s_sock);
+ return(-1);
+ }
+
+ return s_sock;
+}
+
+/*
+ * Client function:
+ * Register a file descriptor to the server. Return -1 on error.
+ */
+int _odp_fdserver_register_fd(fd_server_context_e context, uint64_t key,
+ int fd_to_send)
+{
+ int s_sock; /* server socket */
+ int res;
+ int command;
+ int fd;
+
+ odp_spinlock_lock(client_lock);
+
+ ODP_DBG("FD client register: pid=%d key=%" PRIu64 ", fd=%d\n",
+ getpid(), key, fd_to_send);
+
+ s_sock = get_socket();
+ if (s_sock < 0) {
+ odp_spinlock_unlock(client_lock);
+ return(-1);
+ }
+
+ res = send_fdserver_msg(s_sock, FD_REGISTER_REQ, context, key,
+ fd_to_send);
+ if (res < 0) {
+ ODP_ERR("fd registration failure\n");
+ close(s_sock);
+ odp_spinlock_unlock(client_lock);
+ return -1;
+ }
+
+ res = recv_fdserver_msg(s_sock, &command, &context, &key, &fd);
+
+ if ((res < 0) || (command != FD_REGISTER_ACK)) {
+ ODP_ERR("fd registration failure\n");
+ close(s_sock);
+ odp_spinlock_unlock(client_lock);
+ return -1;
+ }
+
+ close(s_sock);
+
+ odp_spinlock_unlock(client_lock);
+ return 0;
+}
+
+/*
+ * Client function:
+ * Deregister a file descriptor from the server. Return -1 on error.
+ */
+int _odp_fdserver_deregister_fd(fd_server_context_e context, uint64_t key)
+{
+ int s_sock; /* server socket */
+ int res;
+ int command;
+ int fd;
+
+ odp_spinlock_lock(client_lock);
+
+ ODP_DBG("FD client deregister: pid=%d key=%" PRIu64 "\n",
+ getpid(), key);
+
+ s_sock = get_socket();
+ if (s_sock < 0) {
+ odp_spinlock_unlock(client_lock);
+ return(-1);
+ }
+
+ res = send_fdserver_msg(s_sock, FD_DEREGISTER_REQ, context, key, -1);
+ if (res < 0) {
+ ODP_ERR("fd de-registration failure\n");
+ close(s_sock);
+ odp_spinlock_unlock(client_lock);
+ return -1;
+ }
+
+ res = recv_fdserver_msg(s_sock, &command, &context, &key, &fd);
+
+ if ((res < 0) || (command != FD_DEREGISTER_ACK)) {
+ ODP_ERR("fd de-registration failure\n");
+ close(s_sock);
+ odp_spinlock_unlock(client_lock);
+ return -1;
+ }
+
+ close(s_sock);
+
+ odp_spinlock_unlock(client_lock);
+ return 0;
+}
+
+/*
+ * client function:
+ * lookup a file descriptor from the server. return -1 on error,
+ * or the file descriptor on success (>=0).
+ */
+int _odp_fdserver_lookup_fd(fd_server_context_e context, uint64_t key)
+{
+ int s_sock; /* server socket */
+ int res;
+ int command;
+ int fd;
+
+ odp_spinlock_lock(client_lock);
+
+ s_sock = get_socket();
+ if (s_sock < 0) {
+ odp_spinlock_unlock(client_lock);
+ return(-1);
+ }
+
+ res = send_fdserver_msg(s_sock, FD_LOOKUP_REQ, context, key, -1);
+ if (res < 0) {
+ ODP_ERR("fd lookup failure\n");
+ close(s_sock);
+ odp_spinlock_unlock(client_lock);
+ return -1;
+ }
+
+ res = recv_fdserver_msg(s_sock, &command, &context, &key, &fd);
+
+ if ((res < 0) || (command != FD_LOOKUP_ACK)) {
+ ODP_ERR("fd lookup failure\n");
+ close(s_sock);
+ odp_spinlock_unlock(client_lock);
+ return -1;
+ }
+
+ close(s_sock);
+ ODP_DBG("FD client lookup: pid=%d, key=%" PRIu64 ", fd=%d\n",
+ getpid(), key, fd);
+
+ odp_spinlock_unlock(client_lock);
+ return fd;
+}
+
+/*
+ * request server termination:
+ */
+static int stop_server(void)
+{
+ int s_sock; /* server socket */
+ int res;
+
+ odp_spinlock_lock(client_lock);
+
+ ODP_DBG("FD sending server stop request\n");
+
+ s_sock = get_socket();
+ if (s_sock < 0) {
+ odp_spinlock_unlock(client_lock);
+ return(-1);
+ }
+
+ res = send_fdserver_msg(s_sock, FD_SERVERSTOP_REQ, 0, 0, -1);
+ if (res < 0) {
+ ODP_ERR("fd stop request failure\n");
+ close(s_sock);
+ odp_spinlock_unlock(client_lock);
+ return -1;
+ }
+
+ close(s_sock);
+
+ odp_spinlock_unlock(client_lock);
+ return 0;
+}
+
+/*
+ * server function
+ * receive a client request and handle it.
+ * Always returns 0 unless a stop request is received.
+ */
+static int handle_request(int client_sock)
+{
+ int command;
+ fd_server_context_e context;
+ uint64_t key;
+ int fd;
+ int i;
+
+ /* get a client request: */
+ recv_fdserver_msg(client_sock, &command, &context, &key, &fd);
+ switch (command) {
+ case FD_REGISTER_REQ:
+ if ((fd < 0) || (context >= FD_SRV_CTX_END)) {
+ ODP_ERR("Invalid register fd or context\n");
+ send_fdserver_msg(client_sock, FD_REGISTER_NACK,
+ FD_SRV_CTX_NA, 0, -1);
+ return 0;
+ }
+
+ /* store the file descriptor in table: */
+ if (fd_table_nb_entries < FDSERVER_MAX_ENTRIES) {
+ fd_table[fd_table_nb_entries].context = context;
+ fd_table[fd_table_nb_entries].key = key;
+ fd_table[fd_table_nb_entries++].fd = fd;
+ ODP_DBG("storing {ctx=%d, key=%" PRIu64 "}->fd=%d\n",
+ context, key, fd);
+ } else {
+ ODP_ERR("FD table full\n");
+ send_fdserver_msg(client_sock, FD_REGISTER_NACK,
+ FD_SRV_CTX_NA, 0, -1);
+ return 0;
+ }
+
+ send_fdserver_msg(client_sock, FD_REGISTER_ACK,
+ FD_SRV_CTX_NA, 0, -1);
+ break;
+
+ case FD_LOOKUP_REQ:
+ if (context >= FD_SRV_CTX_END) {
+ ODP_ERR("invalid lookup context\n");
+ send_fdserver_msg(client_sock, FD_LOOKUP_NACK,
+ FD_SRV_CTX_NA, 0, -1);
+ return 0;
+ }
+
+ /* search key in table and sent reply: */
+ for (i = 0; i < fd_table_nb_entries; i++) {
+ if ((fd_table[i].context == context) &&
+ (fd_table[i].key == key)) {
+ fd = fd_table[i].fd;
+ ODP_DBG("lookup {ctx=%d,"
+ " key=%" PRIu64 "}->fd=%d\n",
+ context, key, fd);
+ send_fdserver_msg(client_sock,
+ FD_LOOKUP_ACK, context, key,
+ fd);
+ return 0;
+ }
+ }
+
+ /* context+key not found... send nack */
+ send_fdserver_msg(client_sock, FD_LOOKUP_NACK, context, key,
+ -1);
+ break;
+
+ case FD_DEREGISTER_REQ:
+ if (context >= FD_SRV_CTX_END) {
+ ODP_ERR("invalid deregister context\n");
+ send_fdserver_msg(client_sock, FD_DEREGISTER_NACK,
+ FD_SRV_CTX_NA, 0, -1);
+ return 0;
+ }
+
+ /* search key in table and remove it if found, and reply: */
+ for (i = 0; i < fd_table_nb_entries; i++) {
+ if ((fd_table[i].context == context) &&
+ (fd_table[i].key == key)) {
+ ODP_DBG("drop {ctx=%d,"
+ " key=%" PRIu64 "}->fd=%d\n",
+ context, key, fd_table[i].fd);
+ close(fd_table[i].fd);
+ fd_table[i] = fd_table[--fd_table_nb_entries];
+ send_fdserver_msg(client_sock,
+ FD_DEREGISTER_ACK,
+ context, key, -1);
+ return 0;
+ }
+ }
+
+ /* key not found... send nack */
+ send_fdserver_msg(client_sock, FD_DEREGISTER_NACK,
+ context, key, -1);
+ break;
+
+ case FD_SERVERSTOP_REQ:
+ ODP_DBG("Stoping FD server\n");
+ return 1;
+
+ default:
+ ODP_ERR("Unexpected request\n");
+ break;
+ }
+ return 0;
+}
+
+/*
+ * server function
+ * loop forever, handling client requests one by one
+ */
+static void wait_requests(int sock)
+{
+ int c_socket; /* client connection */
+ unsigned int addr_sz;
+ struct sockaddr_un remote;
+
+ for (;;) {
+ addr_sz = sizeof(remote);
+ c_socket = accept(sock, (struct sockaddr *)&remote, &addr_sz);
+ if (c_socket == -1) {
+ ODP_ERR("wait_requests: %s\n", strerror(errno));
+ return;
+ }
+
+ if (handle_request(c_socket))
+ break;
+ close(c_socket);
+ }
+ close(c_socket);
+}
+
+/*
+ * Create a unix domain socket and fork a process to listen to incoming
+ * requests.
+ */
+int _odp_fdserver_init_global(void)
+{
+ char sockpath[FDSERVER_SOCKPATH_MAXLEN];
+ int sock;
+ struct sockaddr_un local;
+ pid_t server_pid;
+ int res;
+
+ /* create the client spinlock that any client can see: */
+ client_lock = mmap(NULL, sizeof(odp_spinlock_t), PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+
+ odp_spinlock_init(client_lock);
+
+ /* construct the server named socket path: */
+ snprintf(sockpath, FDSERVER_SOCKPATH_MAXLEN, FDSERVER_SOCKPATH_FORMAT,
+ odp_global_data.main_pid);
+
+ /* create UNIX domain socket: */
+ sock = socket(AF_UNIX, SOCK_STREAM, 0);
+ if (sock == -1) {
+ ODP_ERR("_odp_fdserver_init_global: %s\n", strerror(errno));
+ return(-1);
+ }
+
+ /* remove previous named socket if it already exists: */
+ unlink(sockpath);
+
+ /* bind to new named socket: */
+ local.sun_family = AF_UNIX;
+ strncpy(local.sun_path, sockpath, sizeof(local.sun_path));
+ res = bind(sock, (struct sockaddr *)&local, sizeof(struct sockaddr_un));
+ if (res == -1) {
+ ODP_ERR("_odp_fdserver_init_global: %s\n", strerror(errno));
+ close(sock);
+ return(-1);
+ }
+
+	/* listen for incoming connections: */
+ if (listen(sock, FDSERVER_BACKLOG) == -1) {
+ ODP_ERR("_odp_fdserver_init_global: %s\n", strerror(errno));
+ close(sock);
+ return(-1);
+ }
+
+ /* fork a server process: */
+ server_pid = fork();
+ if (server_pid == -1) {
+ ODP_ERR("Could not fork!\n");
+ close(sock);
+ return(-1);
+ }
+
+ if (server_pid == 0) { /*child */
+ /* TODO: pin the server on appropriate service cpu mask */
+ /* when (if) we can agree on the usage of service mask */
+
+ /* request to be killed if parent dies, hence avoiding */
+ /* orphans being "adopted" by the init process... */
+ prctl(PR_SET_PDEATHSIG, SIGTERM);
+
+ /* allocate the space for the file descriptor<->key table: */
+ fd_table = malloc(FDSERVER_MAX_ENTRIES * sizeof(fdentry_t));
+ if (!fd_table) {
+ ODP_ERR("maloc failed!\n");
+ exit(1);
+ }
+
+ /* wait for clients requests */
+ wait_requests(sock); /* Returns when server is stopped */
+ close(sock);
+
+ /* release the file descriptor table: */
+ free(fd_table);
+
+ exit(0);
+ }
+
+ /* parent */
+ close(sock);
+ return 0;
+}
+
+/*
+ * Terminate the server
+ */
+int _odp_fdserver_term_global(void)
+{
+ int status;
+ char sockpath[FDSERVER_SOCKPATH_MAXLEN];
+
+	/* close the server and wait for child termination */
+ stop_server();
+ wait(&status);
+
+ /* construct the server named socket path: */
+ snprintf(sockpath, FDSERVER_SOCKPATH_MAXLEN, FDSERVER_SOCKPATH_FORMAT,
+ odp_global_data.main_pid);
+
+ /* delete the UNIX domain socket: */
+ unlink(sockpath);
+
+ return 0;
+}
diff --git a/platform/linux-generic/_ishm.c b/platform/linux-generic/_ishm.c
new file mode 100644
index 000000000..f88983467
--- /dev/null
+++ b/platform/linux-generic/_ishm.c
@@ -0,0 +1,1716 @@
+/* Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/* This file handles the internal shared memory: internal shared memory
+ * is memory which is sharable by all ODP threads regardless of how the
+ * ODP thread is implemented (pthread or process) and regardless of fork()
+ * time.
+ * Moreover, when reserved with the _ODP_ISHM_SINGLE_VA flag,
+ * internal shared memory is guaranteed to always be located at the same virtual
+ * address, i.e. pointers to internal shared memory are fully shareable
+ * between odp threads (regardless of thread type or fork time) in that case.
+ * Internal shared memory is mainly meant to be used internally within ODP
+ * (hence its name), but may also be allocated by odp applications and drivers,
+ * in the future (through these interfaces).
+ * To guarantee this full pointer shareability (when reserved with the
+ * _ODP_ISHM_SINGLE_VA flag) internal shared memory is handled as follows:
+ * At global_init time, a huge virtual address space reservation is performed.
+ * Note that this is just reserving virtual space, not physical memory.
+ * Because all ODP threads (pthreads or processes) are descendants of the ODP
+ * instantiation process, this VA space is inherited by all ODP threads.
+ * When internal shmem reservation actually occurs, and
+ * when reserved with the _ODP_ISHM_SINGLE_VA flag, physical memory is
+ * allocated, and mapped (MAP_FIXED) to some part in the huge preallocated
+ * address space area:
+ * because this virtual address space is common to all ODP threads, we
+ * know this mapping will succeed, and not clash with anything else.
+ * Hence, an ODP thread which performs a lookup for the same ishm block
+ * can map it at the same VA address.
+ * When internal shared memory is released, the physical memory is released
+ * and the corresponding virtual space returned to its "pool" of preallocated
+ * virtual space (assuming it was allocated from there).
+ * Note, though, that, if 2 linux processes share the same ishm block,
+ * the virtual space is marked as released as soon as one of the processes
+ * releases the ishm block, but the physical memory space is actually released
+ * by the kernel once all processes have done a ishm operation (i,e. a sync).
+ * This is due to the fact that linux does not contain any syscall to unmap
+ * memory from a different process.
+ *
+ * This file contains functions to handle the VA area (handling fragmentation
+ * and defragmentation resulting from different allocs/release) and also
+ * define the functions to allocate, release and lookup internal shared
+ * memory:
+ * _odp_ishm_reserve(), _odp_ishm_free*() and _odp_ishm_lookup*()...
+ */
+#include <odp_posix_extensions.h>
+#include <odp_config_internal.h>
+#include <odp_internal.h>
+#include <odp/api/spinlock.h>
+#include <odp/api/align.h>
+#include <odp/api/system_info.h>
+#include <odp/api/debug.h>
+#include <odp_shm_internal.h>
+#include <odp_debug_internal.h>
+#include <odp_align_internal.h>
+#include <_fdserver_internal.h>
+#include <_ishm_internal.h>
+#include <_ishmphy_internal.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <inttypes.h>
+#include <sys/wait.h>
+#include <libgen.h>
+
+/*
+ * Maximum number of internal shared memory blocks.
+ *
+ * This is the number of separate ISHM areas that can be reserved concurrently.
+ * (Note that freeing such blocks may take time, or possibly never happen
+ * if some of the block owners never procsync() after free. This number
+ * should take that into account.)
+ */
+#define ISHM_MAX_NB_BLOCKS 128
+
+/*
+ * Maximum internal shared memory block name length in chars
+ * probably taking the same number as SHM name size make sense at this stage
+ */
+#define ISHM_NAME_MAXLEN 32
+
+/*
+ * Linux underlying file name: <directory>/odp-<odp_pid>-ishm-<name>
+ * The <name> part may be replaced by a sequence number if no specific
+ * name is given at reserve time
+ * <directory> is either /tmp or the hugepagefs mount point for default size.
+ * (searched at init time)
+ */
+#define ISHM_FILENAME_MAXLEN (ISHM_NAME_MAXLEN + 64)
+#define ISHM_FILENAME_FORMAT "%s/odp-%d-ishm-%s"
+#define ISHM_FILENAME_NORMAL_PAGE_DIR "/tmp"
+
+/*
+ * when the memory is to be shared with an external entity (such as another
+ * ODP instance or an OS process not part of this ODP instance) then a
+ * export file is created describing the exported memory: this defines the
+ * location and the filename format of this description file
+ */
+#define ISHM_EXPTNAME_FORMAT "/tmp/odp-%d-shm-%s"
+
+/*
+ * In the worst case the virtual space gets so fragmented that there is
+ * an unallocated fragment between each allocated fragment:
+ * In that case, the number of fragments to take care of is twice the
+ * number of ISHM blocks + 1.
+ */
+#define ISHM_NB_FRAGMNTS (ISHM_MAX_NB_BLOCKS * 2 + 1)
+
+/*
+ * when a memory block is to be exported outside its ODP instance,
+ * a block 'attribute file' is created in /tmp/odp-<pid>-shm-<name>.
+ * The information given in this file is according to the following:
+ */
+#define EXPORT_FILE_LINE1_FMT "ODP exported shm block info:"
+#define EXPORT_FILE_LINE2_FMT "ishm_blockname: %s"
+#define EXPORT_FILE_LINE3_FMT "file: %s"
+#define EXPORT_FILE_LINE4_FMT "length: %" PRIu64
+#define EXPORT_FILE_LINE5_FMT "flags: %" PRIu32
+#define EXPORT_FILE_LINE6_FMT "user_length: %" PRIu64
+#define EXPORT_FILE_LINE7_FMT "user_flags: %" PRIu32
+#define EXPORT_FILE_LINE8_FMT "align: %" PRIu32
+/*
+ * A fragment describes a piece of the shared virtual address space,
+ * and is allocated only when allocation is done with the _ODP_ISHM_SINGLE_VA
+ * flag:
+ * A fragment is said to be used when it actually does represent some
+ * portion of the virtual address space, and is said to be unused when
+ * it does not (so at start, one single fragment is used -describing the
+ * whole address space as unallocated-, and all others are unused).
+ * Fragments get used as address space fragmentation increases.
+ * A fragment is allocated if the piece of address space it
+ * describes is actually used by a shared memory block.
+ * Allocated fragments get their block_index set >=0.
+ */
+/* Node of the doubly linked, address-ordered fragment list
+ * (see ishm_ftable_t below). */
+typedef struct ishm_fragment {
+ struct ishm_fragment *prev; /* not used when the fragment is unused */
+ struct ishm_fragment *next; /* next fragment, or free-list link */
+ void *start; /* start of segment (VA) */
+ uintptr_t len; /* length of segment. multiple of page size */
+ int block_index; /* -1 for unallocated fragments */
+} ishm_fragment_t;
+
+/*
+ * A block describes a piece of reserved memory: Any successful ishm_reserve()
+ * will allocate a block. A ishm_reserve() with the _ODP_ISHM_SINGLE_VA flag set
+ * will allocate both a block and a fragment.
+ * Blocks contain only global data common to all processes.
+ */
+/* Page type of a block: EXTERNAL means the fd was provided by the caller,
+ * so the underlying page type cannot be determined here. */
+typedef enum {UNKNOWN, HUGE, NORMAL, EXTERNAL} huge_flag_t;
+typedef struct ishm_block {
+ char name[ISHM_NAME_MAXLEN]; /* name for the ishm block (if any) */
+ char filename[ISHM_FILENAME_MAXLEN]; /* name of the .../odp-* file */
+ char exptname[ISHM_FILENAME_MAXLEN]; /* name of the export file */
+ uint32_t user_flags; /* any flags the user want to remember. */
+ uint32_t flags; /* block creation flags. */
+ uint32_t external_fd:1; /* block FD was externally provided */
+ uint64_t user_len; /* length, as requested at reserve time. */
+ void *start; /* only valid if _ODP_ISHM_SINGLE_VA is set*/
+ uint64_t len; /* length. multiple of page size. 0 if free*/
+ ishm_fragment_t *fragment; /* used when _ODP_ISHM_SINGLE_VA is used */
+ huge_flag_t huge; /* page type: external means unknown here. */
+ uint64_t seq; /* sequence number, incremented on alloc and free */
+ uint64_t refcnt;/* number of linux processes mapping this block */
+} ishm_block_t;
+
+/*
+ * Table of blocks describing allocated internal shared memory
+ * This table is visible to every ODP thread (linux process or pthreads).
+ * (it is allocated shared at odp init time and is therefore inherited by all)
+ * Table index is used as handle, so it cannot move!. Entry is regarded as
+ * free when len==0
+ */
+typedef struct {
+ odp_spinlock_t lock; /* protects this table (taken by e.g. reserve) */
+ uint64_t dev_seq; /* used when creating device names */
+ uint32_t odpthread_cnt; /* number of running ODP threads */
+ ishm_block_t block[ISHM_MAX_NB_BLOCKS];
+} ishm_table_t;
+static ishm_table_t *ishm_tbl;
+
+/*
+ * Process local table containing the list of (believed) allocated blocks seen
+ * from the current process. There is one such table per linux process. linux
+ * threads within a process shares this table.
+ * The contents within this table may become obsolete when other processes
+ * reserve/free ishm blocks. This is what the procsync() function
+ * catches by comparing the block sequence number with the one in this table.
+ * This table is filled at ishm_reserve and ishm_lookup time.
+ * Entries are removed at ishm_free or procsync time.
+ * Note that flags and len are present in this table and seems to be redundant
+ * with those present in the ishm block table: but this is not fully true:
+ * When ishm_sync() detects obsolete mappings and tries to remove them,
+ * the entry in the ishm block table is then obsolete, and the values which are
+ * found in this table must be used to perform the ummap.
+ * (and the values in the block tables are needed at lookup time...)
+ */
+typedef struct {
+ int thrd_refcnt; /* number of pthreads in this process, really */
+ struct {
+ int block_index; /* entry in the ishm_tbl */
+ uint32_t flags; /* flags used at creation time */
+ uint64_t seq; /* block seq as last seen; procsync() compares */
+ void *start; /* start of block (VA) */
+ uint64_t len; /* length of block. multiple of page size */
+ int fd; /* file descriptor used for this block */
+ } entry[ISHM_MAX_NB_BLOCKS];
+ int nb_entries;
+} ishm_proctable_t;
+static ishm_proctable_t *ishm_proctable;
+
+/*
+ * Table of fragments describing the common virtual address space:
+ * This table is visible to every ODP thread (linux process or pthreads).
+ * (it is allocated at odp init time and is therefore inherited by all)
+ */
+typedef struct {
+ ishm_fragment_t fragment[ISHM_NB_FRAGMNTS]; /* fragment node pool */
+ ishm_fragment_t *used_fragmnts; /* ordered by increasing start addr */
+ ishm_fragment_t *unused_fragmnts; /* free list (linked via next) */
+} ishm_ftable_t;
+static ishm_ftable_t *ishm_ftbl;
+
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+/* prototypes: */
+static void procsync(void);
+
+/*
+ * Take a piece of the preallocated virtual space to fit "size" bytes.
+ * (best fit). Size must be rounded up to an integer number of pages size.
+ * Possibly split the fragment to keep track of remaining space.
+ * Returns the allocated fragment (*best_fragmnt) and the corresponding
+ * address (or NULL on failure, when no fragment is large enough).
+ * External caller must ensure mutex before the call!
+ */
+static void *alloc_fragment(uintptr_t size, int block_index, intptr_t align,
+ ishm_fragment_t **best_fragmnt)
+{
+ ishm_fragment_t *fragmnt;
+ *best_fragmnt = NULL;
+ ishm_fragment_t *rem_fragmnt;
+ uintptr_t border;/* possible start of new fragment (next alignment) */
+ intptr_t left; /* room remaining after, if the segment is allocated */
+ uintptr_t remainder = ODP_CONFIG_ISHM_VA_PREALLOC_SZ;
+
+ /*
+ * search for the best fit, i.e. search for the unallocated fragment
+ * which would give the least remainder if the new fragment was
+ * allocated within it:
+ */
+ for (fragmnt = ishm_ftbl->used_fragmnts;
+ fragmnt; fragmnt = fragmnt->next) {
+ /* skip allocated segment: */
+ if (fragmnt->block_index >= 0)
+ continue;
+ /* skip too short segment: */
+ /* round start up to alignment (NOTE(review): assumes align
+ * is a power of two -- confirm callers guarantee this) */
+ border = ((uintptr_t)fragmnt->start + align - 1) & (-align);
+ left =
+ ((uintptr_t)fragmnt->start + fragmnt->len) - (border + size);
+ if (left < 0)
+ continue;
+ /* remember best fit: */
+ if ((uintptr_t)left < remainder) {
+ remainder = left; /* best, so far */
+ *best_fragmnt = fragmnt;
+ }
+ }
+
+ if (!(*best_fragmnt)) {
+ ODP_ERR("unable to get virtual address for shmem block!\n.");
+ return NULL;
+ }
+
+ (*best_fragmnt)->block_index = block_index;
+ border = ((uintptr_t)(*best_fragmnt)->start + align - 1) & (-align);
+
+ /*
+ * if there is room between previous fragment and new one, (due to
+ * alignment requirement) then fragment (split) the space between
+ * the end of the previous fragment and the beginning of the new one:
+ */
+ if (border - (uintptr_t)(*best_fragmnt)->start > 0) {
+ /* fragment space, i.e. take a new fragment descriptor... */
+ rem_fragmnt = ishm_ftbl->unused_fragmnts;
+ if (!rem_fragmnt) {
+ ODP_ERR("unable to get shmem fragment descriptor!\n.");
+ return NULL;
+ }
+ ishm_ftbl->unused_fragmnts = rem_fragmnt->next;
+
+ /* and link it between best_fragmnt->prev and best_fragmnt */
+ if ((*best_fragmnt)->prev)
+ (*best_fragmnt)->prev->next = rem_fragmnt;
+ else
+ ishm_ftbl->used_fragmnts = rem_fragmnt;
+ rem_fragmnt->prev = (*best_fragmnt)->prev;
+ (*best_fragmnt)->prev = rem_fragmnt;
+ rem_fragmnt->next = (*best_fragmnt);
+
+ /* update length: rem_fragmnt getting space before border */
+ rem_fragmnt->block_index = -1;
+ rem_fragmnt->start = (*best_fragmnt)->start;
+ rem_fragmnt->len = border - (uintptr_t)(*best_fragmnt)->start;
+ (*best_fragmnt)->start =
+ (void *)((uintptr_t)rem_fragmnt->start + rem_fragmnt->len);
+ (*best_fragmnt)->len -= rem_fragmnt->len;
+ }
+
+ /* if this was a perfect fit, i.e. no free space follows, we are done */
+ if (remainder == 0)
+ return (*best_fragmnt)->start;
+
+ /* otherwise, fragment space, i.e. take a new fragment descriptor... */
+ rem_fragmnt = ishm_ftbl->unused_fragmnts;
+ if (!rem_fragmnt) {
+ /* no descriptor left: the tail space stays inside the
+ * allocated fragment (wasteful but safe) */
+ ODP_ERR("unable to get shmem fragment descriptor!\n.");
+ return (*best_fragmnt)->start;
+ }
+ ishm_ftbl->unused_fragmnts = rem_fragmnt->next;
+
+ /* ... double link it... */
+ rem_fragmnt->next = (*best_fragmnt)->next;
+ rem_fragmnt->prev = (*best_fragmnt);
+ if ((*best_fragmnt)->next)
+ (*best_fragmnt)->next->prev = rem_fragmnt;
+ (*best_fragmnt)->next = rem_fragmnt;
+
+ /* ... and keep track of the remainder */
+ (*best_fragmnt)->len = size;
+ rem_fragmnt->len = remainder;
+ rem_fragmnt->start = (void *)((char *)(*best_fragmnt)->start + size);
+ rem_fragmnt->block_index = -1;
+
+ return (*best_fragmnt)->start;
+}
+
+/*
+ * Free a portion of virtual space.
+ * Possibly defragment, if the freed fragment is adjacent to another
+ * free virtual fragment: the current node absorbs its free neighbours,
+ * whose descriptors are returned to the unused (free) list.
+ * External caller must ensure mutex before the call!
+ */
+static void free_fragment(ishm_fragment_t *fragmnt)
+{
+ ishm_fragment_t *prev_f;
+ ishm_fragment_t *next_f;
+
+ /* sanity check */
+ if (!fragmnt)
+ return;
+
+ prev_f = fragmnt->prev;
+ next_f = fragmnt->next;
+
+ /* free the fragment */
+ fragmnt->block_index = -1;
+
+ /* check if the previous fragment is also free: if so, defragment */
+ if (prev_f && (prev_f->block_index < 0)) {
+ /* absorb prev_f: extend this fragment backwards */
+ fragmnt->start = prev_f->start;
+ fragmnt->len += prev_f->len;
+ if (prev_f->prev) {
+ prev_f->prev->next = fragmnt;
+ } else {
+ /* prev_f had no predecessor: it must be the head */
+ if (ishm_ftbl->used_fragmnts == prev_f)
+ ishm_ftbl->used_fragmnts = fragmnt;
+ else
+ ODP_ERR("corrupted fragment list!.\n");
+ }
+ fragmnt->prev = prev_f->prev;
+
+ /* put removed fragment in free list */
+ prev_f->prev = NULL;
+ prev_f->next = ishm_ftbl->unused_fragmnts;
+ ishm_ftbl->unused_fragmnts = prev_f;
+ }
+
+ /* check if the next fragment is also free: if so, defragment */
+ if (next_f && (next_f->block_index < 0)) {
+ /* absorb next_f: extend this fragment forwards */
+ fragmnt->len += next_f->len;
+ if (next_f->next)
+ next_f->next->prev = fragmnt;
+ fragmnt->next = next_f->next;
+
+ /* put removed fragment in free list */
+ next_f->prev = NULL;
+ next_f->next = ishm_ftbl->unused_fragmnts;
+ ishm_ftbl->unused_fragmnts = next_f;
+ }
+}
+
+/*
+ * Create the backing file of size len for an ishm block.
+ * The file is created as <dir>/odp-<pid>-<sequence_or_name>, where <dir>
+ * is /tmp for normal pages, or the hugepagefs mount point for huge pages.
+ * If _ODP_ISHM_EXPORT is set, a description file is also written for
+ * external reference; otherwise the data file is unlinked right away
+ * (its open fd keeps the inode alive).
+ * Returns the new file descriptor, or -1 on error.
+ */
+static int create_file(int block_index, huge_flag_t huge, uint64_t len,
+ uint32_t flags, uint32_t align)
+{
+ char *name;
+ int fd;
+ ishm_block_t *new_block; /* entry in the main block table */
+ char seq_string[ISHM_FILENAME_MAXLEN]; /* used to construct filename*/
+ char filename[ISHM_FILENAME_MAXLEN];/* filename in /tmp/ or /mnt/huge */
+ int oflag = O_RDWR | O_CREAT | O_TRUNC; /* flags for open */
+ FILE *export_file;
+ const char *dir;
+
+ new_block = &ishm_tbl->block[block_index];
+ name = new_block->name;
+
+ /* create the sequence string (used when no block name was given): */
+ snprintf(seq_string, ISHM_FILENAME_MAXLEN, "%08" PRIu64,
+ ishm_tbl->dev_seq++);
+
+ /* huge dir must be known to create files there!: */
+ if ((huge == HUGE) &&
+ (!odp_global_data.hugepage_info.default_huge_page_dir))
+ return -1;
+
+ dir = (huge == HUGE) ?
+ odp_global_data.hugepage_info.default_huge_page_dir :
+ ISHM_FILENAME_NORMAL_PAGE_DIR;
+ snprintf(filename, ISHM_FILENAME_MAXLEN,
+ ISHM_FILENAME_FORMAT, dir,
+ odp_global_data.main_pid,
+ (name && name[0]) ? name : seq_string);
+
+ fd = open(filename, oflag, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+ if (fd < 0) {
+ /* missing huge pages are common: only debug level there */
+ if (huge == HUGE)
+ ODP_DBG("open failed for %s: %s.\n",
+ filename, strerror(errno));
+ else
+ ODP_ERR("open failed for %s: %s.\n",
+ filename, strerror(errno));
+ return -1;
+ }
+
+ if (ftruncate(fd, len) == -1) {
+ ODP_ERR("ftruncate failed: fd=%d, err=%s.\n",
+ fd, strerror(errno));
+ close(fd);
+ unlink(filename);
+ return -1;
+ }
+
+ /* Always record the file name (fix: it was previously recorded only
+ * for exported blocks, leaving possibly stale data in recycled block
+ * entries and confusing the do_map()/delete_file() cleanup paths).
+ * snprintf (unlike strncpy) guarantees NUL termination. */
+ snprintf(new_block->filename, ISHM_FILENAME_MAXLEN, "%s", filename);
+
+ /* if _ODP_ISHM_EXPORT is set, create a description file for
+ * external ref:
+ */
+ if (flags & _ODP_ISHM_EXPORT) {
+ snprintf(new_block->exptname, ISHM_FILENAME_MAXLEN,
+ ISHM_EXPTNAME_FORMAT,
+ odp_global_data.main_pid,
+ (name && name[0]) ? name : seq_string);
+ export_file = fopen(new_block->exptname, "w");
+ if (export_file == NULL) {
+ /* non fatal: the block simply won't be exported */
+ ODP_ERR("open failed: err=%s.\n",
+ strerror(errno));
+ new_block->exptname[0] = 0;
+ } else {
+ fprintf(export_file, EXPORT_FILE_LINE1_FMT "\n");
+ fprintf(export_file, EXPORT_FILE_LINE2_FMT "\n", name);
+ fprintf(export_file, EXPORT_FILE_LINE3_FMT "\n",
+ new_block->filename);
+ fprintf(export_file, EXPORT_FILE_LINE4_FMT "\n", len);
+ fprintf(export_file, EXPORT_FILE_LINE5_FMT "\n", flags);
+ fprintf(export_file, EXPORT_FILE_LINE6_FMT "\n",
+ new_block->user_len);
+ fprintf(export_file, EXPORT_FILE_LINE7_FMT "\n",
+ new_block->user_flags);
+ fprintf(export_file, EXPORT_FILE_LINE8_FMT "\n", align);
+
+ fclose(export_file);
+ }
+ } else {
+ new_block->exptname[0] = 0;
+ /* remove the file from the filesystem, keeping its fd open */
+ unlink(filename);
+ }
+
+ return fd;
+}
+
+/* Remove the filesystem artefacts of an ishm block: the data file
+ * (absent when the fd was externally provided) and the export
+ * description file (absent when the block was not exported). */
+static void delete_file(ishm_block_t *block)
+{
+ const char *paths[2];
+ int i;
+
+ paths[0] = block->filename;
+ paths[1] = block->exptname;
+
+ for (i = 0; i < 2; i++)
+ if (paths[i][0] != 0)
+ unlink(paths[i]);
+}
+
+/*
+ * performs the mapping, possibly allocating a fragment of the pre-reserved
+ * VA space if the _ODP_ISHM_SINGLE_VA flag was given.
+ * Sets fd (creating the backing file unless *fd >= 0 was passed in), and
+ * returns the mapping address, or NULL on failure.
+ * (NOTE(review): the original comment claimed this function also sets the
+ * _ODP_ISHM_SINGLE_VA flag when alignment requires it; it does not -- the
+ * caller, _odp_ishm_reserve(), does that before calling here.)
+ * Mutex must be assured by the caller.
+ */
+static void *do_map(int block_index, uint64_t len, uint32_t align,
+ uint32_t flags, huge_flag_t huge, int *fd)
+{
+ ishm_block_t *new_block; /* entry in the main block table */
+ void *addr = NULL;
+ void *mapped_addr;
+ ishm_fragment_t *fragment = NULL;
+
+ new_block = &ishm_tbl->block[block_index];
+
+ /*
+ * Creates a file to /tmp/odp-<pid>-<sequence> (for normal pages)
+ * or /mnt/huge/odp-<pid>-<sequence> (for huge pages)
+ * unless a fd was already given
+ */
+ if (*fd < 0) {
+ *fd = create_file(block_index, huge, len, flags, align);
+ if (*fd < 0)
+ return NULL;
+ } else {
+ new_block->filename[0] = 0; /* external fd: no file owned */
+ }
+
+ /* allocate an address range in the prebooked VA area if needed */
+ if (flags & _ODP_ISHM_SINGLE_VA) {
+ addr = alloc_fragment(len, block_index, align, &fragment);
+ if (!addr) {
+ ODP_ERR("alloc_fragment failed.\n");
+ /* filename[0] != 0 means we created the file here */
+ if (new_block->filename[0]) {
+ close(*fd);
+ *fd = -1;
+ delete_file(new_block);
+ }
+ return NULL;
+ }
+ ishm_tbl->block[block_index].fragment = fragment;
+ }
+
+ /* try to mmap: */
+ mapped_addr = _odp_ishmphy_map(*fd, addr, len, flags);
+ if (mapped_addr == NULL) {
+ /* undo the fragment allocation and file creation (if ours) */
+ if (flags & _ODP_ISHM_SINGLE_VA)
+ free_fragment(fragment);
+ if (new_block->filename[0]) {
+ close(*fd);
+ *fd = -1;
+ delete_file(new_block);
+ }
+ return NULL;
+ }
+
+ return mapped_addr;
+}
+
+/*
+ * Perform an extra mapping (for a process trying to see an existing block
+ * i.e. performing a lookup).
+ * For _ODP_ISHM_SINGLE_VA blocks the mapping is pinned at the block's
+ * fragment address; otherwise the kernel chooses the address.
+ * Returns the mapped address, or NULL on failure.
+ * Mutex must be assured by the caller.
+ */
+static void *do_remap(int block_index, int fd)
+{
+ ishm_block_t *block = &ishm_tbl->block[block_index];
+ void *va = NULL; /* NULL lets _odp_ishmphy_map pick the address */
+
+ if (block->flags & _ODP_ISHM_SINGLE_VA) {
+ if (!block->fragment) {
+ ODP_ERR("invalid fragment failure.\n");
+ return NULL;
+ }
+ va = block->fragment->start;
+ }
+
+ return _odp_ishmphy_map(fd, va, block->len, block->flags);
+}
+
+/*
+ * Unmap a block's memory and, if the block was reserved with
+ * _ODP_ISHM_SINGLE_VA, return its VA fragment to the pre-reserved pool.
+ * Returns the unmap result (0 when start is NULL, i.e. nothing mapped).
+ * Mutex must be assured by the caller.
+ */
+static int do_unmap(void *start, uint64_t size, uint32_t flags,
+ int block_index)
+{
+ int ret = 0;
+
+ if (start)
+ ret = _odp_ishmphy_unmap(start, size, flags);
+
+ /* give the reserved address space fragment back to the pool: */
+ if ((flags & _ODP_ISHM_SINGLE_VA) && (block_index >= 0))
+ free_fragment(ishm_tbl->block[block_index].fragment);
+
+ return ret;
+}
+
+/*
+ * Search for a given used and allocated block name.
+ * (search is performed in the global ishm table)
+ * Returns the index of the found block (if any) or -1 if none.
+ * Mutex must be assured by the caller.
+ */
+static int find_block_by_name(const char *name)
+{
+ int idx;
+
+ /* a missing or empty name never matches anything: */
+ if (!name || !name[0])
+ return -1;
+
+ for (idx = 0; idx < ISHM_MAX_NB_BLOCKS; idx++) {
+ ishm_block_t *blk = &ishm_tbl->block[idx];
+
+ /* len == 0 marks a free (unused) entry */
+ if (blk->len != 0 && strcmp(blk->name, name) == 0)
+ return idx;
+ }
+
+ return -1;
+}
+
+/*
+ * Search for a block by address (only works when flag _ODP_ISHM_SINGLE_VA
+ * was set at reserve() time, or if the block is already known by this
+ * process).
+ * Search is performed in the process table and in the global ishm table.
+ * The provided address does not have to be at start: any address
+ * within the block is OK, including the start address itself.
+ * Returns the index to the found block (if any) or -1 if none.
+ * Mutex must be assured by the caller.
+ */
+static int find_block_by_address(void *addr)
+{
+ int block_index;
+ int i;
+ ishm_fragment_t *fragmnt;
+
+ /*
+ * first check if there is already a process known block for this
+ * address
+ */
+ for (i = 0; i < ishm_proctable->nb_entries; i++) {
+ block_index = ishm_proctable->entry[i].block_index;
+ /* fix: use >= (was >) so the block's own start address
+ * matches, consistently with the global search below */
+ if ((addr >= ishm_proctable->entry[i].start) &&
+ ((char *)addr < ((char *)ishm_proctable->entry[i].start +
+ ishm_tbl->block[block_index].len)))
+ return block_index;
+ }
+
+ /*
+ * then check if there is an existing single VA block known by some
+ * other process and containing the given address
+ */
+ for (i = 0; i < ISHM_MAX_NB_BLOCKS; i++) {
+ if ((!ishm_tbl->block[i].len) ||
+ (!(ishm_tbl->block[i].flags & _ODP_ISHM_SINGLE_VA)))
+ continue;
+ fragmnt = ishm_tbl->block[i].fragment;
+ if (!fragmnt) {
+ ODP_ERR("find_fragment: invalid NULL fragment\n");
+ return -1;
+ }
+ if ((addr >= fragmnt->start) &&
+ ((char *)addr < ((char *)fragmnt->start + fragmnt->len)))
+ return i;
+ }
+
+ /* address does not belong to any accessible block: */
+ return -1;
+}
+
+/*
+ * Search a given ishm block in the process local table. Return its index
+ * in the process table or -1 if not found (meaning that the ishm table
+ * block index was not referenced in the process local table, i.e. the
+ * block is known by some other process, but not by the current process).
+ * Caller must assure mutex.
+ */
+static int procfind_block(int block_index)
+{
+ int idx;
+
+ /* linear scan of the process-local entries: */
+ for (idx = 0; idx < ishm_proctable->nb_entries; idx++)
+ if (ishm_proctable->entry[idx].block_index == block_index)
+ return idx;
+
+ return -1;
+}
+
+/*
+ * Release the physical memory mapping for blocks which have been freed
+ * by other processes (detected by a sequence number mismatch between
+ * this process' view and the global block table).
+ * Mutex must be assured by the caller.
+ */
+static void procsync(void)
+{
+ int i = 0;
+ int last;
+ ishm_block_t *block;
+
+ last = ishm_proctable->nb_entries;
+ while (i < last) {
+ /* if the process sequence number doesn't match the main
+ * table seq number, this entry is obsolete
+ */
+ block = &ishm_tbl->block[ishm_proctable->entry[i].block_index];
+ if (ishm_proctable->entry[i].seq != block->seq) {
+ /* obsolete entry: free memory and remove proc entry */
+ close(ishm_proctable->entry[i].fd);
+ _odp_ishmphy_unmap(ishm_proctable->entry[i].start,
+ ishm_proctable->entry[i].len,
+ ishm_proctable->entry[i].flags);
+ /* swap-remove: copy the last entry here and do NOT
+ * advance i, so the swapped-in entry is checked too */
+ ishm_proctable->entry[i] =
+ ishm_proctable->entry[--last];
+ } else {
+ i++;
+ }
+ }
+ ishm_proctable->nb_entries = last;
+}
+
+/*
+ * Allocate and map internal shared memory, or other objects:
+ * A new shared memory block is allocated and the provided fd is mapped
+ * in it (if fd >= 0 was given); in that case the huge page type cannot
+ * be determined here (EXTERNAL).
+ * Note: the given name is recorded with the block, but its uniqueness
+ * is NOT enforced here.
+ * If no fd is provided, a shared memory file desc named
+ * /tmp/odp-<pid>-ishm-<name_or_sequence> is created and mapped.
+ * (the name is different for huge page file as they must be on hugepagefs)
+ * The function returns the index of the newly created block in the
+ * main block table (>=0) or -1 on error.
+ */
+int _odp_ishm_reserve(const char *name, uint64_t size, int fd,
+ uint32_t align, uint32_t flags, uint32_t user_flags)
+{
+ int new_index; /* index in the main block table*/
+ ishm_block_t *new_block; /* entry in the main block table*/
+ uint64_t page_sz; /* normal page size. usually 4K*/
+ uint64_t page_hp_size; /* huge page size */
+ uint32_t hp_align;
+ uint64_t len; /* mapped length */
+ void *addr = NULL; /* mapping address */
+ int new_proc_entry;
+ struct stat statbuf;
+ static int huge_error_printed; /* to avoid millions of error...*/
+
+ odp_spinlock_lock(&ishm_tbl->lock);
+
+ /* update this process view... */
+ procsync();
+
+ /* Get system page sizes: page_hp_size is 0 if no huge page available*/
+ page_sz = odp_sys_page_size();
+ page_hp_size = odp_sys_huge_page_size();
+
+ /* grab a new entry: (len == 0 marks a free entry) */
+ for (new_index = 0; new_index < ISHM_MAX_NB_BLOCKS; new_index++) {
+ if (ishm_tbl->block[new_index].len == 0) {
+ /* Found free block */
+ break;
+ }
+ }
+
+ /* check if we have reached the maximum number of allocation: */
+ if (new_index >= ISHM_MAX_NB_BLOCKS) {
+ odp_spinlock_unlock(&ishm_tbl->lock);
+ ODP_ERR("ISHM_MAX_NB_BLOCKS limit reached!\n");
+ return -1;
+ }
+
+ new_block = &ishm_tbl->block[new_index];
+
+ /* fix: entries are recycled, so clear any stale file names to
+ * prevent delete_file() from unlinking unrelated paths on the
+ * error paths below: */
+ new_block->filename[0] = 0;
+ new_block->exptname[0] = 0;
+
+ /* save block name (if any given), making sure it is terminated
+ * (fix: plain strncpy does not terminate over-long names): */
+ if (name) {
+ strncpy(new_block->name, name, ISHM_NAME_MAXLEN - 1);
+ new_block->name[ISHM_NAME_MAXLEN - 1] = 0;
+ } else {
+ new_block->name[0] = 0;
+ }
+
+ /* save user data: */
+ new_block->user_flags = user_flags;
+ new_block->user_len = size;
+
+ /* If a file descriptor is provided, get the real size and map: */
+ if (fd >= 0) {
+ /* fix: check fstat, otherwise len would be garbage */
+ if (fstat(fd, &statbuf) < 0) {
+ close(fd);
+ odp_spinlock_unlock(&ishm_tbl->lock);
+ ODP_ERR("_ishm_reserve failed (fstat): %s.\n",
+ strerror(errno));
+ return -1;
+ }
+ len = statbuf.st_size;
+ /* note that the huge page flag is meaningless here as huge
+ * page is determined by the provided file descriptor: */
+ addr = do_map(new_index, len, align, flags, EXTERNAL, &fd);
+ if (addr == NULL) {
+ close(fd);
+ odp_spinlock_unlock(&ishm_tbl->lock);
+ ODP_ERR("_ishm_reserve failed.\n");
+ return -1;
+ }
+ new_block->huge = EXTERNAL;
+ new_block->external_fd = 1;
+ } else {
+ new_block->external_fd = 0;
+ }
+
+ /* Otherwise, Try first huge pages when possible and needed: */
+ if ((fd < 0) && page_hp_size && (size > page_sz)) {
+ /* at least, alignment in VA should match page size, but user
+ * can request more: If the user requirement exceeds the page
+ * size then we have to make sure the block will be mapped at
+ * the same address every where, otherwise alignment may be
+ * be wrong for some process */
+ hp_align = align;
+ if (hp_align <= odp_sys_huge_page_size())
+ hp_align = odp_sys_huge_page_size();
+ else
+ flags |= _ODP_ISHM_SINGLE_VA;
+
+ /* roundup to page size */
+ len = (size + (page_hp_size - 1)) & (-page_hp_size);
+ addr = do_map(new_index, len, hp_align, flags, HUGE, &fd);
+
+ if (addr == NULL) {
+ if (!huge_error_printed) {
+ ODP_ERR("No huge pages, fall back to normal "
+ "pages. "
+ "check: /proc/sys/vm/nr_hugepages.\n");
+ huge_error_printed = 1;
+ }
+ } else {
+ new_block->huge = HUGE;
+ }
+ }
+
+ /* Try normal pages if huge pages failed */
+ if (fd < 0) {
+ /* at least, alignment in VA should match page size, but user
+ * can request more: If the user requirement exceeds the page
+ * size then we have to make sure the block will be mapped at
+ * the same address every where, otherwise alignment may be
+ * be wrong for some process */
+ if (align <= odp_sys_page_size())
+ align = odp_sys_page_size();
+ else
+ flags |= _ODP_ISHM_SINGLE_VA;
+
+ /* roundup to page size */
+ len = (size + (page_sz - 1)) & (-page_sz);
+ addr = do_map(new_index, len, align, flags, NORMAL, &fd);
+ new_block->huge = NORMAL;
+ }
+
+ /* if neither huge pages or normal pages works, we cannot proceed: */
+ if ((fd < 0) || (addr == NULL) || (len == 0)) {
+ if ((!new_block->external_fd) && (fd >= 0))
+ close(fd);
+ delete_file(new_block);
+ odp_spinlock_unlock(&ishm_tbl->lock);
+ ODP_ERR("_ishm_reserve failed.\n");
+ return -1;
+ }
+
+ /* remember block data and increment block seq number to mark change */
+ new_block->len = len;
+ new_block->user_len = size;
+ new_block->flags = flags;
+ new_block->user_flags = user_flags;
+ new_block->seq++;
+ new_block->refcnt = 1;
+ new_block->start = addr; /* only for SINGLE_VA*/
+
+ /* the allocation succeeded: update the process local view */
+ new_proc_entry = ishm_proctable->nb_entries++;
+ ishm_proctable->entry[new_proc_entry].block_index = new_index;
+ ishm_proctable->entry[new_proc_entry].flags = flags;
+ ishm_proctable->entry[new_proc_entry].seq = new_block->seq;
+ ishm_proctable->entry[new_proc_entry].start = addr;
+ ishm_proctable->entry[new_proc_entry].len = len;
+ ishm_proctable->entry[new_proc_entry].fd = fd;
+
+ /* register the file descriptor to the file descriptor server. */
+ _odp_fdserver_register_fd(FD_SRV_CTX_ISHM, new_index, fd);
+
+ odp_spinlock_unlock(&ishm_tbl->lock);
+ return new_index;
+}
+
+/*
+ * Try to map a memory block exported by another ODP instance into the
+ * current ODP instance, by reading its description file and reserving
+ * the backing file under the given local name.
+ * Returns the new block index (>= 0) on success, or -1 on error.
+ */
+int _odp_ishm_find_exported(const char *remote_name, pid_t external_odp_pid,
+ const char *local_name)
+{
+ char export_filename[ISHM_FILENAME_MAXLEN];
+ char blockname[ISHM_FILENAME_MAXLEN];
+ char filename[ISHM_FILENAME_MAXLEN];
+ FILE *export_file;
+ uint64_t len;
+ uint32_t flags;
+ uint64_t user_len;
+ uint32_t user_flags;
+ uint32_t align;
+ int fd;
+ int block_index;
+
+ /* try to read the block description file: */
+ snprintf(export_filename, ISHM_FILENAME_MAXLEN,
+ ISHM_EXPTNAME_FORMAT,
+ external_odp_pid,
+ remote_name);
+
+ export_file = fopen(export_filename, "r");
+
+ if (export_file == NULL) {
+ ODP_ERR("Error opening %s.\n", export_filename);
+ return -1;
+ }
+
+ /* NOTE(review): the "%s" conversions below (from the EXPORT_FILE_*
+ * format macros) have no field width, so an over-long name/path in
+ * the description file could overflow the ISHM_FILENAME_MAXLEN
+ * buffers. The file is written by create_file() with bounded
+ * snprintf, but it lives in /tmp -- consider adding widths. */
+ if (fscanf(export_file, EXPORT_FILE_LINE1_FMT " ") != 0)
+ goto error_exp_file;
+
+ if (fscanf(export_file, EXPORT_FILE_LINE2_FMT " ", blockname) != 1)
+ goto error_exp_file;
+
+ if (fscanf(export_file, EXPORT_FILE_LINE3_FMT " ", filename) != 1)
+ goto error_exp_file;
+
+ if (fscanf(export_file, EXPORT_FILE_LINE4_FMT " ", &len) != 1)
+ goto error_exp_file;
+
+ if (fscanf(export_file, EXPORT_FILE_LINE5_FMT " ", &flags) != 1)
+ goto error_exp_file;
+
+ if (fscanf(export_file, EXPORT_FILE_LINE6_FMT " ", &user_len) != 1)
+ goto error_exp_file;
+
+ if (fscanf(export_file, EXPORT_FILE_LINE7_FMT " ", &user_flags) != 1)
+ goto error_exp_file;
+
+ if (fscanf(export_file, EXPORT_FILE_LINE8_FMT " ", &align) != 1)
+ goto error_exp_file;
+
+ fclose(export_file);
+
+ /* now open the filename given in the description file: */
+ fd = open(filename, O_RDWR, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+ if (fd == -1) {
+ ODP_ERR("open failed for %s: %s.\n",
+ filename, strerror(errno));
+ return -1;
+ }
+
+ /* clear the _ODP_ISHM_EXPORT flag so we don't export that again*/
+ flags &= ~(uint32_t)_ODP_ISHM_EXPORT;
+
+ /* reserve the memory, providing the opened file descriptor:
+ * (on success, the fd is recorded with the new block -- it is
+ * only closed here on failure) */
+ block_index = _odp_ishm_reserve(local_name, 0, fd, align, flags, 0);
+ if (block_index < 0) {
+ close(fd);
+ return block_index;
+ }
+
+ /* set inherited info: */
+ ishm_tbl->block[block_index].user_flags = user_flags;
+ ishm_tbl->block[block_index].user_len = user_len;
+
+ return block_index;
+
+error_exp_file:
+ fclose(export_file);
+ ODP_ERR("Error reading %s.\n", export_filename);
+ return -1;
+}
+
+/*
+ * Free and unmap internal shared memory:
+ * The file descriptor is closed and the .../odp-* file deleted,
+ * unless fd was externally provided at reserve() time.
+ * return 0 if OK, and -1 on error.
+ * Mutex must be assured by the caller.
+ */
+static int block_free(int block_index)
+{
+ int proc_index;
+ ishm_block_t *block; /* entry in the main block table*/
+ int last;
+
+ /* reject out-of-range indexes and blocks already free (len == 0
+ * marks a free slot in the main table): */
+ if ((block_index < 0) ||
+ (block_index >= ISHM_MAX_NB_BLOCKS) ||
+ (ishm_tbl->block[block_index].len == 0)) {
+ ODP_ERR("Request to free an invalid block\n");
+ return -1;
+ }
+
+ block = &ishm_tbl->block[block_index];
+
+ /* if the block is mapped in this process, unmap it and drop the
+ * local fd before touching the global state: */
+ proc_index = procfind_block(block_index);
+ if (proc_index >= 0) {
+ /* close the related fd */
+ close(ishm_proctable->entry[proc_index].fd);
+
+ /* remove the mapping and possible fragment */
+ do_unmap(ishm_proctable->entry[proc_index].start,
+ block->len,
+ ishm_proctable->entry[proc_index].flags,
+ block_index);
+
+ /* remove entry from process local table: move the last entry
+ * into the freed slot (order is not significant) */
+ last = ishm_proctable->nb_entries - 1;
+ ishm_proctable->entry[proc_index] =
+ ishm_proctable->entry[last];
+ ishm_proctable->nb_entries = last;
+ } else {
+ /* just possibly free the fragment as no mapping exist here: */
+ do_unmap(NULL, 0, block->flags, block_index);
+ }
+
+ /* remove all files related to this block: */
+ delete_file(block);
+
+ /* deregister the file descriptor from the file descriptor server. */
+ _odp_fdserver_deregister_fd(FD_SRV_CTX_ISHM, block_index);
+
+ /* mark the block as free in the main block table: */
+ /* NOTE(review): block->refcnt is not consulted; other processes
+ * still mapping the block rely on observing the seq bump below
+ * during procsync() -- confirm this is the intended semantic. */
+ block->len = 0;
+
+ /* mark the change so other processes see this entry as obsolete: */
+ block->seq++;
+
+ return 0;
+}
+
+/*
+ * Free and unmap internal shared memory, identified by its block number:
+ * return -1 on error. 0 if OK.
+ */
+int _odp_ishm_free_by_index(int block_index)
+{
+ int ret;
+
+ odp_spinlock_lock(&ishm_tbl->lock);
+ /* first catch up with block changes made by other processes: */
+ procsync();
+
+ ret = block_free(block_index);
+ odp_spinlock_unlock(&ishm_tbl->lock);
+ return ret;
+}
+
+/*
+ * free and unmap internal shared memory, identified by its block name:
+ * return -1 on error. 0 if OK.
+ */
+int _odp_ishm_free_by_name(const char *name)
+{
+ int block_index;
+ int ret;
+
+ odp_spinlock_lock(&ishm_tbl->lock);
+ /* first catch up with block changes made by other processes: */
+ procsync();
+
+ /* search the block in main ishm table */
+ block_index = find_block_by_name(name);
+ if (block_index < 0) {
+ ODP_ERR("Request to free an non existing block..."
+ " (double free?)\n");
+ odp_spinlock_unlock(&ishm_tbl->lock);
+ return -1;
+ }
+
+ ret = block_free(block_index);
+ odp_spinlock_unlock(&ishm_tbl->lock);
+ return ret;
+}
+
+/*
+ * Free and unmap internal shared memory identified by address:
+ * return -1 on error. 0 if OK.
+ */
+int _odp_ishm_free_by_address(void *addr)
+{
+ int block_index;
+ int ret;
+
+ odp_spinlock_lock(&ishm_tbl->lock);
+ /* first catch up with block changes made by other processes: */
+ procsync();
+
+ /* search the block in main ishm table */
+ block_index = find_block_by_address(addr);
+ if (block_index < 0) {
+ ODP_ERR("Request to free an non existing block..."
+ " (double free?)\n");
+ odp_spinlock_unlock(&ishm_tbl->lock);
+ return -1;
+ }
+
+ ret = block_free(block_index);
+
+ odp_spinlock_unlock(&ishm_tbl->lock);
+ return ret;
+}
+
+/*
+ * Lookup for an ishm shared memory, identified by its block index
+ * in the main ishm block table.
+ * Map this ishm area in the process VA (if not already present).
+ * Returns the block user address or NULL on error.
+ * Mutex must be assured by the caller.
+ */
+static void *block_lookup(int block_index)
+{
+ int proc_index;
+ int fd = -1;
+ ishm_block_t *block;
+ void *mapped_addr;
+ int new_entry;
+
+ /* reject out-of-range indexes and free slots (len == 0): */
+ if ((block_index < 0) ||
+ (block_index >= ISHM_MAX_NB_BLOCKS) ||
+ (ishm_tbl->block[block_index].len == 0)) {
+ ODP_ERR("Request to lookup an invalid block\n");
+ return NULL;
+ }
+
+ /* search it in process table: if there, this process knows it already*/
+ proc_index = procfind_block(block_index);
+ if (proc_index >= 0)
+ return ishm_proctable->entry[proc_index].start;
+
+ /* this ishm is not known by this process, yet: we create the mapping.*/
+ fd = _odp_fdserver_lookup_fd(FD_SRV_CTX_ISHM, block_index);
+ if (fd < 0) {
+ ODP_ERR("Could not find ishm file descriptor (BUG!)\n");
+ return NULL;
+ }
+
+ /* perform the mapping */
+ block = &ishm_tbl->block[block_index];
+
+ mapped_addr = do_remap(block_index, fd);
+ if (mapped_addr == NULL) {
+ ODP_ERR(" lookup: Could not map existing shared memory!\n");
+ return NULL;
+ }
+
+ /* the mapping succeeded: update the process local view and take a
+ * reference on the block for this process: */
+ new_entry = ishm_proctable->nb_entries++;
+ ishm_proctable->entry[new_entry].block_index = block_index;
+ ishm_proctable->entry[new_entry].flags = block->flags;
+ ishm_proctable->entry[new_entry].seq = block->seq;
+ ishm_proctable->entry[new_entry].start = mapped_addr;
+ ishm_proctable->entry[new_entry].len = block->len;
+ ishm_proctable->entry[new_entry].fd = fd;
+ block->refcnt++;
+
+ return mapped_addr;
+}
+
+/*
+ * Lookup for an ishm shared memory, identified by its block_index.
+ * Maps this ishmem area in the process VA (if not already present).
+ * Returns the block user address, or NULL if the index
+ * does not match any known ishm blocks.
+ */
+void *_odp_ishm_lookup_by_index(int block_index)
+{
+ void *ret;
+
+ odp_spinlock_lock(&ishm_tbl->lock);
+ /* first catch up with block changes made by other processes: */
+ procsync();
+
+ ret = block_lookup(block_index);
+ odp_spinlock_unlock(&ishm_tbl->lock);
+ return ret;
+}
+
+/*
+ * Lookup for an ishm shared memory, identified by its block name.
+ * Map this ishm area in the process VA (if not already present).
+ * Return the block index, or -1 if the index
+ * does not match any known ishm blocks.
+ */
+int _odp_ishm_lookup_by_name(const char *name)
+{
+ int block_index;
+
+ odp_spinlock_lock(&ishm_tbl->lock);
+ procsync();
+
+ /* search the block in main ishm table: return -1 if not found.
+ * block_lookup() also maps the block into this process (and takes
+ * a process reference) if needed: */
+ block_index = find_block_by_name(name);
+ if ((block_index < 0) || (!block_lookup(block_index))) {
+ odp_spinlock_unlock(&ishm_tbl->lock);
+ return -1;
+ }
+
+ odp_spinlock_unlock(&ishm_tbl->lock);
+ return block_index;
+}
+
+/*
+ * Lookup for an ishm shared memory block, identified by its VA address.
+ * This works only if the block has already been looked-up (mapped) by the
+ * current process or it it was created with the _ODP_ISHM_SINGLE_VA flag.
+ * Map this ishm area in the process VA (if not already present).
+ * Return the block index, or -1 if the address
+ * does not match any known ishm blocks.
+ */
+int _odp_ishm_lookup_by_address(void *addr)
+{
+ int block_index;
+
+ odp_spinlock_lock(&ishm_tbl->lock);
+ procsync();
+
+ /* search the block in main ishm table: return -1 if not found: */
+ block_index = find_block_by_address(addr);
+ if ((block_index < 0) || (!block_lookup(block_index))) {
+ odp_spinlock_unlock(&ishm_tbl->lock);
+ return -1;
+ }
+
+ odp_spinlock_unlock(&ishm_tbl->lock);
+ return block_index;
+}
+
+/*
+ * Returns the VA address of a given block (which has to be known in the current
+ * process). Returns NULL if the block is unknown.
+ */
+void *_odp_ishm_address(int block_index)
+{
+ int proc_index;
+ void *addr;
+
+ odp_spinlock_lock(&ishm_tbl->lock);
+ procsync();
+
+ /* reject out-of-range indexes and free slots (len == 0): */
+ if ((block_index < 0) ||
+ (block_index >= ISHM_MAX_NB_BLOCKS) ||
+ (ishm_tbl->block[block_index].len == 0)) {
+ ODP_ERR("Request for address on an invalid block\n");
+ odp_spinlock_unlock(&ishm_tbl->lock);
+ return NULL;
+ }
+
+ /* unlike block_lookup(), this does NOT map the block: the block
+ * must already be known (mapped) by the current process: */
+ proc_index = procfind_block(block_index);
+ if (proc_index < 0) {
+ odp_spinlock_unlock(&ishm_tbl->lock);
+ return NULL;
+ }
+
+ addr = ishm_proctable->entry[proc_index].start;
+ odp_spinlock_unlock(&ishm_tbl->lock);
+ return addr;
+}
+
+/* Fill 'info' with the attributes of the given block.
+ * Fails (-1) if the index is invalid or the block is not mapped in the
+ * current process (addr would be meaningless here otherwise). */
+int _odp_ishm_info(int block_index, _odp_ishm_info_t *info)
+{
+ int proc_index;
+
+ odp_spinlock_lock(&ishm_tbl->lock);
+ procsync();
+
+ if ((block_index < 0) ||
+ (block_index >= ISHM_MAX_NB_BLOCKS) ||
+ (ishm_tbl->block[block_index].len == 0)) {
+ odp_spinlock_unlock(&ishm_tbl->lock);
+ ODP_ERR("Request for info on an invalid block\n");
+ return -1;
+ }
+
+ /* search it in process table: if not there, need to map*/
+ proc_index = procfind_block(block_index);
+ if (proc_index < 0) {
+ odp_spinlock_unlock(&ishm_tbl->lock);
+ return -1;
+ }
+
+ /* name points into the (shared) main table entry -- valid as long
+ * as the block is not freed: */
+ info->name = ishm_tbl->block[block_index].name;
+ info->addr = ishm_proctable->entry[proc_index].start;
+ info->size = ishm_tbl->block[block_index].user_len;
+ info->page_size = (ishm_tbl->block[block_index].huge == HUGE) ?
+ odp_sys_huge_page_size() : odp_sys_page_size();
+ info->flags = ishm_tbl->block[block_index].flags;
+ info->user_flags = ishm_tbl->block[block_index].user_flags;
+
+ odp_spinlock_unlock(&ishm_tbl->lock);
+ return 0;
+}
+
+/* Per-ODP-thread (pthread or process) initialisation: create the process
+ * local table if this is the first thread of this linux process, and fix
+ * up reference counts after a fork. Returns 0 on success, -1 on failure. */
+static int do_odp_ishm_init_local(void)
+{
+ int i;
+ int block_index;
+
+ /*
+ * the ishm_process table is local to each linux process
+ * Check that no other linux threads (of same or ancestor processes)
+ * have already created the table, and create it if needed.
+ * We protect this with the general ishm lock to avoid
+ * init race condition of different running threads.
+ */
+ odp_spinlock_lock(&ishm_tbl->lock);
+ ishm_tbl->odpthread_cnt++; /* count ODPthread (pthread or process) */
+ if (!ishm_proctable) {
+ ishm_proctable = malloc(sizeof(ishm_proctable_t));
+ if (!ishm_proctable) {
+ odp_spinlock_unlock(&ishm_tbl->lock);
+ return -1;
+ }
+ memset(ishm_proctable, 0, sizeof(ishm_proctable_t));
+ }
+ /* gettid == getpid identifies the main (first) thread of a linux
+ * process, hence a fresh process rather than an extra pthread: */
+ if (syscall(SYS_gettid) != getpid())
+ ishm_proctable->thrd_refcnt++; /* new linux thread */
+ else
+ ishm_proctable->thrd_refcnt = 1;/* new linux process */
+
+ /*
+ * if this ODP thread is actually a new linux process, (as opposed
+ * to a pthread), i.e, we just forked, then all shmem blocks
+ * of the parent process are mapped into this child by inheritance.
+ * (The process local table is inherited as well). We hence have to
+ * increase the process refcount for each of the inherited mappings:
+ */
+ if (syscall(SYS_gettid) == getpid()) {
+ for (i = 0; i < ishm_proctable->nb_entries; i++) {
+ block_index = ishm_proctable->entry[i].block_index;
+ ishm_tbl->block[block_index].refcnt++;
+ }
+ }
+
+ odp_spinlock_unlock(&ishm_tbl->lock);
+ return 0;
+}
+
+int _odp_ishm_init_global(void)
+{
+ void *addr;
+ void *spce_addr;
+ int i;
+
+ /* sanity check only: initialisation continues even on mismatch */
+ if ((getpid() != odp_global_data.main_pid) ||
+ (syscall(SYS_gettid) != getpid()))
+ ODP_ERR("odp_init_global() must be performed by the main "
+ "ODP process!\n.");
+
+ if (!odp_global_data.hugepage_info.default_huge_page_dir)
+ ODP_DBG("NOTE: No support for huge pages\n");
+ else
+ ODP_DBG("Huge pages mount point is: %s\n",
+ odp_global_data.hugepage_info.default_huge_page_dir);
+
+ /* allocate space for the internal shared mem block table: shared
+ * anonymous mapping so it is inherited by all descendants: */
+ addr = mmap(NULL, sizeof(ishm_table_t),
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+ if (addr == MAP_FAILED) {
+ ODP_ERR("unable to mmap the main block table\n.");
+ goto init_glob_err1;
+ }
+ ishm_tbl = addr;
+ memset(ishm_tbl, 0, sizeof(ishm_table_t));
+ ishm_tbl->dev_seq = 0;
+ ishm_tbl->odpthread_cnt = 0;
+ odp_spinlock_init(&ishm_tbl->lock);
+
+ /* allocate space for the internal shared mem fragment table: */
+ addr = mmap(NULL, sizeof(ishm_ftable_t),
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+ if (addr == MAP_FAILED) {
+ ODP_ERR("unable to mmap the main fragment table\n.");
+ goto init_glob_err2;
+ }
+ ishm_ftbl = addr;
+ memset(ishm_ftbl, 0, sizeof(ishm_ftable_t));
+
+ /*
+ * reserve the address space for _ODP_ISHM_SINGLE_VA reserved blocks,
+ * only address space!
+ */
+ spce_addr = _odp_ishmphy_book_va(ODP_CONFIG_ISHM_VA_PREALLOC_SZ,
+ odp_sys_huge_page_size());
+ if (!spce_addr) {
+ ODP_ERR("unable to reserve virtual space\n.");
+ goto init_glob_err3;
+ }
+
+ /* use the first fragment descriptor to describe to whole VA space: */
+ ishm_ftbl->fragment[0].block_index = -1;
+ ishm_ftbl->fragment[0].start = spce_addr;
+ ishm_ftbl->fragment[0].len = ODP_CONFIG_ISHM_VA_PREALLOC_SZ;
+ ishm_ftbl->fragment[0].prev = NULL;
+ ishm_ftbl->fragment[0].next = NULL;
+ ishm_ftbl->used_fragmnts = &ishm_ftbl->fragment[0];
+
+ /* and put all other fragment descriptors in the unused list
+ * (singly linked via 'next'; 'prev' is only meaningful on the
+ * used list): */
+ for (i = 1; i < ISHM_NB_FRAGMNTS - 1; i++) {
+ ishm_ftbl->fragment[i].prev = NULL;
+ ishm_ftbl->fragment[i].next = &ishm_ftbl->fragment[i + 1];
+ }
+ ishm_ftbl->fragment[ISHM_NB_FRAGMNTS - 1].prev = NULL;
+ ishm_ftbl->fragment[ISHM_NB_FRAGMNTS - 1].next = NULL;
+ ishm_ftbl->unused_fragmnts = &ishm_ftbl->fragment[1];
+
+ /*
+ * We run _odp_ishm_init_local() directly here to give the
+ * possibility to run shm_reserve() before the odp_init_local()
+ * is performed for the main thread... Many init_global() functions
+ * indeed assume the availability of odp_shm_reserve()...:
+ * NOTE(review): if this fails, the VA space booked above is not
+ * unbooked -- confirm whether that leak matters on this path.
+ */
+ return do_odp_ishm_init_local();
+
+init_glob_err3:
+ if (munmap(ishm_ftbl, sizeof(ishm_ftable_t)) < 0)
+ ODP_ERR("unable to munmap main fragment table\n.");
+init_glob_err2:
+ if (munmap(ishm_tbl, sizeof(ishm_table_t)) < 0)
+ ODP_ERR("unable to munmap main block table\n.");
+init_glob_err1:
+ return -1;
+}
+
+int _odp_ishm_init_local(void)
+{
+ /*
+ * Do not re-run this for the main ODP process, as it has already
+ * been done in advance at _odp_ishm_init_global() time:
+ */
+ if ((getpid() == odp_global_data.main_pid) &&
+ (syscall(SYS_gettid) == getpid()))
+ return 0;
+
+ return do_odp_ishm_init_local();
+}
+
+/* Per-ODP-thread termination helper (lock held by the caller):
+ * drops this thread's reference on the process-local table and, when the
+ * last thread of the linux process terminates, releases the table after
+ * dereferencing every block this process had mapped. Always returns 0. */
+static int do_odp_ishm_term_local(void)
+{
+ int i;
+ int proc_table_refcnt = 0;
+ int block_index;
+ ishm_block_t *block;
+
+ procsync();
+
+ ishm_tbl->odpthread_cnt--; /* decount ODPthread (pthread or process) */
+
+ /*
+ * The ishm_process table is local to each linux process
+ * Check that no other linux threads (of this linux process)
+ * still needs the table, and free it if so.
+ * We protect this with the general ishm lock to avoid
+ * term race condition of different running threads.
+ */
+ proc_table_refcnt = --ishm_proctable->thrd_refcnt;
+ if (!proc_table_refcnt) {
+ /*
+ * this is the last thread of this process...
+ * All mappings for this process are about to be lost...
+ * Go through the table of visible blocks for this process,
+ * decreasing the refcnt of each visible blocks, and issuing
+ * warning for those no longer referenced by any process.
+ * Note that non-referenced blocks are not freed: this is
+ * deliberate as this would imply that the semantic of the
+ * freeing function would differ depending on whether we run
+ * with odp_thread as processes or pthreads. With this approach,
+ * the user should always free the blocks manually, which is
+ * more consistent
+ */
+ for (i = 0; i < ishm_proctable->nb_entries; i++) {
+ block_index = ishm_proctable->entry[i].block_index;
+ block = &ishm_tbl->block[block_index];
+ if ((--block->refcnt) <= 0) {
+ block->refcnt = 0;
+ /* fix: report the main-table block index and
+ * its name; the previous code indexed the
+ * block table with the process-table index
+ * 'i', printing the wrong block: */
+ ODP_DBG("Warning: block %d: name:%s "
+ "no longer referenced\n",
+ block_index,
+ block->name[0] ?
+ block->name : "<no name>");
+ }
+ }
+
+ free(ishm_proctable);
+ ishm_proctable = NULL;
+ }
+
+ return 0;
+}
+
+int _odp_ishm_term_local(void)
+{
+ int ret;
+
+ odp_spinlock_lock(&ishm_tbl->lock);
+
+ /* postpone last thread term to allow free() by global term functions:
+ * (the deferred work is done in _odp_ishm_term_global() instead) */
+ if (ishm_tbl->odpthread_cnt == 1) {
+ odp_spinlock_unlock(&ishm_tbl->lock);
+ return 0;
+ }
+
+ ret = do_odp_ishm_term_local();
+ odp_spinlock_unlock(&ishm_tbl->lock);
+ return ret;
+}
+
+int _odp_ishm_term_global(void)
+{
+ int ret = 0;
+ int index;
+ ishm_block_t *block;
+
+ /* sanity check only: termination continues even on mismatch */
+ if ((getpid() != odp_global_data.main_pid) ||
+ (syscall(SYS_gettid) != getpid()))
+ ODP_ERR("odp_term_global() must be performed by the main "
+ "ODP process!\n.");
+
+ /* cleanup possibly non freed memory (and complain a bit):
+ * only the backing files are deleted here; the whole process is
+ * terminating, so mappings go away with it: */
+ for (index = 0; index < ISHM_MAX_NB_BLOCKS; index++) {
+ block = &ishm_tbl->block[index];
+ if (block->len != 0) {
+ ODP_ERR("block '%s' (file %s) was never freed "
+ "(cleaning up...).\n",
+ block->name, block->filename);
+ delete_file(block);
+ }
+ }
+
+ /* perform the last thread terminate which was postponed: */
+ ret = do_odp_ishm_term_local();
+
+ /* free the fragment table */
+ if (munmap(ishm_ftbl, sizeof(ishm_ftable_t)) < 0) {
+ ret |= -1;
+ ODP_ERR("unable to munmap fragment table\n.");
+ }
+ /* free the block table */
+ if (munmap(ishm_tbl, sizeof(ishm_table_t)) < 0) {
+ ret |= -1;
+ ODP_ERR("unable to munmap main table\n.");
+ }
+
+ /* free the reserved VA space */
+ if (_odp_ishmphy_unbook_va())
+ ret |= -1;
+
+ return ret;
+}
+
+/*
+ * Print the current ishm status (allocated blocks and VA space map)
+ * Return the number of allocated blocks (including those not mapped
+ * by the current odp thread). Also perform a number of sanity check.
+ * For debug.
+ */
+int _odp_ishm_status(const char *title)
+{
+ int i;
+ char flags[3];
+ char huge;
+ int proc_index;
+ ishm_fragment_t *fragmnt;
+ int consecutive_unallocated = 0; /* should never exceed 1 */
+ uintptr_t last_address = 0;
+ ishm_fragment_t *previous = NULL;
+ int nb_used_frgments = 0;
+ int nb_unused_frgments = 0; /* nb frag describing a VA area */
+ int nb_allocated_frgments = 0; /* nb frag describing an allocated VA */
+ int nb_blocks = 0;
+ int single_va_blocks = 0;
+
+ odp_spinlock_lock(&ishm_tbl->lock);
+ procsync();
+
+ ODP_DBG("ishm blocks allocated at: %s\n", title);
+
+ /* display block table: 1 line per entry +1 extra line if mapped here */
+ for (i = 0; i < ISHM_MAX_NB_BLOCKS; i++) {
+ if (ishm_tbl->block[i].len <= 0)
+ continue; /* unused block */
+
+ nb_blocks++;
+ if (ishm_tbl->block[i].flags & _ODP_ISHM_SINGLE_VA)
+ single_va_blocks++;
+
+ /* flags column: 'S'ingle-VA, 'L'ocked, '.' when unset */
+ flags[0] = (ishm_tbl->block[i].flags & _ODP_ISHM_SINGLE_VA) ?
+ 'S' : '.';
+ flags[1] = (ishm_tbl->block[i].flags & _ODP_ISHM_LOCK) ?
+ 'L' : '.';
+ flags[2] = 0;
+ switch (ishm_tbl->block[i].huge) {
+ case HUGE:
+ huge = 'H';
+ break;
+ case NORMAL:
+ huge = 'N';
+ break;
+ case EXTERNAL:
+ huge = 'E';
+ break;
+ default:
+ huge = '?';
+ }
+ proc_index = procfind_block(i);
+ /* NOTE(review): %-08lx / %-8ld / %-3ld assume uint64_t is
+ * unsigned long; on ILP32 builds these specifiers mismatch
+ * the argument types (UB) -- PRIx64/PRIu64 would be
+ * portable. */
+ ODP_DBG("%-3d: name:%-.24s file:%-.24s"
+ " flags:%s,%c len:0x%-08lx"
+ " user_len:%-8ld seq:%-3ld refcnt:%-4d\n",
+ i,
+ ishm_tbl->block[i].name,
+ ishm_tbl->block[i].filename,
+ flags, huge,
+ ishm_tbl->block[i].len,
+ ishm_tbl->block[i].user_len,
+ ishm_tbl->block[i].seq,
+ ishm_tbl->block[i].refcnt);
+
+ if (proc_index < 0)
+ continue;
+
+ /* NOTE(review): %-08lx applied to a void* 'start' is also a
+ * mismatched specifier; %p would be correct. */
+ ODP_DBG(" start:%-08lx fd:%-3d\n",
+ ishm_proctable->entry[proc_index].start,
+ ishm_proctable->entry[proc_index].fd);
+ }
+
+ /* display the virtual space allocations... : */
+ ODP_DBG("ishm virtual space:\n");
+ for (fragmnt = ishm_ftbl->used_fragmnts;
+ fragmnt; fragmnt = fragmnt->next) {
+ if (fragmnt->block_index >= 0) {
+ nb_allocated_frgments++;
+ /* NOTE(review): %08p with uintptr_t arguments is a
+ * mismatched specifier (%p expects void *). */
+ ODP_DBG(" %08p - %08p: ALLOCATED by block:%d\n",
+ (uintptr_t)fragmnt->start,
+ (uintptr_t)fragmnt->start + fragmnt->len - 1,
+ fragmnt->block_index);
+ consecutive_unallocated = 0;
+ } else {
+ ODP_DBG(" %08p - %08p: NOT ALLOCATED\n",
+ (uintptr_t)fragmnt->start,
+ (uintptr_t)fragmnt->start + fragmnt->len - 1);
+ if (consecutive_unallocated++)
+ ODP_ERR("defragmentation error\n");
+ }
+
+ /* some other sanity checks: */
+ if (fragmnt->prev != previous)
+ ODP_ERR("chaining error\n");
+
+ if (fragmnt != ishm_ftbl->used_fragmnts) {
+ if ((uintptr_t)fragmnt->start != last_address + 1)
+ ODP_ERR("lost space error\n");
+ }
+
+ last_address = (uintptr_t)fragmnt->start + fragmnt->len - 1;
+ previous = fragmnt;
+ nb_used_frgments++;
+ }
+
+ /*
+ * the number of blocks with the single_VA flag set should match
+ * the number of used fragments:
+ */
+ if (single_va_blocks != nb_allocated_frgments)
+ ODP_ERR("single_va_blocks != nb_allocated_fragments!\n");
+
+ /* compute the number of unused fragments*/
+ for (fragmnt = ishm_ftbl->unused_fragmnts;
+ fragmnt; fragmnt = fragmnt->next)
+ nb_unused_frgments++;
+
+ ODP_DBG("ishm: %d fragment used. %d fragments unused. (total=%d)\n",
+ nb_used_frgments, nb_unused_frgments,
+ nb_used_frgments + nb_unused_frgments);
+
+ if ((nb_used_frgments + nb_unused_frgments) != ISHM_NB_FRAGMNTS)
+ ODP_ERR("lost fragments!\n");
+
+ if (nb_blocks < ishm_proctable->nb_entries)
+ ODP_ERR("process known block cannot exceed main total sum!\n");
+
+ ODP_DBG("\n");
+
+ odp_spinlock_unlock(&ishm_tbl->lock);
+ return nb_blocks;
+}
diff --git a/platform/linux-generic/_ishmphy.c b/platform/linux-generic/_ishmphy.c
new file mode 100644
index 000000000..2b2d10073
--- /dev/null
+++ b/platform/linux-generic/_ishmphy.c
@@ -0,0 +1,185 @@
+/* Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * This file handles the lower end of the ishm memory allocator:
+ * It performs the physical mappings.
+ */
+#include <odp_posix_extensions.h>
+#include <odp_config_internal.h>
+#include <odp_internal.h>
+#include <odp/api/align.h>
+#include <odp/api/system_info.h>
+#include <odp/api/debug.h>
+#include <odp_debug_internal.h>
+#include <odp_align_internal.h>
+#include <_ishm_internal.h>
+#include <_ishmphy_internal.h>
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <_ishmphy_internal.h>
+
+static void *common_va_address;
+static uint64_t common_va_len;
+
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+/* Book some virtual address space
+ * This function is called at odp_init_global() time to pre-book some
+ * virtual address space inherited by all odpthreads (i.e. descendant
+ * processes and threads) and later used to guarantee the uniqueness of
+ * the mapping VA address when memory is reserved with the
+ * _ODP_ISHM_SINGLE_VA flag.
+ * returns the address of the mapping or NULL on error.
+ */
+void *_odp_ishmphy_book_va(uintptr_t len, intptr_t align)
+{
+ void *addr;
+
+ /* over-book by 'align' bytes so an aligned start address can always
+ * be returned whatever address mmap picks. PROT_NONE+MAP_NORESERVE:
+ * only the address range is wanted, no physical memory yet. */
+ addr = mmap(NULL, len + align, PROT_NONE,
+ MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
+ if (addr == MAP_FAILED) {
+ ODP_ERR("_ishmphy_book_va failure\n");
+ return NULL;
+ }
+
+ if (mprotect(addr, len, PROT_NONE))
+ ODP_ERR("failure for protect\n");
+
+ /* fix: 'len' is an integer, not a pointer: print it as such */
+ ODP_DBG("VA Reserved: %p, len=%lu\n", addr,
+ (unsigned long)(len + align));
+
+ common_va_address = addr;
+ /* fix: remember the full booked length (including the alignment
+ * margin) so that _odp_ishmphy_unbook_va() unmaps the whole
+ * reservation instead of leaking the trailing 'align' bytes: */
+ common_va_len = len + align;
+
+ /* return the nearest aligned address: */
+ return (void *)(((uintptr_t)addr + align - 1) & (-align));
+}
+
+/* Un-book some virtual address space
+ * This function is called at odp_term_global() time to unbook
+ * the virtual address space booked by _ishmphy_book_va()
+ */
+int _odp_ishmphy_unbook_va(void)
+{
+ int ret;
+
+ ret = munmap(common_va_address, common_va_len);
+ if (ret)
+ /* NOTE(review): the message name is garbled; it should
+ * probably read "_ishmphy_unbook_va failure" */
+ ODP_ERR("_unishmphy_book_va failure\n");
+ return ret;
+}
+
+/*
+ * do a mapping:
+ * Performs a mapping of the provided file descriptor to the process VA
+ * space. If the _ODP_ISHM_SINGLE_VA flag is set, 'start' is assumed to be
+ * the VA address where the mapping is to be done.
+ * If the flag is not set, a new VA address is taken.
+ * returns the address of the mapping or NULL on error.
+ */
+void *_odp_ishmphy_map(int fd, void *start, uint64_t size,
+ int flags)
+{
+ void *mapped_addr;
+ int mmap_flags = 0;
+
+ if (flags & _ODP_ISHM_SINGLE_VA) {
+ if (!start) {
+ ODP_ERR("failure: missing address\n");
+ return NULL;
+ }
+ /* maps over fragment of reserved VA: */
+ mapped_addr = mmap(start, size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_FIXED | mmap_flags, fd, 0);
+ /* if mapping fails, re-block the space we tried to take
+ * as it seems a mapping failure still affect what was there??*/
+ if (mapped_addr == MAP_FAILED) {
+ mmap_flags = MAP_SHARED | MAP_FIXED |
+ MAP_ANONYMOUS | MAP_NORESERVE;
+ mmap(start, size, PROT_NONE, mmap_flags, -1, 0);
+ mprotect(start, size, PROT_NONE);
+ }
+ } else {
+ /* just do a new mapping in the VA space: */
+ mapped_addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | mmap_flags, fd, 0);
+ /* sanity check: a kernel-chosen address must never fall
+ * inside the range pre-booked for SINGLE_VA blocks */
+ if ((mapped_addr >= common_va_address) &&
+ ((char *)mapped_addr <
+ (char *)common_va_address + common_va_len)) {
+ ODP_ERR("VA SPACE OVERLAP!\n");
+ }
+ }
+
+ /* common failure check for both branches above: */
+ if (mapped_addr == MAP_FAILED) {
+ ODP_ERR("mmap failed:%s\n", strerror(errno));
+ return NULL;
+ }
+
+ /* if locking is requested, lock it...*/
+ if (flags & _ODP_ISHM_LOCK) {
+ /* on mlock failure, undo the mapping before returning */
+ if (mlock(mapped_addr, size)) {
+ if (munmap(mapped_addr, size))
+ ODP_ERR("munmap failed:%s\n", strerror(errno));
+ ODP_ERR("mlock failed:%s\n", strerror(errno));
+ return NULL;
+ }
+ }
+ return mapped_addr;
+}
+
+/* free a mapping:
+ * If the _ODP_ISHM_SINGLE_VA flag was given at creation time the virtual
+ * address range must be returned to the preallocated "pool". This is
+ * done by mapping non-accessible memory there (hence keeping the VA
+ * range booked while releasing the physical memory).
+ * If the _ODP_ISHM_SINGLE_VA flag was not given, both physical memory and
+ * virtual address space are released by calling the normal munmap.
+ * return 0 on success or -1 on error.
+ */
+int _odp_ishmphy_unmap(void *start, uint64_t len, int flags)
+{
+ void *addr;
+ int ret;
+ int mmap_flgs;
+
+ mmap_flgs = MAP_SHARED | MAP_FIXED | MAP_ANONYMOUS | MAP_NORESERVE;
+
+ /* if locking was requested, unlock...*/
+ if (flags & _ODP_ISHM_LOCK)
+ munlock(start, len);
+
+ if (flags & _ODP_ISHM_SINGLE_VA) {
+ /* mapping inaccessible memory overwrites the previous mapping
+ * and frees the physical memory, but guarantees the VA
+ * range stays blocked from other mappings
+ */
+ addr = mmap(start, len, PROT_NONE, mmap_flgs, -1, 0);
+ if (addr == MAP_FAILED) {
+ ODP_ERR("_ishmphy_free failure for ISHM_SINGLE_VA\n");
+ return -1;
+ }
+ if (mprotect(start, len, PROT_NONE))
+ ODP_ERR("_ishmphy_free failure for protect\n");
+ return 0;
+ }
+
+ /* just release the mapping */
+ ret = munmap(start, len);
+ if (ret)
+ ODP_ERR("_ishmphy_free failure: %s\n", strerror(errno));
+ return ret;
+}
diff --git a/platform/linux-generic/arch/arm/odp/api/cpu_arch.h b/platform/linux-generic/arch/arm/odp/api/cpu_arch.h
index e86e132b7..22b1da2dd 120000..100644
--- a/platform/linux-generic/arch/arm/odp/api/cpu_arch.h
+++ b/platform/linux-generic/arch/arm/odp/api/cpu_arch.h
@@ -1 +1,24 @@
-../../../default/odp/api/cpu_arch.h \ No newline at end of file
+/* Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_CPU_ARCH_H_
+#define ODP_PLAT_CPU_ARCH_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define _ODP_CACHE_LINE_SIZE 64
+
+/* CPU pause hint for spin-wait loops: a no-op on this platform (no
+ * architecture-specific relax instruction is issued here). */
+static inline void odp_cpu_pause(void)
+{
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/arch/arm/odp_cpu_arch.c b/platform/linux-generic/arch/arm/odp_cpu_arch.c
index deebc474c..2ac223e07 120000..100644
--- a/platform/linux-generic/arch/arm/odp_cpu_arch.c
+++ b/platform/linux-generic/arch/arm/odp_cpu_arch.c
@@ -1 +1,48 @@
-../default/odp_cpu_arch.c \ No newline at end of file
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_posix_extensions.h>
+
+#include <stdlib.h>
+#include <time.h>
+
+#include <odp/api/cpu.h>
+#include <odp/api/hints.h>
+#include <odp/api/system_info.h>
+#include <odp_debug_internal.h>
+
+#define GIGA 1000000000
+
+uint64_t odp_cpu_cycles(void)
+{
+ struct timespec time;
+ uint64_t sec, ns, hz, cycles;
+ int ret;
+
+ /* no direct cycle-counter access here: a cycle count is synthesized
+ * from CLOCK_MONOTONIC_RAW and the max CPU frequency */
+ ret = clock_gettime(CLOCK_MONOTONIC_RAW, &time);
+
+ if (ret != 0)
+ ODP_ABORT("clock_gettime failed\n");
+
+ hz = odp_cpu_hz_max();
+ sec = (uint64_t)time.tv_sec;
+ ns = (uint64_t)time.tv_nsec;
+
+ /* cycles = (sec + ns/1e9) * hz, in integer arithmetic */
+ cycles = sec * hz;
+ cycles += (ns * hz) / GIGA;
+
+ return cycles;
+}
+
+/* the synthesized counter uses the full 64-bit range before wrapping */
+uint64_t odp_cpu_cycles_max(void)
+{
+ return UINT64_MAX;
+}
+
+uint64_t odp_cpu_cycles_resolution(void)
+{
+ return 1;
+}
diff --git a/platform/linux-generic/arch/arm/odp_sysinfo_parse.c b/platform/linux-generic/arch/arm/odp_sysinfo_parse.c
index 39962b8c5..53e2aaeaf 120000..100644
--- a/platform/linux-generic/arch/arm/odp_sysinfo_parse.c
+++ b/platform/linux-generic/arch/arm/odp_sysinfo_parse.c
@@ -1 +1,27 @@
-../default/odp_sysinfo_parse.c \ No newline at end of file
+/* Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_internal.h>
+#include <odp_debug_internal.h>
+#include <string.h>
+
+/* Dummy /proc/cpuinfo parser: fills every CPU entry with fixed
+ * placeholder values instead of parsing the file. */
+int cpuinfo_parser(FILE *file ODP_UNUSED, system_info_t *sysinfo)
+{
+ int i;
+
+ ODP_DBG("Warning: use dummy values for freq and model string\n");
+ for (i = 0; i < MAX_CPU_NUMBER; i++) {
+ sysinfo->cpu_hz_max[i] = 1400000000; /* dummy 1.4 GHz */
+ strcpy(sysinfo->model_str[i], "UNKNOWN");
+ }
+
+ return 0;
+}
+
+/* current frequency is not retrievable here: returns 0 (presumably
+ * meaning "unknown" -- confirm against callers) */
+uint64_t odp_cpu_hz_current(int id ODP_UNUSED)
+{
+ return 0;
+}
diff --git a/platform/linux-generic/arch/powerpc/odp_cpu_arch.c b/platform/linux-generic/arch/powerpc/odp_cpu_arch.c
index deebc474c..2ac223e07 120000..100644
--- a/platform/linux-generic/arch/powerpc/odp_cpu_arch.c
+++ b/platform/linux-generic/arch/powerpc/odp_cpu_arch.c
@@ -1 +1,48 @@
-../default/odp_cpu_arch.c \ No newline at end of file
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_posix_extensions.h>
+
+#include <stdlib.h>
+#include <time.h>
+
+#include <odp/api/cpu.h>
+#include <odp/api/hints.h>
+#include <odp/api/system_info.h>
+#include <odp_debug_internal.h>
+
+#define GIGA 1000000000
+
+uint64_t odp_cpu_cycles(void)
+{
+ struct timespec time;
+ uint64_t sec, ns, hz, cycles;
+ int ret;
+
+ /* no direct cycle-counter access here: a cycle count is synthesized
+ * from CLOCK_MONOTONIC_RAW and the max CPU frequency */
+ ret = clock_gettime(CLOCK_MONOTONIC_RAW, &time);
+
+ if (ret != 0)
+ ODP_ABORT("clock_gettime failed\n");
+
+ hz = odp_cpu_hz_max();
+ sec = (uint64_t)time.tv_sec;
+ ns = (uint64_t)time.tv_nsec;
+
+ /* cycles = (sec + ns/1e9) * hz, in integer arithmetic */
+ cycles = sec * hz;
+ cycles += (ns * hz) / GIGA;
+
+ return cycles;
+}
+
+/* the synthesized counter uses the full 64-bit range before wrapping */
+uint64_t odp_cpu_cycles_max(void)
+{
+ return UINT64_MAX;
+}
+
+uint64_t odp_cpu_cycles_resolution(void)
+{
+ return 1;
+}
diff --git a/platform/linux-generic/include/_fdserver_internal.h b/platform/linux-generic/include/_fdserver_internal.h
new file mode 100644
index 000000000..22b280287
--- /dev/null
+++ b/platform/linux-generic/include/_fdserver_internal.h
@@ -0,0 +1,39 @@
+/* Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _FD_SERVER_INTERNAL_H
+#define _FD_SERVER_INTERNAL_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * the following enum defines the different contexts by which the
+ * FD server may be used: In the FD server, the keys used to store/retrieve
+ * a file descriptor are actually context based:
+ * Both the context and the key are stored at fd registration time,
+ * and both the context and the key are used to retrieve a fd.
+ * In other words a context identifies a FD server usage, so that different
+ * unrelated fd server users do not have to guarantee key uniqueness
+ * between them.
+ */
+typedef enum fd_server_context {
+ FD_SRV_CTX_NA, /* Not Applicable */
+ FD_SRV_CTX_ISHM,
+ FD_SRV_CTX_END, /* upper enum limit */
+} fd_server_context_e;
+
+int _odp_fdserver_register_fd(fd_server_context_e context, uint64_t key,
+ int fd);
+int _odp_fdserver_deregister_fd(fd_server_context_e context, uint64_t key);
+int _odp_fdserver_lookup_fd(fd_server_context_e context, uint64_t key);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/_ishm_internal.h b/platform/linux-generic/include/_ishm_internal.h
new file mode 100644
index 000000000..c7c330774
--- /dev/null
+++ b/platform/linux-generic/include/_ishm_internal.h
@@ -0,0 +1,52 @@
+/* Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_ISHM_INTERNAL_H_
+#define ODP_ISHM_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/types.h>
+
+/* flags available at ishm_reserve: */
+#define _ODP_ISHM_SINGLE_VA 1
+#define _ODP_ISHM_LOCK 2
+#define _ODP_ISHM_EXPORT 4 /* create export descr file in /tmp */
+
+/**
+ * Shared memory block info
+ */
+typedef struct _odp_ishm_info_t {
+ const char *name; /**< Block name */
+ void *addr; /**< Block address */
+ uint64_t size; /**< Block size in bytes */
+ uint64_t page_size; /**< Memory page size */
+ uint32_t flags; /**< _ODP_ISHM_* flags */
+ uint32_t user_flags;/**< user specific flags */
+} _odp_ishm_info_t;
+
+int _odp_ishm_reserve(const char *name, uint64_t size, int fd, uint32_t align,
+ uint32_t flags, uint32_t user_flags);
+int _odp_ishm_free_by_index(int block_index);
+int _odp_ishm_free_by_name(const char *name);
+int _odp_ishm_free_by_address(void *addr);
+void *_odp_ishm_lookup_by_index(int block_index);
+int _odp_ishm_lookup_by_name(const char *name);
+int _odp_ishm_lookup_by_address(void *addr);
+int _odp_ishm_find_exported(const char *remote_name,
+ pid_t external_odp_pid,
+ const char *local_name);
+void *_odp_ishm_address(int block_index);
+int _odp_ishm_info(int block_index, _odp_ishm_info_t *info);
+int _odp_ishm_status(const char *title);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/_ishmphy_internal.h b/platform/linux-generic/include/_ishmphy_internal.h
new file mode 100644
index 000000000..4fe560fd2
--- /dev/null
+++ b/platform/linux-generic/include/_ishmphy_internal.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _ISHMPHY_INTERNAL_H
+#define _ISHMPHY_INTERNAL_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+void *_odp_ishmphy_book_va(uintptr_t len, intptr_t align);
+int _odp_ishmphy_unbook_va(void);
+void *_odp_ishmphy_map(int fd, void *start, uint64_t size, int flags);
+int _odp_ishmphy_unmap(void *start, uint64_t len, int flags);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/ishmphy_internal.h b/platform/linux-generic/include/ishmphy_internal.h
new file mode 100644
index 000000000..0bc4207af
--- /dev/null
+++ b/platform/linux-generic/include/ishmphy_internal.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _ISHMPHY_INTERNAL_H_
+#define _ISHMPHY_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void *_ishmphy_book_va(uint64_t len);
+int _ishmphy_unbook_va(void);
+void *_ishmphy_map(int fd, void *start, uint64_t size,
+ int flags, int mmap_flags);
+int _ishmphy_unmap(void *start, uint64_t len, int flags);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp/api/plat/atomic_types.h b/platform/linux-generic/include/odp/api/plat/atomic_types.h
index 33a0565b5..a674ac997 100644
--- a/platform/linux-generic/include/odp/api/plat/atomic_types.h
+++ b/platform/linux-generic/include/odp/api/plat/atomic_types.h
@@ -4,7 +4,6 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-
/**
* @file
*
diff --git a/platform/linux-generic/include/odp/api/plat/barrier_types.h b/platform/linux-generic/include/odp/api/plat/barrier_types.h
index 440275e79..00b383cc6 100644
--- a/platform/linux-generic/include/odp/api/plat/barrier_types.h
+++ b/platform/linux-generic/include/odp/api/plat/barrier_types.h
@@ -4,7 +4,6 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-
/**
* @file
*
diff --git a/platform/linux-generic/include/odp/api/plat/byteorder_types.h b/platform/linux-generic/include/odp/api/plat/byteorder_types.h
index 679d4cf92..09235b539 100644
--- a/platform/linux-generic/include/odp/api/plat/byteorder_types.h
+++ b/platform/linux-generic/include/odp/api/plat/byteorder_types.h
@@ -52,12 +52,16 @@ extern "C" {
#define ODP_LITTLE_ENDIAN 1
#define ODP_BIG_ENDIAN 0
#define ODP_BYTE_ORDER ODP_LITTLE_ENDIAN
- #define ODP_LITTLE_ENDIAN_BITFIELD
+ #define ODP_LITTLE_ENDIAN_BITFIELD 1
+ #define ODP_BIG_ENDIAN_BITFIELD 0
+ #define ODP_BITFIELD_ORDER ODP_LITTLE_ENDIAN_BITFIELD
#else
#define ODP_LITTLE_ENDIAN 0
#define ODP_BIG_ENDIAN 1
#define ODP_BYTE_ORDER ODP_BIG_ENDIAN
- #define ODP_BIG_ENDIAN_BITFIELD
+ #define ODP_LITTLE_ENDIAN_BITFIELD 0
+ #define ODP_BIG_ENDIAN_BITFIELD 1
+ #define ODP_BITFIELD_ORDER ODP_BIG_ENDIAN_BITFIELD
#endif
typedef uint16_t __odp_bitwise odp_u16le_t;
diff --git a/platform/linux-generic/include/odp/api/plat/packet_types.h b/platform/linux-generic/include/odp/api/plat/packet_types.h
index b5345ed54..864494d43 100644
--- a/platform/linux-generic/include/odp/api/plat/packet_types.h
+++ b/platform/linux-generic/include/odp/api/plat/packet_types.h
@@ -32,9 +32,11 @@ typedef ODP_HANDLE_T(odp_packet_t);
#define ODP_PACKET_OFFSET_INVALID (0x0fffffff)
-typedef ODP_HANDLE_T(odp_packet_seg_t);
+/* A packet segment handle stores a small index. Strong type handles are
+ * pointers, which would be wasteful in this case. */
+typedef uint8_t odp_packet_seg_t;
-#define ODP_PACKET_SEG_INVALID _odp_cast_scalar(odp_packet_seg_t, 0xffffffff)
+#define ODP_PACKET_SEG_INVALID ((odp_packet_seg_t)-1)
/** odp_packet_color_t assigns names to the various pkt "colors" */
typedef enum {
diff --git a/platform/linux-generic/include/odp/api/plat/pool_types.h b/platform/linux-generic/include/odp/api/plat/pool_types.h
index 1ca8f02ed..6baff0923 100644
--- a/platform/linux-generic/include/odp/api/plat/pool_types.h
+++ b/platform/linux-generic/include/odp/api/plat/pool_types.h
@@ -30,6 +30,8 @@ typedef ODP_HANDLE_T(odp_pool_t);
#define ODP_POOL_INVALID _odp_cast_scalar(odp_pool_t, 0xffffffff)
+#define ODP_POOL_NAME_LEN 32
+
/**
* Pool type
*/
@@ -39,12 +41,6 @@ typedef enum odp_pool_type_t {
ODP_POOL_TIMEOUT = ODP_EVENT_TIMEOUT,
} odp_pool_type_t;
-/** Get printable format of odp_pool_t */
-static inline uint64_t odp_pool_to_u64(odp_pool_t hdl)
-{
- return _odp_pri(hdl);
-}
-
/**
* @}
*/
diff --git a/platform/linux-generic/include/odp/api/plat/shared_memory_types.h b/platform/linux-generic/include/odp/api/plat/shared_memory_types.h
index 4d8bbccb8..afa0bf9cd 100644
--- a/platform/linux-generic/include/odp/api/plat/shared_memory_types.h
+++ b/platform/linux-generic/include/odp/api/plat/shared_memory_types.h
@@ -31,6 +31,8 @@ typedef ODP_HANDLE_T(odp_shm_t);
#define ODP_SHM_INVALID _odp_cast_scalar(odp_shm_t, 0)
#define ODP_SHM_NULL ODP_SHM_INVALID
+#define ODP_SHM_NAME_LEN 32
+
/** Get printable format of odp_shm_t */
static inline uint64_t odp_shm_to_u64(odp_shm_t hdl)
{
diff --git a/platform/linux-generic/include/odp/api/plat/timer_types.h b/platform/linux-generic/include/odp/api/plat/timer_types.h
index 68d6f6fb3..8821bed60 100644
--- a/platform/linux-generic/include/odp/api/plat/timer_types.h
+++ b/platform/linux-generic/include/odp/api/plat/timer_types.h
@@ -30,6 +30,8 @@ typedef struct odp_timer_pool_s *odp_timer_pool_t;
#define ODP_TIMER_POOL_INVALID NULL
+#define ODP_TIMER_POOL_NAME_LEN 32
+
typedef ODP_HANDLE_T(odp_timer_t);
#define ODP_TIMER_INVALID _odp_cast_scalar(odp_timer_t, 0xffffffff)
diff --git a/platform/linux-generic/include/odp/api/visibility_begin.h b/platform/linux-generic/include/odp/visibility_begin.h
index 1bbb43def..1bbb43def 100644
--- a/platform/linux-generic/include/odp/api/visibility_begin.h
+++ b/platform/linux-generic/include/odp/visibility_begin.h
diff --git a/platform/linux-generic/include/odp/api/visibility_end.h b/platform/linux-generic/include/odp/visibility_end.h
index 748af5103..748af5103 100644
--- a/platform/linux-generic/include/odp/api/visibility_end.h
+++ b/platform/linux-generic/include/odp/visibility_end.h
diff --git a/platform/linux-generic/include/odp_align_internal.h b/platform/linux-generic/include/odp_align_internal.h
index 9ccde5320..d9cd30bde 100644
--- a/platform/linux-generic/include/odp_align_internal.h
+++ b/platform/linux-generic/include/odp_align_internal.h
@@ -29,24 +29,18 @@ extern "C" {
/**
* @internal
- * Round up pointer 'x' to alignment 'align'
- */
-#define ODP_ALIGN_ROUNDUP_PTR(x, align)\
- ((void *)ODP_ALIGN_ROUNDUP((uintptr_t)(x), (uintptr_t)(align)))
-
-/**
- * @internal
- * Round up pointer 'x' to cache line size alignment
+ * Round up 'x' to alignment 'align'
*/
-#define ODP_CACHE_LINE_SIZE_ROUNDUP_PTR(x)\
- ((void *)ODP_CACHE_LINE_SIZE_ROUNDUP((uintptr_t)(x)))
+#define ODP_ALIGN_ROUNDUP(x, align)\
+ ((align) * (((x) + (align) - 1) / (align)))
/**
* @internal
- * Round up 'x' to alignment 'align'
+ * When 'x' is not already a power of two, round it up to the next
+ * power of two value. Zero is not supported as an input value.
*/
-#define ODP_ALIGN_ROUNDUP(x, align)\
- ((align) * (((x) + align - 1) / (align)))
+#define ODP_ROUNDUP_POWER_2(x)\
+ (1 << (((int)(8 * sizeof(x))) - __builtin_clz((x) - 1)))
/**
* @internal
@@ -82,20 +76,6 @@ extern "C" {
/**
* @internal
- * Round down pointer 'x' to 'align' alignment, which is a power of two
- */
-#define ODP_ALIGN_ROUNDDOWN_PTR_POWER_2(x, align)\
-((void *)ODP_ALIGN_ROUNDDOWN_POWER_2((uintptr_t)(x), (uintptr_t)(align)))
-
-/**
- * @internal
- * Round down pointer 'x' to cache line size alignment
- */
-#define ODP_CACHE_LINE_SIZE_ROUNDDOWN_PTR(x)\
- ((void *)ODP_CACHE_LINE_SIZE_ROUNDDOWN((uintptr_t)(x)))
-
-/**
- * @internal
* Round down 'x' to 'align' alignment, which is a power of two
*/
#define ODP_ALIGN_ROUNDDOWN_POWER_2(x, align)\
diff --git a/platform/linux-generic/include/odp_buffer_inlines.h b/platform/linux-generic/include/odp_buffer_inlines.h
index 2b1ab42c3..cf817d907 100644
--- a/platform/linux-generic/include/odp_buffer_inlines.h
+++ b/platform/linux-generic/include/odp_buffer_inlines.h
@@ -18,177 +18,16 @@ extern "C" {
#endif
#include <odp_buffer_internal.h>
-#include <odp_pool_internal.h>
-static inline odp_buffer_t odp_buffer_encode_handle(odp_buffer_hdr_t *hdr)
-{
- odp_buffer_bits_t handle;
- uint32_t pool_id = pool_handle_to_index(hdr->pool_hdl);
- struct pool_entry_s *pool = get_pool_entry(pool_id);
-
- handle.handle = 0;
- handle.pool_id = pool_id;
- handle.index = ((uint8_t *)hdr - pool->pool_mdata_addr) /
- ODP_CACHE_LINE_SIZE;
- handle.seg = 0;
-
- return handle.handle;
-}
+odp_event_type_t _odp_buffer_event_type(odp_buffer_t buf);
+void _odp_buffer_event_type_set(odp_buffer_t buf, int ev);
+int odp_buffer_snprint(char *str, uint32_t n, odp_buffer_t buf);
static inline odp_buffer_t odp_hdr_to_buf(odp_buffer_hdr_t *hdr)
{
return hdr->handle.handle;
}
-static inline odp_buffer_hdr_t *odp_buf_to_hdr(odp_buffer_t buf)
-{
- odp_buffer_bits_t handle;
- uint32_t pool_id;
- uint32_t index;
- struct pool_entry_s *pool;
-
- handle.handle = buf;
- pool_id = handle.pool_id;
- index = handle.index;
- pool = get_pool_entry(pool_id);
-
- return (odp_buffer_hdr_t *)(void *)
- (pool->pool_mdata_addr + (index * ODP_CACHE_LINE_SIZE));
-}
-
-static inline uint32_t pool_id_from_buf(odp_buffer_t buf)
-{
- odp_buffer_bits_t handle;
-
- handle.handle = buf;
- return handle.pool_id;
-}
-
-static inline odp_buffer_hdr_t *validate_buf(odp_buffer_t buf)
-{
- odp_buffer_bits_t handle;
- odp_buffer_hdr_t *buf_hdr;
- handle.handle = buf;
-
- /* For buffer handles, segment index must be 0 and pool id in range */
- if (handle.seg != 0 || handle.pool_id >= ODP_CONFIG_POOLS)
- return NULL;
-
- pool_entry_t *pool =
- odp_pool_to_entry(_odp_cast_scalar(odp_pool_t,
- handle.pool_id));
-
- /* If pool not created, handle is invalid */
- if (pool->s.pool_shm == ODP_SHM_INVALID)
- return NULL;
-
- uint32_t buf_stride = pool->s.buf_stride / ODP_CACHE_LINE_SIZE;
-
- /* A valid buffer index must be on stride, and must be in range */
- if ((handle.index % buf_stride != 0) ||
- ((uint32_t)(handle.index / buf_stride) >= pool->s.params.buf.num))
- return NULL;
-
- buf_hdr = (odp_buffer_hdr_t *)(void *)
- (pool->s.pool_mdata_addr +
- (handle.index * ODP_CACHE_LINE_SIZE));
-
- /* Handle is valid, so buffer is valid if it is allocated */
- return buf_hdr->allocator == ODP_FREEBUF ? NULL : buf_hdr;
-}
-
-int odp_buffer_snprint(char *str, uint32_t n, odp_buffer_t buf);
-
-static inline void *buffer_map(odp_buffer_hdr_t *buf,
- uint32_t offset,
- uint32_t *seglen,
- uint32_t limit)
-{
- int seg_index;
- int seg_offset;
-
- if (odp_likely(offset < buf->segsize)) {
- seg_index = 0;
- seg_offset = offset;
- } else {
- seg_index = offset / buf->segsize;
- seg_offset = offset % buf->segsize;
- }
- if (seglen != NULL) {
- uint32_t buf_left = limit - offset;
- *seglen = seg_offset + buf_left <= buf->segsize ?
- buf_left : buf->segsize - seg_offset;
- }
-
- return (void *)(seg_offset + (uint8_t *)buf->addr[seg_index]);
-}
-
-static inline odp_buffer_seg_t segment_next(odp_buffer_hdr_t *buf,
- odp_buffer_seg_t seg)
-{
- odp_buffer_bits_t seghandle;
- seghandle.handle = (odp_buffer_t)seg;
-
- if (seg == ODP_SEGMENT_INVALID ||
- seghandle.prefix != buf->handle.prefix ||
- seghandle.seg >= buf->segcount - 1)
- return ODP_SEGMENT_INVALID;
- else {
- seghandle.seg++;
- return (odp_buffer_seg_t)seghandle.handle;
- }
-}
-
-static inline void *segment_map(odp_buffer_hdr_t *buf,
- odp_buffer_seg_t seg,
- uint32_t *seglen,
- uint32_t limit,
- uint32_t hr)
-{
- uint32_t seg_offset, buf_left;
- odp_buffer_bits_t seghandle;
- uint8_t *seg_addr;
- seghandle.handle = (odp_buffer_t)seg;
-
- if (seghandle.prefix != buf->handle.prefix ||
- seghandle.seg >= buf->segcount)
- return NULL;
-
- seg_addr = (uint8_t *)buf->addr[seghandle.seg];
- seg_offset = seghandle.seg * buf->segsize;
- limit += hr;
-
- /* Can't map this segment if it's nothing but headroom or tailroom */
- if (hr >= seg_offset + buf->segsize || seg_offset > limit)
- return NULL;
-
- /* Adjust address & offset if this segment contains any headroom */
- if (hr > seg_offset) {
- seg_addr += hr % buf->segsize;
- seg_offset += hr % buf->segsize;
- }
-
- /* Set seglen if caller is asking for it */
- if (seglen != NULL) {
- buf_left = limit - seg_offset;
- *seglen = buf_left < buf->segsize ? buf_left :
- (seg_offset >= buf->segsize ? buf->segsize :
- buf->segsize - seg_offset);
- }
-
- return (void *)seg_addr;
-}
-
-static inline odp_event_type_t _odp_buffer_event_type(odp_buffer_t buf)
-{
- return odp_buf_to_hdr(buf)->event_type;
-}
-
-static inline void _odp_buffer_event_type_set(odp_buffer_t buf, int ev)
-{
- odp_buf_to_hdr(buf)->event_type = ev;
-}
-
#ifdef __cplusplus
}
#endif
diff --git a/platform/linux-generic/include/odp_buffer_internal.h b/platform/linux-generic/include/odp_buffer_internal.h
index 1c09cd365..076abe96e 100644
--- a/platform/linux-generic/include/odp_buffer_internal.h
+++ b/platform/linux-generic/include/odp_buffer_internal.h
@@ -33,72 +33,15 @@ extern "C" {
#include <odp_schedule_if.h>
#include <stddef.h>
-#define ODP_BITSIZE(x) \
- ((x) <= 2 ? 1 : \
- ((x) <= 4 ? 2 : \
- ((x) <= 8 ? 3 : \
- ((x) <= 16 ? 4 : \
- ((x) <= 32 ? 5 : \
- ((x) <= 64 ? 6 : \
- ((x) <= 128 ? 7 : \
- ((x) <= 256 ? 8 : \
- ((x) <= 512 ? 9 : \
- ((x) <= 1024 ? 10 : \
- ((x) <= 2048 ? 11 : \
- ((x) <= 4096 ? 12 : \
- ((x) <= 8196 ? 13 : \
- ((x) <= 16384 ? 14 : \
- ((x) <= 32768 ? 15 : \
- ((x) <= 65536 ? 16 : \
- (0/0)))))))))))))))))
-
-ODP_STATIC_ASSERT(ODP_CONFIG_PACKET_SEG_LEN_MIN >= 256,
- "ODP Segment size must be a minimum of 256 bytes");
-
-ODP_STATIC_ASSERT((ODP_CONFIG_PACKET_BUF_LEN_MAX %
- ODP_CONFIG_PACKET_SEG_LEN_MIN) == 0,
- "Packet max size must be a multiple of segment size");
-
-#define ODP_BUFFER_MAX_SEG \
- (ODP_CONFIG_PACKET_BUF_LEN_MAX / ODP_CONFIG_PACKET_SEG_LEN_MIN)
-
-/* We can optimize storage of small raw buffers within metadata area */
-#define ODP_MAX_INLINE_BUF ((sizeof(void *)) * (ODP_BUFFER_MAX_SEG - 1))
-
-#define ODP_BUFFER_POOL_BITS ODP_BITSIZE(ODP_CONFIG_POOLS)
-#define ODP_BUFFER_SEG_BITS ODP_BITSIZE(ODP_BUFFER_MAX_SEG)
-#define ODP_BUFFER_INDEX_BITS (32 - ODP_BUFFER_POOL_BITS - ODP_BUFFER_SEG_BITS)
-#define ODP_BUFFER_PREFIX_BITS (ODP_BUFFER_POOL_BITS + ODP_BUFFER_INDEX_BITS)
-#define ODP_BUFFER_MAX_POOLS (1 << ODP_BUFFER_POOL_BITS)
-#define ODP_BUFFER_MAX_BUFFERS (1 << ODP_BUFFER_INDEX_BITS)
-
-#define ODP_BUFFER_MAX_INDEX (ODP_BUFFER_MAX_BUFFERS - 2)
-#define ODP_BUFFER_INVALID_INDEX (ODP_BUFFER_MAX_BUFFERS - 1)
-
typedef union odp_buffer_bits_t {
- odp_buffer_t handle;
+ odp_buffer_t handle;
+
union {
- uint32_t u32;
- struct {
-#if ODP_BYTE_ORDER == ODP_BIG_ENDIAN
- uint32_t pool_id:ODP_BUFFER_POOL_BITS;
- uint32_t index:ODP_BUFFER_INDEX_BITS;
- uint32_t seg:ODP_BUFFER_SEG_BITS;
-#else
- uint32_t seg:ODP_BUFFER_SEG_BITS;
- uint32_t index:ODP_BUFFER_INDEX_BITS;
- uint32_t pool_id:ODP_BUFFER_POOL_BITS;
-#endif
- };
+ uint32_t u32;
struct {
-#if ODP_BYTE_ORDER == ODP_BIG_ENDIAN
- uint32_t prefix:ODP_BUFFER_PREFIX_BITS;
- uint32_t pfxseg:ODP_BUFFER_SEG_BITS;
-#else
- uint32_t pfxseg:ODP_BUFFER_SEG_BITS;
- uint32_t prefix:ODP_BUFFER_PREFIX_BITS;
-#endif
+ uint32_t pool_id: 8;
+ uint32_t index: 24;
};
};
} odp_buffer_bits_t;
@@ -107,83 +50,73 @@ typedef union odp_buffer_bits_t {
/* Common buffer header */
struct odp_buffer_hdr_t {
- struct odp_buffer_hdr_t *next; /* next buf in a list--keep 1st */
- union { /* Multi-use secondary link */
- struct odp_buffer_hdr_t *prev;
- struct odp_buffer_hdr_t *link;
- };
- odp_buffer_bits_t handle; /* handle */
+ /* Handle union */
+ odp_buffer_bits_t handle;
- int burst_num;
- int burst_first;
- struct odp_buffer_hdr_t *burst[BUFFER_BURST_SIZE];
+ /* Initial buffer data pointer and length */
+ uint8_t *base_data;
+ uint8_t *buf_end;
+ /* Max data size */
+ uint32_t size;
+
+ /* Pool type */
+ int8_t type;
+
+ /* Burst counts */
+ uint8_t burst_num;
+ uint8_t burst_first;
+
+ /* Segment count */
+ uint8_t segcount;
+
+ /* Segments */
+ struct {
+ void *hdr;
+ uint8_t *data;
+ uint32_t len;
+ } seg[CONFIG_PACKET_MAX_SEGS];
+
+ /* Next buf in a list */
+ struct odp_buffer_hdr_t *next;
+
+ /* User context pointer or u64 */
union {
- uint32_t all;
- struct {
- uint32_t hdrdata:1; /* Data is in buffer hdr */
- uint32_t sustain:1; /* Sustain order */
- };
- } flags;
- int16_t allocator; /* allocating thread id */
- int8_t type; /* buffer type */
- odp_event_type_t event_type; /* for reuse as event */
- uint32_t size; /* max data size */
- odp_pool_t pool_hdl; /* buffer pool handle */
- union {
- uint64_t buf_u64; /* user u64 */
- void *buf_ctx; /* user context */
- const void *buf_cctx; /* const alias for ctx */
- };
- void *uarea_addr; /* user area address */
- uint32_t uarea_size; /* size of user area */
- uint32_t segcount; /* segment count */
- uint32_t segsize; /* segment size */
- void *addr[ODP_BUFFER_MAX_SEG]; /* block addrs */
- uint64_t order; /* sequence for ordered queues */
- queue_entry_t *origin_qe; /* ordered queue origin */
- union {
- queue_entry_t *target_qe; /* ordered queue target */
- uint64_t sync[SCHEDULE_ORDERED_LOCKS_PER_QUEUE];
+ uint64_t buf_u64;
+ void *buf_ctx;
+ const void *buf_cctx; /* const alias for ctx */
};
-#ifdef _ODP_PKTIO_IPC
- /* ipc mapped process can not walk over pointers,
- * offset has to be used */
- uint64_t ipc_addr_offset[ODP_BUFFER_MAX_SEG];
-#endif
-};
-/** @internal Compile time assert that the
- * allocator field can handle any allocator id*/
-ODP_STATIC_ASSERT(INT16_MAX >= ODP_THREAD_COUNT_MAX,
- "ODP_BUFFER_HDR_T__ALLOCATOR__SIZE_ERROR");
+ /* User area pointer */
+ void *uarea_addr;
-typedef struct odp_buffer_hdr_stride {
- uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(odp_buffer_hdr_t))];
-} odp_buffer_hdr_stride;
+ /* User area size */
+ uint32_t uarea_size;
-typedef struct odp_buf_blk_t {
- struct odp_buf_blk_t *next;
- struct odp_buf_blk_t *prev;
-} odp_buf_blk_t;
+ /* Event type. Maybe different than pool type (crypto compl event) */
+ int8_t event_type;
+
+ /* Burst table */
+ struct odp_buffer_hdr_t *burst[BUFFER_BURST_SIZE];
+
+ /* Used only if _ODP_PKTIO_IPC is set.
+ * ipc mapped process can not walk over pointers,
+ * offset has to be used */
+ uint64_t ipc_data_offset;
+
+ /* Pool handle */
+ odp_pool_t pool_hdl;
+
+ /* Data or next header */
+ uint8_t data[0];
+};
-/* Raw buffer header */
-typedef struct {
- odp_buffer_hdr_t buf_hdr; /* common buffer header */
-} odp_raw_buffer_hdr_t;
+ODP_STATIC_ASSERT(CONFIG_PACKET_MAX_SEGS < 256,
+ "CONFIG_PACKET_MAX_SEGS_TOO_LARGE");
-/* Free buffer marker */
-#define ODP_FREEBUF -1
+ODP_STATIC_ASSERT(BUFFER_BURST_SIZE < 256, "BUFFER_BURST_SIZE_TOO_LARGE");
/* Forward declarations */
-odp_buffer_t buffer_alloc(odp_pool_t pool, size_t size);
-int buffer_alloc_multi(odp_pool_t pool_hdl, size_t size,
- odp_buffer_t buf[], int num);
-void buffer_free(uint32_t pool_id, const odp_buffer_t buf);
-void buffer_free_multi(uint32_t pool_id,
- const odp_buffer_t buf[], int num_free);
-int seg_alloc_head(odp_buffer_hdr_t *buf_hdr, int segcount);
-void seg_free_head(odp_buffer_hdr_t *buf_hdr, int segcount);
int seg_alloc_tail(odp_buffer_hdr_t *buf_hdr, int segcount);
void seg_free_tail(odp_buffer_hdr_t *buf_hdr, int segcount);
diff --git a/platform/linux-generic/include/odp_classification_datamodel.h b/platform/linux-generic/include/odp_classification_datamodel.h
index dc2190d83..8505c67da 100644
--- a/platform/linux-generic/include/odp_classification_datamodel.h
+++ b/platform/linux-generic/include/odp_classification_datamodel.h
@@ -77,7 +77,7 @@ Class Of Service
*/
struct cos_s {
queue_entry_t *queue; /* Associated Queue */
- pool_entry_t *pool; /* Associated Buffer pool */
+ odp_pool_t pool; /* Associated Buffer pool */
union pmr_u *pmr[ODP_PMR_PER_COS_MAX]; /* Chained PMR */
union cos_u *linked_cos[ODP_PMR_PER_COS_MAX]; /* Chained CoS with PMR*/
uint32_t valid; /* validity Flag */
diff --git a/platform/linux-generic/include/odp_config_internal.h b/platform/linux-generic/include/odp_config_internal.h
index b7ff610bd..e7d84c904 100644
--- a/platform/linux-generic/include/odp_config_internal.h
+++ b/platform/linux-generic/include/odp_config_internal.h
@@ -22,6 +22,11 @@ extern "C" {
#define ODP_CONFIG_QUEUES 1024
/*
+ * Maximum number of ordered locks per queue
+ */
+#define CONFIG_QUEUE_MAX_ORD_LOCKS 4
+
+/*
* Maximum number of packet IO resources
*/
#define ODP_CONFIG_PKTIO_ENTRIES 64
@@ -32,7 +37,7 @@ extern "C" {
* This defines the minimum supported buffer alignment. Requests for values
* below this will be rounded up to this value.
*/
-#define ODP_CONFIG_BUFFER_ALIGN_MIN 16
+#define ODP_CONFIG_BUFFER_ALIGN_MIN 64
/*
* Maximum buffer alignment
@@ -54,7 +59,7 @@ extern "C" {
* The default value (66) allows a 1500-byte packet to be received into a single
* segment with Ethernet offset alignment and room for some header expansion.
*/
-#define ODP_CONFIG_PACKET_HEADROOM 66
+#define CONFIG_PACKET_HEADROOM 66
/*
* Default packet tailroom
@@ -65,44 +70,35 @@ extern "C" {
* without restriction. Note that most implementations will automatically
* consider any unused portion of the last segment of a packet as tailroom
*/
-#define ODP_CONFIG_PACKET_TAILROOM 0
+#define CONFIG_PACKET_TAILROOM 0
/*
* Maximum number of segments per packet
*/
-#define ODP_CONFIG_PACKET_MAX_SEGS 6
+#define CONFIG_PACKET_MAX_SEGS 6
/*
- * Minimum packet segment length
- *
- * This defines the minimum packet segment buffer length in bytes. The user
- * defined segment length (seg_len in odp_pool_param_t) will be rounded up into
- * this value.
+ * Maximum packet segment size including head- and tailrooms
*/
-#define ODP_CONFIG_PACKET_SEG_LEN_MIN 1598
+#define CONFIG_PACKET_SEG_SIZE (8 * 1024)
-/*
- * Maximum packet segment length
+/* Maximum data length in a segment
*
- * This defines the maximum packet segment buffer length in bytes. The user
- * defined segment length (seg_len in odp_pool_param_t) must not be larger than
- * this.
- */
-#define ODP_CONFIG_PACKET_SEG_LEN_MAX (64 * 1024)
+ * The user defined segment length (seg_len in odp_pool_param_t) must not
+ * be larger than this.
+ */
+#define CONFIG_PACKET_MAX_SEG_LEN (CONFIG_PACKET_SEG_SIZE - \
+ CONFIG_PACKET_HEADROOM - \
+ CONFIG_PACKET_TAILROOM)
/*
- * Maximum packet buffer length
- *
- * This defines the maximum number of bytes that can be stored into a packet
- * (maximum return value of odp_packet_buf_len(void)). Attempts to allocate
- * (including default head- and tailrooms) or extend packets to sizes larger
- * than this limit will fail.
+ * Minimum packet segment length
*
- * @internal In odp-linux implementation:
- * - The value MUST be an integral number of segments
- * - The value SHOULD be large enough to accommodate jumbo packets (9K)
+ * This defines the minimum packet segment buffer length in bytes. The user
+ * defined segment length (seg_len in odp_pool_param_t) will be rounded up into
+ * this value.
*/
-#define ODP_CONFIG_PACKET_BUF_LEN_MAX (ODP_CONFIG_PACKET_SEG_LEN_MIN * 6)
+#define CONFIG_PACKET_SEG_LEN_MIN CONFIG_PACKET_MAX_SEG_LEN
/* Maximum number of shared memory blocks.
*
@@ -118,6 +114,26 @@ extern "C" {
*/
#define CONFIG_BURST_SIZE 16
+/*
+ * Maximum number of events in a pool
+ */
+#define CONFIG_POOL_MAX_NUM (1 * 1024 * 1024)
+
+/*
+ * Maximum number of events in a thread local pool cache
+ */
+#define CONFIG_POOL_CACHE_SIZE 256
+
+/*
+ * Size of the virtual address space pre-reserved for ISHM
+ *
+ * This is just virtual space preallocation size, not memory allocation.
+ * This address space is used by ISHM to map things at a common address in
+ * all ODP threads (when the _ODP_ISHM_SINGLE_VA flag is used).
+ * In bytes.
+ */
+#define ODP_CONFIG_ISHM_VA_PREALLOC_SZ (536870912L)
+
#ifdef __cplusplus
}
#endif
diff --git a/platform/linux-generic/include/odp_crypto_internal.h b/platform/linux-generic/include/odp_crypto_internal.h
index 7b104afa8..c7b893aaf 100644
--- a/platform/linux-generic/include/odp_crypto_internal.h
+++ b/platform/linux-generic/include/odp_crypto_internal.h
@@ -14,6 +14,7 @@ extern "C" {
#include <openssl/des.h>
#include <openssl/aes.h>
+#define MAX_IV_LEN 64
#define OP_RESULT_MAGIC 0x91919191
/** Forward declaration of session structure */
@@ -23,7 +24,7 @@ typedef struct odp_crypto_generic_session odp_crypto_generic_session_t;
* Algorithm handler function prototype
*/
typedef
-odp_crypto_alg_err_t (*crypto_func_t)(odp_crypto_op_params_t *params,
+odp_crypto_alg_err_t (*crypto_func_t)(odp_crypto_op_param_t *param,
odp_crypto_generic_session_t *session);
/**
@@ -31,16 +32,16 @@ odp_crypto_alg_err_t (*crypto_func_t)(odp_crypto_op_params_t *params,
*/
struct odp_crypto_generic_session {
struct odp_crypto_generic_session *next;
- odp_crypto_op_t op;
+
+ /* Session creation parameters */
+ odp_crypto_session_param_t p;
+
odp_bool_t do_cipher_first;
- odp_queue_t compl_queue;
- odp_pool_t output_pool;
+
struct {
- odp_cipher_alg_t alg;
- struct {
- uint8_t *data;
- size_t len;
- } iv;
+ /* Copy of session IV data */
+ uint8_t iv_data[MAX_IV_LEN];
+
union {
struct {
DES_key_schedule ks1;
@@ -56,8 +57,8 @@ struct odp_crypto_generic_session {
} data;
crypto_func_t func;
} cipher;
+
struct {
- odp_auth_alg_t alg;
union {
struct {
uint8_t key[16];
diff --git a/platform/linux-generic/include/odp_internal.h b/platform/linux-generic/include/odp_internal.h
index 3429781a0..b313b1fef 100644
--- a/platform/linux-generic/include/odp_internal.h
+++ b/platform/linux-generic/include/odp_internal.h
@@ -29,7 +29,6 @@ extern __thread int __odp_errno;
typedef struct {
uint64_t cpu_hz_max[MAX_CPU_NUMBER];
- uint64_t default_huge_page_size;
uint64_t page_size;
int cache_line_size;
int cpu_count;
@@ -37,15 +36,20 @@ typedef struct {
char model_str[MAX_CPU_NUMBER][128];
} system_info_t;
+typedef struct {
+ uint64_t default_huge_page_size;
+ char *default_huge_page_dir;
+} hugepage_info_t;
+
struct odp_global_data_s {
pid_t main_pid;
odp_log_func_t log_fn;
odp_abort_func_t abort_fn;
system_info_t system_info;
+ hugepage_info_t hugepage_info;
odp_cpumask_t control_cpus;
odp_cpumask_t worker_cpus;
int num_cpus_installed;
- int ipc_ns;
};
enum init_stage {
@@ -53,7 +57,8 @@ enum init_stage {
CPUMASK_INIT,
TIME_INIT,
SYSINFO_INIT,
- SHM_INIT,
+ FDSERVER_INIT,
+ ISHM_INIT,
THREAD_INIT,
POOL_INIT,
QUEUE_INIT,
@@ -83,10 +88,6 @@ int odp_thread_init_local(odp_thread_type_t type);
int odp_thread_term_local(void);
int odp_thread_term_global(void);
-int odp_shm_init_global(void);
-int odp_shm_term_global(void);
-int odp_shm_init_local(void);
-
int odp_pool_init_global(void);
int odp_pool_init_local(void);
int odp_pool_term_global(void);
@@ -118,6 +119,14 @@ int odp_tm_term_global(void);
int _odp_int_name_tbl_init_global(void);
int _odp_int_name_tbl_term_global(void);
+int _odp_fdserver_init_global(void);
+int _odp_fdserver_term_global(void);
+
+int _odp_ishm_init_global(void);
+int _odp_ishm_init_local(void);
+int _odp_ishm_term_global(void);
+int _odp_ishm_term_local(void);
+
int cpuinfo_parser(FILE *file, system_info_t *sysinfo);
uint64_t odp_cpu_hz_current(int id);
diff --git a/platform/linux-generic/include/odp_packet_internal.h b/platform/linux-generic/include/odp_packet_internal.h
index b23ad9c75..e6e9d7447 100644
--- a/platform/linux-generic/include/odp_packet_internal.h
+++ b/platform/linux-generic/include/odp_packet_internal.h
@@ -27,8 +27,6 @@ extern "C" {
#include <odp/api/crypto.h>
#include <odp_crypto_internal.h>
-#define PACKET_JUMBO_LEN (9 * 1024)
-
/** Minimum segment length expected by packet_parse_common() */
#define PACKET_PARSE_SEG_LEN 96
@@ -116,13 +114,14 @@ typedef union {
uint32_t all;
struct {
+ /** adjustment for traffic mgr */
+ uint32_t shaper_len_adj:8;
+
/* Bitfield flags for each output option */
uint32_t l3_chksum_set:1; /**< L3 chksum bit is valid */
uint32_t l3_chksum:1; /**< L3 chksum override */
uint32_t l4_chksum_set:1; /**< L3 chksum bit is valid */
uint32_t l4_chksum:1; /**< L4 chksum override */
-
- int8_t shaper_len_adj; /**< adjustment for traffic mgr */
};
} output_flags_t;
@@ -156,9 +155,9 @@ typedef struct {
uint32_t l3_len; /**< Layer 3 length */
uint32_t l4_len; /**< Layer 4 length */
- layer_t parsed_layers; /**< Highest parsed protocol stack layer */
uint16_t ethtype; /**< EtherType */
- uint8_t ip_proto; /**< IP protocol */
+ uint8_t ip_proto; /**< IP protocol */
+ uint8_t parsed_layers; /**< Highest parsed protocol stack layer */
} packet_parser_t;
@@ -173,34 +172,44 @@ typedef struct {
/* common buffer header */
odp_buffer_hdr_t buf_hdr;
- /* Following members are initialized by packet_init() */
+ /*
+ * Following members are initialized by packet_init()
+ */
+
packet_parser_t p;
+ odp_pktio_t input;
+
uint32_t frame_len;
uint32_t headroom;
uint32_t tailroom;
- odp_pktio_t input;
+ /*
+ * Members below are not initialized by packet_init()
+ */
- /* Members below are not initialized by packet_init() */
- odp_queue_t dst_queue; /**< Classifier destination queue */
+ /* Flow hash value */
+ uint32_t flow_hash;
- uint32_t flow_hash; /**< Flow hash value */
- odp_time_t timestamp; /**< Timestamp value */
+ /* Timestamp value */
+ odp_time_t timestamp;
- odp_crypto_generic_op_result_t op_result; /**< Result for crypto */
-} odp_packet_hdr_t;
+ /* Classifier destination queue */
+ odp_queue_t dst_queue;
-typedef struct odp_packet_hdr_stride {
- uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(odp_packet_hdr_t))];
-} odp_packet_hdr_stride;
+ /* Result for crypto */
+ odp_crypto_generic_op_result_t op_result;
+
+ /* Packet data storage */
+ uint8_t data[0];
+} odp_packet_hdr_t;
/**
* Return the packet header
*/
static inline odp_packet_hdr_t *odp_packet_hdr(odp_packet_t pkt)
{
- return (odp_packet_hdr_t *)odp_buf_to_hdr((odp_buffer_t)pkt);
+ return (odp_packet_hdr_t *)buf_hdl_to_hdr((odp_buffer_t)pkt);
}
static inline void copy_packet_parser_metadata(odp_packet_hdr_t *src_hdr,
@@ -219,83 +228,13 @@ static inline void copy_packet_cls_metadata(odp_packet_hdr_t *src_hdr,
dst_hdr->op_result = src_hdr->op_result;
}
-static inline void *packet_map(odp_packet_hdr_t *pkt_hdr,
- uint32_t offset, uint32_t *seglen)
-{
- if (offset > pkt_hdr->frame_len)
- return NULL;
-
- return buffer_map(&pkt_hdr->buf_hdr,
- pkt_hdr->headroom + offset, seglen,
- pkt_hdr->headroom + pkt_hdr->frame_len);
-}
-
-static inline void push_head(odp_packet_hdr_t *pkt_hdr, size_t len)
-{
- pkt_hdr->headroom -= len;
- pkt_hdr->frame_len += len;
-}
-
-static inline void pull_head(odp_packet_hdr_t *pkt_hdr, size_t len)
-{
- pkt_hdr->headroom += len;
- pkt_hdr->frame_len -= len;
-}
-
-static inline int push_head_seg(odp_packet_hdr_t *pkt_hdr, size_t len)
+static inline void pull_tail(odp_packet_hdr_t *pkt_hdr, uint32_t len)
{
- uint32_t extrasegs =
- (len - pkt_hdr->headroom + pkt_hdr->buf_hdr.segsize - 1) /
- pkt_hdr->buf_hdr.segsize;
+ int last = pkt_hdr->buf_hdr.segcount - 1;
- if (pkt_hdr->buf_hdr.segcount + extrasegs > ODP_BUFFER_MAX_SEG ||
- seg_alloc_head(&pkt_hdr->buf_hdr, extrasegs))
- return -1;
-
- pkt_hdr->headroom += extrasegs * pkt_hdr->buf_hdr.segsize;
- return 0;
-}
-
-static inline void pull_head_seg(odp_packet_hdr_t *pkt_hdr)
-{
- uint32_t extrasegs = (pkt_hdr->headroom - 1) / pkt_hdr->buf_hdr.segsize;
-
- seg_free_head(&pkt_hdr->buf_hdr, extrasegs);
- pkt_hdr->headroom -= extrasegs * pkt_hdr->buf_hdr.segsize;
-}
-
-static inline void push_tail(odp_packet_hdr_t *pkt_hdr, size_t len)
-{
- pkt_hdr->tailroom -= len;
- pkt_hdr->frame_len += len;
-}
-
-static inline int push_tail_seg(odp_packet_hdr_t *pkt_hdr, size_t len)
-{
- uint32_t extrasegs =
- (len - pkt_hdr->tailroom + pkt_hdr->buf_hdr.segsize - 1) /
- pkt_hdr->buf_hdr.segsize;
-
- if (pkt_hdr->buf_hdr.segcount + extrasegs > ODP_BUFFER_MAX_SEG ||
- seg_alloc_tail(&pkt_hdr->buf_hdr, extrasegs))
- return -1;
-
- pkt_hdr->tailroom += extrasegs * pkt_hdr->buf_hdr.segsize;
- return 0;
-}
-
-static inline void pull_tail_seg(odp_packet_hdr_t *pkt_hdr)
-{
- uint32_t extrasegs = pkt_hdr->tailroom / pkt_hdr->buf_hdr.segsize;
-
- seg_free_tail(&pkt_hdr->buf_hdr, extrasegs);
- pkt_hdr->tailroom -= extrasegs * pkt_hdr->buf_hdr.segsize;
-}
-
-static inline void pull_tail(odp_packet_hdr_t *pkt_hdr, size_t len)
-{
pkt_hdr->tailroom += len;
pkt_hdr->frame_len -= len;
+ pkt_hdr->buf_hdr.seg[last].len -= len;
}
static inline uint32_t packet_len(odp_packet_hdr_t *pkt_hdr)
diff --git a/platform/linux-generic/include/odp_packet_io_internal.h b/platform/linux-generic/include/odp_packet_io_internal.h
index bdf6316fa..2001c4211 100644
--- a/platform/linux-generic/include/odp_packet_io_internal.h
+++ b/platform/linux-generic/include/odp_packet_io_internal.h
@@ -102,6 +102,8 @@ typedef struct {
packet, 0 - not yet ready */
void *pinfo;
odp_shm_t pinfo_shm;
+ odp_shm_t remote_pool_shm; /**< shm of remote pool get with
+ _ipc_map_remote_pool() */
} _ipc_pktio_t;
struct pktio_entry {
diff --git a/platform/linux-generic/include/odp_packet_io_ipc_internal.h b/platform/linux-generic/include/odp_packet_io_ipc_internal.h
index 851114d91..7cd294886 100644
--- a/platform/linux-generic/include/odp_packet_io_ipc_internal.h
+++ b/platform/linux-generic/include/odp_packet_io_ipc_internal.h
@@ -26,22 +26,31 @@
*/
struct pktio_info {
struct {
- /* number of buffer in remote pool */
- int shm_pool_bufs_num;
- /* size of remote pool */
- size_t shm_pkt_pool_size;
+ /* number of buffer*/
+ int num;
/* size of packet/segment in remote pool */
- uint32_t shm_pkt_size;
+ uint32_t block_size;
/* offset from shared memory block start
- * to pool_mdata_addr (odp-linux pool specific) */
- size_t mdata_offset;
+ * to pool *base_addr in remote process.
+ * (odp-linux pool specific) */
+ size_t base_addr_offset;
char pool_name[ODP_POOL_NAME_LEN];
+ /* 1 if master finished creation of all shared objects */
+ int init_done;
} master;
struct {
/* offset from shared memory block start
- * to pool_mdata_addr in remote process.
+ * to pool *base_addr in remote process.
* (odp-linux pool specific) */
- size_t mdata_offset;
+ size_t base_addr_offset;
+ void *base_addr;
+ uint32_t block_size;
char pool_name[ODP_POOL_NAME_LEN];
+ /* pid of the slave process written to shm and
+ * used by master to look up memory created by
+ * slave
+ */
+ int pid;
+ int init_done;
} slave;
} ODP_PACKED;
diff --git a/platform/linux-generic/include/odp_packet_io_queue.h b/platform/linux-generic/include/odp_packet_io_queue.h
index 13b79f3ff..d1d4b2251 100644
--- a/platform/linux-generic/include/odp_packet_io_queue.h
+++ b/platform/linux-generic/include/odp_packet_io_queue.h
@@ -28,11 +28,10 @@ extern "C" {
ODP_STATIC_ASSERT(ODP_PKTIN_QUEUE_MAX_BURST >= QUEUE_MULTI_MAX,
"ODP_PKTIN_DEQ_MULTI_MAX_ERROR");
-int pktin_enqueue(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr, int sustain);
+int pktin_enqueue(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr);
odp_buffer_hdr_t *pktin_dequeue(queue_entry_t *queue);
-int pktin_enq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num,
- int sustain);
+int pktin_enq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num);
int pktin_deq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num);
diff --git a/platform/linux-generic/include/odp_pool_internal.h b/platform/linux-generic/include/odp_pool_internal.h
index ca59ade04..b0805ac3c 100644
--- a/platform/linux-generic/include/odp_pool_internal.h
+++ b/platform/linux-generic/include/odp_pool_internal.h
@@ -18,239 +18,112 @@
extern "C" {
#endif
-#include <odp/api/std_types.h>
-#include <odp/api/align.h>
-#include <odp_align_internal.h>
-#include <odp/api/pool.h>
-#include <odp_buffer_internal.h>
-#include <odp/api/hints.h>
-#include <odp_config_internal.h>
-#include <odp/api/debug.h>
#include <odp/api/shared_memory.h>
-#include <odp/api/atomic.h>
-#include <odp/api/thread.h>
-#include <string.h>
+#include <odp/api/ticketlock.h>
-/**
- * Buffer initialization routine prototype
- *
- * @note Routines of this type MAY be passed as part of the
- * _odp_buffer_pool_init_t structure to be called whenever a
- * buffer is allocated to initialize the user metadata
- * associated with that buffer.
- */
-typedef void (_odp_buf_init_t)(odp_buffer_t buf, void *buf_init_arg);
-
-/**
- * Buffer pool initialization parameters
- * Used to communicate buffer pool initialization options. Internal for now.
- */
-typedef struct _odp_buffer_pool_init_t {
- size_t udata_size; /**< Size of user metadata for each buffer */
- _odp_buf_init_t *buf_init; /**< Buffer initialization routine to use */
- void *buf_init_arg; /**< Argument to be passed to buf_init() */
-} _odp_buffer_pool_init_t; /**< Type of buffer initialization struct */
-
-#define POOL_MAX_LOCAL_CHUNKS 4
-#define POOL_CHUNK_SIZE (4 * CONFIG_BURST_SIZE)
-#define POOL_MAX_LOCAL_BUFS (POOL_MAX_LOCAL_CHUNKS * POOL_CHUNK_SIZE)
-
-struct local_cache_s {
- uint64_t bufallocs; /* Local buffer alloc count */
- uint64_t buffrees; /* Local buffer free count */
-
- uint32_t num_buf;
- odp_buffer_hdr_t *buf[POOL_MAX_LOCAL_BUFS];
-};
+#include <odp_buffer_internal.h>
+#include <odp_config_internal.h>
+#include <odp_ring_internal.h>
-/* Local cache for buffer alloc/free acceleration */
-typedef struct local_cache_t {
- union {
- struct local_cache_s s;
+typedef struct pool_cache_t {
+ uint32_t num;
- uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(
- sizeof(struct local_cache_s))];
- };
-} local_cache_t;
+ odp_buffer_t buf[CONFIG_POOL_CACHE_SIZE];
-#include <odp/api/plat/ticketlock_inlines.h>
-#define POOL_LOCK(a) _odp_ticketlock_lock(a)
-#define POOL_UNLOCK(a) _odp_ticketlock_unlock(a)
-#define POOL_LOCK_INIT(a) odp_ticketlock_init(a)
+} pool_cache_t ODP_ALIGNED_CACHE;
-/**
- * ODP Pool stats - Maintain some useful stats regarding pool utilization
- */
+/* Buffer header ring */
typedef struct {
- odp_atomic_u64_t bufallocs; /**< Count of successful buf allocs */
- odp_atomic_u64_t buffrees; /**< Count of successful buf frees */
- odp_atomic_u64_t blkallocs; /**< Count of successful blk allocs */
- odp_atomic_u64_t blkfrees; /**< Count of successful blk frees */
- odp_atomic_u64_t bufempty; /**< Count of unsuccessful buf allocs */
- odp_atomic_u64_t blkempty; /**< Count of unsuccessful blk allocs */
- odp_atomic_u64_t buf_high_wm_count; /**< Count of high buf wm conditions */
- odp_atomic_u64_t buf_low_wm_count; /**< Count of low buf wm conditions */
- odp_atomic_u64_t blk_high_wm_count; /**< Count of high blk wm conditions */
- odp_atomic_u64_t blk_low_wm_count; /**< Count of low blk wm conditions */
-} _odp_pool_stats_t;
-
-struct pool_entry_s {
- odp_ticketlock_t lock ODP_ALIGNED_CACHE;
- odp_ticketlock_t buf_lock;
- odp_ticketlock_t blk_lock;
-
- char name[ODP_POOL_NAME_LEN];
- odp_pool_param_t params;
- uint32_t udata_size;
- odp_pool_t pool_hdl;
- uint32_t pool_id;
- odp_shm_t pool_shm;
- union {
- uint32_t all;
- struct {
- uint32_t has_name:1;
- uint32_t user_supplied_shm:1;
- uint32_t unsegmented:1;
- uint32_t zeroized:1;
- uint32_t predefined:1;
- };
- } flags;
- uint32_t quiesced;
- uint32_t buf_low_wm_assert;
- uint32_t blk_low_wm_assert;
- uint8_t *pool_base_addr;
- uint8_t *pool_mdata_addr;
- size_t pool_size;
- uint32_t buf_align;
- uint32_t buf_stride;
- odp_buffer_hdr_t *buf_freelist;
- void *blk_freelist;
- odp_atomic_u32_t bufcount;
- odp_atomic_u32_t blkcount;
- _odp_pool_stats_t poolstats;
- uint32_t buf_num;
- uint32_t seg_size;
- uint32_t blk_size;
- uint32_t buf_high_wm;
- uint32_t buf_low_wm;
- uint32_t blk_high_wm;
- uint32_t blk_low_wm;
- uint32_t headroom;
- uint32_t tailroom;
-
- local_cache_t local_cache[ODP_THREAD_COUNT_MAX] ODP_ALIGNED_CACHE;
-};
-
-typedef union pool_entry_u {
- struct pool_entry_s s;
-
- uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(struct pool_entry_s))];
-} pool_entry_t;
-
-extern void *pool_entry_ptr[];
-
-#if defined(ODP_CONFIG_SECURE_POOLS) && (ODP_CONFIG_SECURE_POOLS == 1)
-#define buffer_is_secure(buf) (buf->flags.zeroized)
-#define pool_is_secure(pool) (pool->flags.zeroized)
-#else
-#define buffer_is_secure(buf) 0
-#define pool_is_secure(pool) 0
-#endif
-
-static inline void *get_blk(struct pool_entry_s *pool)
+ /* Ring header */
+ ring_t hdr;
+
+ /* Ring data: buffer handles */
+ uint32_t buf[CONFIG_POOL_MAX_NUM];
+
+} pool_ring_t ODP_ALIGNED_CACHE;
+
+typedef struct pool_t {
+ odp_ticketlock_t lock ODP_ALIGNED_CACHE;
+
+ char name[ODP_POOL_NAME_LEN];
+ odp_pool_param_t params;
+ odp_pool_t pool_hdl;
+ uint32_t pool_idx;
+ uint32_t ring_mask;
+ odp_shm_t shm;
+ odp_shm_t uarea_shm;
+ int reserved;
+ uint32_t num;
+ uint32_t align;
+ uint32_t headroom;
+ uint32_t tailroom;
+ uint32_t data_size;
+ uint32_t max_len;
+ uint32_t max_seg_len;
+ uint32_t uarea_size;
+ uint32_t block_size;
+ uint32_t shm_size;
+ uint32_t uarea_shm_size;
+ uint8_t *base_addr;
+ uint8_t *uarea_base_addr;
+
+ pool_cache_t local_cache[ODP_THREAD_COUNT_MAX];
+
+ odp_shm_t ring_shm;
+ pool_ring_t *ring;
+
+} pool_t;
+
+typedef struct pool_table_t {
+ pool_t pool[ODP_CONFIG_POOLS];
+ odp_shm_t shm;
+} pool_table_t;
+
+extern pool_table_t *pool_tbl;
+
+static inline pool_t *pool_entry(uint32_t pool_idx)
{
- void *myhead;
- uint64_t blkcount;
-
- POOL_LOCK(&pool->blk_lock);
-
- myhead = pool->blk_freelist;
-
- if (odp_unlikely(myhead == NULL)) {
- POOL_UNLOCK(&pool->blk_lock);
- odp_atomic_inc_u64(&pool->poolstats.blkempty);
- } else {
- pool->blk_freelist = ((odp_buf_blk_t *)myhead)->next;
- POOL_UNLOCK(&pool->blk_lock);
- blkcount = odp_atomic_fetch_sub_u32(&pool->blkcount, 1) - 1;
-
- /* Check for low watermark condition */
- if (blkcount == pool->blk_low_wm && !pool->blk_low_wm_assert) {
- pool->blk_low_wm_assert = 1;
- odp_atomic_inc_u64(&pool->poolstats.blk_low_wm_count);
- }
-
- odp_atomic_inc_u64(&pool->poolstats.blkallocs);
- }
-
- return myhead;
-}
-
-static inline void ret_blk(struct pool_entry_s *pool, void *block)
-{
- uint64_t blkcount;
-
- POOL_LOCK(&pool->blk_lock);
-
- ((odp_buf_blk_t *)block)->next = pool->blk_freelist;
- pool->blk_freelist = block;
-
- POOL_UNLOCK(&pool->blk_lock);
-
- blkcount = odp_atomic_fetch_add_u32(&pool->blkcount, 1);
-
- /* Check if low watermark condition should be deasserted */
- if (blkcount == pool->blk_high_wm && pool->blk_low_wm_assert) {
- pool->blk_low_wm_assert = 0;
- odp_atomic_inc_u64(&pool->poolstats.blk_high_wm_count);
- }
-
- odp_atomic_inc_u64(&pool->poolstats.blkfrees);
+ return &pool_tbl->pool[pool_idx];
}
-static inline odp_pool_t pool_index_to_handle(uint32_t pool_id)
+static inline pool_t *pool_entry_from_hdl(odp_pool_t pool_hdl)
{
- return _odp_cast_scalar(odp_pool_t, pool_id);
+ return &pool_tbl->pool[_odp_typeval(pool_hdl)];
}
-static inline uint32_t pool_handle_to_index(odp_pool_t pool_hdl)
+static inline odp_buffer_hdr_t *pool_buf_hdl_to_hdr(pool_t *pool,
+ odp_buffer_t buf)
{
- return _odp_typeval(pool_hdl);
-}
+ odp_buffer_bits_t handle;
+ uint32_t index, block_offset;
+ odp_buffer_hdr_t *buf_hdr;
-static inline void *get_pool_entry(uint32_t pool_id)
-{
- return pool_entry_ptr[pool_id];
-}
+ handle.handle = buf;
+ index = handle.index;
+ block_offset = index * pool->block_size;
-static inline pool_entry_t *odp_pool_to_entry(odp_pool_t pool)
-{
- return (pool_entry_t *)get_pool_entry(pool_handle_to_index(pool));
-}
+ /* clang requires cast to uintptr_t */
+ buf_hdr = (odp_buffer_hdr_t *)(uintptr_t)&pool->base_addr[block_offset];
-static inline pool_entry_t *odp_buf_to_pool(odp_buffer_hdr_t *buf)
-{
- return odp_pool_to_entry(buf->pool_hdl);
+ return buf_hdr;
}
-static inline uint32_t odp_buffer_pool_segment_size(odp_pool_t pool)
+static inline odp_buffer_hdr_t *buf_hdl_to_hdr(odp_buffer_t buf)
{
- return odp_pool_to_entry(pool)->s.seg_size;
-}
+ odp_buffer_bits_t handle;
+ uint32_t pool_id;
+ pool_t *pool;
-static inline uint32_t odp_buffer_pool_headroom(odp_pool_t pool)
-{
- return odp_pool_to_entry(pool)->s.headroom;
-}
+ handle.handle = buf;
+ pool_id = handle.pool_id;
+ pool = pool_entry(pool_id);
-static inline uint32_t odp_buffer_pool_tailroom(odp_pool_t pool)
-{
- return odp_pool_to_entry(pool)->s.tailroom;
+ return pool_buf_hdl_to_hdr(pool, buf);
}
-odp_pool_t _pool_create(const char *name,
- odp_pool_param_t *params,
- uint32_t shmflags);
+int buffer_alloc_multi(pool_t *pool, odp_buffer_t buf[],
+ odp_buffer_hdr_t *buf_hdr[], int num);
+void buffer_free_multi(const odp_buffer_t buf[], int num_free);
#ifdef __cplusplus
}
diff --git a/platform/linux-generic/include/odp_queue_internal.h b/platform/linux-generic/include/odp_queue_internal.h
index e223d9f2b..8b55de1ab 100644
--- a/platform/linux-generic/include/odp_queue_internal.h
+++ b/platform/linux-generic/include/odp_queue_internal.h
@@ -41,11 +41,11 @@ extern "C" {
/* forward declaration */
union queue_entry_u;
-typedef int (*enq_func_t)(union queue_entry_u *, odp_buffer_hdr_t *, int);
+typedef int (*enq_func_t)(union queue_entry_u *, odp_buffer_hdr_t *);
typedef odp_buffer_hdr_t *(*deq_func_t)(union queue_entry_u *);
typedef int (*enq_multi_func_t)(union queue_entry_u *,
- odp_buffer_hdr_t **, int, int);
+ odp_buffer_hdr_t **, int);
typedef int (*deq_multi_func_t)(union queue_entry_u *,
odp_buffer_hdr_t **, int);
@@ -56,6 +56,13 @@ struct queue_entry_s {
odp_buffer_hdr_t *tail;
int status;
+ struct {
+ odp_atomic_u64_t ctx; /**< Current ordered context id */
+ odp_atomic_u64_t next_ctx; /**< Next unallocated context id */
+ /** Array of ordered locks */
+ odp_atomic_u64_t lock[CONFIG_QUEUE_MAX_ORD_LOCKS];
+ } ordered ODP_ALIGNED_CACHE;
+
enq_func_t enqueue ODP_ALIGNED_CACHE;
deq_func_t dequeue;
enq_multi_func_t enqueue_multi;
@@ -68,12 +75,6 @@ struct queue_entry_s {
odp_pktin_queue_t pktin;
odp_pktout_queue_t pktout;
char name[ODP_QUEUE_NAME_LEN];
- uint64_t order_in;
- uint64_t order_out;
- odp_buffer_hdr_t *reorder_head;
- odp_buffer_hdr_t *reorder_tail;
- odp_atomic_u64_t sync_in[SCHEDULE_ORDERED_LOCKS_PER_QUEUE];
- odp_atomic_u64_t sync_out[SCHEDULE_ORDERED_LOCKS_PER_QUEUE];
};
union queue_entry_u {
@@ -84,24 +85,12 @@ union queue_entry_u {
queue_entry_t *get_qentry(uint32_t queue_id);
-int queue_enq(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr, int sustain);
+int queue_enq(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr);
odp_buffer_hdr_t *queue_deq(queue_entry_t *queue);
-int queue_enq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num,
- int sustain);
+int queue_enq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num);
int queue_deq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num);
-int queue_pktout_enq(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr,
- int sustain);
-int queue_pktout_enq_multi(queue_entry_t *queue,
- odp_buffer_hdr_t *buf_hdr[], int num, int sustain);
-
-int queue_tm_reenq(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr,
- int sustain);
-int queue_tm_reenq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[],
- int num, int sustain);
-int queue_tm_reorder(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr);
-
void queue_lock(queue_entry_t *queue);
void queue_unlock(queue_entry_t *queue);
diff --git a/platform/linux-generic/include/odp_ring_internal.h b/platform/linux-generic/include/odp_ring_internal.h
new file mode 100644
index 000000000..55fedeb3a
--- /dev/null
+++ b/platform/linux-generic/include/odp_ring_internal.h
@@ -0,0 +1,176 @@
+/* Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_RING_INTERNAL_H_
+#define ODP_RING_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/atomic.h>
+#include <odp/api/hints.h>
+#include <odp_align_internal.h>
+
+/* Ring empty, not a valid data value. */
+#define RING_EMPTY ((uint32_t)-1)
+
+/* Ring of uint32_t data
+ *
+ * Ring stores head and tail counters. Ring indexes are formed from these
+ * counters with a mask (mask = ring_size - 1), which requires that ring size
+ * must be a power of two. Also ring size must be larger than the maximum
+ * number of data items that will be stored on it (there's no check against
+ * overwriting). */
+typedef struct {
+ /* Writer head and tail */
+ odp_atomic_u32_t w_head;
+ odp_atomic_u32_t w_tail;
+ uint8_t pad[ODP_CACHE_LINE_SIZE - (2 * sizeof(odp_atomic_u32_t))];
+
+ /* Reader head and tail */
+ odp_atomic_u32_t r_head;
+ odp_atomic_u32_t r_tail;
+
+ uint32_t data[0];
+} ring_t ODP_ALIGNED_CACHE;
+
+/* Initialize ring */
+static inline void ring_init(ring_t *ring)
+{
+ odp_atomic_init_u32(&ring->w_head, 0);
+ odp_atomic_init_u32(&ring->w_tail, 0);
+ odp_atomic_init_u32(&ring->r_head, 0);
+ odp_atomic_init_u32(&ring->r_tail, 0);
+}
+
+/* Dequeue data from the ring head */
+static inline uint32_t ring_deq(ring_t *ring, uint32_t mask)
+{
+ uint32_t head, tail, new_head;
+ uint32_t data;
+
+ head = odp_atomic_load_u32(&ring->r_head);
+
+ /* Move reader head. This thread owns data at the new head. */
+ do {
+ tail = odp_atomic_load_u32(&ring->w_tail);
+
+ if (head == tail)
+ return RING_EMPTY;
+
+ new_head = head + 1;
+
+ } while (odp_unlikely(odp_atomic_cas_acq_u32(&ring->r_head, &head,
+ new_head) == 0));
+
+ /* Read queue index */
+ data = ring->data[new_head & mask];
+
+ /* Wait until other readers have updated the tail */
+ while (odp_unlikely(odp_atomic_load_acq_u32(&ring->r_tail) != head))
+ odp_cpu_pause();
+
+ /* Now update the reader tail */
+ odp_atomic_store_rel_u32(&ring->r_tail, new_head);
+
+ return data;
+}
+
+/* Dequeue multiple data from the ring head. Num is smaller than ring size. */
+static inline uint32_t ring_deq_multi(ring_t *ring, uint32_t mask,
+ uint32_t data[], uint32_t num)
+{
+ uint32_t head, tail, new_head, i;
+
+ head = odp_atomic_load_u32(&ring->r_head);
+
+ /* Move reader head. This thread owns data at the new head. */
+ do {
+ tail = odp_atomic_load_u32(&ring->w_tail);
+
+ /* Ring is empty */
+ if (head == tail)
+ return 0;
+
+ /* Try to take all available */
+ if ((tail - head) < num)
+ num = tail - head;
+
+ new_head = head + num;
+
+ } while (odp_unlikely(odp_atomic_cas_acq_u32(&ring->r_head, &head,
+ new_head) == 0));
+
+ /* Read queue index */
+ for (i = 0; i < num; i++)
+ data[i] = ring->data[(head + 1 + i) & mask];
+
+ /* Wait until other readers have updated the tail */
+ while (odp_unlikely(odp_atomic_load_acq_u32(&ring->r_tail) != head))
+ odp_cpu_pause();
+
+ /* Now update the reader tail */
+ odp_atomic_store_rel_u32(&ring->r_tail, new_head);
+
+ return num;
+}
+
+/* Enqueue data into the ring tail */
+static inline void ring_enq(ring_t *ring, uint32_t mask, uint32_t data)
+{
+ uint32_t old_head, new_head;
+
+ /* Reserve a slot in the ring for writing */
+ old_head = odp_atomic_fetch_inc_u32(&ring->w_head);
+ new_head = old_head + 1;
+
+ /* Ring is full. Wait for the last reader to finish. */
+ while (odp_unlikely(odp_atomic_load_acq_u32(&ring->r_tail) == new_head))
+ odp_cpu_pause();
+
+ /* Write data */
+ ring->data[new_head & mask] = data;
+
+ /* Wait until other writers have updated the tail */
+ while (odp_unlikely(odp_atomic_load_acq_u32(&ring->w_tail) != old_head))
+ odp_cpu_pause();
+
+ /* Now update the writer tail */
+ odp_atomic_store_rel_u32(&ring->w_tail, new_head);
+}
+
+/* Enqueue multiple data into the ring tail. Num is smaller than ring size. */
+static inline void ring_enq_multi(ring_t *ring, uint32_t mask, uint32_t data[],
+ uint32_t num)
+{
+ uint32_t old_head, new_head, i;
+
+ /* Reserve a slot in the ring for writing */
+ old_head = odp_atomic_fetch_add_u32(&ring->w_head, num);
+ new_head = old_head + 1;
+
+ /* Ring is full. Wait for the last reader to finish. */
+ while (odp_unlikely(odp_atomic_load_acq_u32(&ring->r_tail) == new_head))
+ odp_cpu_pause();
+
+ /* Write data */
+ for (i = 0; i < num; i++)
+ ring->data[(new_head + i) & mask] = data[i];
+
+ /* Wait until other writers have updated the tail */
+ while (odp_unlikely(odp_atomic_load_acq_u32(&ring->w_tail) != old_head))
+ odp_cpu_pause();
+
+ /* Now update the writer tail */
+ odp_atomic_store_rel_u32(&ring->w_tail, old_head + num);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp_schedule_if.h b/platform/linux-generic/include/odp_schedule_if.h
index df73e70bf..6c2b05011 100644
--- a/platform/linux-generic/include/odp_schedule_if.h
+++ b/platform/linux-generic/include/odp_schedule_if.h
@@ -14,12 +14,6 @@ extern "C" {
#include <odp/api/queue.h>
#include <odp/api/schedule.h>
-/* Constants defined by the scheduler. These should be converted into interface
- * functions. */
-
-/* Number of ordered locks per queue */
-#define SCHEDULE_ORDERED_LOCKS_PER_QUEUE 2
-
typedef void (*schedule_pktio_start_fn_t)(int pktio_index, int num_in_queue,
int in_queue_idx[]);
typedef int (*schedule_thr_add_fn_t)(odp_schedule_group_t group, int thr);
@@ -31,12 +25,14 @@ typedef int (*schedule_init_queue_fn_t)(uint32_t queue_index,
typedef void (*schedule_destroy_queue_fn_t)(uint32_t queue_index);
typedef int (*schedule_sched_queue_fn_t)(uint32_t queue_index);
typedef int (*schedule_ord_enq_multi_fn_t)(uint32_t queue_index,
- void *buf_hdr[], int num,
- int sustain, int *ret);
+ void *buf_hdr[], int num, int *ret);
typedef int (*schedule_init_global_fn_t)(void);
typedef int (*schedule_term_global_fn_t)(void);
typedef int (*schedule_init_local_fn_t)(void);
typedef int (*schedule_term_local_fn_t)(void);
+typedef void (*schedule_order_lock_fn_t)(void);
+typedef void (*schedule_order_unlock_fn_t)(void);
+typedef unsigned (*schedule_max_ordered_locks_fn_t)(void);
typedef struct schedule_fn_t {
schedule_pktio_start_fn_t pktio_start;
@@ -51,6 +47,9 @@ typedef struct schedule_fn_t {
schedule_term_global_fn_t term_global;
schedule_init_local_fn_t init_local;
schedule_term_local_fn_t term_local;
+ schedule_order_lock_fn_t order_lock;
+ schedule_order_unlock_fn_t order_unlock;
+ schedule_max_ordered_locks_fn_t max_ordered_locks;
} schedule_fn_t;
/* Interface towards the scheduler */
diff --git a/platform/linux-generic/include/odp_schedule_internal.h b/platform/linux-generic/include/odp_schedule_internal.h
deleted file mode 100644
index 02637c2b2..000000000
--- a/platform/linux-generic/include/odp_schedule_internal.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef ODP_SCHEDULE_INTERNAL_H_
-#define ODP_SCHEDULE_INTERNAL_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Maximum number of dequeues */
-#define MAX_DEQ CONFIG_BURST_SIZE
-
-typedef struct {
- int thr;
- int num;
- int index;
- int pause;
- uint16_t round;
- uint16_t prefer_offset;
- uint16_t pktin_polls;
- uint32_t queue_index;
- odp_queue_t queue;
- odp_event_t ev_stash[MAX_DEQ];
- void *origin_qe;
- uint64_t order;
- uint64_t sync[SCHEDULE_ORDERED_LOCKS_PER_QUEUE];
- odp_pool_t pool;
- int enq_called;
- int ignore_ordered_context;
-} sched_local_t;
-
-extern __thread sched_local_t sched_local;
-
-void cache_order_info(uint32_t queue_index);
-int release_order(void *origin_qe, uint64_t order,
- odp_pool_t pool, int enq_called);
-
-/* API functions implemented in odp_schedule_ordered.c */
-void schedule_order_lock(unsigned lock_index);
-void schedule_order_unlock(unsigned lock_index);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp_schedule_ordered_internal.h b/platform/linux-generic/include/odp_schedule_ordered_internal.h
deleted file mode 100644
index 0ffbe3ad1..000000000
--- a/platform/linux-generic/include/odp_schedule_ordered_internal.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef ODP_SCHEDULE_ORDERED_INTERNAL_H_
-#define ODP_SCHEDULE_ORDERED_INTERNAL_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define SUSTAIN_ORDER 1
-
-int schedule_ordered_queue_enq(uint32_t queue_index, void *p_buf_hdr,
- int sustain, int *ret);
-int schedule_ordered_queue_enq_multi(uint32_t queue_index, void *p_buf_hdr[],
- int num, int sustain, int *ret);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp_shm_internal.h b/platform/linux-generic/include/odp_shm_internal.h
index 30e60f78c..8bd105d9f 100644
--- a/platform/linux-generic/include/odp_shm_internal.h
+++ b/platform/linux-generic/include/odp_shm_internal.h
@@ -16,8 +16,8 @@ extern "C" {
#define SHM_DEVNAME_MAXLEN (ODP_SHM_NAME_LEN + 16)
#define SHM_DEVNAME_FORMAT "/odp-%d-%s" /* /dev/shm/odp-<pid>-<name> */
-#define _ODP_SHM_PROC_NOCREAT 0x4 /**< Do not create shm if not exist */
-#define _ODP_SHM_O_EXCL 0x8 /**< Do not create shm if exist */
+#define _ODP_SHM_PROC_NOCREAT 0x40 /**< Do not create shm if not exist */
+#define _ODP_SHM_O_EXCL 0x80 /**< Do not create shm if exist */
#ifdef __cplusplus
}
diff --git a/platform/linux-generic/include/odp_timer_internal.h b/platform/linux-generic/include/odp_timer_internal.h
index b1cd73f71..91b12c545 100644
--- a/platform/linux-generic/include/odp_timer_internal.h
+++ b/platform/linux-generic/include/odp_timer_internal.h
@@ -35,8 +35,4 @@ typedef struct {
odp_timer_t timer;
} odp_timeout_hdr_t;
-typedef struct odp_timeout_hdr_stride {
- uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(odp_timeout_hdr_t))];
-} odp_timeout_hdr_stride;
-
#endif
diff --git a/platform/linux-generic/include/protocols/tcp.h b/platform/linux-generic/include/protocols/tcp.h
index 4e92e4bef..114262e97 100644
--- a/platform/linux-generic/include/protocols/tcp.h
+++ b/platform/linux-generic/include/protocols/tcp.h
@@ -34,7 +34,7 @@ typedef struct ODP_PACKED {
odp_u32be_t ack_no; /**< Acknowledgment number */
union {
odp_u16be_t doffset_flags;
-#if defined(ODP_BIG_ENDIAN_BITFIELD)
+#if ODP_BIG_ENDIAN_BITFIELD
struct {
odp_u16be_t rsvd1:8;
odp_u16be_t flags:8; /**< TCP flags as a byte */
@@ -51,7 +51,7 @@ typedef struct ODP_PACKED {
odp_u16be_t syn:1;
odp_u16be_t fin:1;
};
-#elif defined(ODP_LITTLE_ENDIAN_BITFIELD)
+#elif ODP_LITTLE_ENDIAN_BITFIELD
struct {
odp_u16be_t flags:8;
odp_u16be_t rsvd1:8; /**< TCP flags as a byte */
diff --git a/platform/linux-generic/odp_barrier.c b/platform/linux-generic/odp_barrier.c
index ef10f29da..a2c62676b 100644
--- a/platform/linux-generic/odp_barrier.c
+++ b/platform/linux-generic/odp_barrier.c
@@ -37,7 +37,7 @@ void odp_barrier_wait(odp_barrier_t *barrier)
count = odp_atomic_fetch_inc_u32(&barrier->bar);
wasless = count < barrier->count;
- if (count == 2*barrier->count-1) {
+ if (count == 2 * barrier->count - 1) {
/* Wrap around *atomically* */
odp_atomic_sub_u32(&barrier->bar, 2 * barrier->count);
} else {
diff --git a/platform/linux-generic/odp_buffer.c b/platform/linux-generic/odp_buffer.c
index ce2fdba41..b79103973 100644
--- a/platform/linux-generic/odp_buffer.c
+++ b/platform/linux-generic/odp_buffer.c
@@ -26,25 +26,18 @@ odp_event_t odp_buffer_to_event(odp_buffer_t buf)
void *odp_buffer_addr(odp_buffer_t buf)
{
- odp_buffer_hdr_t *hdr = odp_buf_to_hdr(buf);
+ odp_buffer_hdr_t *hdr = buf_hdl_to_hdr(buf);
- return hdr->addr[0];
+ return hdr->seg[0].data;
}
-
uint32_t odp_buffer_size(odp_buffer_t buf)
{
- odp_buffer_hdr_t *hdr = odp_buf_to_hdr(buf);
+ odp_buffer_hdr_t *hdr = buf_hdl_to_hdr(buf);
return hdr->size;
}
-int odp_buffer_is_valid(odp_buffer_t buf)
-{
- return validate_buf(buf) != NULL;
-}
-
-
int odp_buffer_snprint(char *str, uint32_t n, odp_buffer_t buf)
{
odp_buffer_hdr_t *hdr;
@@ -55,7 +48,7 @@ int odp_buffer_snprint(char *str, uint32_t n, odp_buffer_t buf)
return len;
}
- hdr = odp_buf_to_hdr(buf);
+ hdr = buf_hdl_to_hdr(buf);
len += snprintf(&str[len], n-len,
"Buffer\n");
@@ -63,16 +56,15 @@ int odp_buffer_snprint(char *str, uint32_t n, odp_buffer_t buf)
" pool %" PRIu64 "\n",
odp_pool_to_u64(hdr->pool_hdl));
len += snprintf(&str[len], n-len,
- " addr %p\n", hdr->addr);
+ " addr %p\n", hdr->seg[0].data);
len += snprintf(&str[len], n-len,
- " size %" PRIu32 "\n", hdr->size);
+ " size %" PRIu32 "\n", hdr->size);
len += snprintf(&str[len], n-len,
- " type %i\n", hdr->type);
+ " type %i\n", hdr->type);
return len;
}
-
void odp_buffer_print(odp_buffer_t buf)
{
int max_len = 512;
diff --git a/platform/linux-generic/odp_classification.c b/platform/linux-generic/odp_classification.c
index 82760e8e2..50a7e5473 100644
--- a/platform/linux-generic/odp_classification.c
+++ b/platform/linux-generic/odp_classification.c
@@ -16,7 +16,6 @@
#include <odp_classification_datamodel.h>
#include <odp_classification_inlines.h>
#include <odp_classification_internal.h>
-#include <odp_pool_internal.h>
#include <odp/api/shared_memory.h>
#include <protocols/eth.h>
#include <protocols/ip.h>
@@ -159,7 +158,6 @@ odp_cos_t odp_cls_cos_create(const char *name, odp_cls_cos_param_t *param)
{
int i, j;
queue_entry_t *queue;
- pool_entry_t *pool;
odp_cls_drop_t drop_policy;
/* Packets are dropped if Queue or Pool is invalid*/
@@ -168,25 +166,25 @@ odp_cos_t odp_cls_cos_create(const char *name, odp_cls_cos_param_t *param)
else
queue = queue_to_qentry(param->queue);
- if (param->pool == ODP_POOL_INVALID)
- pool = NULL;
- else
- pool = odp_pool_to_entry(param->pool);
-
drop_policy = param->drop_policy;
for (i = 0; i < ODP_COS_MAX_ENTRY; i++) {
LOCK(&cos_tbl->cos_entry[i].s.lock);
if (0 == cos_tbl->cos_entry[i].s.valid) {
- strncpy(cos_tbl->cos_entry[i].s.name, name,
- ODP_COS_NAME_LEN - 1);
- cos_tbl->cos_entry[i].s.name[ODP_COS_NAME_LEN - 1] = 0;
+ char *cos_name = cos_tbl->cos_entry[i].s.name;
+
+ if (name == NULL) {
+ cos_name[0] = 0;
+ } else {
+ strncpy(cos_name, name, ODP_COS_NAME_LEN - 1);
+ cos_name[ODP_COS_NAME_LEN - 1] = 0;
+ }
for (j = 0; j < ODP_PMR_PER_COS_MAX; j++) {
cos_tbl->cos_entry[i].s.pmr[j] = NULL;
cos_tbl->cos_entry[i].s.linked_cos[j] = NULL;
}
cos_tbl->cos_entry[i].s.queue = queue;
- cos_tbl->cos_entry[i].s.pool = pool;
+ cos_tbl->cos_entry[i].s.pool = param->pool;
cos_tbl->cos_entry[i].s.flow_set = 0;
cos_tbl->cos_entry[i].s.headroom = 0;
cos_tbl->cos_entry[i].s.valid = 1;
@@ -550,7 +548,7 @@ odp_pmr_t odp_cls_pmr_create(const odp_pmr_param_t *terms, int num_terms,
return id;
}
-int odp_cls_cos_pool_set(odp_cos_t cos_id, odp_pool_t pool_id)
+int odp_cls_cos_pool_set(odp_cos_t cos_id, odp_pool_t pool)
{
cos_t *cos;
@@ -560,10 +558,7 @@ int odp_cls_cos_pool_set(odp_cos_t cos_id, odp_pool_t pool_id)
return -1;
}
- if (pool_id == ODP_POOL_INVALID)
- cos->s.pool = NULL;
- else
- cos->s.pool = odp_pool_to_entry(pool_id);
+ cos->s.pool = pool;
return 0;
}
@@ -578,10 +573,7 @@ odp_pool_t odp_cls_cos_pool(odp_cos_t cos_id)
return ODP_POOL_INVALID;
}
- if (!cos->s.pool)
- return ODP_POOL_INVALID;
-
- return cos->s.pool->s.pool_hdl;
+ return cos->s.pool;
}
int verify_pmr(pmr_t *pmr, const uint8_t *pkt_addr, odp_packet_hdr_t *pkt_hdr)
@@ -827,10 +819,10 @@ int cls_classify_packet(pktio_entry_t *entry, const uint8_t *base,
if (cos == NULL)
return -EINVAL;
- if (cos->s.queue == NULL || cos->s.pool == NULL)
+ if (cos->s.queue == NULL || cos->s.pool == ODP_POOL_INVALID)
return -EFAULT;
- *pool = cos->s.pool->s.pool_hdl;
+ *pool = cos->s.pool;
pkt_hdr->p.input_flags.dst_queue = 1;
pkt_hdr->dst_queue = cos->s.queue->s.handle;
diff --git a/platform/linux-generic/odp_crypto.c b/platform/linux-generic/odp_crypto.c
index 9e09d42cd..4f17fd66c 100644
--- a/platform/linux-generic/odp_crypto.c
+++ b/platform/linux-generic/odp_crypto.c
@@ -4,6 +4,7 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
+#include <odp_posix_extensions.h>
#include <odp/api/crypto.h>
#include <odp_internal.h>
#include <odp/api/atomic.h>
@@ -19,6 +20,7 @@
#include <odp_packet_internal.h>
#include <string.h>
+#include <stdlib.h>
#include <openssl/des.h>
#include <openssl/rand.h>
@@ -27,10 +29,42 @@
#define MAX_SESSIONS 32
+/*
+ * Cipher algorithm capabilities
+ *
+ * Keep sorted: first by key length, then by IV length
+ */
+static const odp_crypto_cipher_capability_t cipher_capa_des[] = {
+{.key_len = 24, .iv_len = 8} };
+
+static const odp_crypto_cipher_capability_t cipher_capa_trides_cbc[] = {
+{.key_len = 24, .iv_len = 8} };
+
+static const odp_crypto_cipher_capability_t cipher_capa_aes_cbc[] = {
+{.key_len = 16, .iv_len = 16} };
+
+static const odp_crypto_cipher_capability_t cipher_capa_aes_gcm[] = {
+{.key_len = 16, .iv_len = 12} };
+
+/*
+ * Authentication algorithm capabilities
+ *
+ * Keep sorted: first by digest length, then by key length
+ */
+static const odp_crypto_auth_capability_t auth_capa_md5_hmac[] = {
+{.digest_len = 12, .key_len = 16, .aad_len = {.min = 0, .max = 0, .inc = 0} } };
+
+static const odp_crypto_auth_capability_t auth_capa_sha256_hmac[] = {
+{.digest_len = 16, .key_len = 32, .aad_len = {.min = 0, .max = 0, .inc = 0} } };
+
+static const odp_crypto_auth_capability_t auth_capa_aes_gcm[] = {
+{.digest_len = 16, .key_len = 0, .aad_len = {.min = 8, .max = 12, .inc = 4} } };
+
typedef struct odp_crypto_global_s odp_crypto_global_t;
struct odp_crypto_global_s {
odp_spinlock_t lock;
+ odp_ticketlock_t **openssl_lock;
odp_crypto_generic_session_t *free;
odp_crypto_generic_session_t sessions[0];
};
@@ -40,7 +74,9 @@ static odp_crypto_global_t *global;
static
odp_crypto_generic_op_result_t *get_op_result_from_event(odp_event_t ev)
{
- return &(odp_packet_hdr(odp_packet_from_event(ev))->op_result);
+ odp_packet_hdr_t *hdr = odp_packet_hdr(odp_packet_from_event(ev));
+
+ return &hdr->op_result;
}
static
@@ -67,24 +103,24 @@ void free_session(odp_crypto_generic_session_t *session)
}
static odp_crypto_alg_err_t
-null_crypto_routine(odp_crypto_op_params_t *params ODP_UNUSED,
+null_crypto_routine(odp_crypto_op_param_t *param ODP_UNUSED,
odp_crypto_generic_session_t *session ODP_UNUSED)
{
return ODP_CRYPTO_ALG_ERR_NONE;
}
static
-odp_crypto_alg_err_t md5_gen(odp_crypto_op_params_t *params,
+odp_crypto_alg_err_t md5_gen(odp_crypto_op_param_t *param,
odp_crypto_generic_session_t *session)
{
- uint8_t *data = odp_packet_data(params->out_pkt);
+ uint8_t *data = odp_packet_data(param->out_pkt);
uint8_t *icv = data;
- uint32_t len = params->auth_range.length;
+ uint32_t len = param->auth_range.length;
uint8_t hash[EVP_MAX_MD_SIZE];
/* Adjust pointer for beginning of area to auth */
- data += params->auth_range.offset;
- icv += params->hash_result_offset;
+ data += param->auth_range.offset;
+ icv += param->hash_result_offset;
/* Hash it */
HMAC(EVP_md5(),
@@ -102,19 +138,19 @@ odp_crypto_alg_err_t md5_gen(odp_crypto_op_params_t *params,
}
static
-odp_crypto_alg_err_t md5_check(odp_crypto_op_params_t *params,
+odp_crypto_alg_err_t md5_check(odp_crypto_op_param_t *param,
odp_crypto_generic_session_t *session)
{
- uint8_t *data = odp_packet_data(params->out_pkt);
+ uint8_t *data = odp_packet_data(param->out_pkt);
uint8_t *icv = data;
- uint32_t len = params->auth_range.length;
+ uint32_t len = param->auth_range.length;
uint32_t bytes = session->auth.data.md5.bytes;
uint8_t hash_in[EVP_MAX_MD_SIZE];
uint8_t hash_out[EVP_MAX_MD_SIZE];
/* Adjust pointer for beginning of area to auth */
- data += params->auth_range.offset;
- icv += params->hash_result_offset;
+ data += param->auth_range.offset;
+ icv += param->hash_result_offset;
/* Copy current value out and clear it before authentication */
memset(hash_in, 0, sizeof(hash_in));
@@ -140,17 +176,17 @@ odp_crypto_alg_err_t md5_check(odp_crypto_op_params_t *params,
}
static
-odp_crypto_alg_err_t sha256_gen(odp_crypto_op_params_t *params,
+odp_crypto_alg_err_t sha256_gen(odp_crypto_op_param_t *param,
odp_crypto_generic_session_t *session)
{
- uint8_t *data = odp_packet_data(params->out_pkt);
+ uint8_t *data = odp_packet_data(param->out_pkt);
uint8_t *icv = data;
- uint32_t len = params->auth_range.length;
+ uint32_t len = param->auth_range.length;
uint8_t hash[EVP_MAX_MD_SIZE];
/* Adjust pointer for beginning of area to auth */
- data += params->auth_range.offset;
- icv += params->hash_result_offset;
+ data += param->auth_range.offset;
+ icv += param->hash_result_offset;
/* Hash it */
HMAC(EVP_sha256(),
@@ -168,19 +204,19 @@ odp_crypto_alg_err_t sha256_gen(odp_crypto_op_params_t *params,
}
static
-odp_crypto_alg_err_t sha256_check(odp_crypto_op_params_t *params,
+odp_crypto_alg_err_t sha256_check(odp_crypto_op_param_t *param,
odp_crypto_generic_session_t *session)
{
- uint8_t *data = odp_packet_data(params->out_pkt);
+ uint8_t *data = odp_packet_data(param->out_pkt);
uint8_t *icv = data;
- uint32_t len = params->auth_range.length;
+ uint32_t len = param->auth_range.length;
uint32_t bytes = session->auth.data.sha256.bytes;
uint8_t hash_in[EVP_MAX_MD_SIZE];
uint8_t hash_out[EVP_MAX_MD_SIZE];
/* Adjust pointer for beginning of area to auth */
- data += params->auth_range.offset;
- icv += params->hash_result_offset;
+ data += param->auth_range.offset;
+ icv += param->hash_result_offset;
/* Copy current value out and clear it before authentication */
memset(hash_in, 0, sizeof(hash_in));
@@ -206,18 +242,18 @@ odp_crypto_alg_err_t sha256_check(odp_crypto_op_params_t *params,
}
static
-odp_crypto_alg_err_t aes_encrypt(odp_crypto_op_params_t *params,
+odp_crypto_alg_err_t aes_encrypt(odp_crypto_op_param_t *param,
odp_crypto_generic_session_t *session)
{
- uint8_t *data = odp_packet_data(params->out_pkt);
- uint32_t len = params->cipher_range.length;
+ uint8_t *data = odp_packet_data(param->out_pkt);
+ uint32_t len = param->cipher_range.length;
unsigned char iv_enc[AES_BLOCK_SIZE];
void *iv_ptr;
- if (params->override_iv_ptr)
- iv_ptr = params->override_iv_ptr;
- else if (session->cipher.iv.data)
- iv_ptr = session->cipher.iv.data;
+ if (param->override_iv_ptr)
+ iv_ptr = param->override_iv_ptr;
+ else if (session->p.iv.data)
+ iv_ptr = session->cipher.iv_data;
else
return ODP_CRYPTO_ALG_ERR_IV_INVALID;
@@ -229,7 +265,7 @@ odp_crypto_alg_err_t aes_encrypt(odp_crypto_op_params_t *params,
memcpy(iv_enc, iv_ptr, AES_BLOCK_SIZE);
/* Adjust pointer for beginning of area to cipher */
- data += params->cipher_range.offset;
+ data += param->cipher_range.offset;
/* Encrypt it */
AES_cbc_encrypt(data, data, len, &session->cipher.data.aes.key,
iv_enc, AES_ENCRYPT);
@@ -238,18 +274,18 @@ odp_crypto_alg_err_t aes_encrypt(odp_crypto_op_params_t *params,
}
static
-odp_crypto_alg_err_t aes_decrypt(odp_crypto_op_params_t *params,
+odp_crypto_alg_err_t aes_decrypt(odp_crypto_op_param_t *param,
odp_crypto_generic_session_t *session)
{
- uint8_t *data = odp_packet_data(params->out_pkt);
- uint32_t len = params->cipher_range.length;
+ uint8_t *data = odp_packet_data(param->out_pkt);
+ uint32_t len = param->cipher_range.length;
unsigned char iv_enc[AES_BLOCK_SIZE];
void *iv_ptr;
- if (params->override_iv_ptr)
- iv_ptr = params->override_iv_ptr;
- else if (session->cipher.iv.data)
- iv_ptr = session->cipher.iv.data;
+ if (param->override_iv_ptr)
+ iv_ptr = param->override_iv_ptr;
+ else if (session->p.iv.data)
+ iv_ptr = session->cipher.iv_data;
else
return ODP_CRYPTO_ALG_ERR_IV_INVALID;
@@ -261,7 +297,7 @@ odp_crypto_alg_err_t aes_decrypt(odp_crypto_op_params_t *params,
memcpy(iv_enc, iv_ptr, AES_BLOCK_SIZE);
/* Adjust pointer for beginning of area to cipher */
- data += params->cipher_range.offset;
+ data += param->cipher_range.offset;
/* Encrypt it */
AES_cbc_encrypt(data, data, len, &session->cipher.data.aes.key,
iv_enc, AES_DECRYPT);
@@ -269,22 +305,20 @@ odp_crypto_alg_err_t aes_decrypt(odp_crypto_op_params_t *params,
return ODP_CRYPTO_ALG_ERR_NONE;
}
-static
-int process_aes_params(odp_crypto_generic_session_t *session,
- odp_crypto_session_params_t *params)
+static int process_aes_param(odp_crypto_generic_session_t *session)
{
/* Verify IV len is either 0 or 16 */
- if (!((0 == params->iv.length) || (16 == params->iv.length)))
+ if (!((0 == session->p.iv.length) || (16 == session->p.iv.length)))
return -1;
/* Set function */
- if (ODP_CRYPTO_OP_ENCODE == params->op) {
+ if (ODP_CRYPTO_OP_ENCODE == session->p.op) {
session->cipher.func = aes_encrypt;
- AES_set_encrypt_key(params->cipher_key.data, 128,
+ AES_set_encrypt_key(session->p.cipher_key.data, 128,
&session->cipher.data.aes.key);
} else {
session->cipher.func = aes_decrypt;
- AES_set_decrypt_key(params->cipher_key.data, 128,
+ AES_set_decrypt_key(session->p.cipher_key.data, 128,
&session->cipher.data.aes.key);
}
@@ -292,30 +326,30 @@ int process_aes_params(odp_crypto_generic_session_t *session,
}
static
-odp_crypto_alg_err_t aes_gcm_encrypt(odp_crypto_op_params_t *params,
+odp_crypto_alg_err_t aes_gcm_encrypt(odp_crypto_op_param_t *param,
odp_crypto_generic_session_t *session)
{
- uint8_t *data = odp_packet_data(params->out_pkt);
- uint32_t plain_len = params->cipher_range.length;
- uint8_t *aad_head = data + params->auth_range.offset;
- uint8_t *aad_tail = data + params->cipher_range.offset +
- params->cipher_range.length;
- uint32_t auth_len = params->auth_range.length;
+ uint8_t *data = odp_packet_data(param->out_pkt);
+ uint32_t plain_len = param->cipher_range.length;
+ uint8_t *aad_head = data + param->auth_range.offset;
+ uint8_t *aad_tail = data + param->cipher_range.offset +
+ param->cipher_range.length;
+ uint32_t auth_len = param->auth_range.length;
unsigned char iv_enc[AES_BLOCK_SIZE];
void *iv_ptr;
- uint8_t *tag = data + params->hash_result_offset;
+ uint8_t *tag = data + param->hash_result_offset;
- if (params->override_iv_ptr)
- iv_ptr = params->override_iv_ptr;
- else if (session->cipher.iv.data)
- iv_ptr = session->cipher.iv.data;
+ if (param->override_iv_ptr)
+ iv_ptr = param->override_iv_ptr;
+ else if (session->p.iv.data)
+ iv_ptr = session->cipher.iv_data;
else
return ODP_CRYPTO_ALG_ERR_IV_INVALID;
/* All cipher data must be part of the authentication */
- if (params->auth_range.offset > params->cipher_range.offset ||
- params->auth_range.offset + auth_len <
- params->cipher_range.offset + plain_len)
+ if (param->auth_range.offset > param->cipher_range.offset ||
+ param->auth_range.offset + auth_len <
+ param->cipher_range.offset + plain_len)
return ODP_CRYPTO_ALG_ERR_DATA_SIZE;
/*
@@ -326,7 +360,7 @@ odp_crypto_alg_err_t aes_gcm_encrypt(odp_crypto_op_params_t *params,
memcpy(iv_enc, iv_ptr, AES_BLOCK_SIZE);
/* Adjust pointer for beginning of area to cipher/auth */
- uint8_t *plaindata = data + params->cipher_range.offset;
+ uint8_t *plaindata = data + param->cipher_range.offset;
/* Encrypt it */
EVP_CIPHER_CTX *ctx = session->cipher.data.aes_gcm.ctx;
@@ -357,30 +391,30 @@ odp_crypto_alg_err_t aes_gcm_encrypt(odp_crypto_op_params_t *params,
}
static
-odp_crypto_alg_err_t aes_gcm_decrypt(odp_crypto_op_params_t *params,
+odp_crypto_alg_err_t aes_gcm_decrypt(odp_crypto_op_param_t *param,
odp_crypto_generic_session_t *session)
{
- uint8_t *data = odp_packet_data(params->out_pkt);
- uint32_t cipher_len = params->cipher_range.length;
- uint8_t *aad_head = data + params->auth_range.offset;
- uint8_t *aad_tail = data + params->cipher_range.offset +
- params->cipher_range.length;
- uint32_t auth_len = params->auth_range.length;
+ uint8_t *data = odp_packet_data(param->out_pkt);
+ uint32_t cipher_len = param->cipher_range.length;
+ uint8_t *aad_head = data + param->auth_range.offset;
+ uint8_t *aad_tail = data + param->cipher_range.offset +
+ param->cipher_range.length;
+ uint32_t auth_len = param->auth_range.length;
unsigned char iv_enc[AES_BLOCK_SIZE];
void *iv_ptr;
- uint8_t *tag = data + params->hash_result_offset;
+ uint8_t *tag = data + param->hash_result_offset;
- if (params->override_iv_ptr)
- iv_ptr = params->override_iv_ptr;
- else if (session->cipher.iv.data)
- iv_ptr = session->cipher.iv.data;
+ if (param->override_iv_ptr)
+ iv_ptr = param->override_iv_ptr;
+ else if (session->p.iv.data)
+ iv_ptr = session->cipher.iv_data;
else
return ODP_CRYPTO_ALG_ERR_IV_INVALID;
/* All cipher data must be part of the authentication */
- if (params->auth_range.offset > params->cipher_range.offset ||
- params->auth_range.offset + auth_len <
- params->cipher_range.offset + cipher_len)
+ if (param->auth_range.offset > param->cipher_range.offset ||
+ param->auth_range.offset + auth_len <
+ param->cipher_range.offset + cipher_len)
return ODP_CRYPTO_ALG_ERR_DATA_SIZE;
/*
@@ -391,7 +425,7 @@ odp_crypto_alg_err_t aes_gcm_decrypt(odp_crypto_op_params_t *params,
memcpy(iv_enc, iv_ptr, AES_BLOCK_SIZE);
/* Adjust pointer for beginning of area to cipher/auth */
- uint8_t *cipherdata = data + params->cipher_range.offset;
+ uint8_t *cipherdata = data + param->cipher_range.offset;
/* Encrypt it */
EVP_CIPHER_CTX *ctx = session->cipher.data.aes_gcm.ctx;
int plain_len = 0;
@@ -422,19 +456,17 @@ odp_crypto_alg_err_t aes_gcm_decrypt(odp_crypto_op_params_t *params,
return ODP_CRYPTO_ALG_ERR_NONE;
}
-static
-int process_aes_gcm_params(odp_crypto_generic_session_t *session,
- odp_crypto_session_params_t *params)
+static int process_aes_gcm_param(odp_crypto_generic_session_t *session)
{
/* Verify Key len is 16 */
- if (params->cipher_key.length != 16)
+ if (session->p.cipher_key.length != 16)
return -1;
/* Set function */
EVP_CIPHER_CTX *ctx =
session->cipher.data.aes_gcm.ctx = EVP_CIPHER_CTX_new();
- if (ODP_CRYPTO_OP_ENCODE == params->op) {
+ if (ODP_CRYPTO_OP_ENCODE == session->p.op) {
session->cipher.func = aes_gcm_encrypt;
EVP_EncryptInit_ex(ctx, EVP_aes_128_gcm(), NULL, NULL, NULL);
} else {
@@ -443,31 +475,31 @@ int process_aes_gcm_params(odp_crypto_generic_session_t *session,
}
EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_IVLEN,
- params->iv.length, NULL);
- if (ODP_CRYPTO_OP_ENCODE == params->op) {
+ session->p.iv.length, NULL);
+ if (ODP_CRYPTO_OP_ENCODE == session->p.op) {
EVP_EncryptInit_ex(ctx, NULL, NULL,
- params->cipher_key.data, NULL);
+ session->p.cipher_key.data, NULL);
} else {
EVP_DecryptInit_ex(ctx, NULL, NULL,
- params->cipher_key.data, NULL);
+ session->p.cipher_key.data, NULL);
}
return 0;
}
static
-odp_crypto_alg_err_t des_encrypt(odp_crypto_op_params_t *params,
+odp_crypto_alg_err_t des_encrypt(odp_crypto_op_param_t *param,
odp_crypto_generic_session_t *session)
{
- uint8_t *data = odp_packet_data(params->out_pkt);
- uint32_t len = params->cipher_range.length;
+ uint8_t *data = odp_packet_data(param->out_pkt);
+ uint32_t len = param->cipher_range.length;
DES_cblock iv;
void *iv_ptr;
- if (params->override_iv_ptr)
- iv_ptr = params->override_iv_ptr;
- else if (session->cipher.iv.data)
- iv_ptr = session->cipher.iv.data;
+ if (param->override_iv_ptr)
+ iv_ptr = param->override_iv_ptr;
+ else if (session->p.iv.data)
+ iv_ptr = session->cipher.iv_data;
else
return ODP_CRYPTO_ALG_ERR_IV_INVALID;
@@ -479,7 +511,7 @@ odp_crypto_alg_err_t des_encrypt(odp_crypto_op_params_t *params,
memcpy(iv, iv_ptr, sizeof(iv));
/* Adjust pointer for beginning of area to cipher */
- data += params->cipher_range.offset;
+ data += param->cipher_range.offset;
/* Encrypt it */
DES_ede3_cbc_encrypt(data,
data,
@@ -494,18 +526,18 @@ odp_crypto_alg_err_t des_encrypt(odp_crypto_op_params_t *params,
}
static
-odp_crypto_alg_err_t des_decrypt(odp_crypto_op_params_t *params,
+odp_crypto_alg_err_t des_decrypt(odp_crypto_op_param_t *param,
odp_crypto_generic_session_t *session)
{
- uint8_t *data = odp_packet_data(params->out_pkt);
- uint32_t len = params->cipher_range.length;
+ uint8_t *data = odp_packet_data(param->out_pkt);
+ uint32_t len = param->cipher_range.length;
DES_cblock iv;
void *iv_ptr;
- if (params->override_iv_ptr)
- iv_ptr = params->override_iv_ptr;
- else if (session->cipher.iv.data)
- iv_ptr = session->cipher.iv.data;
+ if (param->override_iv_ptr)
+ iv_ptr = param->override_iv_ptr;
+ else if (session->p.iv.data)
+ iv_ptr = session->cipher.iv_data;
else
return ODP_CRYPTO_ALG_ERR_IV_INVALID;
@@ -517,7 +549,7 @@ odp_crypto_alg_err_t des_decrypt(odp_crypto_op_params_t *params,
memcpy(iv, iv_ptr, sizeof(iv));
/* Adjust pointer for beginning of area to cipher */
- data += params->cipher_range.offset;
+ data += param->cipher_range.offset;
/* Decrypt it */
DES_ede3_cbc_encrypt(data,
@@ -532,38 +564,34 @@ odp_crypto_alg_err_t des_decrypt(odp_crypto_op_params_t *params,
return ODP_CRYPTO_ALG_ERR_NONE;
}
-static
-int process_des_params(odp_crypto_generic_session_t *session,
- odp_crypto_session_params_t *params)
+static int process_des_param(odp_crypto_generic_session_t *session)
{
/* Verify IV len is either 0 or 8 */
- if (!((0 == params->iv.length) || (8 == params->iv.length)))
+ if (!((0 == session->p.iv.length) || (8 == session->p.iv.length)))
return -1;
/* Set function */
- if (ODP_CRYPTO_OP_ENCODE == params->op)
+ if (ODP_CRYPTO_OP_ENCODE == session->p.op)
session->cipher.func = des_encrypt;
else
session->cipher.func = des_decrypt;
/* Convert keys */
- DES_set_key((DES_cblock *)&params->cipher_key.data[0],
+ DES_set_key((DES_cblock *)&session->p.cipher_key.data[0],
&session->cipher.data.des.ks1);
- DES_set_key((DES_cblock *)&params->cipher_key.data[8],
+ DES_set_key((DES_cblock *)&session->p.cipher_key.data[8],
&session->cipher.data.des.ks2);
- DES_set_key((DES_cblock *)&params->cipher_key.data[16],
+ DES_set_key((DES_cblock *)&session->p.cipher_key.data[16],
&session->cipher.data.des.ks3);
return 0;
}
-static
-int process_md5_params(odp_crypto_generic_session_t *session,
- odp_crypto_session_params_t *params,
- uint32_t bits)
+static int process_md5_param(odp_crypto_generic_session_t *session,
+ uint32_t bits)
{
/* Set function */
- if (ODP_CRYPTO_OP_ENCODE == params->op)
+ if (ODP_CRYPTO_OP_ENCODE == session->p.op)
session->auth.func = md5_gen;
else
session->auth.func = md5_check;
@@ -572,18 +600,16 @@ int process_md5_params(odp_crypto_generic_session_t *session,
session->auth.data.md5.bytes = bits / 8;
/* Convert keys */
- memcpy(session->auth.data.md5.key, params->auth_key.data, 16);
+ memcpy(session->auth.data.md5.key, session->p.auth_key.data, 16);
return 0;
}
-static
-int process_sha256_params(odp_crypto_generic_session_t *session,
- odp_crypto_session_params_t *params,
- uint32_t bits)
+static int process_sha256_param(odp_crypto_generic_session_t *session,
+ uint32_t bits)
{
/* Set function */
- if (ODP_CRYPTO_OP_ENCODE == params->op)
+ if (ODP_CRYPTO_OP_ENCODE == session->p.op)
session->auth.func = sha256_gen;
else
session->auth.func = sha256_check;
@@ -592,7 +618,7 @@ int process_sha256_params(odp_crypto_generic_session_t *session,
session->auth.data.sha256.bytes = bits / 8;
/* Convert keys */
- memcpy(session->auth.data.sha256.key, params->auth_key.data, 32);
+ memcpy(session->auth.data.sha256.key, session->p.auth_key.data, 32);
return 0;
}
@@ -605,24 +631,108 @@ int odp_crypto_capability(odp_crypto_capability_t *capa)
/* Initialize crypto capability structure */
memset(capa, 0, sizeof(odp_crypto_capability_t));
- capa->ciphers.bit.null = 1;
- capa->ciphers.bit.des = 1;
- capa->ciphers.bit.trides_cbc = 1;
- capa->ciphers.bit.aes128_cbc = 1;
- capa->ciphers.bit.aes128_gcm = 1;
+ capa->ciphers.bit.null = 1;
+ capa->ciphers.bit.des = 1;
+ capa->ciphers.bit.trides_cbc = 1;
+ capa->ciphers.bit.aes_cbc = 1;
+ capa->ciphers.bit.aes_gcm = 1;
+
+ capa->auths.bit.null = 1;
+ capa->auths.bit.md5_hmac = 1;
+ capa->auths.bit.sha256_hmac = 1;
+ capa->auths.bit.aes_gcm = 1;
- capa->auths.bit.null = 1;
- capa->auths.bit.md5_96 = 1;
- capa->auths.bit.sha256_128 = 1;
- capa->auths.bit.aes128_gcm = 1;
+ /* Deprecated */
+ capa->ciphers.bit.aes128_cbc = 1;
+ capa->ciphers.bit.aes128_gcm = 1;
+ capa->auths.bit.md5_96 = 1;
+ capa->auths.bit.sha256_128 = 1;
+ capa->auths.bit.aes128_gcm = 1;
capa->max_sessions = MAX_SESSIONS;
return 0;
}
+int odp_crypto_cipher_capability(odp_cipher_alg_t cipher,
+ odp_crypto_cipher_capability_t dst[],
+ int num_copy)
+{
+ const odp_crypto_cipher_capability_t *src;
+ int num;
+ int size = sizeof(odp_crypto_cipher_capability_t);
+
+ switch (cipher) {
+ case ODP_CIPHER_ALG_NULL:
+ src = NULL;
+ num = 0;
+ break;
+ case ODP_CIPHER_ALG_DES:
+ src = cipher_capa_des;
+ num = sizeof(cipher_capa_des) / size;
+ break;
+ case ODP_CIPHER_ALG_3DES_CBC:
+ src = cipher_capa_trides_cbc;
+ num = sizeof(cipher_capa_trides_cbc) / size;
+ break;
+ case ODP_CIPHER_ALG_AES_CBC:
+ src = cipher_capa_aes_cbc;
+ num = sizeof(cipher_capa_aes_cbc) / size;
+ break;
+ case ODP_CIPHER_ALG_AES_GCM:
+ src = cipher_capa_aes_gcm;
+ num = sizeof(cipher_capa_aes_gcm) / size;
+ break;
+ default:
+ return -1;
+ }
+
+ if (num < num_copy)
+ num_copy = num;
+
+ memcpy(dst, src, num_copy * size);
+
+ return num;
+}
+
+int odp_crypto_auth_capability(odp_auth_alg_t auth,
+ odp_crypto_auth_capability_t dst[], int num_copy)
+{
+ const odp_crypto_auth_capability_t *src;
+ int num;
+ int size = sizeof(odp_crypto_auth_capability_t);
+
+ switch (auth) {
+ case ODP_AUTH_ALG_NULL:
+ src = NULL;
+ num = 0;
+ break;
+ case ODP_AUTH_ALG_MD5_HMAC:
+ src = auth_capa_md5_hmac;
+ num = sizeof(auth_capa_md5_hmac) / size;
+ break;
+ case ODP_AUTH_ALG_SHA256_HMAC:
+ src = auth_capa_sha256_hmac;
+ num = sizeof(auth_capa_sha256_hmac) / size;
+ break;
+ case ODP_AUTH_ALG_AES_GCM:
+ src = auth_capa_aes_gcm;
+ num = sizeof(auth_capa_aes_gcm) / size;
+ break;
+ default:
+ return -1;
+ }
+
+ if (num < num_copy)
+ num_copy = num;
+
+ memcpy(dst, src, num_copy * size);
+
+ return num;
+}
+
int
-odp_crypto_session_create(odp_crypto_session_params_t *params,
+odp_crypto_session_create(odp_crypto_session_param_t *param,
odp_crypto_session_t *session_out,
odp_crypto_ses_create_err_t *status)
{
@@ -639,42 +749,51 @@ odp_crypto_session_create(odp_crypto_session_params_t *params,
return -1;
}
+ /* Copy parameters */
+ session->p = *param;
+
+ /* Copy IV data */
+ if (session->p.iv.data) {
+ if (session->p.iv.length > MAX_IV_LEN) {
+ ODP_DBG("Maximum IV length exceeded\n");
+ return -1;
+ }
+
+ memcpy(session->cipher.iv_data, session->p.iv.data,
+ session->p.iv.length);
+ }
+
/* Derive order */
- if (ODP_CRYPTO_OP_ENCODE == params->op)
- session->do_cipher_first = params->auth_cipher_text;
+ if (ODP_CRYPTO_OP_ENCODE == param->op)
+ session->do_cipher_first = param->auth_cipher_text;
else
- session->do_cipher_first = !params->auth_cipher_text;
-
- /* Copy stuff over */
- session->op = params->op;
- session->compl_queue = params->compl_queue;
- session->cipher.alg = params->cipher_alg;
- session->cipher.iv.data = params->iv.data;
- session->cipher.iv.len = params->iv.length;
- session->auth.alg = params->auth_alg;
- session->output_pool = params->output_pool;
+ session->do_cipher_first = !param->auth_cipher_text;
/* Process based on cipher */
- switch (params->cipher_alg) {
+ switch (param->cipher_alg) {
case ODP_CIPHER_ALG_NULL:
session->cipher.func = null_crypto_routine;
rc = 0;
break;
case ODP_CIPHER_ALG_DES:
case ODP_CIPHER_ALG_3DES_CBC:
- rc = process_des_params(session, params);
+ rc = process_des_param(session);
break;
+ case ODP_CIPHER_ALG_AES_CBC:
+ /* deprecated */
case ODP_CIPHER_ALG_AES128_CBC:
- rc = process_aes_params(session, params);
+ rc = process_aes_param(session);
break;
+ case ODP_CIPHER_ALG_AES_GCM:
+ /* deprecated */
case ODP_CIPHER_ALG_AES128_GCM:
/* AES-GCM requires to do both auth and
* cipher at the same time */
- if (params->auth_alg != ODP_AUTH_ALG_AES128_GCM) {
+ if (param->auth_alg == ODP_AUTH_ALG_AES_GCM ||
+ param->auth_alg == ODP_AUTH_ALG_AES128_GCM)
+ rc = process_aes_gcm_param(session);
+ else
rc = -1;
- break;
- }
- rc = process_aes_gcm_params(session, params);
break;
default:
rc = -1;
@@ -687,26 +806,33 @@ odp_crypto_session_create(odp_crypto_session_params_t *params,
}
/* Process based on auth */
- switch (params->auth_alg) {
+ switch (param->auth_alg) {
case ODP_AUTH_ALG_NULL:
session->auth.func = null_crypto_routine;
rc = 0;
break;
+ case ODP_AUTH_ALG_MD5_HMAC:
+ /* deprecated */
case ODP_AUTH_ALG_MD5_96:
- rc = process_md5_params(session, params, 96);
+ rc = process_md5_param(session, 96);
break;
+ case ODP_AUTH_ALG_SHA256_HMAC:
+ /* deprecated */
case ODP_AUTH_ALG_SHA256_128:
- rc = process_sha256_params(session, params, 128);
+ rc = process_sha256_param(session, 128);
break;
+ case ODP_AUTH_ALG_AES_GCM:
+ /* deprecated */
case ODP_AUTH_ALG_AES128_GCM:
/* AES-GCM requires to do both auth and
* cipher at the same time */
- if (params->cipher_alg != ODP_CIPHER_ALG_AES128_GCM) {
+ if (param->cipher_alg == ODP_CIPHER_ALG_AES_GCM ||
+ param->cipher_alg == ODP_CIPHER_ALG_AES128_GCM) {
+ session->auth.func = null_crypto_routine;
+ rc = 0;
+ } else {
rc = -1;
- break;
}
- session->auth.func = null_crypto_routine;
- rc = 0;
break;
default:
rc = -1;
@@ -728,7 +854,8 @@ int odp_crypto_session_destroy(odp_crypto_session_t session)
odp_crypto_generic_session_t *generic;
generic = (odp_crypto_generic_session_t *)(intptr_t)session;
- if (generic->cipher.alg == ODP_CIPHER_ALG_AES128_GCM)
+ if (generic->p.cipher_alg == ODP_CIPHER_ALG_AES128_GCM ||
+ generic->p.cipher_alg == ODP_CIPHER_ALG_AES_GCM)
EVP_CIPHER_CTX_free(generic->cipher.data.aes_gcm.ctx);
memset(generic, 0, sizeof(*generic));
free_session(generic);
@@ -736,7 +863,7 @@ int odp_crypto_session_destroy(odp_crypto_session_t session)
}
int
-odp_crypto_operation(odp_crypto_op_params_t *params,
+odp_crypto_operation(odp_crypto_op_param_t *param,
odp_bool_t *posted,
odp_crypto_op_result_t *result)
{
@@ -745,38 +872,42 @@ odp_crypto_operation(odp_crypto_op_params_t *params,
odp_crypto_generic_session_t *session;
odp_crypto_op_result_t local_result;
- session = (odp_crypto_generic_session_t *)(intptr_t)params->session;
+ session = (odp_crypto_generic_session_t *)(intptr_t)param->session;
/* Resolve output buffer */
- if (ODP_PACKET_INVALID == params->out_pkt &&
- ODP_POOL_INVALID != session->output_pool)
- params->out_pkt = odp_packet_alloc(session->output_pool,
- odp_packet_len(params->pkt));
- if (params->pkt != params->out_pkt) {
- if (odp_unlikely(ODP_PACKET_INVALID == params->out_pkt))
- ODP_ABORT();
- (void)odp_packet_copy_from_pkt(params->out_pkt,
+ if (ODP_PACKET_INVALID == param->out_pkt &&
+ ODP_POOL_INVALID != session->p.output_pool)
+ param->out_pkt = odp_packet_alloc(session->p.output_pool,
+ odp_packet_len(param->pkt));
+
+ if (odp_unlikely(ODP_PACKET_INVALID == param->out_pkt)) {
+ ODP_DBG("Alloc failed.\n");
+ return -1;
+ }
+
+ if (param->pkt != param->out_pkt) {
+ (void)odp_packet_copy_from_pkt(param->out_pkt,
0,
- params->pkt,
+ param->pkt,
0,
- odp_packet_len(params->pkt));
- _odp_packet_copy_md_to_packet(params->pkt, params->out_pkt);
- odp_packet_free(params->pkt);
- params->pkt = ODP_PACKET_INVALID;
+ odp_packet_len(param->pkt));
+ _odp_packet_copy_md_to_packet(param->pkt, param->out_pkt);
+ odp_packet_free(param->pkt);
+ param->pkt = ODP_PACKET_INVALID;
}
/* Invoke the functions */
if (session->do_cipher_first) {
- rc_cipher = session->cipher.func(params, session);
- rc_auth = session->auth.func(params, session);
+ rc_cipher = session->cipher.func(param, session);
+ rc_auth = session->auth.func(param, session);
} else {
- rc_auth = session->auth.func(params, session);
- rc_cipher = session->cipher.func(params, session);
+ rc_auth = session->auth.func(param, session);
+ rc_cipher = session->cipher.func(param, session);
}
/* Fill in result */
- local_result.ctx = params->ctx;
- local_result.pkt = params->out_pkt;
+ local_result.ctx = param->ctx;
+ local_result.pkt = param->out_pkt;
local_result.cipher_status.alg_err = rc_cipher;
local_result.cipher_status.hw_err = ODP_CRYPTO_HW_ERR_NONE;
local_result.auth_status.alg_err = rc_auth;
@@ -786,12 +917,12 @@ odp_crypto_operation(odp_crypto_op_params_t *params,
(rc_auth == ODP_CRYPTO_ALG_ERR_NONE);
/* If specified during creation post event to completion queue */
- if (ODP_QUEUE_INVALID != session->compl_queue) {
+ if (ODP_QUEUE_INVALID != session->p.compl_queue) {
odp_event_t completion_event;
odp_crypto_generic_op_result_t *op_result;
/* Linux generic will always use packet for completion event */
- completion_event = odp_packet_to_event(params->out_pkt);
+ completion_event = odp_packet_to_event(param->out_pkt);
_odp_buffer_event_type_set(
odp_buffer_from_event(completion_event),
ODP_EVENT_CRYPTO_COMPL);
@@ -799,7 +930,7 @@ odp_crypto_operation(odp_crypto_op_params_t *params,
op_result = get_op_result_from_event(completion_event);
op_result->magic = OP_RESULT_MAGIC;
op_result->result = local_result;
- if (odp_queue_enq(session->compl_queue, completion_event)) {
+ if (odp_queue_enq(session->p.compl_queue, completion_event)) {
odp_event_free(completion_event);
return -1;
}
@@ -818,16 +949,35 @@ odp_crypto_operation(odp_crypto_op_params_t *params,
return 0;
}
+static unsigned long openssl_thread_id(void)
+{
+ return (unsigned long)odp_thread_id();
+}
+
+static void openssl_lock(int mode, int n,
+ const char *file ODP_UNUSED,
+ int line ODP_UNUSED)
+{
+ if (mode & CRYPTO_LOCK)
+ odp_ticketlock_lock((odp_ticketlock_t *)
+ &global->openssl_lock[n]);
+ else
+ odp_ticketlock_unlock((odp_ticketlock_t *)
+ &global->openssl_lock[n]);
+}
+
int
odp_crypto_init_global(void)
{
size_t mem_size;
odp_shm_t shm;
int idx;
+ int nlocks = CRYPTO_num_locks();
/* Calculate the memory size we need */
mem_size = sizeof(*global);
mem_size += (MAX_SESSIONS * sizeof(odp_crypto_generic_session_t));
+ mem_size += nlocks * sizeof(odp_ticketlock_t);
/* Allocate our globally shared memory */
shm = odp_shm_reserve("crypto_pool", mem_size,
@@ -845,6 +995,18 @@ odp_crypto_init_global(void)
}
odp_spinlock_init(&global->lock);
+ if (nlocks > 0) {
+ global->openssl_lock =
+ (odp_ticketlock_t **)&global->sessions[MAX_SESSIONS];
+
+ for (idx = 0; idx < nlocks; idx++)
+ odp_ticketlock_init((odp_ticketlock_t *)
+ &global->openssl_lock[idx]);
+
+ CRYPTO_set_id_callback(openssl_thread_id);
+ CRYPTO_set_locking_callback(openssl_lock);
+ }
+
return 0;
}
@@ -862,6 +1024,9 @@ int odp_crypto_term_global(void)
rc = -1;
}
+ CRYPTO_set_locking_callback(NULL);
+ CRYPTO_set_id_callback(NULL);
+
ret = odp_shm_free(odp_shm_lookup("crypto_pool"));
if (ret < 0) {
ODP_ERR("shm free failed for crypto_pool\n");
@@ -871,12 +1036,48 @@ int odp_crypto_term_global(void)
return rc;
}
-int32_t
-odp_random_data(uint8_t *buf, int32_t len, odp_bool_t use_entropy ODP_UNUSED)
+odp_random_kind_t odp_random_max_kind(void)
{
- int32_t rc;
- rc = RAND_bytes(buf, len);
- return (1 == rc) ? len /*success*/: -1 /*failure*/;
+ return ODP_RANDOM_CRYPTO;
+}
+
+int32_t odp_random_data(uint8_t *buf, uint32_t len, odp_random_kind_t kind)
+{
+ int rc;
+
+ switch (kind) {
+ case ODP_RANDOM_BASIC:
+ RAND_pseudo_bytes(buf, len);
+ return len;
+
+ case ODP_RANDOM_CRYPTO:
+ rc = RAND_bytes(buf, len);
+ return (1 == rc) ? (int)len /*success*/: -1 /*failure*/;
+
+ case ODP_RANDOM_TRUE:
+ default:
+ return -1;
+ }
+}
+
+int32_t odp_random_test_data(uint8_t *buf, uint32_t len, uint64_t *seed)
+{
+ union {
+ uint32_t rand_word;
+ uint8_t rand_byte[4];
+ } u;
+ uint32_t i = 0, j;
+ uint32_t seed32 = (*seed) & 0xffffffff;
+
+ while (i < len) {
+ u.rand_word = rand_r(&seed32);
+
+ for (j = 0; j < 4 && i < len; j++, i++)
+ *buf++ = u.rand_byte[j];
+ }
+
+ *seed = seed32;
+ return len;
}
odp_crypto_compl_t odp_crypto_compl_from_event(odp_event_t ev)
@@ -914,3 +1115,8 @@ odp_crypto_compl_free(odp_crypto_compl_t completion_event)
odp_buffer_from_event((odp_event_t)completion_event),
ODP_EVENT_PACKET);
}
+
+void odp_crypto_session_param_init(odp_crypto_session_param_t *param)
+{
+ memset(param, 0, sizeof(odp_crypto_session_param_t));
+}
diff --git a/platform/linux-generic/odp_init.c b/platform/linux-generic/odp_init.c
index 77f4f8afb..06c61435e 100644
--- a/platform/linux-generic/odp_init.c
+++ b/platform/linux-generic/odp_init.c
@@ -10,17 +10,69 @@
#include <odp_internal.h>
#include <odp_schedule_if.h>
#include <string.h>
+#include <stdio.h>
+#include <linux/limits.h>
+#include <dirent.h>
+#include <unistd.h>
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+
+#define _ODP_FILES_FMT "odp-%d-"
+#define _ODP_TMPDIR "/tmp"
struct odp_global_data_s odp_global_data;
+/* remove all files starting with "odp-<pid>" from a directory "dir" */
+static int cleanup_files(const char *dirpath, int odp_pid)
+{
+ struct dirent *e;
+ DIR *dir;
+ char prefix[PATH_MAX];
+ char *fullpath;
+ int d_len = strlen(dirpath);
+ int p_len;
+ int f_len;
+
+ dir = opendir(dirpath);
+ if (!dir) {
+ /* ok if the dir does not exist. not much to delete then! */
+ ODP_DBG("opendir failed for %s: %s\n",
+ dirpath, strerror(errno));
+ return 0;
+ }
+ snprintf(prefix, PATH_MAX, _ODP_FILES_FMT, odp_pid);
+ p_len = strlen(prefix);
+ while ((e = readdir(dir)) != NULL) {
+ if (strncmp(e->d_name, prefix, p_len) == 0) {
+ f_len = strlen(e->d_name);
+ fullpath = malloc(d_len + f_len + 2);
+ if (fullpath == NULL) {
+ closedir(dir);
+ return -1;
+ }
+ snprintf(fullpath, PATH_MAX, "%s/%s",
+ dirpath, e->d_name);
+ ODP_DBG("deleting obsolete file: %s\n", fullpath);
+ if (unlink(fullpath))
+ ODP_ERR("unlink failed for %s: %s\n",
+ fullpath, strerror(errno));
+ free(fullpath);
+ }
+ }
+ closedir(dir);
+
+ return 0;
+}
+
int odp_init_global(odp_instance_t *instance,
const odp_init_t *params,
- const odp_platform_init_t *platform_params)
+ const odp_platform_init_t *platform_params ODP_UNUSED)
{
+ char *hpdir;
+
memset(&odp_global_data, 0, sizeof(struct odp_global_data_s));
odp_global_data.main_pid = getpid();
- if (platform_params)
- odp_global_data.ipc_ns = platform_params->ipc_ns;
enum init_stage stage = NO_INIT;
odp_global_data.log_fn = odp_override_log;
@@ -33,6 +85,8 @@ int odp_init_global(odp_instance_t *instance,
odp_global_data.abort_fn = params->abort_fn;
}
+ cleanup_files(_ODP_TMPDIR, odp_global_data.main_pid);
+
if (odp_cpumask_init_global(params)) {
ODP_ERR("ODP cpumask init failed.\n");
goto init_failed;
@@ -49,13 +103,23 @@ int odp_init_global(odp_instance_t *instance,
ODP_ERR("ODP system_info init failed.\n");
goto init_failed;
}
+ hpdir = odp_global_data.hugepage_info.default_huge_page_dir;
+ /* cleanup obsolete huge page files, if any */
+ if (hpdir)
+ cleanup_files(hpdir, odp_global_data.main_pid);
stage = SYSINFO_INIT;
- if (odp_shm_init_global()) {
- ODP_ERR("ODP shm init failed.\n");
+ if (_odp_fdserver_init_global()) {
+ ODP_ERR("ODP fdserver init failed.\n");
+ goto init_failed;
+ }
+ stage = FDSERVER_INIT;
+
+ if (_odp_ishm_init_global()) {
+ ODP_ERR("ODP ishm init failed.\n");
goto init_failed;
}
- stage = SHM_INIT;
+ stage = ISHM_INIT;
if (odp_thread_init_global()) {
ODP_ERR("ODP thread init failed.\n");
@@ -210,9 +274,16 @@ int _odp_term_global(enum init_stage stage)
}
/* Fall through */
- case SHM_INIT:
- if (odp_shm_term_global()) {
- ODP_ERR("ODP shm term failed.\n");
+ case ISHM_INIT:
+ if (_odp_ishm_term_global()) {
+ ODP_ERR("ODP ishm term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case FDSERVER_INIT:
+ if (_odp_fdserver_term_global()) {
+ ODP_ERR("ODP fdserver term failed.\n");
rc = -1;
}
/* Fall through */
@@ -254,11 +325,11 @@ int odp_init_local(odp_instance_t instance, odp_thread_type_t thr_type)
goto init_fail;
}
- if (odp_shm_init_local()) {
- ODP_ERR("ODP shm local init failed.\n");
+ if (_odp_ishm_init_local()) {
+ ODP_ERR("ODP ishm local init failed.\n");
goto init_fail;
}
- stage = SHM_INIT;
+ stage = ISHM_INIT;
if (odp_thread_init_local(thr_type)) {
ODP_ERR("ODP thread local init failed.\n");
@@ -329,6 +400,13 @@ int _odp_term_local(enum init_stage stage)
}
/* Fall through */
+ case ISHM_INIT:
+ if (_odp_ishm_term_local()) {
+ ODP_ERR("ODP ishm local term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
default:
break;
}
diff --git a/platform/linux-generic/odp_packet.c b/platform/linux-generic/odp_packet.c
index c2b26fd0f..f632a513f 100644
--- a/platform/linux-generic/odp_packet.c
+++ b/platform/linux-generic/odp_packet.c
@@ -20,12 +20,176 @@
#include <stdio.h>
#include <inttypes.h>
-/*
- *
- * Alloc and free
- * ********************************************************
- *
- */
+/* Initial packet segment data length */
+#define BASE_LEN CONFIG_PACKET_MAX_SEG_LEN
+
+static inline odp_packet_t packet_handle(odp_packet_hdr_t *pkt_hdr)
+{
+ return (odp_packet_t)pkt_hdr->buf_hdr.handle.handle;
+}
+
+static inline odp_buffer_t buffer_handle(odp_packet_hdr_t *pkt_hdr)
+{
+ return pkt_hdr->buf_hdr.handle.handle;
+}
+
+static inline uint32_t packet_seg_len(odp_packet_hdr_t *pkt_hdr,
+ uint32_t seg_idx)
+{
+ return pkt_hdr->buf_hdr.seg[seg_idx].len;
+}
+
+static inline void *packet_seg_data(odp_packet_hdr_t *pkt_hdr, uint32_t seg_idx)
+{
+ return pkt_hdr->buf_hdr.seg[seg_idx].data;
+}
+
+static inline int packet_last_seg(odp_packet_hdr_t *pkt_hdr)
+{
+ if (CONFIG_PACKET_MAX_SEGS == 1)
+ return 0;
+ else
+ return pkt_hdr->buf_hdr.segcount - 1;
+}
+
+static inline uint32_t packet_first_seg_len(odp_packet_hdr_t *pkt_hdr)
+{
+ return packet_seg_len(pkt_hdr, 0);
+}
+
+static inline uint32_t packet_last_seg_len(odp_packet_hdr_t *pkt_hdr)
+{
+ int last = packet_last_seg(pkt_hdr);
+
+ return packet_seg_len(pkt_hdr, last);
+}
+
+static inline void *packet_data(odp_packet_hdr_t *pkt_hdr)
+{
+ return pkt_hdr->buf_hdr.seg[0].data;
+}
+
+static inline void *packet_tail(odp_packet_hdr_t *pkt_hdr)
+{
+ int last = packet_last_seg(pkt_hdr);
+ uint32_t seg_len = pkt_hdr->buf_hdr.seg[last].len;
+
+ return pkt_hdr->buf_hdr.seg[last].data + seg_len;
+}
+
+static inline uint32_t seg_headroom(odp_packet_hdr_t *pkt_hdr, int seg)
+{
+ odp_buffer_hdr_t *hdr = pkt_hdr->buf_hdr.seg[seg].hdr;
+ uint8_t *base = hdr->base_data;
+ uint8_t *head = pkt_hdr->buf_hdr.seg[seg].data;
+
+ return CONFIG_PACKET_HEADROOM + (head - base);
+}
+
+static inline uint32_t seg_tailroom(odp_packet_hdr_t *pkt_hdr, int seg)
+{
+ uint32_t seg_len = pkt_hdr->buf_hdr.seg[seg].len;
+ odp_buffer_hdr_t *hdr = pkt_hdr->buf_hdr.seg[seg].hdr;
+ uint8_t *tail = pkt_hdr->buf_hdr.seg[seg].data + seg_len;
+
+ return hdr->buf_end - tail;
+}
+
+static inline void push_head(odp_packet_hdr_t *pkt_hdr, uint32_t len)
+{
+ pkt_hdr->headroom -= len;
+ pkt_hdr->frame_len += len;
+ pkt_hdr->buf_hdr.seg[0].data -= len;
+ pkt_hdr->buf_hdr.seg[0].len += len;
+}
+
+static inline void pull_head(odp_packet_hdr_t *pkt_hdr, uint32_t len)
+{
+ pkt_hdr->headroom += len;
+ pkt_hdr->frame_len -= len;
+ pkt_hdr->buf_hdr.seg[0].data += len;
+ pkt_hdr->buf_hdr.seg[0].len -= len;
+}
+
+static inline void push_tail(odp_packet_hdr_t *pkt_hdr, uint32_t len)
+{
+ int last = packet_last_seg(pkt_hdr);
+
+ pkt_hdr->tailroom -= len;
+ pkt_hdr->frame_len += len;
+ pkt_hdr->buf_hdr.seg[last].len += len;
+}
+
+/* Copy all metadata for segmentation modification. Segment data and lengths
+ * are not copied. */
+static inline void packet_seg_copy_md(odp_packet_hdr_t *dst,
+ odp_packet_hdr_t *src)
+{
+ dst->p = src->p;
+
+ /* lengths are not copied:
+ * .frame_len
+ * .headroom
+ * .tailroom
+ */
+
+ dst->input = src->input;
+ dst->dst_queue = src->dst_queue;
+ dst->flow_hash = src->flow_hash;
+ dst->timestamp = src->timestamp;
+ dst->op_result = src->op_result;
+
+ /* buffer header side packet metadata */
+ dst->buf_hdr.buf_u64 = src->buf_hdr.buf_u64;
+ dst->buf_hdr.uarea_addr = src->buf_hdr.uarea_addr;
+ dst->buf_hdr.uarea_size = src->buf_hdr.uarea_size;
+
+ /* segmentation data is not copied:
+ * buf_hdr.seg[]
+ * buf_hdr.segcount
+ */
+}
+
+static inline void *packet_map(odp_packet_hdr_t *pkt_hdr,
+ uint32_t offset, uint32_t *seg_len, int *seg_idx)
+{
+ void *addr;
+ uint32_t len;
+ int seg = 0;
+ int seg_count = pkt_hdr->buf_hdr.segcount;
+
+ if (odp_unlikely(offset >= pkt_hdr->frame_len))
+ return NULL;
+
+ if (odp_likely(CONFIG_PACKET_MAX_SEGS == 1 || seg_count == 1)) {
+ addr = pkt_hdr->buf_hdr.seg[0].data + offset;
+ len = pkt_hdr->buf_hdr.seg[0].len - offset;
+ } else {
+ int i;
+ uint32_t seg_start = 0, seg_end = 0;
+
+ for (i = 0; i < seg_count; i++) {
+ seg_end += pkt_hdr->buf_hdr.seg[i].len;
+
+ if (odp_likely(offset < seg_end))
+ break;
+
+ seg_start = seg_end;
+ }
+
+ addr = pkt_hdr->buf_hdr.seg[i].data + (offset - seg_start);
+ len = pkt_hdr->buf_hdr.seg[i].len - (offset - seg_start);
+ seg = i;
+ }
+
+ if (seg_len)
+ *seg_len = len;
+
+ if (seg_idx)
+ *seg_idx = seg;
+
+ return addr;
+}
static inline void packet_parse_disable(odp_packet_hdr_t *pkt_hdr)
{
@@ -48,11 +212,23 @@ void packet_parse_reset(odp_packet_hdr_t *pkt_hdr)
/**
* Initialize packet
*/
-static void packet_init(pool_entry_t *pool, odp_packet_hdr_t *pkt_hdr,
- size_t size, int parse)
+static inline void packet_init(odp_packet_hdr_t *pkt_hdr, uint32_t len,
+ int parse)
{
- pkt_hdr->p.parsed_layers = LAYER_NONE;
+ uint32_t seg_len;
+ int num = pkt_hdr->buf_hdr.segcount;
+
+ if (odp_likely(CONFIG_PACKET_MAX_SEGS == 1 || num == 1)) {
+ seg_len = len;
+ pkt_hdr->buf_hdr.seg[0].len = len;
+ } else {
+ seg_len = len - ((num - 1) * CONFIG_PACKET_MAX_SEG_LEN);
+
+ /* Last segment data length */
+ pkt_hdr->buf_hdr.seg[num - 1].len = seg_len;
+ }
+ pkt_hdr->p.parsed_layers = LAYER_NONE;
pkt_hdr->p.input_flags.all = 0;
pkt_hdr->p.output_flags.all = 0;
pkt_hdr->p.error_flags.all = 0;
@@ -70,115 +246,360 @@ static void packet_init(pool_entry_t *pool, odp_packet_hdr_t *pkt_hdr,
* Packet tailroom is rounded up to fill the last
* segment occupied by the allocated length.
*/
- pkt_hdr->frame_len = size;
- pkt_hdr->headroom = pool->s.headroom;
- pkt_hdr->tailroom =
- (pool->s.seg_size * pkt_hdr->buf_hdr.segcount) -
- (pool->s.headroom + size);
+ pkt_hdr->frame_len = len;
+ pkt_hdr->headroom = CONFIG_PACKET_HEADROOM;
+ pkt_hdr->tailroom = CONFIG_PACKET_MAX_SEG_LEN - seg_len +
+ CONFIG_PACKET_TAILROOM;
pkt_hdr->input = ODP_PKTIO_INVALID;
}
-int packet_alloc_multi(odp_pool_t pool_hdl, uint32_t len,
- odp_packet_t pkt[], int max_num)
+static inline void init_segments(odp_packet_hdr_t *pkt_hdr[], int num)
{
- odp_packet_hdr_t *pkt_hdr;
- pool_entry_t *pool = odp_pool_to_entry(pool_hdl);
- int num, i;
+ odp_packet_hdr_t *hdr;
+ int i;
+
+ /* First segment is the packet descriptor */
+ hdr = pkt_hdr[0];
+
+ hdr->buf_hdr.seg[0].data = hdr->buf_hdr.base_data;
+ hdr->buf_hdr.seg[0].len = BASE_LEN;
- num = buffer_alloc_multi(pool_hdl, len, (odp_buffer_t *)pkt, max_num);
+ /* Link segments */
+ if (CONFIG_PACKET_MAX_SEGS != 1) {
+ hdr->buf_hdr.segcount = num;
+
+ if (odp_unlikely(num > 1)) {
+ for (i = 1; i < num; i++) {
+ odp_buffer_hdr_t *buf_hdr;
+
+ buf_hdr = &pkt_hdr[i]->buf_hdr;
+ hdr->buf_hdr.seg[i].hdr = buf_hdr;
+ hdr->buf_hdr.seg[i].data = buf_hdr->base_data;
+ hdr->buf_hdr.seg[i].len = BASE_LEN;
+ }
+ }
+ }
+}
+
+/* Calculate the number of segments */
+static inline int num_segments(uint32_t len)
+{
+ uint32_t max_seg_len;
+ int num;
+
+ if (CONFIG_PACKET_MAX_SEGS == 1)
+ return 1;
+
+ num = 1;
+ max_seg_len = CONFIG_PACKET_MAX_SEG_LEN;
+
+ if (odp_unlikely(len > max_seg_len)) {
+ num = len / max_seg_len;
+
+ if (odp_likely((num * max_seg_len) != len))
+ num += 1;
+ }
+
+ return num;
+}
+
+static inline void add_all_segs(odp_packet_hdr_t *to, odp_packet_hdr_t *from)
+{
+ int i;
+ int n = to->buf_hdr.segcount;
+ int num = from->buf_hdr.segcount;
for (i = 0; i < num; i++) {
- pkt_hdr = odp_packet_hdr(pkt[i]);
- packet_init(pool, pkt_hdr, len, 1 /* do parse */);
+ to->buf_hdr.seg[n + i].hdr = from->buf_hdr.seg[i].hdr;
+ to->buf_hdr.seg[n + i].data = from->buf_hdr.seg[i].data;
+ to->buf_hdr.seg[n + i].len = from->buf_hdr.seg[i].len;
+ }
+
+ to->buf_hdr.segcount = n + num;
+}
+
+static inline void copy_num_segs(odp_packet_hdr_t *to, odp_packet_hdr_t *from,
+ int first, int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++) {
+ to->buf_hdr.seg[i].hdr = from->buf_hdr.seg[first + i].hdr;
+ to->buf_hdr.seg[i].data = from->buf_hdr.seg[first + i].data;
+ to->buf_hdr.seg[i].len = from->buf_hdr.seg[first + i].len;
+ }
+
+ to->buf_hdr.segcount = num;
+}
+
+static inline odp_packet_hdr_t *alloc_segments(pool_t *pool, int num)
+{
+ odp_buffer_t buf[num];
+ odp_packet_hdr_t *pkt_hdr[num];
+ int ret;
- if (pkt_hdr->tailroom >= pkt_hdr->buf_hdr.segsize)
- pull_tail_seg(pkt_hdr);
+ ret = buffer_alloc_multi(pool, buf, (odp_buffer_hdr_t **)pkt_hdr, num);
+ if (odp_unlikely(ret != num)) {
+ if (ret > 0)
+ buffer_free_multi(buf, ret);
+
+ return NULL;
+ }
+
+ init_segments(pkt_hdr, num);
+
+ return pkt_hdr[0];
+}
+
+static inline odp_packet_hdr_t *add_segments(odp_packet_hdr_t *pkt_hdr,
+ pool_t *pool, uint32_t len,
+ int num, int head)
+{
+ odp_packet_hdr_t *new_hdr;
+ uint32_t seg_len, offset;
+
+ new_hdr = alloc_segments(pool, num);
+
+ if (new_hdr == NULL)
+ return NULL;
+
+ seg_len = len - ((num - 1) * pool->max_seg_len);
+ offset = pool->max_seg_len - seg_len;
+
+ if (head) {
+ /* add into the head */
+ add_all_segs(new_hdr, pkt_hdr);
+
+ /* adjust first segment length */
+ new_hdr->buf_hdr.seg[0].data += offset;
+ new_hdr->buf_hdr.seg[0].len = seg_len;
+
+ packet_seg_copy_md(new_hdr, pkt_hdr);
+ new_hdr->frame_len = pkt_hdr->frame_len + len;
+ new_hdr->headroom = pool->headroom + offset;
+ new_hdr->tailroom = pkt_hdr->tailroom;
+
+ pkt_hdr = new_hdr;
+ } else {
+ int last;
+
+ /* add into the tail */
+ add_all_segs(pkt_hdr, new_hdr);
+
+ /* adjust last segment length */
+ last = packet_last_seg(pkt_hdr);
+ pkt_hdr->buf_hdr.seg[last].len = seg_len;
+
+ pkt_hdr->frame_len += len;
+ pkt_hdr->tailroom = pool->tailroom + offset;
+ }
+
+ return pkt_hdr;
+}
+
+static inline void free_bufs(odp_packet_hdr_t *pkt_hdr, int first, int num)
+{
+ int i;
+ odp_buffer_t buf[num];
+
+ for (i = 0; i < num; i++)
+ buf[i] = buffer_handle(pkt_hdr->buf_hdr.seg[first + i].hdr);
+
+ buffer_free_multi(buf, num);
+}
+
+static inline odp_packet_hdr_t *free_segments(odp_packet_hdr_t *pkt_hdr,
+ int num, uint32_t free_len,
+ uint32_t pull_len, int head)
+{
+ int num_remain = pkt_hdr->buf_hdr.segcount - num;
+
+ if (head) {
+ odp_packet_hdr_t *new_hdr;
+ int i;
+ odp_buffer_t buf[num];
+
+ for (i = 0; i < num; i++)
+ buf[i] = buffer_handle(pkt_hdr->buf_hdr.seg[i].hdr);
+
+ /* First remaining segment is the new packet descriptor */
+ new_hdr = pkt_hdr->buf_hdr.seg[num].hdr;
+
+ copy_num_segs(new_hdr, pkt_hdr, num, num_remain);
+ packet_seg_copy_md(new_hdr, pkt_hdr);
+
+ /* Tailroom not changed */
+ new_hdr->tailroom = pkt_hdr->tailroom;
+ new_hdr->headroom = seg_headroom(new_hdr, 0);
+ new_hdr->frame_len = pkt_hdr->frame_len - free_len;
+
+ pull_head(new_hdr, pull_len);
+
+ pkt_hdr = new_hdr;
+
+ buffer_free_multi(buf, num);
+ } else {
+ /* Free last 'num' bufs */
+ free_bufs(pkt_hdr, num_remain, num);
+
+ /* Head segment remains, no need to copy or update majority
+ * of the metadata. */
+ pkt_hdr->buf_hdr.segcount = num_remain;
+ pkt_hdr->frame_len -= free_len;
+ pkt_hdr->tailroom = seg_tailroom(pkt_hdr, num_remain - 1);
+
+ pull_tail(pkt_hdr, pull_len);
+ }
+
+ return pkt_hdr;
+}
+
+static inline int packet_alloc(pool_t *pool, uint32_t len, int max_pkt,
+ int num_seg, odp_packet_t *pkt, int parse)
+{
+ int num_buf, i;
+ int num = max_pkt;
+ int max_buf = max_pkt * num_seg;
+ odp_buffer_t buf[max_buf];
+ odp_packet_hdr_t *pkt_hdr[max_buf];
+
+ num_buf = buffer_alloc_multi(pool, buf, (odp_buffer_hdr_t **)pkt_hdr,
+ max_buf);
+
+ /* Failed to allocate all segments */
+ if (odp_unlikely(num_buf != max_buf)) {
+ int num_free;
+
+ num = num_buf / num_seg;
+ num_free = num_buf - (num * num_seg);
+
+ if (num_free > 0)
+ buffer_free_multi(&buf[num_buf - num_free], num_free);
+
+ if (num == 0)
+ return 0;
+ }
+
+ for (i = 0; i < num; i++) {
+ odp_packet_hdr_t *hdr;
+
+ /* First buffer is the packet descriptor */
+ pkt[i] = (odp_packet_t)buf[i * num_seg];
+ hdr = pkt_hdr[i * num_seg];
+ init_segments(&pkt_hdr[i * num_seg], num_seg);
+
+ packet_init(hdr, len, parse);
}
return num;
}
+int packet_alloc_multi(odp_pool_t pool_hdl, uint32_t len,
+ odp_packet_t pkt[], int max_num)
+{
+ pool_t *pool = pool_entry_from_hdl(pool_hdl);
+ int num, num_seg;
+
+ num_seg = num_segments(len);
+ num = packet_alloc(pool, len, max_num, num_seg, pkt, 1);
+
+ return num;
+}
+
odp_packet_t odp_packet_alloc(odp_pool_t pool_hdl, uint32_t len)
{
- pool_entry_t *pool = odp_pool_to_entry(pool_hdl);
- size_t pkt_size = len ? len : pool->s.params.buf.size;
+ pool_t *pool = pool_entry_from_hdl(pool_hdl);
odp_packet_t pkt;
- odp_packet_hdr_t *pkt_hdr;
+ int num, num_seg;
- if (pool->s.params.type != ODP_POOL_PACKET) {
+ if (odp_unlikely(pool->params.type != ODP_POOL_PACKET)) {
__odp_errno = EINVAL;
return ODP_PACKET_INVALID;
}
- pkt = (odp_packet_t)buffer_alloc(pool_hdl, pkt_size);
- if (pkt == ODP_PACKET_INVALID)
+ if (odp_unlikely(len > pool->max_len))
return ODP_PACKET_INVALID;
- pkt_hdr = odp_packet_hdr(pkt);
- packet_init(pool, pkt_hdr, pkt_size, 0 /* do not parse */);
- if (len == 0)
- pull_tail(pkt_hdr, pkt_size);
+ num_seg = num_segments(len);
+ num = packet_alloc(pool, len, 1, num_seg, &pkt, 0);
- if (pkt_hdr->tailroom >= pkt_hdr->buf_hdr.segsize)
- pull_tail_seg(pkt_hdr);
+ if (odp_unlikely(num == 0))
+ return ODP_PACKET_INVALID;
return pkt;
}
int odp_packet_alloc_multi(odp_pool_t pool_hdl, uint32_t len,
- odp_packet_t pkt[], int num)
+ odp_packet_t pkt[], int max_num)
{
- pool_entry_t *pool = odp_pool_to_entry(pool_hdl);
- size_t pkt_size = len ? len : pool->s.params.buf.size;
- int count, i;
+ pool_t *pool = pool_entry_from_hdl(pool_hdl);
+ int num, num_seg;
- if (pool->s.params.type != ODP_POOL_PACKET) {
+ if (odp_unlikely(pool->params.type != ODP_POOL_PACKET)) {
__odp_errno = EINVAL;
return -1;
}
- count = buffer_alloc_multi(pool_hdl, pkt_size,
- (odp_buffer_t *)pkt, num);
-
- for (i = 0; i < count; ++i) {
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt[i]);
+ if (odp_unlikely(len > pool->max_len))
+ return -1;
- packet_init(pool, pkt_hdr, pkt_size, 0 /* do not parse */);
- if (len == 0)
- pull_tail(pkt_hdr, pkt_size);
+ num_seg = num_segments(len);
+ num = packet_alloc(pool, len, max_num, num_seg, pkt, 0);
- if (pkt_hdr->tailroom >= pkt_hdr->buf_hdr.segsize)
- pull_tail_seg(pkt_hdr);
- }
-
- return count;
+ return num;
}
void odp_packet_free(odp_packet_t pkt)
{
- uint32_t pool_id = pool_id_from_buf((odp_buffer_t)pkt);
+ odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
+ int num_seg = pkt_hdr->buf_hdr.segcount;
- buffer_free(pool_id, (odp_buffer_t)pkt);
+ if (odp_likely(CONFIG_PACKET_MAX_SEGS == 1 || num_seg == 1))
+ buffer_free_multi((odp_buffer_t *)&pkt, 1);
+ else
+ free_bufs(pkt_hdr, 0, num_seg);
}
void odp_packet_free_multi(const odp_packet_t pkt[], int num)
{
- uint32_t pool_id = pool_id_from_buf((odp_buffer_t)pkt[0]);
+ if (CONFIG_PACKET_MAX_SEGS == 1) {
+ buffer_free_multi((const odp_buffer_t * const)pkt, num);
+ } else {
+ odp_buffer_t buf[num * CONFIG_PACKET_MAX_SEGS];
+ int i, j;
+ int bufs = 0;
+
+ for (i = 0; i < num; i++) {
+ odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt[i]);
+ int num_seg = pkt_hdr->buf_hdr.segcount;
+ odp_buffer_hdr_t *buf_hdr = &pkt_hdr->buf_hdr;
+
+ buf[bufs] = (odp_buffer_t)pkt[i];
+ bufs++;
+
+ if (odp_likely(num_seg == 1))
+ continue;
+
+ for (j = 1; j < num_seg; j++) {
+ buf[bufs] = buffer_handle(buf_hdr->seg[j].hdr);
+ bufs++;
+ }
+ }
- buffer_free_multi(pool_id, (const odp_buffer_t * const)pkt, num);
+ buffer_free_multi(buf, bufs);
+ }
}
int odp_packet_reset(odp_packet_t pkt, uint32_t len)
{
odp_packet_hdr_t *const pkt_hdr = odp_packet_hdr(pkt);
- pool_entry_t *pool = odp_buf_to_pool(&pkt_hdr->buf_hdr);
- uint32_t totsize = pool->s.headroom + len + pool->s.tailroom;
+ pool_t *pool = pool_entry_from_hdl(pkt_hdr->buf_hdr.pool_hdl);
- if (totsize > pkt_hdr->buf_hdr.size)
+ if (len > pool->headroom + pool->data_size + pool->tailroom)
return -1;
- packet_init(pool, pkt_hdr, len, 0);
+ packet_init(pkt_hdr, len, 0);
return 0;
}
@@ -214,7 +635,7 @@ void *odp_packet_head(odp_packet_t pkt)
{
odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
- return buffer_map(&pkt_hdr->buf_hdr, 0, NULL, 0);
+ return pkt_hdr->buf_hdr.seg[0].data - pkt_hdr->headroom;
}
uint32_t odp_packet_buf_len(odp_packet_t pkt)
@@ -228,17 +649,14 @@ void *odp_packet_data(odp_packet_t pkt)
{
odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
- return packet_map(pkt_hdr, 0, NULL);
+ return packet_data(pkt_hdr);
}
uint32_t odp_packet_seg_len(odp_packet_t pkt)
{
odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
- uint32_t seglen;
- /* Call returns length of 1st data segment */
- packet_map(pkt_hdr, 0, &seglen);
- return seglen;
+ return packet_first_seg_len(pkt_hdr);
}
uint32_t odp_packet_len(odp_packet_t pkt)
@@ -260,7 +678,7 @@ void *odp_packet_tail(odp_packet_t pkt)
{
odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
- return packet_map(pkt_hdr, pkt_hdr->frame_len, NULL);
+ return packet_tail(pkt_hdr);
}
void *odp_packet_push_head(odp_packet_t pkt, uint32_t len)
@@ -271,22 +689,285 @@ void *odp_packet_push_head(odp_packet_t pkt, uint32_t len)
return NULL;
push_head(pkt_hdr, len);
- return packet_map(pkt_hdr, 0, NULL);
+ return packet_data(pkt_hdr);
+}
+
+static inline uint32_t pack_seg_head(odp_packet_hdr_t *pkt_hdr, int seg)
+{
+ odp_buffer_hdr_t *hdr = pkt_hdr->buf_hdr.seg[seg].hdr;
+ uint32_t len = pkt_hdr->buf_hdr.seg[seg].len;
+ uint8_t *src = pkt_hdr->buf_hdr.seg[seg].data;
+ uint8_t *dst = hdr->base_data;
+
+ if (dst != src) {
+ memmove(dst, src, len);
+ pkt_hdr->buf_hdr.seg[seg].data = dst;
+ }
+
+ return len;
+}
+
+static inline uint32_t pack_seg_tail(odp_packet_hdr_t *pkt_hdr, int seg)
+{
+ odp_buffer_hdr_t *hdr = pkt_hdr->buf_hdr.seg[seg].hdr;
+ uint32_t len = pkt_hdr->buf_hdr.seg[seg].len;
+ uint8_t *src = pkt_hdr->buf_hdr.seg[seg].data;
+ uint8_t *dst = hdr->base_data + BASE_LEN - len;
+
+ if (dst != src) {
+ memmove(dst, src, len);
+ pkt_hdr->buf_hdr.seg[seg].data = dst;
+ }
+
+ return len;
+}
+
+static inline uint32_t fill_seg_head(odp_packet_hdr_t *pkt_hdr, int dst_seg,
+ int src_seg, uint32_t max_len)
+{
+ uint32_t len = pkt_hdr->buf_hdr.seg[src_seg].len;
+ uint8_t *src = pkt_hdr->buf_hdr.seg[src_seg].data;
+ uint32_t offset = pkt_hdr->buf_hdr.seg[dst_seg].len;
+ uint8_t *dst = pkt_hdr->buf_hdr.seg[dst_seg].data + offset;
+
+ if (len > max_len)
+ len = max_len;
+
+ memmove(dst, src, len);
+
+ pkt_hdr->buf_hdr.seg[dst_seg].len += len;
+ pkt_hdr->buf_hdr.seg[src_seg].len -= len;
+ pkt_hdr->buf_hdr.seg[src_seg].data += len;
+
+ if (pkt_hdr->buf_hdr.seg[src_seg].len == 0) {
+ odp_buffer_hdr_t *hdr = pkt_hdr->buf_hdr.seg[src_seg].hdr;
+
+ pkt_hdr->buf_hdr.seg[src_seg].data = hdr->base_data;
+ }
+
+ return len;
+}
+
+static inline uint32_t fill_seg_tail(odp_packet_hdr_t *pkt_hdr, int dst_seg,
+ int src_seg, uint32_t max_len)
+{
+ uint32_t src_len = pkt_hdr->buf_hdr.seg[src_seg].len;
+ uint8_t *src = pkt_hdr->buf_hdr.seg[src_seg].data;
+ uint8_t *dst = pkt_hdr->buf_hdr.seg[dst_seg].data;
+ uint32_t len = src_len;
+
+ if (len > max_len)
+ len = max_len;
+
+ src += src_len - len;
+ dst -= len;
+
+ memmove(dst, src, len);
+
+ pkt_hdr->buf_hdr.seg[dst_seg].data -= len;
+ pkt_hdr->buf_hdr.seg[dst_seg].len += len;
+ pkt_hdr->buf_hdr.seg[src_seg].len -= len;
+
+ if (pkt_hdr->buf_hdr.seg[src_seg].len == 0) {
+ odp_buffer_hdr_t *hdr = pkt_hdr->buf_hdr.seg[src_seg].hdr;
+
+ pkt_hdr->buf_hdr.seg[src_seg].data = hdr->base_data;
+ }
+
+ return len;
+}
+
+static inline int move_data_to_head(odp_packet_hdr_t *pkt_hdr, int segs)
+{
+ int dst_seg, src_seg;
+ uint32_t len, free_len;
+ uint32_t moved = 0;
+
+ for (dst_seg = 0; dst_seg < segs; dst_seg++) {
+ len = pack_seg_head(pkt_hdr, dst_seg);
+ moved += len;
+
+ if (len == BASE_LEN)
+ continue;
+
+ free_len = BASE_LEN - len;
+
+ for (src_seg = dst_seg + 1; src_seg < segs; src_seg++) {
+ len = fill_seg_head(pkt_hdr, dst_seg, src_seg,
+ free_len);
+ moved += len;
+
+ if (len == free_len) {
+ /* dst seg is full */
+ break;
+ }
+
+ /* src seg is empty */
+ free_len -= len;
+ }
+
+ if (moved == pkt_hdr->frame_len)
+ break;
+ }
+
+ /* last segment which has data */
+ return dst_seg;
+}
+
+static inline int move_data_to_tail(odp_packet_hdr_t *pkt_hdr, int segs)
+{
+ int dst_seg, src_seg;
+ uint32_t len, free_len;
+ uint32_t moved = 0;
+
+ for (dst_seg = segs - 1; dst_seg >= 0; dst_seg--) {
+ len = pack_seg_tail(pkt_hdr, dst_seg);
+ moved += len;
+
+ if (len == BASE_LEN)
+ continue;
+
+ free_len = BASE_LEN - len;
+
+ for (src_seg = dst_seg - 1; src_seg >= 0; src_seg--) {
+ len = fill_seg_tail(pkt_hdr, dst_seg, src_seg,
+ free_len);
+ moved += len;
+
+ if (len == free_len) {
+ /* dst seg is full */
+ break;
+ }
+
+ /* src seg is empty */
+ free_len -= len;
+ }
+
+ if (moved == pkt_hdr->frame_len)
+ break;
+ }
+
+ /* first segment which has data */
+ return dst_seg;
+}
+
+static inline void reset_seg(odp_packet_hdr_t *pkt_hdr, int first, int num)
+{
+ odp_buffer_hdr_t *hdr;
+ void *base;
+ int i;
+
+ for (i = first; i < first + num; i++) {
+ hdr = pkt_hdr->buf_hdr.seg[i].hdr;
+ base = hdr->base_data;
+ pkt_hdr->buf_hdr.seg[i].len = BASE_LEN;
+ pkt_hdr->buf_hdr.seg[i].data = base;
+ }
}
int odp_packet_extend_head(odp_packet_t *pkt, uint32_t len,
void **data_ptr, uint32_t *seg_len)
{
odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(*pkt);
+ uint32_t frame_len = pkt_hdr->frame_len;
+ uint32_t headroom = pkt_hdr->headroom;
+ int ret = 0;
- if (len > pkt_hdr->headroom && push_head_seg(pkt_hdr, len))
- return -1;
+ if (len > headroom) {
+ pool_t *pool = pool_entry_from_hdl(pkt_hdr->buf_hdr.pool_hdl);
+ int num;
+ int segs;
- push_head(pkt_hdr, len);
+ if (odp_unlikely((frame_len + len) > pool->max_len))
+ return -1;
+
+ num = num_segments(len - headroom);
+ segs = pkt_hdr->buf_hdr.segcount;
+
+ if (odp_unlikely((segs + num) > CONFIG_PACKET_MAX_SEGS)) {
+ /* Cannot directly add new segments */
+ odp_packet_hdr_t *new_hdr;
+ int new_segs = 0;
+ int free_segs = 0;
+ uint32_t offset;
+
+ num = num_segments(frame_len + len);
+
+ if (num > segs) {
+ /* Allocate additional segments */
+ new_segs = num - segs;
+ new_hdr = alloc_segments(pool, new_segs);
+
+ if (new_hdr == NULL)
+ return -1;
+
+ } else if (num < segs) {
+ free_segs = segs - num;
+ }
+
+ /* Pack all data to packet tail */
+ move_data_to_tail(pkt_hdr, segs);
+ reset_seg(pkt_hdr, 0, segs);
+
+ if (new_segs) {
+ add_all_segs(new_hdr, pkt_hdr);
+ packet_seg_copy_md(new_hdr, pkt_hdr);
+ segs += new_segs;
+
+ pkt_hdr = new_hdr;
+ *pkt = packet_handle(pkt_hdr);
+ } else if (free_segs) {
+ new_hdr = pkt_hdr->buf_hdr.seg[free_segs].hdr;
+ packet_seg_copy_md(new_hdr, pkt_hdr);
+
+ /* Free extra segs */
+ free_bufs(pkt_hdr, 0, free_segs);
+
+ segs -= free_segs;
+ pkt_hdr = new_hdr;
+ *pkt = packet_handle(pkt_hdr);
+ }
+
+ frame_len += len;
+ offset = (segs * BASE_LEN) - frame_len;
+
+ pkt_hdr->buf_hdr.seg[0].data += offset;
+ pkt_hdr->buf_hdr.seg[0].len -= offset;
+
+ pkt_hdr->buf_hdr.segcount = segs;
+ pkt_hdr->frame_len = frame_len;
+ pkt_hdr->headroom = offset + pool->headroom;
+ pkt_hdr->tailroom = pool->tailroom;
+
+ /* Data was moved */
+ ret = 1;
+ } else {
+ void *ptr;
+
+ push_head(pkt_hdr, headroom);
+ ptr = add_segments(pkt_hdr, pool, len - headroom,
+ num, 1);
+
+ if (ptr == NULL) {
+ /* segment alloc failed, rollback changes */
+ pull_head(pkt_hdr, headroom);
+ return -1;
+ }
+
+ *pkt = packet_handle(ptr);
+ pkt_hdr = ptr;
+ }
+ } else {
+ push_head(pkt_hdr, len);
+ }
if (data_ptr)
- *data_ptr = packet_map(pkt_hdr, 0, seg_len);
- return 0;
+ *data_ptr = packet_data(pkt_hdr);
+
+ if (seg_len)
+ *seg_len = packet_first_seg_len(pkt_hdr);
+
+ return ret;
}
void *odp_packet_pull_head(odp_packet_t pkt, uint32_t len)
@@ -297,79 +978,193 @@ void *odp_packet_pull_head(odp_packet_t pkt, uint32_t len)
return NULL;
pull_head(pkt_hdr, len);
- return packet_map(pkt_hdr, 0, NULL);
+ return packet_data(pkt_hdr);
}
int odp_packet_trunc_head(odp_packet_t *pkt, uint32_t len,
- void **data_ptr, uint32_t *seg_len)
+ void **data_ptr, uint32_t *seg_len_out)
{
odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(*pkt);
+ uint32_t seg_len = packet_first_seg_len(pkt_hdr);
if (len > pkt_hdr->frame_len)
return -1;
- pull_head(pkt_hdr, len);
- if (pkt_hdr->headroom >= pkt_hdr->buf_hdr.segsize)
- pull_head_seg(pkt_hdr);
+ if (len < seg_len) {
+ pull_head(pkt_hdr, len);
+ } else if (CONFIG_PACKET_MAX_SEGS != 1) {
+ int num = 0;
+ uint32_t pull_len = 0;
+
+ while (seg_len <= len) {
+ pull_len = len - seg_len;
+ num++;
+ seg_len += packet_seg_len(pkt_hdr, num);
+ }
+
+ pkt_hdr = free_segments(pkt_hdr, num, len - pull_len,
+ pull_len, 1);
+ *pkt = packet_handle(pkt_hdr);
+ }
if (data_ptr)
- *data_ptr = packet_map(pkt_hdr, 0, seg_len);
+ *data_ptr = packet_data(pkt_hdr);
+
+ if (seg_len_out)
+ *seg_len_out = packet_first_seg_len(pkt_hdr);
+
return 0;
}
void *odp_packet_push_tail(odp_packet_t pkt, uint32_t len)
{
odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
- uint32_t origin = pkt_hdr->frame_len;
+ void *old_tail;
if (len > pkt_hdr->tailroom)
return NULL;
+ old_tail = packet_tail(pkt_hdr);
push_tail(pkt_hdr, len);
- return packet_map(pkt_hdr, origin, NULL);
+
+ return old_tail;
}
int odp_packet_extend_tail(odp_packet_t *pkt, uint32_t len,
- void **data_ptr, uint32_t *seg_len)
+ void **data_ptr, uint32_t *seg_len_out)
{
odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(*pkt);
- uint32_t origin = pkt_hdr->frame_len;
+ uint32_t frame_len = pkt_hdr->frame_len;
+ uint32_t tailroom = pkt_hdr->tailroom;
+ uint32_t tail_off = frame_len;
+ int ret = 0;
- if (len > pkt_hdr->tailroom && push_tail_seg(pkt_hdr, len))
- return -1;
+ if (len > tailroom) {
+ pool_t *pool = pool_entry_from_hdl(pkt_hdr->buf_hdr.pool_hdl);
+ int num;
+ int segs;
- push_tail(pkt_hdr, len);
+ if (odp_unlikely((frame_len + len) > pool->max_len))
+ return -1;
+
+ num = num_segments(len - tailroom);
+ segs = pkt_hdr->buf_hdr.segcount;
+
+ if (odp_unlikely((segs + num) > CONFIG_PACKET_MAX_SEGS)) {
+ /* Cannot directly add new segments */
+ odp_packet_hdr_t *new_hdr;
+ int new_segs = 0;
+ int free_segs = 0;
+ uint32_t offset;
+
+ num = num_segments(frame_len + len);
+
+ if (num > segs) {
+ /* Allocate additional segments */
+ new_segs = num - segs;
+ new_hdr = alloc_segments(pool, new_segs);
+
+ if (new_hdr == NULL)
+ return -1;
+
+ } else if (num < segs) {
+ free_segs = segs - num;
+ }
+
+ /* Pack all data to packet head */
+ move_data_to_head(pkt_hdr, segs);
+ reset_seg(pkt_hdr, 0, segs);
+
+ if (new_segs) {
+ /* Add new segs */
+ add_all_segs(pkt_hdr, new_hdr);
+ segs += new_segs;
+ } else if (free_segs) {
+ /* Free extra segs */
+ free_bufs(pkt_hdr, segs - free_segs, free_segs);
+
+ segs -= free_segs;
+ }
+
+ frame_len += len;
+ offset = (segs * BASE_LEN) - frame_len;
+
+ pkt_hdr->buf_hdr.seg[segs - 1].len -= offset;
+
+ pkt_hdr->buf_hdr.segcount = segs;
+ pkt_hdr->frame_len = frame_len;
+ pkt_hdr->headroom = pool->headroom;
+ pkt_hdr->tailroom = offset + pool->tailroom;
+
+ /* Data was moved */
+ ret = 1;
+ } else {
+ void *ptr;
+
+ push_tail(pkt_hdr, tailroom);
+
+ ptr = add_segments(pkt_hdr, pool, len - tailroom,
+ num, 0);
+
+ if (ptr == NULL) {
+ /* segment alloc failed, rollback changes */
+ pull_tail(pkt_hdr, tailroom);
+ return -1;
+ }
+ }
+ } else {
+ push_tail(pkt_hdr, len);
+ }
if (data_ptr)
- *data_ptr = packet_map(pkt_hdr, origin, seg_len);
- return 0;
+ *data_ptr = packet_map(pkt_hdr, tail_off, seg_len_out, NULL);
+
+ return ret;
}
void *odp_packet_pull_tail(odp_packet_t pkt, uint32_t len)
{
odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
- if (len > pkt_hdr->frame_len)
+ if (len > packet_last_seg_len(pkt_hdr))
return NULL;
pull_tail(pkt_hdr, len);
- return packet_map(pkt_hdr, pkt_hdr->frame_len, NULL);
+
+ return packet_tail(pkt_hdr);
}
int odp_packet_trunc_tail(odp_packet_t *pkt, uint32_t len,
void **tail_ptr, uint32_t *tailroom)
{
+ int last;
+ uint32_t seg_len;
odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(*pkt);
if (len > pkt_hdr->frame_len)
return -1;
- pull_tail(pkt_hdr, len);
- if (pkt_hdr->tailroom >= pkt_hdr->buf_hdr.segsize)
- pull_tail_seg(pkt_hdr);
+ last = packet_last_seg(pkt_hdr);
+ seg_len = packet_seg_len(pkt_hdr, last);
+
+ if (len < seg_len) {
+ pull_tail(pkt_hdr, len);
+ } else if (CONFIG_PACKET_MAX_SEGS != 1) {
+ int num = 0;
+ uint32_t pull_len = 0;
+
+ while (seg_len <= len) {
+ pull_len = len - seg_len;
+ num++;
+ seg_len += packet_seg_len(pkt_hdr, last - num);
+ }
+
+ free_segments(pkt_hdr, num, len - pull_len, pull_len, 0);
+ }
if (tail_ptr)
- *tail_ptr = packet_map(pkt_hdr, pkt_hdr->frame_len, NULL);
+ *tail_ptr = packet_tail(pkt_hdr);
+
if (tailroom)
*tailroom = pkt_hdr->tailroom;
return 0;
@@ -378,17 +1173,12 @@ int odp_packet_trunc_tail(odp_packet_t *pkt, uint32_t len,
void *odp_packet_offset(odp_packet_t pkt, uint32_t offset, uint32_t *len,
odp_packet_seg_t *seg)
{
+ int seg_idx;
odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
- void *addr = packet_map(pkt_hdr, offset, len);
-
- if (addr != NULL && seg != NULL) {
- odp_buffer_bits_t seghandle;
+ void *addr = packet_map(pkt_hdr, offset, len, &seg_idx);
- seghandle.handle = (odp_buffer_t)pkt;
- seghandle.seg = (pkt_hdr->headroom + offset) /
- pkt_hdr->buf_hdr.segsize;
- *seg = (odp_packet_seg_t)seghandle.handle;
- }
+ if (addr != NULL && seg != NULL)
+ *seg = seg_idx;
return addr;
}
@@ -448,7 +1238,7 @@ void *odp_packet_l2_ptr(odp_packet_t pkt, uint32_t *len)
if (!packet_hdr_has_l2(pkt_hdr))
return NULL;
- return packet_map(pkt_hdr, pkt_hdr->p.l2_offset, len);
+ return packet_map(pkt_hdr, pkt_hdr->p.l2_offset, len, NULL);
}
uint32_t odp_packet_l2_offset(odp_packet_t pkt)
@@ -478,7 +1268,7 @@ void *odp_packet_l3_ptr(odp_packet_t pkt, uint32_t *len)
if (pkt_hdr->p.parsed_layers < LAYER_L3)
packet_parse_layer(pkt_hdr, LAYER_L3);
- return packet_map(pkt_hdr, pkt_hdr->p.l3_offset, len);
+ return packet_map(pkt_hdr, pkt_hdr->p.l3_offset, len, NULL);
}
uint32_t odp_packet_l3_offset(odp_packet_t pkt)
@@ -509,7 +1299,7 @@ void *odp_packet_l4_ptr(odp_packet_t pkt, uint32_t *len)
if (pkt_hdr->p.parsed_layers < LAYER_L4)
packet_parse_layer(pkt_hdr, LAYER_L4);
- return packet_map(pkt_hdr, pkt_hdr->p.l4_offset, len);
+ return packet_map(pkt_hdr, pkt_hdr->p.l4_offset, len, NULL);
}
uint32_t odp_packet_l4_offset(odp_packet_t pkt)
@@ -571,30 +1361,33 @@ int odp_packet_is_segmented(odp_packet_t pkt)
int odp_packet_num_segs(odp_packet_t pkt)
{
- return odp_packet_hdr(pkt)->buf_hdr.segcount;
+ odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
+
+ return pkt_hdr->buf_hdr.segcount;
}
odp_packet_seg_t odp_packet_first_seg(odp_packet_t pkt)
{
- return (odp_packet_seg_t)pkt;
+ (void)pkt;
+
+ return 0;
}
odp_packet_seg_t odp_packet_last_seg(odp_packet_t pkt)
{
odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
- odp_buffer_bits_t seghandle;
- seghandle.handle = (odp_buffer_t)pkt;
- seghandle.seg = pkt_hdr->buf_hdr.segcount - 1;
- return (odp_packet_seg_t)seghandle.handle;
+ return packet_last_seg(pkt_hdr);
}
odp_packet_seg_t odp_packet_next_seg(odp_packet_t pkt, odp_packet_seg_t seg)
{
odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
- return (odp_packet_seg_t)segment_next(&pkt_hdr->buf_hdr,
- (odp_buffer_seg_t)seg);
+ if (odp_unlikely(seg >= (odp_packet_seg_t)packet_last_seg(pkt_hdr)))
+ return ODP_PACKET_SEG_INVALID;
+
+ return seg + 1;
}
/*
@@ -608,19 +1401,20 @@ void *odp_packet_seg_data(odp_packet_t pkt, odp_packet_seg_t seg)
{
odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
- return segment_map(&pkt_hdr->buf_hdr, (odp_buffer_seg_t)seg, NULL,
- pkt_hdr->frame_len, pkt_hdr->headroom);
+ if (odp_unlikely(seg >= pkt_hdr->buf_hdr.segcount))
+ return NULL;
+
+ return packet_seg_data(pkt_hdr, seg);
}
uint32_t odp_packet_seg_data_len(odp_packet_t pkt, odp_packet_seg_t seg)
{
odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
- uint32_t seglen = 0;
- segment_map(&pkt_hdr->buf_hdr, (odp_buffer_seg_t)seg, &seglen,
- pkt_hdr->frame_len, pkt_hdr->headroom);
+ if (odp_unlikely(seg >= pkt_hdr->buf_hdr.segcount))
+ return 0;
- return seglen;
+ return packet_seg_len(pkt_hdr, seg);
}
/*
@@ -695,7 +1489,7 @@ int odp_packet_align(odp_packet_t *pkt, uint32_t offset, uint32_t len,
uint32_t shift;
uint32_t seglen = 0; /* GCC */
odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(*pkt);
- void *addr = packet_map(pkt_hdr, offset, &seglen);
+ void *addr = packet_map(pkt_hdr, offset, &seglen, NULL);
uint64_t uaddr = (uint64_t)(uintptr_t)addr;
uint64_t misalign;
@@ -709,7 +1503,7 @@ int odp_packet_align(odp_packet_t *pkt, uint32_t offset, uint32_t len,
return 0;
shift = align - misalign;
} else {
- if (len > pkt_hdr->buf_hdr.segsize)
+ if (len > pkt_hdr->buf_hdr.size)
return -1;
shift = len - seglen;
uaddr -= shift;
@@ -732,18 +1526,38 @@ int odp_packet_align(odp_packet_t *pkt, uint32_t offset, uint32_t len,
int odp_packet_concat(odp_packet_t *dst, odp_packet_t src)
{
- uint32_t dst_len = odp_packet_len(*dst);
- uint32_t src_len = odp_packet_len(src);
-
- if (odp_packet_extend_tail(dst, src_len, NULL, NULL) >= 0) {
- (void)odp_packet_copy_from_pkt(*dst, dst_len,
- src, 0, src_len);
- if (src != *dst)
+ odp_packet_hdr_t *dst_hdr = odp_packet_hdr(*dst);
+ odp_packet_hdr_t *src_hdr = odp_packet_hdr(src);
+ int dst_segs = dst_hdr->buf_hdr.segcount;
+ int src_segs = src_hdr->buf_hdr.segcount;
+ odp_pool_t dst_pool = dst_hdr->buf_hdr.pool_hdl;
+ odp_pool_t src_pool = src_hdr->buf_hdr.pool_hdl;
+ uint32_t dst_len = dst_hdr->frame_len;
+ uint32_t src_len = src_hdr->frame_len;
+
+ /* Do a copy if resulting packet would be out of segments or packets
+ * are from different pools. */
+ if (odp_unlikely((dst_segs + src_segs) > CONFIG_PACKET_MAX_SEGS) ||
+ odp_unlikely(dst_pool != src_pool)) {
+ if (odp_packet_extend_tail(dst, src_len, NULL, NULL) >= 0) {
+ (void)odp_packet_copy_from_pkt(*dst, dst_len,
+ src, 0, src_len);
odp_packet_free(src);
- return 0;
+
+ /* Data was moved in memory */
+ return 1;
+ }
+
+ return -1;
}
- return -1;
+ add_all_segs(dst_hdr, src_hdr);
+
+ dst_hdr->frame_len = dst_len + src_len;
+ dst_hdr->tailroom = src_hdr->tailroom;
+
+ /* Data was not moved in memory */
+ return 0;
}
int odp_packet_split(odp_packet_t *pkt, uint32_t len, odp_packet_t *tail)
@@ -815,7 +1629,7 @@ int odp_packet_copy_to_mem(odp_packet_t pkt, uint32_t offset,
return -1;
while (len > 0) {
- mapaddr = packet_map(pkt_hdr, offset, &seglen);
+ mapaddr = packet_map(pkt_hdr, offset, &seglen, NULL);
cpylen = len > seglen ? seglen : len;
memcpy(dstaddr, mapaddr, cpylen);
offset += cpylen;
@@ -839,7 +1653,7 @@ int odp_packet_copy_from_mem(odp_packet_t pkt, uint32_t offset,
return -1;
while (len > 0) {
- mapaddr = packet_map(pkt_hdr, offset, &seglen);
+ mapaddr = packet_map(pkt_hdr, offset, &seglen, NULL);
cpylen = len > seglen ? seglen : len;
memcpy(mapaddr, srcaddr, cpylen);
offset += cpylen;
@@ -885,8 +1699,8 @@ int odp_packet_copy_from_pkt(odp_packet_t dst, uint32_t dst_offset,
}
while (len > 0) {
- dst_map = packet_map(dst_hdr, dst_offset, &dst_seglen);
- src_map = packet_map(src_hdr, src_offset, &src_seglen);
+ dst_map = packet_map(dst_hdr, dst_offset, &dst_seglen, NULL);
+ src_map = packet_map(src_hdr, src_offset, &src_seglen, NULL);
minseg = dst_seglen > src_seglen ? src_seglen : dst_seglen;
cpylen = len > minseg ? minseg : len;
@@ -927,6 +1741,7 @@ int odp_packet_move_data(odp_packet_t pkt, uint32_t dst_offset,
void odp_packet_print(odp_packet_t pkt)
{
+ odp_packet_seg_t seg;
int max_len = 512;
char str[max_len];
int len = 0;
@@ -953,6 +1768,25 @@ void odp_packet_print(odp_packet_t pkt)
len += snprintf(&str[len], n - len,
" input %" PRIu64 "\n",
odp_pktio_to_u64(hdr->input));
+ len += snprintf(&str[len], n - len,
+ " headroom %" PRIu32 "\n",
+ odp_packet_headroom(pkt));
+ len += snprintf(&str[len], n - len,
+ " tailroom %" PRIu32 "\n",
+ odp_packet_tailroom(pkt));
+ len += snprintf(&str[len], n - len,
+ " num_segs %i\n", odp_packet_num_segs(pkt));
+
+ seg = odp_packet_first_seg(pkt);
+
+ while (seg != ODP_PACKET_SEG_INVALID) {
+ len += snprintf(&str[len], n - len,
+ " seg_len %" PRIu32 "\n",
+ odp_packet_seg_data_len(pkt, seg));
+
+ seg = odp_packet_next_seg(pkt, seg);
+ }
+
str[len] = '\0';
ODP_PRINT("\n%s\n", str);
@@ -960,9 +1794,13 @@ void odp_packet_print(odp_packet_t pkt)
int odp_packet_is_valid(odp_packet_t pkt)
{
- odp_buffer_hdr_t *buf = validate_buf((odp_buffer_t)pkt);
+ if (odp_buffer_is_valid((odp_buffer_t)pkt) == 0)
+ return 0;
+
+ if (odp_event_type(odp_packet_to_event(pkt)) != ODP_EVENT_PACKET)
+ return 0;
- return (buf != NULL && buf->type == ODP_EVENT_PACKET);
+ return 1;
}
/*
@@ -1367,8 +2205,8 @@ parse_exit:
*/
int packet_parse_layer(odp_packet_hdr_t *pkt_hdr, layer_t layer)
{
- uint32_t seg_len;
- void *base = packet_map(pkt_hdr, 0, &seg_len);
+ uint32_t seg_len = packet_first_seg_len(pkt_hdr);
+ void *base = packet_data(pkt_hdr);
return packet_parse_common(&pkt_hdr->p, base, pkt_hdr->frame_len,
seg_len, layer);
diff --git a/platform/linux-generic/odp_packet_io.c b/platform/linux-generic/odp_packet_io.c
index 3524ff80a..98460a566 100644
--- a/platform/linux-generic/odp_packet_io.c
+++ b/platform/linux-generic/odp_packet_io.c
@@ -563,14 +563,14 @@ static inline int pktin_recv_buf(odp_pktin_queue_t queue,
pkt = packets[i];
pkt_hdr = odp_packet_hdr(pkt);
buf = _odp_packet_to_buffer(pkt);
- buf_hdr = odp_buf_to_hdr(buf);
+ buf_hdr = buf_hdl_to_hdr(buf);
if (pkt_hdr->p.input_flags.dst_queue) {
queue_entry_t *dst_queue;
int ret;
dst_queue = queue_to_qentry(pkt_hdr->dst_queue);
- ret = queue_enq(dst_queue, buf_hdr, 0);
+ ret = queue_enq(dst_queue, buf_hdr);
if (ret < 0)
odp_packet_free(pkt);
continue;
@@ -619,7 +619,7 @@ int pktout_deq_multi(queue_entry_t *qentry ODP_UNUSED,
}
int pktin_enqueue(queue_entry_t *qentry ODP_UNUSED,
- odp_buffer_hdr_t *buf_hdr ODP_UNUSED, int sustain ODP_UNUSED)
+ odp_buffer_hdr_t *buf_hdr ODP_UNUSED)
{
ODP_ABORT("attempted enqueue to a pktin queue");
return -1;
@@ -641,14 +641,13 @@ odp_buffer_hdr_t *pktin_dequeue(queue_entry_t *qentry)
return NULL;
if (pkts > 1)
- queue_enq_multi(qentry, &hdr_tbl[1], pkts - 1, 0);
+ queue_enq_multi(qentry, &hdr_tbl[1], pkts - 1);
buf_hdr = hdr_tbl[0];
return buf_hdr;
}
int pktin_enq_multi(queue_entry_t *qentry ODP_UNUSED,
- odp_buffer_hdr_t *buf_hdr[] ODP_UNUSED,
- int num ODP_UNUSED, int sustain ODP_UNUSED)
+ odp_buffer_hdr_t *buf_hdr[] ODP_UNUSED, int num ODP_UNUSED)
{
ODP_ABORT("attempted enqueue to a pktin queue");
return 0;
@@ -682,7 +681,7 @@ int pktin_deq_multi(queue_entry_t *qentry, odp_buffer_hdr_t *buf_hdr[], int num)
hdr_tbl[j] = hdr_tbl[i];
if (j)
- queue_enq_multi(qentry, hdr_tbl, j, 0);
+ queue_enq_multi(qentry, hdr_tbl, j);
return nbr;
}
@@ -720,7 +719,7 @@ int sched_cb_pktin_poll(int pktio_index, int num_queue, int index[])
queue = entry->s.in_queue[index[idx]].queue;
qentry = queue_to_qentry(queue);
- queue_enq_multi(qentry, hdr_tbl, num, 0);
+ queue_enq_multi(qentry, hdr_tbl, num);
}
return 0;
@@ -1386,9 +1385,9 @@ int odp_pktout_queue_config(odp_pktio_t pktio,
qentry->s.pktout.pktio = pktio;
/* Override default enqueue / dequeue functions */
- qentry->s.enqueue = queue_pktout_enq;
+ qentry->s.enqueue = pktout_enqueue;
qentry->s.dequeue = pktout_dequeue;
- qentry->s.enqueue_multi = queue_pktout_enq_multi;
+ qentry->s.enqueue_multi = pktout_enq_multi;
qentry->s.dequeue_multi = pktout_deq_multi;
entry->s.out_queue[i].queue = queue;
diff --git a/platform/linux-generic/odp_pool.c b/platform/linux-generic/odp_pool.c
index 415c9fa16..090a55f66 100644
--- a/platform/linux-generic/odp_pool.c
+++ b/platform/linux-generic/odp_pool.c
@@ -4,77 +4,70 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <odp/api/std_types.h>
#include <odp/api/pool.h>
-#include <odp_buffer_internal.h>
-#include <odp_pool_internal.h>
-#include <odp_buffer_inlines.h>
-#include <odp_packet_internal.h>
-#include <odp_timer_internal.h>
-#include <odp_align_internal.h>
#include <odp/api/shared_memory.h>
#include <odp/api/align.h>
+#include <odp/api/ticketlock.h>
+
+#include <odp_pool_internal.h>
#include <odp_internal.h>
+#include <odp_buffer_inlines.h>
+#include <odp_packet_internal.h>
#include <odp_config_internal.h>
-#include <odp/api/hints.h>
-#include <odp/api/thread.h>
#include <odp_debug_internal.h>
+#include <odp_ring_internal.h>
#include <string.h>
-#include <stdlib.h>
+#include <stdio.h>
#include <inttypes.h>
-#if ODP_CONFIG_POOLS > ODP_BUFFER_MAX_POOLS
-#error ODP_CONFIG_POOLS > ODP_BUFFER_MAX_POOLS
-#endif
-
-
-typedef union buffer_type_any_u {
- odp_buffer_hdr_t buf;
- odp_packet_hdr_t pkt;
- odp_timeout_hdr_t tmo;
-} odp_anybuf_t;
+#include <odp/api/plat/ticketlock_inlines.h>
+#define LOCK(a) _odp_ticketlock_lock(a)
+#define UNLOCK(a) _odp_ticketlock_unlock(a)
+#define LOCK_INIT(a) odp_ticketlock_init(a)
-/* Any buffer type header */
-typedef struct {
- union buffer_type_any_u any_hdr; /* any buffer type */
-} odp_any_buffer_hdr_t;
+#define CACHE_BURST 32
+#define RING_SIZE_MIN (2 * CACHE_BURST)
-typedef struct odp_any_hdr_stride {
- uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(odp_any_buffer_hdr_t))];
-} odp_any_hdr_stride;
+/* Define a practical limit for contiguous memory allocations */
+#define MAX_SIZE (10 * 1024 * 1024)
+ODP_STATIC_ASSERT(CONFIG_POOL_CACHE_SIZE > (2 * CACHE_BURST),
+ "cache_burst_size_too_large_compared_to_cache_size");
-typedef struct pool_table_t {
- pool_entry_t pool[ODP_CONFIG_POOLS];
-} pool_table_t;
-
-
-/* The pool table */
-static pool_table_t *pool_tbl;
-static const char SHM_DEFAULT_NAME[] = "odp_buffer_pools";
-
-/* Pool entry pointers (for inlining) */
-void *pool_entry_ptr[ODP_CONFIG_POOLS];
+ODP_STATIC_ASSERT(CONFIG_PACKET_SEG_LEN_MIN >= 256,
+ "ODP Segment size must be a minimum of 256 bytes");
/* Thread local variables */
typedef struct pool_local_t {
- local_cache_t *cache[ODP_CONFIG_POOLS];
+ pool_cache_t *cache[ODP_CONFIG_POOLS];
int thr_id;
} pool_local_t;
+pool_table_t *pool_tbl;
static __thread pool_local_t local;
-static void flush_cache(local_cache_t *buf_cache, struct pool_entry_s *pool);
+static inline odp_pool_t pool_index_to_handle(uint32_t pool_idx)
+{
+ return _odp_cast_scalar(odp_pool_t, pool_idx);
+}
+
+static inline uint32_t pool_id_from_buf(odp_buffer_t buf)
+{
+ odp_buffer_bits_t handle;
+
+ handle.handle = buf;
+ return handle.pool_id;
+}
int odp_pool_init_global(void)
{
uint32_t i;
odp_shm_t shm;
- shm = odp_shm_reserve(SHM_DEFAULT_NAME,
+ shm = odp_shm_reserve("_odp_pool_table",
sizeof(pool_table_t),
- sizeof(pool_entry_t), 0);
+ ODP_CACHE_LINE_SIZE, 0);
pool_tbl = odp_shm_addr(shm);
@@ -82,1069 +75,793 @@ int odp_pool_init_global(void)
return -1;
memset(pool_tbl, 0, sizeof(pool_table_t));
+ pool_tbl->shm = shm;
for (i = 0; i < ODP_CONFIG_POOLS; i++) {
- /* init locks */
- pool_entry_t *pool = &pool_tbl->pool[i];
- POOL_LOCK_INIT(&pool->s.lock);
- POOL_LOCK_INIT(&pool->s.buf_lock);
- POOL_LOCK_INIT(&pool->s.blk_lock);
- pool->s.pool_hdl = pool_index_to_handle(i);
- pool->s.pool_id = i;
- pool_entry_ptr[i] = pool;
- odp_atomic_init_u32(&pool->s.bufcount, 0);
- odp_atomic_init_u32(&pool->s.blkcount, 0);
-
- /* Initialize pool statistics counters */
- odp_atomic_init_u64(&pool->s.poolstats.bufallocs, 0);
- odp_atomic_init_u64(&pool->s.poolstats.buffrees, 0);
- odp_atomic_init_u64(&pool->s.poolstats.blkallocs, 0);
- odp_atomic_init_u64(&pool->s.poolstats.blkfrees, 0);
- odp_atomic_init_u64(&pool->s.poolstats.bufempty, 0);
- odp_atomic_init_u64(&pool->s.poolstats.blkempty, 0);
- odp_atomic_init_u64(&pool->s.poolstats.buf_high_wm_count, 0);
- odp_atomic_init_u64(&pool->s.poolstats.buf_low_wm_count, 0);
- odp_atomic_init_u64(&pool->s.poolstats.blk_high_wm_count, 0);
- odp_atomic_init_u64(&pool->s.poolstats.blk_low_wm_count, 0);
+ pool_t *pool = pool_entry(i);
+
+ LOCK_INIT(&pool->lock);
+ pool->pool_hdl = pool_index_to_handle(i);
+ pool->pool_idx = i;
}
ODP_DBG("\nPool init global\n");
- ODP_DBG(" pool_entry_s size %zu\n", sizeof(struct pool_entry_s));
- ODP_DBG(" pool_entry_t size %zu\n", sizeof(pool_entry_t));
ODP_DBG(" odp_buffer_hdr_t size %zu\n", sizeof(odp_buffer_hdr_t));
+ ODP_DBG(" odp_packet_hdr_t size %zu\n", sizeof(odp_packet_hdr_t));
ODP_DBG("\n");
return 0;
}
-int odp_pool_init_local(void)
-{
- pool_entry_t *pool;
- int i;
- int thr_id = odp_thread_id();
-
- memset(&local, 0, sizeof(pool_local_t));
-
- for (i = 0; i < ODP_CONFIG_POOLS; i++) {
- pool = get_pool_entry(i);
- local.cache[i] = &pool->s.local_cache[thr_id];
- local.cache[i]->s.num_buf = 0;
- }
-
- local.thr_id = thr_id;
- return 0;
-}
-
int odp_pool_term_global(void)
{
int i;
- pool_entry_t *pool;
+ pool_t *pool;
int ret = 0;
int rc = 0;
for (i = 0; i < ODP_CONFIG_POOLS; i++) {
- pool = get_pool_entry(i);
+ pool = pool_entry(i);
- POOL_LOCK(&pool->s.lock);
- if (pool->s.pool_shm != ODP_SHM_INVALID) {
- ODP_ERR("Not destroyed pool: %s\n", pool->s.name);
+ LOCK(&pool->lock);
+ if (pool->reserved) {
+ ODP_ERR("Not destroyed pool: %s\n", pool->name);
rc = -1;
}
- POOL_UNLOCK(&pool->s.lock);
+ UNLOCK(&pool->lock);
}
- ret = odp_shm_free(odp_shm_lookup(SHM_DEFAULT_NAME));
+ ret = odp_shm_free(pool_tbl->shm);
if (ret < 0) {
- ODP_ERR("shm free failed for %s", SHM_DEFAULT_NAME);
+ ODP_ERR("shm free failed");
rc = -1;
}
return rc;
}
-int odp_pool_term_local(void)
+int odp_pool_init_local(void)
{
+ pool_t *pool;
int i;
+ int thr_id = odp_thread_id();
- for (i = 0; i < ODP_CONFIG_POOLS; i++) {
- pool_entry_t *pool = get_pool_entry(i);
+ memset(&local, 0, sizeof(pool_local_t));
- flush_cache(local.cache[i], &pool->s);
+ for (i = 0; i < ODP_CONFIG_POOLS; i++) {
+ pool = pool_entry(i);
+ local.cache[i] = &pool->local_cache[thr_id];
+ local.cache[i]->num = 0;
}
+ local.thr_id = thr_id;
return 0;
}
-int odp_pool_capability(odp_pool_capability_t *capa)
+static void flush_cache(pool_cache_t *cache, pool_t *pool)
{
- memset(capa, 0, sizeof(odp_pool_capability_t));
+ ring_t *ring;
+ uint32_t mask;
+ uint32_t cache_num, i, data;
- capa->max_pools = ODP_CONFIG_POOLS;
+ ring = &pool->ring->hdr;
+ mask = pool->ring_mask;
+ cache_num = cache->num;
- /* Buffer pools */
- capa->buf.max_pools = ODP_CONFIG_POOLS;
- capa->buf.max_align = ODP_CONFIG_BUFFER_ALIGN_MAX;
- capa->buf.max_size = 0;
- capa->buf.max_num = 0;
+ for (i = 0; i < cache_num; i++) {
+ data = (uint32_t)(uintptr_t)cache->buf[i];
+ ring_enq(ring, mask, data);
+ }
- /* Packet pools */
- capa->pkt.max_pools = ODP_CONFIG_POOLS;
- capa->pkt.max_len = ODP_CONFIG_PACKET_MAX_SEGS *
- ODP_CONFIG_PACKET_SEG_LEN_MIN;
- capa->pkt.max_num = 0;
- capa->pkt.min_headroom = ODP_CONFIG_PACKET_HEADROOM;
- capa->pkt.min_tailroom = ODP_CONFIG_PACKET_TAILROOM;
- capa->pkt.max_segs_per_pkt = ODP_CONFIG_PACKET_MAX_SEGS;
- capa->pkt.min_seg_len = ODP_CONFIG_PACKET_SEG_LEN_MIN;
- capa->pkt.max_seg_len = ODP_CONFIG_PACKET_SEG_LEN_MAX;
- capa->pkt.max_uarea_size = 0;
+ cache->num = 0;
+}
- /* Timeout pools */
- capa->tmo.max_pools = ODP_CONFIG_POOLS;
- capa->tmo.max_num = 0;
+int odp_pool_term_local(void)
+{
+ int i;
+
+ for (i = 0; i < ODP_CONFIG_POOLS; i++) {
+ pool_t *pool = pool_entry(i);
+
+ flush_cache(local.cache[i], pool);
+ }
return 0;
}
-static inline odp_buffer_hdr_t *get_buf(struct pool_entry_s *pool)
+static pool_t *reserve_pool(void)
{
- odp_buffer_hdr_t *myhead;
+ int i;
+ pool_t *pool;
+ char ring_name[ODP_POOL_NAME_LEN];
- POOL_LOCK(&pool->buf_lock);
+ for (i = 0; i < ODP_CONFIG_POOLS; i++) {
+ pool = pool_entry(i);
+
+ LOCK(&pool->lock);
+ if (pool->reserved == 0) {
+ pool->reserved = 1;
+ UNLOCK(&pool->lock);
+ sprintf(ring_name, "pool_ring_%d", i);
+ pool->ring_shm =
+ odp_shm_reserve(ring_name,
+ sizeof(pool_ring_t),
+ ODP_CACHE_LINE_SIZE, 0);
+ if (odp_unlikely(pool->ring_shm == ODP_SHM_INVALID)) {
+ ODP_ERR("Unable to alloc pool ring %d\n", i);
+ LOCK(&pool->lock);
+ pool->reserved = 0;
+ UNLOCK(&pool->lock);
+ break;
+ }
+ pool->ring = odp_shm_addr(pool->ring_shm);
+ return pool;
+ }
+ UNLOCK(&pool->lock);
+ }
- myhead = pool->buf_freelist;
+ return NULL;
+}
- if (odp_unlikely(myhead == NULL)) {
- POOL_UNLOCK(&pool->buf_lock);
- odp_atomic_inc_u64(&pool->poolstats.bufempty);
- } else {
- pool->buf_freelist = myhead->next;
- POOL_UNLOCK(&pool->buf_lock);
+static odp_buffer_t form_buffer_handle(uint32_t pool_idx, uint32_t buffer_idx)
+{
+ odp_buffer_bits_t bits;
- odp_atomic_fetch_sub_u32(&pool->bufcount, 1);
- odp_atomic_inc_u64(&pool->poolstats.bufallocs);
- }
+ bits.handle = 0;
+ bits.pool_id = pool_idx;
+ bits.index = buffer_idx;
- return (void *)myhead;
+ return bits.handle;
}
-static inline void ret_buf(struct pool_entry_s *pool, odp_buffer_hdr_t *buf)
+static void init_buffers(pool_t *pool)
{
- if (!buf->flags.hdrdata && buf->type != ODP_EVENT_BUFFER) {
- while (buf->segcount > 0) {
- if (buffer_is_secure(buf) || pool_is_secure(pool))
- memset(buf->addr[buf->segcount - 1],
- 0, buf->segsize);
- ret_blk(pool, buf->addr[--buf->segcount]);
- }
- buf->size = 0;
+ uint32_t i;
+ odp_buffer_hdr_t *buf_hdr;
+ odp_packet_hdr_t *pkt_hdr;
+ odp_buffer_t buf_hdl;
+ void *addr;
+ void *uarea = NULL;
+ uint8_t *data;
+ uint32_t offset;
+ ring_t *ring;
+ uint32_t mask;
+ int type;
+ uint32_t seg_size;
+
+ ring = &pool->ring->hdr;
+ mask = pool->ring_mask;
+ type = pool->params.type;
+
+ for (i = 0; i < pool->num; i++) {
+ addr = &pool->base_addr[i * pool->block_size];
+ buf_hdr = addr;
+ pkt_hdr = addr;
+
+ if (pool->uarea_size)
+ uarea = &pool->uarea_base_addr[i * pool->uarea_size];
+
+ data = buf_hdr->data;
+
+ if (type == ODP_POOL_PACKET)
+ data = pkt_hdr->data;
+
+ offset = pool->headroom;
+
+ /* move to correct align */
+ while (((uintptr_t)&data[offset]) % pool->align != 0)
+ offset++;
+
+ memset(buf_hdr, 0, (uintptr_t)data - (uintptr_t)buf_hdr);
+
+ seg_size = pool->headroom + pool->data_size + pool->tailroom;
+
+ /* Initialize buffer metadata */
+ buf_hdr->size = seg_size;
+ buf_hdr->type = type;
+ buf_hdr->event_type = type;
+ buf_hdr->pool_hdl = pool->pool_hdl;
+ buf_hdr->uarea_addr = uarea;
+ /* Show user requested size through API */
+ buf_hdr->uarea_size = pool->params.pkt.uarea_size;
+ buf_hdr->segcount = 1;
+
+ /* Pointer to data start (of the first segment) */
+ buf_hdr->seg[0].hdr = buf_hdr;
+ buf_hdr->seg[0].data = &data[offset];
+ buf_hdr->seg[0].len = pool->data_size;
+
+ /* Store base values for fast init */
+ buf_hdr->base_data = buf_hdr->seg[0].data;
+ buf_hdr->buf_end = &data[offset + pool->data_size +
+ pool->tailroom];
+
+ buf_hdl = form_buffer_handle(pool->pool_idx, i);
+ buf_hdr->handle.handle = buf_hdl;
+
+ /* Store buffer into the global pool */
+ ring_enq(ring, mask, (uint32_t)(uintptr_t)buf_hdl);
}
-
- buf->allocator = ODP_FREEBUF; /* Mark buffer free */
- POOL_LOCK(&pool->buf_lock);
- buf->next = pool->buf_freelist;
- pool->buf_freelist = buf;
- POOL_UNLOCK(&pool->buf_lock);
-
- odp_atomic_fetch_add_u32(&pool->bufcount, 1);
- odp_atomic_inc_u64(&pool->poolstats.buffrees);
}
-/*
- * Pool creation
- */
-odp_pool_t _pool_create(const char *name,
- odp_pool_param_t *params,
- uint32_t shmflags)
+static odp_pool_t pool_create(const char *name, odp_pool_param_t *params,
+ uint32_t shmflags)
{
- odp_pool_t pool_hdl = ODP_POOL_INVALID;
- pool_entry_t *pool;
- uint32_t i, headroom = 0, tailroom = 0;
+ pool_t *pool;
+ uint32_t uarea_size, headroom, tailroom;
odp_shm_t shm;
-
- if (params == NULL)
+ uint32_t data_size, align, num, hdr_size, block_size;
+ uint32_t max_len, max_seg_len;
+ uint32_t ring_size;
+ int name_len;
+ const char *postfix = "_uarea";
+ char uarea_name[ODP_POOL_NAME_LEN + sizeof(postfix)];
+
+ if (params == NULL) {
+ ODP_ERR("No params");
return ODP_POOL_INVALID;
-
- /* Default size and align for timeouts */
- if (params->type == ODP_POOL_TIMEOUT) {
- params->buf.size = 0; /* tmo.__res1 */
- params->buf.align = 0; /* tmo.__res2 */
}
- /* Default initialization parameters */
- uint32_t p_udata_size = 0;
- uint32_t udata_stride = 0;
+ align = 0;
- /* Restriction for v1.0: All non-packet buffers are unsegmented */
- int unseg = 1;
+ if (params->type == ODP_POOL_BUFFER)
+ align = params->buf.align;
- uint32_t blk_size, buf_stride, buf_num, blk_num, seg_len = 0;
- uint32_t buf_align =
- params->type == ODP_POOL_BUFFER ? params->buf.align : 0;
+ if (align < ODP_CONFIG_BUFFER_ALIGN_MIN)
+ align = ODP_CONFIG_BUFFER_ALIGN_MIN;
/* Validate requested buffer alignment */
- if (buf_align > ODP_CONFIG_BUFFER_ALIGN_MAX ||
- buf_align != ODP_ALIGN_ROUNDDOWN_POWER_2(buf_align, buf_align))
+ if (align > ODP_CONFIG_BUFFER_ALIGN_MAX ||
+ align != ODP_ALIGN_ROUNDDOWN_POWER_2(align, align)) {
+ ODP_ERR("Bad align requirement");
return ODP_POOL_INVALID;
+ }
- /* Set correct alignment based on input request */
- if (buf_align == 0)
- buf_align = ODP_CACHE_LINE_SIZE;
- else if (buf_align < ODP_CONFIG_BUFFER_ALIGN_MIN)
- buf_align = ODP_CONFIG_BUFFER_ALIGN_MIN;
+ headroom = 0;
+ tailroom = 0;
+ data_size = 0;
+ max_len = 0;
+ max_seg_len = 0;
+ uarea_size = 0;
- /* Calculate space needed for buffer blocks and metadata */
switch (params->type) {
case ODP_POOL_BUFFER:
- buf_num = params->buf.num;
- blk_size = params->buf.size;
-
- /* Optimize small raw buffers */
- if (blk_size > ODP_MAX_INLINE_BUF || params->buf.align != 0)
- blk_size = ODP_ALIGN_ROUNDUP(blk_size, buf_align);
-
- buf_stride = sizeof(odp_buffer_hdr_stride);
+ num = params->buf.num;
+ data_size = params->buf.size;
break;
case ODP_POOL_PACKET:
- unseg = 0; /* Packets are always segmented */
- headroom = ODP_CONFIG_PACKET_HEADROOM;
- tailroom = ODP_CONFIG_PACKET_TAILROOM;
- buf_num = params->pkt.num;
-
- seg_len = params->pkt.seg_len <= ODP_CONFIG_PACKET_SEG_LEN_MIN ?
- ODP_CONFIG_PACKET_SEG_LEN_MIN :
- (params->pkt.seg_len <= ODP_CONFIG_PACKET_SEG_LEN_MAX ?
- params->pkt.seg_len : ODP_CONFIG_PACKET_SEG_LEN_MAX);
-
- seg_len = ODP_ALIGN_ROUNDUP(
- headroom + seg_len + tailroom,
- ODP_CONFIG_BUFFER_ALIGN_MIN);
-
- blk_size = params->pkt.len <= seg_len ? seg_len :
- ODP_ALIGN_ROUNDUP(params->pkt.len, seg_len);
-
- /* Reject create if pkt.len needs too many segments */
- if (blk_size / seg_len > ODP_BUFFER_MAX_SEG) {
- ODP_ERR("ODP_BUFFER_MAX_SEG exceed %d(%d)\n",
- blk_size / seg_len, ODP_BUFFER_MAX_SEG);
- return ODP_POOL_INVALID;
- }
-
- p_udata_size = params->pkt.uarea_size;
- udata_stride = ODP_ALIGN_ROUNDUP(p_udata_size,
- sizeof(uint64_t));
-
- buf_stride = sizeof(odp_packet_hdr_stride);
+ headroom = CONFIG_PACKET_HEADROOM;
+ tailroom = CONFIG_PACKET_TAILROOM;
+ num = params->pkt.num;
+ uarea_size = params->pkt.uarea_size;
+ data_size = CONFIG_PACKET_MAX_SEG_LEN;
+ max_seg_len = CONFIG_PACKET_MAX_SEG_LEN;
+ max_len = CONFIG_PACKET_MAX_SEGS * max_seg_len;
break;
case ODP_POOL_TIMEOUT:
- blk_size = 0;
- buf_num = params->tmo.num;
- buf_stride = sizeof(odp_timeout_hdr_stride);
+ num = params->tmo.num;
break;
default:
+ ODP_ERR("Bad pool type");
return ODP_POOL_INVALID;
}
- /* Validate requested number of buffers against addressable limits */
- if (buf_num >
- (ODP_BUFFER_MAX_BUFFERS / (buf_stride / ODP_CACHE_LINE_SIZE))) {
- ODP_ERR("buf_num %d > then expected %d\n",
- buf_num, ODP_BUFFER_MAX_BUFFERS /
- (buf_stride / ODP_CACHE_LINE_SIZE));
+ if (uarea_size)
+ uarea_size = ODP_CACHE_LINE_SIZE_ROUNDUP(uarea_size);
+
+ pool = reserve_pool();
+
+ if (pool == NULL) {
+ ODP_ERR("No more free pools");
return ODP_POOL_INVALID;
}
- /* Find an unused buffer pool slot and iniitalize it as requested */
- for (i = 0; i < ODP_CONFIG_POOLS; i++) {
- pool = get_pool_entry(i);
+ if (name == NULL) {
+ pool->name[0] = 0;
+ } else {
+ strncpy(pool->name, name,
+ ODP_POOL_NAME_LEN - 1);
+ pool->name[ODP_POOL_NAME_LEN - 1] = 0;
+ }
- POOL_LOCK(&pool->s.lock);
- if (pool->s.pool_shm != ODP_SHM_INVALID) {
- POOL_UNLOCK(&pool->s.lock);
- continue;
- }
+ name_len = strlen(pool->name);
+ memcpy(uarea_name, pool->name, name_len);
+ strcpy(&uarea_name[name_len], postfix);
- /* found free pool */
- size_t block_size, pad_size, mdata_size, udata_size;
+ pool->params = *params;
- pool->s.flags.all = 0;
+ hdr_size = sizeof(odp_packet_hdr_t);
+ hdr_size = ODP_CACHE_LINE_SIZE_ROUNDUP(hdr_size);
- if (name == NULL) {
- pool->s.name[0] = 0;
- } else {
- strncpy(pool->s.name, name,
- ODP_POOL_NAME_LEN - 1);
- pool->s.name[ODP_POOL_NAME_LEN - 1] = 0;
- pool->s.flags.has_name = 1;
- }
+ block_size = ODP_CACHE_LINE_SIZE_ROUNDUP(hdr_size + align + headroom +
+ data_size + tailroom);
- pool->s.params = *params;
- pool->s.buf_align = buf_align;
+ if (num <= RING_SIZE_MIN)
+ ring_size = RING_SIZE_MIN;
+ else
+ ring_size = ODP_ROUNDUP_POWER_2(num);
- /* Optimize for short buffers: Data stored in buffer hdr */
- if (blk_size <= ODP_MAX_INLINE_BUF) {
- block_size = 0;
- pool->s.buf_align = blk_size == 0 ? 0 : sizeof(void *);
- } else {
- block_size = buf_num * blk_size;
- pool->s.buf_align = buf_align;
- }
+ pool->ring_mask = ring_size - 1;
+ pool->num = num;
+ pool->align = align;
+ pool->headroom = headroom;
+ pool->data_size = data_size;
+ pool->max_len = max_len;
+ pool->max_seg_len = max_seg_len;
+ pool->tailroom = tailroom;
+ pool->block_size = block_size;
+ pool->uarea_size = uarea_size;
+ pool->shm_size = num * block_size;
+ pool->uarea_shm_size = num * uarea_size;
- pad_size = ODP_CACHE_LINE_SIZE_ROUNDUP(block_size) - block_size;
- mdata_size = buf_num * buf_stride;
- udata_size = buf_num * udata_stride;
+ shm = odp_shm_reserve(pool->name, pool->shm_size,
+ ODP_PAGE_SIZE, shmflags);
- pool->s.buf_num = buf_num;
- pool->s.pool_size = ODP_PAGE_SIZE_ROUNDUP(block_size +
- pad_size +
- mdata_size +
- udata_size);
+ pool->shm = shm;
+
+ if (shm == ODP_SHM_INVALID) {
+ ODP_ERR("Shm reserve failed");
+ goto error;
+ }
- shm = odp_shm_reserve(pool->s.name,
- pool->s.pool_size,
+ pool->base_addr = odp_shm_addr(pool->shm);
+
+ pool->uarea_shm = ODP_SHM_INVALID;
+ if (uarea_size) {
+ shm = odp_shm_reserve(uarea_name, pool->uarea_shm_size,
ODP_PAGE_SIZE, shmflags);
+
+ pool->uarea_shm = shm;
+
if (shm == ODP_SHM_INVALID) {
- POOL_UNLOCK(&pool->s.lock);
- return ODP_POOL_INVALID;
+ ODP_ERR("Shm reserve failed (uarea)");
+ goto error;
}
- pool->s.pool_base_addr = odp_shm_addr(shm);
- pool->s.pool_shm = shm;
-
- /* Now safe to unlock since pool entry has been allocated */
- POOL_UNLOCK(&pool->s.lock);
-
- pool->s.flags.unsegmented = unseg;
- pool->s.seg_size = unseg ? blk_size : seg_len;
- pool->s.blk_size = blk_size;
-
- uint8_t *block_base_addr = pool->s.pool_base_addr;
- uint8_t *mdata_base_addr =
- block_base_addr + block_size + pad_size;
- uint8_t *udata_base_addr = mdata_base_addr + mdata_size;
-
- /* Pool mdata addr is used for indexing buffer metadata */
- pool->s.pool_mdata_addr = mdata_base_addr;
- pool->s.udata_size = p_udata_size;
-
- pool->s.buf_stride = buf_stride;
- pool->s.buf_freelist = NULL;
- pool->s.blk_freelist = NULL;
-
- /* Initialization will increment these to their target vals */
- odp_atomic_store_u32(&pool->s.bufcount, 0);
- odp_atomic_store_u32(&pool->s.blkcount, 0);
-
- uint8_t *buf = udata_base_addr - buf_stride;
- uint8_t *udat = udata_stride == 0 ? NULL :
- udata_base_addr + udata_size - udata_stride;
-
- /* Init buffer common header and add to pool buffer freelist */
- do {
- odp_buffer_hdr_t *tmp =
- (odp_buffer_hdr_t *)(void *)buf;
-
- /* Iniitalize buffer metadata */
- tmp->allocator = ODP_FREEBUF;
- tmp->flags.all = 0;
- tmp->size = 0;
- tmp->type = params->type;
- tmp->event_type = params->type;
- tmp->pool_hdl = pool->s.pool_hdl;
- tmp->uarea_addr = (void *)udat;
- tmp->uarea_size = p_udata_size;
- tmp->segcount = 0;
- tmp->segsize = pool->s.seg_size;
- tmp->handle.handle = odp_buffer_encode_handle(tmp);
-
- /* Set 1st seg addr for zero-len buffers */
- tmp->addr[0] = NULL;
-
- /* Special case for short buffer data */
- if (blk_size <= ODP_MAX_INLINE_BUF) {
- tmp->flags.hdrdata = 1;
- if (blk_size > 0) {
- tmp->segcount = 1;
- tmp->addr[0] = &tmp->addr[1];
- tmp->size = blk_size;
- }
- }
- /* Push buffer onto pool's freelist */
- ret_buf(&pool->s, tmp);
- buf -= buf_stride;
- udat -= udata_stride;
- } while (buf >= mdata_base_addr);
-
- /* Form block freelist for pool */
- uint8_t *blk =
- block_base_addr + block_size - pool->s.seg_size;
-
- if (blk_size > ODP_MAX_INLINE_BUF)
- do {
- ret_blk(&pool->s, blk);
- blk -= pool->s.seg_size;
- } while (blk >= block_base_addr);
-
- blk_num = odp_atomic_load_u32(&pool->s.blkcount);
-
- /* Initialize pool statistics counters */
- odp_atomic_store_u64(&pool->s.poolstats.bufallocs, 0);
- odp_atomic_store_u64(&pool->s.poolstats.buffrees, 0);
- odp_atomic_store_u64(&pool->s.poolstats.blkallocs, 0);
- odp_atomic_store_u64(&pool->s.poolstats.blkfrees, 0);
- odp_atomic_store_u64(&pool->s.poolstats.bufempty, 0);
- odp_atomic_store_u64(&pool->s.poolstats.blkempty, 0);
- odp_atomic_store_u64(&pool->s.poolstats.buf_high_wm_count, 0);
- odp_atomic_store_u64(&pool->s.poolstats.buf_low_wm_count, 0);
- odp_atomic_store_u64(&pool->s.poolstats.blk_high_wm_count, 0);
- odp_atomic_store_u64(&pool->s.poolstats.blk_low_wm_count, 0);
-
- /* Reset other pool globals to initial state */
- pool->s.buf_low_wm_assert = 0;
- pool->s.blk_low_wm_assert = 0;
- pool->s.quiesced = 0;
- pool->s.headroom = headroom;
- pool->s.tailroom = tailroom;
-
- /* Watermarks are hard-coded for now to control caching */
- pool->s.buf_high_wm = buf_num / 2;
- pool->s.buf_low_wm = buf_num / 4;
- pool->s.blk_high_wm = blk_num / 2;
- pool->s.blk_low_wm = blk_num / 4;
-
- pool_hdl = pool->s.pool_hdl;
- break;
+ pool->uarea_base_addr = odp_shm_addr(pool->uarea_shm);
}
- return pool_hdl;
-}
+ ring_init(&pool->ring->hdr);
+ init_buffers(pool);
-odp_pool_t odp_pool_create(const char *name,
- odp_pool_param_t *params)
-{
-#ifdef _ODP_PKTIO_IPC
- if (params && (params->type == ODP_POOL_PACKET))
- return _pool_create(name, params, ODP_SHM_PROC);
-#endif
- return _pool_create(name, params, 0);
+ return pool->pool_hdl;
+
+error:
+ if (pool->shm != ODP_SHM_INVALID)
+ odp_shm_free(pool->shm);
+
+ if (pool->uarea_shm != ODP_SHM_INVALID)
+ odp_shm_free(pool->uarea_shm);
+ LOCK(&pool->lock);
+ pool->reserved = 0;
+ UNLOCK(&pool->lock);
+ return ODP_POOL_INVALID;
}
-odp_pool_t odp_pool_lookup(const char *name)
+static int check_params(odp_pool_param_t *params)
{
- uint32_t i;
- pool_entry_t *pool;
+ odp_pool_capability_t capa;
- for (i = 0; i < ODP_CONFIG_POOLS; i++) {
- pool = get_pool_entry(i);
+ odp_pool_capability(&capa);
- POOL_LOCK(&pool->s.lock);
- if (strcmp(name, pool->s.name) == 0) {
- /* found it */
- POOL_UNLOCK(&pool->s.lock);
- return pool->s.pool_hdl;
+ switch (params->type) {
+ case ODP_POOL_BUFFER:
+ if (params->buf.num > capa.buf.max_num) {
+ printf("buf.num too large %u\n", params->buf.num);
+ return -1;
}
- POOL_UNLOCK(&pool->s.lock);
- }
- return ODP_POOL_INVALID;
-}
+ if (params->buf.size > capa.buf.max_size) {
+ printf("buf.size too large %u\n", params->buf.size);
+ return -1;
+ }
-int odp_pool_info(odp_pool_t pool_hdl, odp_pool_info_t *info)
-{
- uint32_t pool_id = pool_handle_to_index(pool_hdl);
- pool_entry_t *pool = get_pool_entry(pool_id);
+ if (params->buf.align > capa.buf.max_align) {
+ printf("buf.align too large %u\n", params->buf.align);
+ return -1;
+ }
- if (pool == NULL || info == NULL)
- return -1;
+ break;
- info->name = pool->s.name;
- info->params = pool->s.params;
+ case ODP_POOL_PACKET:
+ if (params->pkt.len > capa.pkt.max_len) {
+ printf("pkt.len too large %u\n", params->pkt.len);
+ return -1;
+ }
- return 0;
-}
+ if (params->pkt.max_len > capa.pkt.max_len) {
+ printf("pkt.max_len too large %u\n",
+ params->pkt.max_len);
+ return -1;
+ }
-static inline void get_local_cache_bufs(local_cache_t *buf_cache, uint32_t idx,
- odp_buffer_hdr_t *buf_hdr[],
- uint32_t num)
-{
- uint32_t i;
+ if (params->pkt.seg_len > capa.pkt.max_seg_len) {
+ printf("pkt.seg_len too large %u\n",
+ params->pkt.seg_len);
+ return -1;
+ }
+
+ if (params->pkt.uarea_size > capa.pkt.max_uarea_size) {
+ printf("pkt.uarea_size too large %u\n",
+ params->pkt.uarea_size);
+ return -1;
+ }
- for (i = 0; i < num; i++) {
- buf_hdr[i] = buf_cache->s.buf[idx + i];
- odp_prefetch(buf_hdr[i]);
- odp_prefetch_store(buf_hdr[i]);
+ break;
+
+ case ODP_POOL_TIMEOUT:
+ if (params->tmo.num > capa.tmo.max_num) {
+ printf("tmo.num too large %u\n", params->tmo.num);
+ return -1;
+ }
+ break;
+
+ default:
+ printf("bad pool type %i\n", params->type);
+ return -1;
}
+
+ return 0;
}
-static void flush_cache(local_cache_t *buf_cache, struct pool_entry_s *pool)
+odp_pool_t odp_pool_create(const char *name, odp_pool_param_t *params)
{
- uint32_t flush_count = 0;
- uint32_t num;
-
- while ((num = buf_cache->s.num_buf)) {
- odp_buffer_hdr_t *buf;
+ uint32_t shm_flags = 0;
- buf = buf_cache->s.buf[num - 1];
- ret_buf(pool, buf);
- flush_count++;
- buf_cache->s.num_buf--;
- }
+ if (check_params(params))
+ return ODP_POOL_INVALID;
- odp_atomic_add_u64(&pool->poolstats.bufallocs, buf_cache->s.bufallocs);
- odp_atomic_add_u64(&pool->poolstats.buffrees,
- buf_cache->s.buffrees - flush_count);
+#ifdef _ODP_PKTIO_IPC
+ if (params && (params->type == ODP_POOL_PACKET))
+ shm_flags = ODP_SHM_PROC;
+#endif
- buf_cache->s.bufallocs = 0;
- buf_cache->s.buffrees = 0;
+ return pool_create(name, params, shm_flags);
}
int odp_pool_destroy(odp_pool_t pool_hdl)
{
- uint32_t pool_id = pool_handle_to_index(pool_hdl);
- pool_entry_t *pool = get_pool_entry(pool_id);
+ pool_t *pool = pool_entry_from_hdl(pool_hdl);
int i;
if (pool == NULL)
return -1;
- POOL_LOCK(&pool->s.lock);
+ LOCK(&pool->lock);
- /* Call fails if pool is not allocated or predefined*/
- if (pool->s.pool_shm == ODP_SHM_INVALID ||
- pool->s.flags.predefined) {
- POOL_UNLOCK(&pool->s.lock);
- ODP_ERR("invalid shm for pool %s\n", pool->s.name);
+ if (pool->reserved == 0) {
+ UNLOCK(&pool->lock);
+ ODP_ERR("Pool not created\n");
return -1;
}
/* Make sure local caches are empty */
for (i = 0; i < ODP_THREAD_COUNT_MAX; i++)
- flush_cache(&pool->s.local_cache[i], &pool->s);
-
- /* Call fails if pool has allocated buffers */
- if (odp_atomic_load_u32(&pool->s.bufcount) < pool->s.buf_num) {
- POOL_UNLOCK(&pool->s.lock);
- ODP_DBG("error: pool has allocated buffers %d/%d\n",
- odp_atomic_load_u32(&pool->s.bufcount),
- pool->s.buf_num);
- return -1;
- }
+ flush_cache(&pool->local_cache[i], pool);
- odp_shm_free(pool->s.pool_shm);
- pool->s.pool_shm = ODP_SHM_INVALID;
- POOL_UNLOCK(&pool->s.lock);
+ odp_shm_free(pool->shm);
- return 0;
-}
-
-int seg_alloc_head(odp_buffer_hdr_t *buf_hdr, int segcount)
-{
- uint32_t pool_id = pool_handle_to_index(buf_hdr->pool_hdl);
- pool_entry_t *pool = get_pool_entry(pool_id);
- void *newsegs[segcount];
- int i;
-
- for (i = 0; i < segcount; i++) {
- newsegs[i] = get_blk(&pool->s);
- if (newsegs[i] == NULL) {
- while (--i >= 0)
- ret_blk(&pool->s, newsegs[i]);
- return -1;
- }
- }
+ if (pool->uarea_shm != ODP_SHM_INVALID)
+ odp_shm_free(pool->uarea_shm);
- for (i = buf_hdr->segcount - 1; i >= 0; i--)
- buf_hdr->addr[i + segcount] = buf_hdr->addr[i];
+ pool->reserved = 0;
+ odp_shm_free(pool->ring_shm);
+ pool->ring = NULL;
+ UNLOCK(&pool->lock);
- for (i = 0; i < segcount; i++)
- buf_hdr->addr[i] = newsegs[i];
-
- buf_hdr->segcount += segcount;
- buf_hdr->size = buf_hdr->segcount * pool->s.seg_size;
return 0;
}
-void seg_free_head(odp_buffer_hdr_t *buf_hdr, int segcount)
+odp_event_type_t _odp_buffer_event_type(odp_buffer_t buf)
{
- uint32_t pool_id = pool_handle_to_index(buf_hdr->pool_hdl);
- pool_entry_t *pool = get_pool_entry(pool_id);
- int s_cnt = buf_hdr->segcount;
- int i;
-
- for (i = 0; i < segcount; i++)
- ret_blk(&pool->s, buf_hdr->addr[i]);
-
- for (i = 0; i < s_cnt - segcount; i++)
- buf_hdr->addr[i] = buf_hdr->addr[i + segcount];
-
- buf_hdr->segcount -= segcount;
- buf_hdr->size = buf_hdr->segcount * pool->s.seg_size;
+ return buf_hdl_to_hdr(buf)->event_type;
}
-int seg_alloc_tail(odp_buffer_hdr_t *buf_hdr, int segcount)
+void _odp_buffer_event_type_set(odp_buffer_t buf, int ev)
{
- uint32_t pool_id = pool_handle_to_index(buf_hdr->pool_hdl);
- pool_entry_t *pool = get_pool_entry(pool_id);
- uint32_t s_cnt = buf_hdr->segcount;
- int i;
-
- for (i = 0; i < segcount; i++) {
- buf_hdr->addr[s_cnt + i] = get_blk(&pool->s);
- if (buf_hdr->addr[s_cnt + i] == NULL) {
- while (--i >= 0)
- ret_blk(&pool->s, buf_hdr->addr[s_cnt + i]);
- return -1;
- }
- }
-
- buf_hdr->segcount += segcount;
- buf_hdr->size = buf_hdr->segcount * pool->s.seg_size;
- return 0;
+ buf_hdl_to_hdr(buf)->event_type = ev;
}
-void seg_free_tail(odp_buffer_hdr_t *buf_hdr, int segcount)
+odp_pool_t odp_pool_lookup(const char *name)
{
- uint32_t pool_id = pool_handle_to_index(buf_hdr->pool_hdl);
- pool_entry_t *pool = get_pool_entry(pool_id);
- int s_cnt = buf_hdr->segcount;
- int i;
+ uint32_t i;
+ pool_t *pool;
+
+ for (i = 0; i < ODP_CONFIG_POOLS; i++) {
+ pool = pool_entry(i);
- for (i = s_cnt - 1; i >= s_cnt - segcount; i--)
- ret_blk(&pool->s, buf_hdr->addr[i]);
+ LOCK(&pool->lock);
+ if (strcmp(name, pool->name) == 0) {
+ /* found it */
+ UNLOCK(&pool->lock);
+ return pool->pool_hdl;
+ }
+ UNLOCK(&pool->lock);
+ }
- buf_hdr->segcount -= segcount;
- buf_hdr->size = buf_hdr->segcount * pool->s.seg_size;
+ return ODP_POOL_INVALID;
}
-static inline int get_local_bufs(local_cache_t *buf_cache,
- odp_buffer_hdr_t *buf_hdr[], uint32_t max_num)
+int odp_pool_info(odp_pool_t pool_hdl, odp_pool_info_t *info)
{
- uint32_t num_buf = buf_cache->s.num_buf;
- uint32_t num = num_buf;
-
- if (odp_unlikely(num_buf == 0))
- return 0;
+ pool_t *pool = pool_entry_from_hdl(pool_hdl);
- if (odp_likely(max_num < num))
- num = max_num;
+ if (pool == NULL || info == NULL)
+ return -1;
- get_local_cache_bufs(buf_cache, num_buf - num, buf_hdr, num);
- buf_cache->s.num_buf -= num;
- buf_cache->s.bufallocs += num;
+ info->name = pool->name;
+ info->params = pool->params;
- return num;
+ return 0;
}
-static inline void ret_local_buf(local_cache_t *buf_cache, uint32_t idx,
- odp_buffer_hdr_t *buf)
+int buffer_alloc_multi(pool_t *pool, odp_buffer_t buf[],
+ odp_buffer_hdr_t *buf_hdr[], int max_num)
{
- buf_cache->s.buf[idx] = buf;
- buf_cache->s.num_buf++;
- buf_cache->s.buffrees++;
-}
+ ring_t *ring;
+ uint32_t mask, i;
+ pool_cache_t *cache;
+ uint32_t cache_num, num_ch, num_deq, burst;
+ odp_buffer_hdr_t *hdr;
-static inline void ret_local_bufs(local_cache_t *buf_cache, uint32_t idx,
- odp_buffer_hdr_t *buf[], int num_buf)
-{
- int i;
+ cache = local.cache[pool->pool_idx];
- for (i = 0; i < num_buf; i++)
- buf_cache->s.buf[idx + i] = buf[i];
+ cache_num = cache->num;
+ num_ch = max_num;
+ num_deq = 0;
+ burst = CACHE_BURST;
- buf_cache->s.num_buf += num_buf;
- buf_cache->s.buffrees += num_buf;
-}
+ if (odp_unlikely(cache_num < (uint32_t)max_num)) {
+ /* Cache does not have enough buffers */
+ num_ch = cache_num;
+ num_deq = max_num - cache_num;
-int buffer_alloc_multi(odp_pool_t pool_hdl, size_t size,
- odp_buffer_t buf[], int max_num)
-{
- uint32_t pool_id = pool_handle_to_index(pool_hdl);
- pool_entry_t *pool = get_pool_entry(pool_id);
- uintmax_t totsize = pool->s.headroom + size + pool->s.tailroom;
- odp_buffer_hdr_t *buf_tbl[max_num];
- odp_buffer_hdr_t *buf_hdr;
- int num, i;
- intmax_t needed;
- void *blk;
-
- /* Reject oversized allocation requests */
- if ((pool->s.flags.unsegmented && totsize > pool->s.seg_size) ||
- (!pool->s.flags.unsegmented &&
- totsize > pool->s.seg_size * ODP_BUFFER_MAX_SEG))
- return 0;
+ if (odp_unlikely(num_deq > CACHE_BURST))
+ burst = num_deq;
+ }
- /* Try to satisfy request from the local cache */
- num = get_local_bufs(local.cache[pool_id], buf_tbl, max_num);
-
- /* If cache is empty, satisfy request from the pool */
- if (odp_unlikely(num < max_num)) {
- for (; num < max_num; num++) {
- buf_hdr = get_buf(&pool->s);
-
- if (odp_unlikely(buf_hdr == NULL))
- goto pool_empty;
-
- /* Get blocks for this buffer, if pool uses
- * application data */
- if (buf_hdr->size < totsize) {
- uint32_t segcount;
-
- needed = totsize - buf_hdr->size;
- do {
- blk = get_blk(&pool->s);
- if (odp_unlikely(blk == NULL)) {
- ret_buf(&pool->s, buf_hdr);
- goto pool_empty;
- }
-
- segcount = buf_hdr->segcount++;
- buf_hdr->addr[segcount] = blk;
- needed -= pool->s.seg_size;
- } while (needed > 0);
- buf_hdr->size = buf_hdr->segcount *
- pool->s.seg_size;
- }
+ /* Get buffers from the cache */
+ for (i = 0; i < num_ch; i++) {
+ buf[i] = cache->buf[cache_num - num_ch + i];
- buf_tbl[num] = buf_hdr;
- }
+ if (odp_likely(buf_hdr != NULL))
+ buf_hdr[i] = pool_buf_hdl_to_hdr(pool, buf[i]);
}
-pool_empty:
- for (i = 0; i < num; i++) {
- buf_hdr = buf_tbl[i];
+ /* If needed, get more from the global pool */
+ if (odp_unlikely(num_deq)) {
+ /* Temporary copy needed since odp_buffer_t is uintptr_t
+ * and not uint32_t. */
+ uint32_t data[burst];
- /* Mark buffer as allocated */
- buf_hdr->allocator = local.thr_id;
+ ring = &pool->ring->hdr;
+ mask = pool->ring_mask;
+ burst = ring_deq_multi(ring, mask, data, burst);
+ cache_num = burst - num_deq;
- /* By default, buffers are not associated with
- * an ordered queue */
- buf_hdr->origin_qe = NULL;
+ if (odp_unlikely(burst < num_deq)) {
+ num_deq = burst;
+ cache_num = 0;
+ }
- buf[i] = odp_hdr_to_buf(buf_hdr);
+ for (i = 0; i < num_deq; i++) {
+ uint32_t idx = num_ch + i;
- /* Add more segments if buffer from local cache is too small */
- if (odp_unlikely(buf_hdr->size < totsize)) {
- needed = totsize - buf_hdr->size;
- do {
- blk = get_blk(&pool->s);
- if (odp_unlikely(blk == NULL)) {
- int j;
+ buf[idx] = (odp_buffer_t)(uintptr_t)data[i];
+ hdr = pool_buf_hdl_to_hdr(pool, buf[idx]);
+ odp_prefetch(hdr);
- ret_buf(&pool->s, buf_hdr);
- buf_hdr = NULL;
- local.cache[pool_id]->s.buffrees--;
+ if (odp_likely(buf_hdr != NULL))
+ buf_hdr[idx] = hdr;
+ }
- /* move remaining bufs up one step
- * and update loop counters */
- num--;
- for (j = i; j < num; j++)
- buf_tbl[j] = buf_tbl[j + 1];
+ /* Cache extra buffers. Cache is currently empty. */
+ for (i = 0; i < cache_num; i++)
+ cache->buf[i] = (odp_buffer_t)
+ (uintptr_t)data[num_deq + i];
- i--;
- break;
- }
- needed -= pool->s.seg_size;
- buf_hdr->addr[buf_hdr->segcount++] = blk;
- buf_hdr->size = buf_hdr->segcount *
- pool->s.seg_size;
- } while (needed > 0);
- }
+ cache->num = cache_num;
+ } else {
+ cache->num = cache_num - num_ch;
}
- return num;
+ return num_ch + num_deq;
}
-odp_buffer_t buffer_alloc(odp_pool_t pool_hdl, size_t size)
+static inline void buffer_free_to_pool(uint32_t pool_id,
+ const odp_buffer_t buf[], int num)
{
- uint32_t pool_id = pool_handle_to_index(pool_hdl);
- pool_entry_t *pool = get_pool_entry(pool_id);
- uintmax_t totsize = pool->s.headroom + size + pool->s.tailroom;
- odp_buffer_hdr_t *buf_hdr;
- intmax_t needed;
- void *blk;
-
- /* Reject oversized allocation requests */
- if ((pool->s.flags.unsegmented && totsize > pool->s.seg_size) ||
- (!pool->s.flags.unsegmented &&
- totsize > pool->s.seg_size * ODP_BUFFER_MAX_SEG))
- return 0;
+ pool_t *pool;
+ int i;
+ ring_t *ring;
+ uint32_t mask;
+ pool_cache_t *cache;
+ uint32_t cache_num;
+
+ cache = local.cache[pool_id];
+ pool = pool_entry(pool_id);
+
+ /* Special case of a very large free. Move directly to
+ * the global pool. */
+ if (odp_unlikely(num > CONFIG_POOL_CACHE_SIZE)) {
+ ring = &pool->ring->hdr;
+ mask = pool->ring_mask;
+ for (i = 0; i < num; i++)
+ ring_enq(ring, mask, (uint32_t)(uintptr_t)buf[i]);
- /* Try to satisfy request from the local cache. If cache is empty,
- * satisfy request from the pool */
- if (odp_unlikely(!get_local_bufs(local.cache[pool_id], &buf_hdr, 1))) {
- buf_hdr = get_buf(&pool->s);
-
- if (odp_unlikely(buf_hdr == NULL))
- return ODP_BUFFER_INVALID;
-
- /* Get blocks for this buffer, if pool uses application data */
- if (buf_hdr->size < totsize) {
- needed = totsize - buf_hdr->size;
- do {
- blk = get_blk(&pool->s);
- if (odp_unlikely(blk == NULL)) {
- ret_buf(&pool->s, buf_hdr);
- return ODP_BUFFER_INVALID;
- }
- buf_hdr->addr[buf_hdr->segcount++] = blk;
- needed -= pool->s.seg_size;
- } while (needed > 0);
- buf_hdr->size = buf_hdr->segcount * pool->s.seg_size;
- }
- }
- /* Mark buffer as allocated */
- buf_hdr->allocator = local.thr_id;
-
- /* By default, buffers are not associated with
- * an ordered queue */
- buf_hdr->origin_qe = NULL;
-
- /* Add more segments if buffer from local cache is too small */
- if (odp_unlikely(buf_hdr->size < totsize)) {
- needed = totsize - buf_hdr->size;
- do {
- blk = get_blk(&pool->s);
- if (odp_unlikely(blk == NULL)) {
- ret_buf(&pool->s, buf_hdr);
- buf_hdr = NULL;
- local.cache[pool_id]->s.buffrees--;
- return ODP_BUFFER_INVALID;
- }
- buf_hdr->addr[buf_hdr->segcount++] = blk;
- needed -= pool->s.seg_size;
- } while (needed > 0);
- buf_hdr->size = buf_hdr->segcount * pool->s.seg_size;
+ return;
}
- return odp_hdr_to_buf(buf_hdr);
-}
+ /* Make room into local cache if needed. Do at least burst size
+ * transfer. */
+ cache_num = cache->num;
-odp_buffer_t odp_buffer_alloc(odp_pool_t pool_hdl)
-{
- return buffer_alloc(pool_hdl,
- odp_pool_to_entry(pool_hdl)->s.params.buf.size);
-}
+ if (odp_unlikely((int)(CONFIG_POOL_CACHE_SIZE - cache_num) < num)) {
+ uint32_t index;
+ int burst = CACHE_BURST;
-int odp_buffer_alloc_multi(odp_pool_t pool_hdl, odp_buffer_t buf[], int num)
-{
- size_t buf_size = odp_pool_to_entry(pool_hdl)->s.params.buf.size;
+ ring = &pool->ring->hdr;
+ mask = pool->ring_mask;
- return buffer_alloc_multi(pool_hdl, buf_size, buf, num);
-}
+ if (odp_unlikely(num > CACHE_BURST))
+ burst = num;
-static void multi_pool_free(odp_buffer_hdr_t *buf_hdr[], int num_buf)
-{
- uint32_t pool_id, num;
- local_cache_t *buf_cache;
- pool_entry_t *pool;
- int i, j, idx;
-
- for (i = 0; i < num_buf; i++) {
- pool_id = pool_handle_to_index(buf_hdr[i]->pool_hdl);
- buf_cache = local.cache[pool_id];
- num = buf_cache->s.num_buf;
-
- if (num < POOL_MAX_LOCAL_BUFS) {
- ret_local_buf(buf_cache, num, buf_hdr[i]);
- continue;
- }
+ {
+ /* Temporary copy needed since odp_buffer_t is
+ * uintptr_t and not uint32_t. */
+ uint32_t data[burst];
- idx = POOL_MAX_LOCAL_BUFS - POOL_CHUNK_SIZE;
- pool = get_pool_entry(pool_id);
+ index = cache_num - burst;
- /* local cache full, return a chunk */
- for (j = 0; j < POOL_CHUNK_SIZE; j++) {
- odp_buffer_hdr_t *tmp;
+ for (i = 0; i < burst; i++)
+ data[i] = (uint32_t)
+ (uintptr_t)cache->buf[index + i];
- tmp = buf_cache->s.buf[idx + i];
- ret_buf(&pool->s, tmp);
+ ring_enq_multi(ring, mask, data, burst);
}
- num = POOL_MAX_LOCAL_BUFS - POOL_CHUNK_SIZE;
- buf_cache->s.num_buf = num;
- ret_local_buf(buf_cache, num, buf_hdr[i]);
+ cache_num -= burst;
}
+
+ for (i = 0; i < num; i++)
+ cache->buf[cache_num + i] = buf[i];
+
+ cache->num = cache_num + num;
}
-void buffer_free_multi(uint32_t pool_id,
- const odp_buffer_t buf[], int num_free)
+void buffer_free_multi(const odp_buffer_t buf[], int num_total)
{
- local_cache_t *buf_cache = local.cache[pool_id];
- uint32_t num;
- int i, idx;
- pool_entry_t *pool;
- odp_buffer_hdr_t *buf_hdr[num_free];
- int multi_pool = 0;
-
- for (i = 0; i < num_free; i++) {
- uint32_t id;
-
- buf_hdr[i] = odp_buf_to_hdr(buf[i]);
- ODP_ASSERT(buf_hdr[i]->allocator != ODP_FREEBUF);
- buf_hdr[i]->allocator = ODP_FREEBUF;
- id = pool_handle_to_index(buf_hdr[i]->pool_hdl);
- multi_pool |= (pool_id != id);
- }
+ uint32_t pool_id;
+ int num;
+ int i;
+ int first = 0;
- if (odp_unlikely(multi_pool)) {
- multi_pool_free(buf_hdr, num_free);
- return;
- }
+ while (1) {
+ num = 1;
+ i = 1;
+ pool_id = pool_id_from_buf(buf[first]);
- num = buf_cache->s.num_buf;
+ /* 'num' buffers are from the same pool */
+ if (num_total > 1) {
+ for (i = first; i < num_total; i++)
+ if (pool_id != pool_id_from_buf(buf[i]))
+ break;
- if (odp_likely((num + num_free) < POOL_MAX_LOCAL_BUFS)) {
- ret_local_bufs(buf_cache, num, buf_hdr, num_free);
- return;
- }
+ num = i - first;
+ }
- pool = get_pool_entry(pool_id);
+ buffer_free_to_pool(pool_id, &buf[first], num);
- /* Return at least one chunk into the global pool */
- if (odp_unlikely(num_free > POOL_CHUNK_SIZE)) {
- for (i = 0; i < num_free; i++)
- ret_buf(&pool->s, buf_hdr[i]);
+ if (i == num_total)
+ return;
- return;
+ first = i;
}
-
- idx = num - POOL_CHUNK_SIZE;
- for (i = 0; i < POOL_CHUNK_SIZE; i++)
- ret_buf(&pool->s, buf_cache->s.buf[idx + i]);
-
- num -= POOL_CHUNK_SIZE;
- buf_cache->s.num_buf = num;
- ret_local_bufs(buf_cache, num, buf_hdr, num_free);
}
-void buffer_free(uint32_t pool_id, const odp_buffer_t buf)
+odp_buffer_t odp_buffer_alloc(odp_pool_t pool_hdl)
{
- local_cache_t *buf_cache = local.cache[pool_id];
- uint32_t num;
- int i;
- pool_entry_t *pool;
- odp_buffer_hdr_t *buf_hdr;
+ odp_buffer_t buf;
+ pool_t *pool;
+ int ret;
- buf_hdr = odp_buf_to_hdr(buf);
- ODP_ASSERT(buf_hdr->allocator != ODP_FREEBUF);
- buf_hdr->allocator = ODP_FREEBUF;
+ pool = pool_entry_from_hdl(pool_hdl);
+ ret = buffer_alloc_multi(pool, &buf, NULL, 1);
- num = buf_cache->s.num_buf;
+ if (odp_likely(ret == 1))
+ return buf;
- if (odp_likely((num + 1) < POOL_MAX_LOCAL_BUFS)) {
- ret_local_bufs(buf_cache, num, &buf_hdr, 1);
- return;
- }
+ return ODP_BUFFER_INVALID;
+}
- pool = get_pool_entry(pool_id);
+int odp_buffer_alloc_multi(odp_pool_t pool_hdl, odp_buffer_t buf[], int num)
+{
+ pool_t *pool;
- num -= POOL_CHUNK_SIZE;
- for (i = 0; i < POOL_CHUNK_SIZE; i++)
- ret_buf(&pool->s, buf_cache->s.buf[num + i]);
+ pool = pool_entry_from_hdl(pool_hdl);
- buf_cache->s.num_buf = num;
- ret_local_bufs(buf_cache, num, &buf_hdr, 1);
+ return buffer_alloc_multi(pool, buf, NULL, num);
}
void odp_buffer_free(odp_buffer_t buf)
{
- uint32_t pool_id = pool_id_from_buf(buf);
-
- buffer_free(pool_id, buf);
+ buffer_free_multi(&buf, 1);
}
void odp_buffer_free_multi(const odp_buffer_t buf[], int num)
{
- uint32_t pool_id = pool_id_from_buf(buf[0]);
+ buffer_free_multi(buf, num);
+}
+
+int odp_pool_capability(odp_pool_capability_t *capa)
+{
+ uint32_t max_seg_len = CONFIG_PACKET_MAX_SEG_LEN;
+
+ memset(capa, 0, sizeof(odp_pool_capability_t));
+
+ capa->max_pools = ODP_CONFIG_POOLS;
+
+ /* Buffer pools */
+ capa->buf.max_pools = ODP_CONFIG_POOLS;
+ capa->buf.max_align = ODP_CONFIG_BUFFER_ALIGN_MAX;
+ capa->buf.max_size = MAX_SIZE;
+ capa->buf.max_num = CONFIG_POOL_MAX_NUM;
- buffer_free_multi(pool_id, buf, num);
+ /* Packet pools */
+ capa->pkt.max_pools = ODP_CONFIG_POOLS;
+ capa->pkt.max_len = CONFIG_PACKET_MAX_SEGS * max_seg_len;
+ capa->pkt.max_num = CONFIG_POOL_MAX_NUM;
+ capa->pkt.min_headroom = CONFIG_PACKET_HEADROOM;
+ capa->pkt.min_tailroom = CONFIG_PACKET_TAILROOM;
+ capa->pkt.max_segs_per_pkt = CONFIG_PACKET_MAX_SEGS;
+ capa->pkt.min_seg_len = max_seg_len;
+ capa->pkt.max_seg_len = max_seg_len;
+ capa->pkt.max_uarea_size = MAX_SIZE;
+
+ /* Timeout pools */
+ capa->tmo.max_pools = ODP_CONFIG_POOLS;
+ capa->tmo.max_num = CONFIG_POOL_MAX_NUM;
+
+ return 0;
}
void odp_pool_print(odp_pool_t pool_hdl)
{
- pool_entry_t *pool;
- uint32_t pool_id;
+ pool_t *pool;
- pool_id = pool_handle_to_index(pool_hdl);
- pool = get_pool_entry(pool_id);
-
- uint32_t bufcount = odp_atomic_load_u32(&pool->s.bufcount);
- uint32_t blkcount = odp_atomic_load_u32(&pool->s.blkcount);
- uint64_t bufallocs = odp_atomic_load_u64(&pool->s.poolstats.bufallocs);
- uint64_t buffrees = odp_atomic_load_u64(&pool->s.poolstats.buffrees);
- uint64_t blkallocs = odp_atomic_load_u64(&pool->s.poolstats.blkallocs);
- uint64_t blkfrees = odp_atomic_load_u64(&pool->s.poolstats.blkfrees);
- uint64_t bufempty = odp_atomic_load_u64(&pool->s.poolstats.bufempty);
- uint64_t blkempty = odp_atomic_load_u64(&pool->s.poolstats.blkempty);
- uint64_t bufhiwmct =
- odp_atomic_load_u64(&pool->s.poolstats.buf_high_wm_count);
- uint64_t buflowmct =
- odp_atomic_load_u64(&pool->s.poolstats.buf_low_wm_count);
- uint64_t blkhiwmct =
- odp_atomic_load_u64(&pool->s.poolstats.blk_high_wm_count);
- uint64_t blklowmct =
- odp_atomic_load_u64(&pool->s.poolstats.blk_low_wm_count);
-
- ODP_DBG("Pool info\n");
- ODP_DBG("---------\n");
- ODP_DBG(" pool %" PRIu64 "\n",
- odp_pool_to_u64(pool->s.pool_hdl));
- ODP_DBG(" name %s\n",
- pool->s.flags.has_name ? pool->s.name : "Unnamed Pool");
- ODP_DBG(" pool type %s\n",
- pool->s.params.type == ODP_POOL_BUFFER ? "buffer" :
- (pool->s.params.type == ODP_POOL_PACKET ? "packet" :
- (pool->s.params.type == ODP_POOL_TIMEOUT ? "timeout" :
+ pool = pool_entry_from_hdl(pool_hdl);
+
+ printf("\nPool info\n");
+ printf("---------\n");
+ printf(" pool %" PRIu64 "\n",
+ odp_pool_to_u64(pool->pool_hdl));
+ printf(" name %s\n", pool->name);
+ printf(" pool type %s\n",
+ pool->params.type == ODP_POOL_BUFFER ? "buffer" :
+ (pool->params.type == ODP_POOL_PACKET ? "packet" :
+ (pool->params.type == ODP_POOL_TIMEOUT ? "timeout" :
"unknown")));
- ODP_DBG(" pool storage ODP managed shm handle %" PRIu64 "\n",
- odp_shm_to_u64(pool->s.pool_shm));
- ODP_DBG(" pool status %s\n",
- pool->s.quiesced ? "quiesced" : "active");
- ODP_DBG(" pool opts %s, %s\n",
- pool->s.flags.unsegmented ? "unsegmented" : "segmented",
- pool->s.flags.predefined ? "predefined" : "created");
- ODP_DBG(" pool base %p\n", pool->s.pool_base_addr);
- ODP_DBG(" pool size %zu (%zu pages)\n",
- pool->s.pool_size, pool->s.pool_size / ODP_PAGE_SIZE);
- ODP_DBG(" pool mdata base %p\n", pool->s.pool_mdata_addr);
- ODP_DBG(" udata size %zu\n", pool->s.udata_size);
- ODP_DBG(" headroom %u\n", pool->s.headroom);
- ODP_DBG(" tailroom %u\n", pool->s.tailroom);
- if (pool->s.params.type == ODP_POOL_BUFFER) {
- ODP_DBG(" buf size %zu\n", pool->s.params.buf.size);
- ODP_DBG(" buf align %u requested, %u used\n",
- pool->s.params.buf.align, pool->s.buf_align);
- } else if (pool->s.params.type == ODP_POOL_PACKET) {
- ODP_DBG(" seg length %u requested, %u used\n",
- pool->s.params.pkt.seg_len, pool->s.seg_size);
- ODP_DBG(" pkt length %u requested, %u used\n",
- pool->s.params.pkt.len, pool->s.blk_size);
- }
- ODP_DBG(" num bufs %u\n", pool->s.buf_num);
- ODP_DBG(" bufs available %u %s\n", bufcount,
- pool->s.buf_low_wm_assert ? " **buf low wm asserted**" : "");
- ODP_DBG(" bufs in use %u\n", pool->s.buf_num - bufcount);
- ODP_DBG(" buf allocs %lu\n", bufallocs);
- ODP_DBG(" buf frees %lu\n", buffrees);
- ODP_DBG(" buf empty %lu\n", bufempty);
- ODP_DBG(" blk size %zu\n",
- pool->s.seg_size > ODP_MAX_INLINE_BUF ? pool->s.seg_size : 0);
- ODP_DBG(" blks available %u %s\n", blkcount,
- pool->s.blk_low_wm_assert ? " **blk low wm asserted**" : "");
- ODP_DBG(" blk allocs %lu\n", blkallocs);
- ODP_DBG(" blk frees %lu\n", blkfrees);
- ODP_DBG(" blk empty %lu\n", blkempty);
- ODP_DBG(" buf high wm value %lu\n", pool->s.buf_high_wm);
- ODP_DBG(" buf high wm count %lu\n", bufhiwmct);
- ODP_DBG(" buf low wm value %lu\n", pool->s.buf_low_wm);
- ODP_DBG(" buf low wm count %lu\n", buflowmct);
- ODP_DBG(" blk high wm value %lu\n", pool->s.blk_high_wm);
- ODP_DBG(" blk high wm count %lu\n", blkhiwmct);
- ODP_DBG(" blk low wm value %lu\n", pool->s.blk_low_wm);
- ODP_DBG(" blk low wm count %lu\n", blklowmct);
+ printf(" pool shm %" PRIu64 "\n",
+ odp_shm_to_u64(pool->shm));
+ printf(" user area shm %" PRIu64 "\n",
+ odp_shm_to_u64(pool->uarea_shm));
+ printf(" num %u\n", pool->num);
+ printf(" align %u\n", pool->align);
+ printf(" headroom %u\n", pool->headroom);
+ printf(" data size %u\n", pool->data_size);
+ printf(" max data len %u\n", pool->max_len);
+ printf(" max seg len %u\n", pool->max_seg_len);
+ printf(" tailroom %u\n", pool->tailroom);
+ printf(" block size %u\n", pool->block_size);
+ printf(" uarea size %u\n", pool->uarea_size);
+ printf(" shm size %u\n", pool->shm_size);
+ printf(" base addr %p\n", pool->base_addr);
+ printf(" uarea shm size %u\n", pool->uarea_shm_size);
+ printf(" uarea base addr %p\n", pool->uarea_base_addr);
+ printf("\n");
}
odp_pool_t odp_buffer_pool(odp_buffer_t buf)
@@ -1158,3 +875,39 @@ void odp_pool_param_init(odp_pool_param_t *params)
{
memset(params, 0, sizeof(odp_pool_param_t));
}
+
+uint64_t odp_pool_to_u64(odp_pool_t hdl)
+{
+ return _odp_pri(hdl);
+}
+
+int seg_alloc_tail(odp_buffer_hdr_t *buf_hdr, int segcount)
+{
+ (void)buf_hdr;
+ (void)segcount;
+ return 0;
+}
+
+void seg_free_tail(odp_buffer_hdr_t *buf_hdr, int segcount)
+{
+ (void)buf_hdr;
+ (void)segcount;
+}
+
+int odp_buffer_is_valid(odp_buffer_t buf)
+{
+ odp_buffer_bits_t handle;
+ pool_t *pool;
+
+ handle.handle = buf;
+
+ if (handle.pool_id >= ODP_CONFIG_POOLS)
+ return 0;
+
+ pool = pool_entry(handle.pool_id);
+
+ if (pool->reserved == 0)
+ return 0;
+
+ return 1;
+}
diff --git a/platform/linux-generic/odp_queue.c b/platform/linux-generic/odp_queue.c
index 00070b536..e0a969929 100644
--- a/platform/linux-generic/odp_queue.c
+++ b/platform/linux-generic/odp_queue.c
@@ -23,7 +23,6 @@
#include <odp/api/hints.h>
#include <odp/api/sync.h>
#include <odp/api/traffic_mngr.h>
-#include <odp_schedule_ordered_internal.h>
#define NUM_INTERNAL_QUEUES 64
@@ -64,16 +63,30 @@ queue_entry_t *get_qentry(uint32_t queue_id)
static int queue_init(queue_entry_t *queue, const char *name,
const odp_queue_param_t *param)
{
- strncpy(queue->s.name, name, ODP_QUEUE_NAME_LEN - 1);
-
+ if (name == NULL) {
+ queue->s.name[0] = 0;
+ } else {
+ strncpy(queue->s.name, name, ODP_QUEUE_NAME_LEN - 1);
+ queue->s.name[ODP_QUEUE_NAME_LEN - 1] = 0;
+ }
memcpy(&queue->s.param, param, sizeof(odp_queue_param_t));
- if (queue->s.param.sched.lock_count >
- SCHEDULE_ORDERED_LOCKS_PER_QUEUE)
+ if (queue->s.param.sched.lock_count > sched_fn->max_ordered_locks())
return -1;
- if (param->type == ODP_QUEUE_TYPE_SCHED)
+ if (param->type == ODP_QUEUE_TYPE_SCHED) {
queue->s.param.deq_mode = ODP_QUEUE_OP_DISABLED;
+ if (param->sched.sync == ODP_SCHED_SYNC_ORDERED) {
+ unsigned i;
+
+ odp_atomic_init_u64(&queue->s.ordered.ctx, 0);
+ odp_atomic_init_u64(&queue->s.ordered.next_ctx, 0);
+
+ for (i = 0; i < queue->s.param.sched.lock_count; i++)
+ odp_atomic_init_u64(&queue->s.ordered.lock[i],
+ 0);
+ }
+ }
queue->s.type = queue->s.param.type;
queue->s.enqueue = queue_enq;
@@ -86,16 +99,13 @@ static int queue_init(queue_entry_t *queue, const char *name,
queue->s.head = NULL;
queue->s.tail = NULL;
- queue->s.reorder_head = NULL;
- queue->s.reorder_tail = NULL;
-
return 0;
}
int odp_queue_init_global(void)
{
- uint32_t i, j;
+ uint32_t i;
odp_shm_t shm;
ODP_DBG("Queue init ... ");
@@ -115,10 +125,6 @@ int odp_queue_init_global(void)
/* init locks */
queue_entry_t *queue = get_qentry(i);
LOCK_INIT(&queue->s.lock);
- for (j = 0; j < SCHEDULE_ORDERED_LOCKS_PER_QUEUE; j++) {
- odp_atomic_init_u64(&queue->s.sync_in[j], 0);
- odp_atomic_init_u64(&queue->s.sync_out[j], 0);
- }
queue->s.index = i;
queue->s.handle = queue_from_id(i);
}
@@ -166,7 +172,7 @@ int odp_queue_capability(odp_queue_capability_t *capa)
/* Reserve some queues for internal use */
capa->max_queues = ODP_CONFIG_QUEUES - NUM_INTERNAL_QUEUES;
- capa->max_ordered_locks = SCHEDULE_ORDERED_LOCKS_PER_QUEUE;
+ capa->max_ordered_locks = sched_fn->max_ordered_locks();
capa->max_sched_groups = sched_fn->num_grps();
capa->sched_prios = odp_schedule_num_prio();
@@ -310,10 +316,11 @@ int odp_queue_destroy(odp_queue_t handle)
ODP_ERR("queue \"%s\" not empty\n", queue->s.name);
return -1;
}
- if (queue_is_ordered(queue) && queue->s.reorder_head) {
+ if (queue_is_ordered(queue) &&
+ odp_atomic_load_u64(&queue->s.ordered.ctx) !=
+ odp_atomic_load_u64(&queue->s.ordered.next_ctx)) {
UNLOCK(&queue->s.lock);
- ODP_ERR("queue \"%s\" reorder queue not empty\n",
- queue->s.name);
+ ODP_ERR("queue \"%s\" reorder incomplete\n", queue->s.name);
return -1;
}
@@ -379,15 +386,14 @@ odp_queue_t odp_queue_lookup(const char *name)
}
static inline int enq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[],
- int num, int sustain)
+ int num)
{
int sched = 0;
int i, ret;
odp_buffer_hdr_t *hdr, *tail, *next_hdr;
- /* Ordered queues do not use bursts */
if (sched_fn->ord_enq_multi(queue->s.index, (void **)buf_hdr, num,
- sustain, &ret))
+ &ret))
return ret;
/* Optimize the common case of single enqueue */
@@ -395,12 +401,14 @@ static inline int enq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[],
tail = buf_hdr[0];
hdr = tail;
hdr->burst_num = 0;
+ hdr->next = NULL;
} else {
int next;
/* Start from the last buffer header */
tail = buf_hdr[num - 1];
hdr = tail;
+ hdr->next = NULL;
next = num - 2;
while (1) {
@@ -453,17 +461,16 @@ static inline int enq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[],
return num; /* All events enqueued */
}
-int queue_enq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num,
- int sustain)
+int queue_enq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num)
{
- return enq_multi(queue, buf_hdr, num, sustain);
+ return enq_multi(queue, buf_hdr, num);
}
-int queue_enq(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr, int sustain)
+int queue_enq(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr)
{
int ret;
- ret = enq_multi(queue, &buf_hdr, 1, sustain);
+ ret = enq_multi(queue, &buf_hdr, 1);
if (ret == 1)
return 0;
@@ -483,10 +490,10 @@ int odp_queue_enq_multi(odp_queue_t handle, const odp_event_t ev[], int num)
queue = queue_to_qentry(handle);
for (i = 0; i < num; i++)
- buf_hdr[i] = odp_buf_to_hdr(odp_buffer_from_event(ev[i]));
+ buf_hdr[i] = buf_hdl_to_hdr(odp_buffer_from_event(ev[i]));
return num == 0 ? 0 : queue->s.enqueue_multi(queue, buf_hdr,
- num, SUSTAIN_ORDER);
+ num);
}
int odp_queue_enq(odp_queue_t handle, odp_event_t ev)
@@ -495,12 +502,9 @@ int odp_queue_enq(odp_queue_t handle, odp_event_t ev)
queue_entry_t *queue;
queue = queue_to_qentry(handle);
- buf_hdr = odp_buf_to_hdr(odp_buffer_from_event(ev));
+ buf_hdr = buf_hdl_to_hdr(odp_buffer_from_event(ev));
- /* No chains via this entry */
- buf_hdr->link = NULL;
-
- return queue->s.enqueue(queue, buf_hdr, SUSTAIN_ORDER);
+ return queue->s.enqueue(queue, buf_hdr);
}
static inline int deq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[],
@@ -557,22 +561,6 @@ static inline int deq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[],
i++;
}
- /* Ordered queue book keeping inside the lock */
- if (queue_is_ordered(queue)) {
- for (j = 0; j < i; j++) {
- uint32_t k;
-
- buf_hdr[j]->origin_qe = queue;
- buf_hdr[j]->order = queue->s.order_in++;
- for (k = 0; k < queue->s.param.sched.lock_count; k++) {
- buf_hdr[j]->sync[k] =
- odp_atomic_fetch_inc_u64
- (&queue->s.sync_in[k]);
- }
- buf_hdr[j]->flags.sustain = SUSTAIN_ORDER;
- }
- }
-
/* Write head only if updated */
if (updated)
queue->s.head = hdr;
@@ -583,11 +571,6 @@ static inline int deq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[],
UNLOCK(&queue->s.lock);
- /* Init origin_qe for non-ordered queues */
- if (!queue_is_ordered(queue))
- for (j = 0; j < i; j++)
- buf_hdr[j]->origin_qe = NULL;
-
return i;
}
diff --git a/platform/linux-generic/odp_schedule.c b/platform/linux-generic/odp_schedule.c
index 81e79c965..645630a7a 100644
--- a/platform/linux-generic/odp_schedule.c
+++ b/platform/linux-generic/odp_schedule.c
@@ -17,12 +17,11 @@
#include <odp/api/hints.h>
#include <odp/api/cpu.h>
#include <odp/api/thrmask.h>
-#include <odp/api/atomic.h>
#include <odp_config_internal.h>
#include <odp_align_internal.h>
-#include <odp_schedule_internal.h>
-#include <odp_schedule_ordered_internal.h>
#include <odp/api/sync.h>
+#include <odp_ring_internal.h>
+#include <odp_queue_internal.h>
/* Number of priority levels */
#define NUM_PRIO 8
@@ -82,9 +81,6 @@ ODP_STATIC_ASSERT((ODP_SCHED_PRIO_NORMAL > 0) &&
/* Priority queue empty, not a valid queue index. */
#define PRIO_QUEUE_EMPTY ((uint32_t)-1)
-/* Ring empty, not a valid index. */
-#define RING_EMPTY ((uint32_t)-1)
-
/* For best performance, the number of queues should be a power of two. */
ODP_STATIC_ASSERT(ODP_VAL_IS_POWER_2(ODP_CONFIG_QUEUES),
"Number_of_queues_is_not_power_of_two");
@@ -111,28 +107,62 @@ ODP_STATIC_ASSERT((8 * sizeof(pri_mask_t)) >= QUEUES_PER_PRIO,
/* Start of named groups in group mask arrays */
#define SCHED_GROUP_NAMED (ODP_SCHED_GROUP_CONTROL + 1)
-/* Scheduler ring
- *
- * Ring stores head and tail counters. Ring indexes are formed from these
- * counters with a mask (mask = ring_size - 1), which requires that ring size
- * must be a power of two. */
+/* Maximum number of dequeues */
+#define MAX_DEQ CONFIG_BURST_SIZE
+
+/* Maximum number of ordered locks per queue */
+#define MAX_ORDERED_LOCKS_PER_QUEUE 2
+
+ODP_STATIC_ASSERT(MAX_ORDERED_LOCKS_PER_QUEUE <= CONFIG_QUEUE_MAX_ORD_LOCKS,
+ "Too_many_ordered_locks");
+
+/* Ordered stash size */
+#define MAX_ORDERED_STASH 512
+
+/* Storage for stashed enqueue operation arguments */
typedef struct {
- /* Writer head and tail */
- odp_atomic_u32_t w_head;
- odp_atomic_u32_t w_tail;
- uint8_t pad[ODP_CACHE_LINE_SIZE - (2 * sizeof(odp_atomic_u32_t))];
+ odp_buffer_hdr_t *buf_hdr[QUEUE_MULTI_MAX];
+ queue_entry_t *queue;
+ int num;
+} ordered_stash_t;
- /* Reader head and tail */
- odp_atomic_u32_t r_head;
- odp_atomic_u32_t r_tail;
+/* Ordered lock states */
+typedef union {
+ uint8_t u8[CONFIG_QUEUE_MAX_ORD_LOCKS];
+ uint32_t all;
+} lock_called_t;
- uint32_t data[0];
-} sched_ring_t ODP_ALIGNED_CACHE;
+ODP_STATIC_ASSERT(sizeof(lock_called_t) == sizeof(uint32_t),
+ "Lock_called_values_do_not_fit_in_uint32");
+
+/* Scheduler local data */
+typedef struct {
+ int thr;
+ int num;
+ int index;
+ int pause;
+ uint16_t round;
+ uint16_t prefer_offset;
+ uint16_t pktin_polls;
+ uint32_t queue_index;
+ odp_queue_t queue;
+ odp_event_t ev_stash[MAX_DEQ];
+ struct {
+ queue_entry_t *src_queue; /**< Source queue entry */
+ uint64_t ctx; /**< Ordered context id */
+ int stash_num; /**< Number of stashed enqueue operations */
+ uint8_t in_order; /**< Order status */
+ lock_called_t lock_called; /**< States of ordered locks */
+ /** Storage for stashed enqueue operations */
+ ordered_stash_t stash[MAX_ORDERED_STASH];
+ } ordered;
+
+} sched_local_t;
/* Priority queue */
typedef struct {
/* Ring header */
- sched_ring_t ring;
+ ring_t ring;
/* Ring data: queue indexes */
uint32_t queue_index[PRIO_QUEUE_RING_SIZE];
@@ -142,7 +172,7 @@ typedef struct {
/* Packet IO queue */
typedef struct {
/* Ring header */
- sched_ring_t ring;
+ ring_t ring;
/* Ring data: pktio poll command indexes */
uint32_t cmd_index[PKTIO_RING_SIZE];
@@ -181,6 +211,7 @@ typedef struct {
struct {
char name[ODP_SCHED_GROUP_NAME_LEN];
odp_thrmask_t mask;
+ int allocated;
} sched_grp[NUM_SCHED_GRPS];
struct {
@@ -204,71 +235,6 @@ __thread sched_local_t sched_local;
/* Function prototypes */
static inline void schedule_release_context(void);
-static void ring_init(sched_ring_t *ring)
-{
- odp_atomic_init_u32(&ring->w_head, 0);
- odp_atomic_init_u32(&ring->w_tail, 0);
- odp_atomic_init_u32(&ring->r_head, 0);
- odp_atomic_init_u32(&ring->r_tail, 0);
-}
-
-/* Dequeue data from the ring head */
-static inline uint32_t ring_deq(sched_ring_t *ring, uint32_t mask)
-{
- uint32_t head, tail, new_head;
- uint32_t data;
-
- head = odp_atomic_load_u32(&ring->r_head);
-
- /* Move reader head. This thread owns data at the new head. */
- do {
- tail = odp_atomic_load_u32(&ring->w_tail);
-
- if (head == tail)
- return RING_EMPTY;
-
- new_head = head + 1;
-
- } while (odp_unlikely(odp_atomic_cas_acq_u32(&ring->r_head, &head,
- new_head) == 0));
-
- /* Read queue index */
- data = ring->data[new_head & mask];
-
- /* Wait until other readers have updated the tail */
- while (odp_unlikely(odp_atomic_load_acq_u32(&ring->r_tail) != head))
- odp_cpu_pause();
-
- /* Now update the reader tail */
- odp_atomic_store_rel_u32(&ring->r_tail, new_head);
-
- return data;
-}
-
-/* Enqueue data into the ring tail */
-static inline void ring_enq(sched_ring_t *ring, uint32_t mask, uint32_t data)
-{
- uint32_t old_head, new_head;
-
- /* Reserve a slot in the ring for writing */
- old_head = odp_atomic_fetch_inc_u32(&ring->w_head);
- new_head = old_head + 1;
-
- /* Ring is full. Wait for the last reader to finish. */
- while (odp_unlikely(odp_atomic_load_acq_u32(&ring->r_tail) == new_head))
- odp_cpu_pause();
-
- /* Write data */
- ring->data[new_head & mask] = data;
-
- /* Wait until other writers have updated the tail */
- while (odp_unlikely(odp_atomic_load_acq_u32(&ring->w_tail) != old_head))
- odp_cpu_pause();
-
- /* Now update the writer tail */
- odp_atomic_store_rel_u32(&ring->w_tail, new_head);
-}
-
static void sched_local_init(void)
{
memset(&sched_local, 0, sizeof(sched_local_t));
@@ -346,7 +312,7 @@ static int schedule_term_global(void)
for (i = 0; i < NUM_PRIO; i++) {
for (j = 0; j < QUEUES_PER_PRIO; j++) {
- sched_ring_t *ring = &sched->prio_q[i][j].ring;
+ ring_t *ring = &sched->prio_q[i][j].ring;
uint32_t qi;
while ((qi = ring_deq(ring, PRIO_QUEUE_MASK)) !=
@@ -391,6 +357,11 @@ static int schedule_term_local(void)
return 0;
}
+static unsigned schedule_max_ordered_locks(void)
+{
+ return MAX_ORDERED_LOCKS_PER_QUEUE;
+}
+
static inline int queue_per_prio(uint32_t queue_index)
{
return ((QUEUES_PER_PRIO - 1) & queue_index);
@@ -540,7 +511,7 @@ static void schedule_release_atomic(void)
if (qi != PRIO_QUEUE_EMPTY && sched_local.num == 0) {
int prio = sched->queue[qi].prio;
int queue_per_prio = sched->queue[qi].queue_per_prio;
- sched_ring_t *ring = &sched->prio_q[prio][queue_per_prio].ring;
+ ring_t *ring = &sched->prio_q[prio][queue_per_prio].ring;
/* Release current atomic queue */
ring_enq(ring, PRIO_QUEUE_MASK, qi);
@@ -548,25 +519,91 @@ static void schedule_release_atomic(void)
}
}
-static void schedule_release_ordered(void)
+static inline int ordered_own_turn(queue_entry_t *queue)
{
- if (sched_local.origin_qe) {
- int rc = release_order(sched_local.origin_qe,
- sched_local.order,
- sched_local.pool,
- sched_local.enq_called);
- if (rc == 0)
- sched_local.origin_qe = NULL;
+ uint64_t ctx;
+
+ ctx = odp_atomic_load_acq_u64(&queue->s.ordered.ctx);
+
+ return ctx == sched_local.ordered.ctx;
+}
+
+static inline void wait_for_order(queue_entry_t *queue)
+{
+ /* Busy loop to synchronize ordered processing */
+ while (1) {
+ if (ordered_own_turn(queue))
+ break;
+ odp_cpu_pause();
+ }
+}
+
+/**
+ * Perform stashed enqueue operations
+ *
+ * Should be called only when already in order.
+ */
+static inline void ordered_stash_release(void)
+{
+ int i;
+
+ for (i = 0; i < sched_local.ordered.stash_num; i++) {
+ queue_entry_t *queue;
+ odp_buffer_hdr_t **buf_hdr;
+ int num;
+
+ queue = sched_local.ordered.stash[i].queue;
+ buf_hdr = sched_local.ordered.stash[i].buf_hdr;
+ num = sched_local.ordered.stash[i].num;
+
+ queue_enq_multi(queue, buf_hdr, num);
+ }
+ sched_local.ordered.stash_num = 0;
+}
+
+static inline void release_ordered(void)
+{
+ unsigned i;
+ queue_entry_t *queue;
+
+ queue = sched_local.ordered.src_queue;
+
+ wait_for_order(queue);
+
+ /* Release all ordered locks */
+ for (i = 0; i < queue->s.param.sched.lock_count; i++) {
+ if (!sched_local.ordered.lock_called.u8[i])
+ odp_atomic_store_rel_u64(&queue->s.ordered.lock[i],
+ sched_local.ordered.ctx + 1);
}
+
+ sched_local.ordered.lock_called.all = 0;
+ sched_local.ordered.src_queue = NULL;
+ sched_local.ordered.in_order = 0;
+
+ ordered_stash_release();
+
+ /* Next thread can continue processing */
+ odp_atomic_add_rel_u64(&queue->s.ordered.ctx, 1);
+}
+
+static void schedule_release_ordered(void)
+{
+ queue_entry_t *queue;
+
+ queue = sched_local.ordered.src_queue;
+
+ if (odp_unlikely(!queue || sched_local.num))
+ return;
+
+ release_ordered();
}
static inline void schedule_release_context(void)
{
- if (sched_local.origin_qe != NULL) {
- release_order(sched_local.origin_qe, sched_local.order,
- sched_local.pool, sched_local.enq_called);
- sched_local.origin_qe = NULL;
- } else
+ if (sched_local.ordered.src_queue != NULL)
+ release_ordered();
+ else
schedule_release_atomic();
}
@@ -585,6 +622,46 @@ static inline int copy_events(odp_event_t out_ev[], unsigned int max)
return i;
}
+static int schedule_ord_enq_multi(uint32_t queue_index, void *buf_hdr[],
+ int num, int *ret)
+{
+ int i;
+ uint32_t stash_num = sched_local.ordered.stash_num;
+ queue_entry_t *dst_queue = get_qentry(queue_index);
+ queue_entry_t *src_queue = sched_local.ordered.src_queue;
+
+ if (!sched_local.ordered.src_queue || sched_local.ordered.in_order)
+ return 0;
+
+ if (ordered_own_turn(src_queue)) {
+ /* Own turn, so can do enqueue directly. */
+ sched_local.ordered.in_order = 1;
+ ordered_stash_release();
+ return 0;
+ }
+
+ if (odp_unlikely(stash_num >= MAX_ORDERED_STASH)) {
+ /* If the local stash is full, wait until it is our turn and
+ * then release the stash and do enqueue directly. */
+ wait_for_order(src_queue);
+
+ sched_local.ordered.in_order = 1;
+
+ ordered_stash_release();
+ return 0;
+ }
+
+ sched_local.ordered.stash[stash_num].queue = dst_queue;
+ sched_local.ordered.stash[stash_num].num = num;
+ for (i = 0; i < num; i++)
+ sched_local.ordered.stash[stash_num].buf_hdr[i] = buf_hdr[i];
+
+ sched_local.ordered.stash_num++;
+
+ *ret = num;
+ return 1;
+}
+
/*
* Schedule queues
*/
@@ -635,7 +712,7 @@ static int do_schedule(odp_queue_t *out_queue, odp_event_t out_ev[],
int grp;
int ordered;
odp_queue_t handle;
- sched_ring_t *ring;
+ ring_t *ring;
if (id >= QUEUES_PER_PRIO)
id = 0;
@@ -681,12 +758,11 @@ static int do_schedule(odp_queue_t *out_queue, odp_event_t out_ev[],
ordered = sched_cb_queue_is_ordered(qi);
- /* For ordered queues we want consecutive events to
- * be dispatched to separate threads, so do not cache
- * them locally.
- */
- if (ordered)
- max_deq = 1;
+ /* Do not cache ordered events locally to improve
+ * parallelism. Ordered context can only be released
+ * when the local cache is empty. */
+ if (ordered && max_num < MAX_DEQ)
+ max_deq = max_num;
num = sched_cb_queue_deq_multi(qi, sched_local.ev_stash,
max_deq);
@@ -711,11 +787,21 @@ static int do_schedule(odp_queue_t *out_queue, odp_event_t out_ev[],
ret = copy_events(out_ev, max_num);
if (ordered) {
+ uint64_t ctx;
+ queue_entry_t *queue;
+ odp_atomic_u64_t *next_ctx;
+
+ queue = get_qentry(qi);
+ next_ctx = &queue->s.ordered.next_ctx;
+
+ ctx = odp_atomic_fetch_inc_u64(next_ctx);
+
+ sched_local.ordered.ctx = ctx;
+ sched_local.ordered.src_queue = queue;
+
/* Continue scheduling ordered queues */
ring_enq(ring, PRIO_QUEUE_MASK, qi);
- /* Cache order info about this event */
- cache_order_info(qi);
} else if (sched_cb_queue_is_atomic(qi)) {
/* Hold queue during atomic access */
sched_local.queue_index = qi;
@@ -746,7 +832,7 @@ static int do_schedule(odp_queue_t *out_queue, odp_event_t out_ev[],
for (i = 0; i < PKTIO_CMD_QUEUES; i++, id = ((id + 1) &
PKTIO_CMD_QUEUE_MASK)) {
- sched_ring_t *ring;
+ ring_t *ring;
uint32_t cmd_index;
pktio_cmd_t *cmd;
@@ -840,6 +926,64 @@ static int schedule_multi(odp_queue_t *out_queue, uint64_t wait,
return schedule_loop(out_queue, wait, events, num);
}
+static inline void order_lock(void)
+{
+ queue_entry_t *queue;
+
+ queue = sched_local.ordered.src_queue;
+
+ if (!queue)
+ return;
+
+ wait_for_order(queue);
+}
+
+static void order_unlock(void)
+{
+}
+
+static void schedule_order_lock(unsigned lock_index)
+{
+ odp_atomic_u64_t *ord_lock;
+ queue_entry_t *queue;
+
+ queue = sched_local.ordered.src_queue;
+
+ ODP_ASSERT(queue && lock_index <= queue->s.param.sched.lock_count &&
+ !sched_local.ordered.lock_called.u8[lock_index]);
+
+ ord_lock = &queue->s.ordered.lock[lock_index];
+
+ /* Busy loop to synchronize ordered processing */
+ while (1) {
+ uint64_t lock_seq;
+
+ lock_seq = odp_atomic_load_acq_u64(ord_lock);
+
+ if (lock_seq == sched_local.ordered.ctx) {
+ sched_local.ordered.lock_called.u8[lock_index] = 1;
+ return;
+ }
+ odp_cpu_pause();
+ }
+}
+
+static void schedule_order_unlock(unsigned lock_index)
+{
+ odp_atomic_u64_t *ord_lock;
+ queue_entry_t *queue;
+
+ queue = sched_local.ordered.src_queue;
+
+ ODP_ASSERT(queue && lock_index <= queue->s.param.sched.lock_count);
+
+ ord_lock = &queue->s.ordered.lock[lock_index];
+
+ ODP_ASSERT(sched_local.ordered.ctx == odp_atomic_load_u64(ord_lock));
+
+ odp_atomic_store_rel_u64(ord_lock, sched_local.ordered.ctx + 1);
+}
+
static void schedule_pause(void)
{
sched_local.pause = 1;
@@ -869,11 +1013,19 @@ static odp_schedule_group_t schedule_group_create(const char *name,
odp_spinlock_lock(&sched->grp_lock);
for (i = SCHED_GROUP_NAMED; i < NUM_SCHED_GRPS; i++) {
- if (sched->sched_grp[i].name[0] == 0) {
- strncpy(sched->sched_grp[i].name, name,
- ODP_SCHED_GROUP_NAME_LEN - 1);
+ if (!sched->sched_grp[i].allocated) {
+ char *grp_name = sched->sched_grp[i].name;
+
+ if (name == NULL) {
+ grp_name[0] = 0;
+ } else {
+ strncpy(grp_name, name,
+ ODP_SCHED_GROUP_NAME_LEN - 1);
+ grp_name[ODP_SCHED_GROUP_NAME_LEN - 1] = 0;
+ }
odp_thrmask_copy(&sched->sched_grp[i].mask, mask);
group = (odp_schedule_group_t)i;
+ sched->sched_grp[i].allocated = 1;
break;
}
}
@@ -889,10 +1041,11 @@ static int schedule_group_destroy(odp_schedule_group_t group)
odp_spinlock_lock(&sched->grp_lock);
if (group < NUM_SCHED_GRPS && group >= SCHED_GROUP_NAMED &&
- sched->sched_grp[group].name[0] != 0) {
+ sched->sched_grp[group].allocated) {
odp_thrmask_zero(&sched->sched_grp[group].mask);
memset(sched->sched_grp[group].name, 0,
ODP_SCHED_GROUP_NAME_LEN);
+ sched->sched_grp[group].allocated = 0;
ret = 0;
} else {
ret = -1;
@@ -928,7 +1081,7 @@ static int schedule_group_join(odp_schedule_group_t group,
odp_spinlock_lock(&sched->grp_lock);
if (group < NUM_SCHED_GRPS && group >= SCHED_GROUP_NAMED &&
- sched->sched_grp[group].name[0] != 0) {
+ sched->sched_grp[group].allocated) {
odp_thrmask_or(&sched->sched_grp[group].mask,
&sched->sched_grp[group].mask,
mask);
@@ -949,7 +1102,7 @@ static int schedule_group_leave(odp_schedule_group_t group,
odp_spinlock_lock(&sched->grp_lock);
if (group < NUM_SCHED_GRPS && group >= SCHED_GROUP_NAMED &&
- sched->sched_grp[group].name[0] != 0) {
+ sched->sched_grp[group].allocated) {
odp_thrmask_t leavemask;
odp_thrmask_xor(&leavemask, mask, &sched->mask_all);
@@ -973,7 +1126,7 @@ static int schedule_group_thrmask(odp_schedule_group_t group,
odp_spinlock_lock(&sched->grp_lock);
if (group < NUM_SCHED_GRPS && group >= SCHED_GROUP_NAMED &&
- sched->sched_grp[group].name[0] != 0) {
+ sched->sched_grp[group].allocated) {
*thrmask = sched->sched_grp[group].mask;
ret = 0;
} else {
@@ -992,7 +1145,7 @@ static int schedule_group_info(odp_schedule_group_t group,
odp_spinlock_lock(&sched->grp_lock);
if (group < NUM_SCHED_GRPS && group >= SCHED_GROUP_NAMED &&
- sched->sched_grp[group].name[0] != 0) {
+ sched->sched_grp[group].allocated) {
info->name = sched->sched_grp[group].name;
info->thrmask = sched->sched_grp[group].mask;
ret = 0;
@@ -1041,9 +1194,7 @@ static int schedule_sched_queue(uint32_t queue_index)
{
int prio = sched->queue[queue_index].prio;
int queue_per_prio = sched->queue[queue_index].queue_per_prio;
- sched_ring_t *ring = &sched->prio_q[prio][queue_per_prio].ring;
-
- sched_local.ignore_ordered_context = 1;
+ ring_t *ring = &sched->prio_q[prio][queue_per_prio].ring;
ring_enq(ring, PRIO_QUEUE_MASK, queue_index);
return 0;
@@ -1063,11 +1214,14 @@ const schedule_fn_t schedule_default_fn = {
.init_queue = schedule_init_queue,
.destroy_queue = schedule_destroy_queue,
.sched_queue = schedule_sched_queue,
- .ord_enq_multi = schedule_ordered_queue_enq_multi,
+ .ord_enq_multi = schedule_ord_enq_multi,
.init_global = schedule_init_global,
.term_global = schedule_term_global,
.init_local = schedule_init_local,
- .term_local = schedule_term_local
+ .term_local = schedule_term_local,
+ .order_lock = order_lock,
+ .order_unlock = order_unlock,
+ .max_ordered_locks = schedule_max_ordered_locks
};
/* Fill in scheduler API calls */
diff --git a/platform/linux-generic/odp_schedule_ordered.c b/platform/linux-generic/odp_schedule_ordered.c
deleted file mode 100644
index 841218385..000000000
--- a/platform/linux-generic/odp_schedule_ordered.c
+++ /dev/null
@@ -1,818 +0,0 @@
-/* Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp_packet_io_queue.h>
-#include <odp_queue_internal.h>
-#include <odp_schedule_if.h>
-#include <odp_schedule_ordered_internal.h>
-#include <odp_traffic_mngr_internal.h>
-#include <odp_schedule_internal.h>
-
-#define RESOLVE_ORDER 0
-#define NOAPPEND 0
-#define APPEND 1
-
-static inline void sched_enq_called(void)
-{
- sched_local.enq_called = 1;
-}
-
-static inline void get_sched_order(queue_entry_t **origin_qe, uint64_t *order)
-{
- if (sched_local.ignore_ordered_context) {
- sched_local.ignore_ordered_context = 0;
- *origin_qe = NULL;
- } else {
- *origin_qe = sched_local.origin_qe;
- *order = sched_local.order;
- }
-}
-
-static inline void sched_order_resolved(odp_buffer_hdr_t *buf_hdr)
-{
- if (buf_hdr)
- buf_hdr->origin_qe = NULL;
- sched_local.origin_qe = NULL;
-}
-
-static inline void get_qe_locks(queue_entry_t *qe1, queue_entry_t *qe2)
-{
- /* Special case: enq to self */
- if (qe1 == qe2) {
- queue_lock(qe1);
- return;
- }
-
- /* Since any queue can be either a source or target, queues do not have
- * a natural locking hierarchy. Create one by using the qentry address
- * as the ordering mechanism.
- */
-
- if (qe1 < qe2) {
- queue_lock(qe1);
- queue_lock(qe2);
- } else {
- queue_lock(qe2);
- queue_lock(qe1);
- }
-}
-
-static inline void free_qe_locks(queue_entry_t *qe1, queue_entry_t *qe2)
-{
- queue_unlock(qe1);
- if (qe1 != qe2)
- queue_unlock(qe2);
-}
-
-static inline odp_buffer_hdr_t *get_buf_tail(odp_buffer_hdr_t *buf_hdr)
-{
- odp_buffer_hdr_t *buf_tail = buf_hdr->link ? buf_hdr->link : buf_hdr;
-
- buf_hdr->next = buf_hdr->link;
- buf_hdr->link = NULL;
-
- while (buf_tail->next)
- buf_tail = buf_tail->next;
-
- return buf_tail;
-}
-
-static inline void queue_add_list(queue_entry_t *queue,
- odp_buffer_hdr_t *buf_head,
- odp_buffer_hdr_t *buf_tail)
-{
- if (queue->s.head)
- queue->s.tail->next = buf_head;
- else
- queue->s.head = buf_head;
-
- queue->s.tail = buf_tail;
-}
-
-static inline void queue_add_chain(queue_entry_t *queue,
- odp_buffer_hdr_t *buf_hdr)
-{
- queue_add_list(queue, buf_hdr, get_buf_tail(buf_hdr));
-}
-
-static inline void reorder_enq(queue_entry_t *queue,
- uint64_t order,
- queue_entry_t *origin_qe,
- odp_buffer_hdr_t *buf_hdr,
- int sustain)
-{
- odp_buffer_hdr_t *reorder_buf = origin_qe->s.reorder_head;
- odp_buffer_hdr_t *reorder_prev = NULL;
-
- while (reorder_buf && order >= reorder_buf->order) {
- reorder_prev = reorder_buf;
- reorder_buf = reorder_buf->next;
- }
-
- buf_hdr->next = reorder_buf;
-
- if (reorder_prev)
- reorder_prev->next = buf_hdr;
- else
- origin_qe->s.reorder_head = buf_hdr;
-
- if (!reorder_buf)
- origin_qe->s.reorder_tail = buf_hdr;
-
- buf_hdr->origin_qe = origin_qe;
- buf_hdr->target_qe = queue;
- buf_hdr->order = order;
- buf_hdr->flags.sustain = sustain;
-}
-
-static inline void order_release(queue_entry_t *origin_qe, int count)
-{
- uint64_t sync;
- uint32_t i;
-
- origin_qe->s.order_out += count;
-
- for (i = 0; i < origin_qe->s.param.sched.lock_count; i++) {
- sync = odp_atomic_load_u64(&origin_qe->s.sync_out[i]);
- if (sync < origin_qe->s.order_out)
- odp_atomic_fetch_add_u64(&origin_qe->s.sync_out[i],
- origin_qe->s.order_out - sync);
- }
-}
-
-static inline int reorder_deq(queue_entry_t *queue,
- queue_entry_t *origin_qe,
- odp_buffer_hdr_t **reorder_tail_return,
- odp_buffer_hdr_t **placeholder_buf_return,
- int *release_count_return,
- int *placeholder_count_return)
-{
- odp_buffer_hdr_t *reorder_buf = origin_qe->s.reorder_head;
- odp_buffer_hdr_t *reorder_tail = NULL;
- odp_buffer_hdr_t *placeholder_buf = NULL;
- odp_buffer_hdr_t *next_buf;
- int deq_count = 0;
- int release_count = 0;
- int placeholder_count = 0;
-
- while (reorder_buf &&
- reorder_buf->order <= origin_qe->s.order_out +
- release_count + placeholder_count) {
- /*
- * Elements on the reorder list fall into one of
- * three categories:
- *
- * 1. Those destined for the same queue. These
- * can be enq'd now if they were waiting to
- * be unblocked by this enq.
- *
- * 2. Those representing placeholders for events
- * whose ordering was released by a prior
- * odp_schedule_release_ordered() call. These
- * can now just be freed.
- *
- * 3. Those representing events destined for another
- * queue. These cannot be consolidated with this
- * enq since they have a different target.
- *
- * Detecting an element with an order sequence gap, an
- * element in category 3, or running out of elements
- * stops the scan.
- */
- next_buf = reorder_buf->next;
-
- if (odp_likely(reorder_buf->target_qe == queue)) {
- /* promote any chain */
- odp_buffer_hdr_t *reorder_link =
- reorder_buf->link;
-
- if (reorder_link) {
- reorder_buf->next = reorder_link;
- reorder_buf->link = NULL;
- while (reorder_link->next)
- reorder_link = reorder_link->next;
- reorder_link->next = next_buf;
- reorder_tail = reorder_link;
- } else {
- reorder_tail = reorder_buf;
- }
-
- deq_count++;
- if (!reorder_buf->flags.sustain)
- release_count++;
- reorder_buf = next_buf;
- } else if (!reorder_buf->target_qe) {
- if (reorder_tail)
- reorder_tail->next = next_buf;
- else
- origin_qe->s.reorder_head = next_buf;
-
- reorder_buf->next = placeholder_buf;
- placeholder_buf = reorder_buf;
-
- reorder_buf = next_buf;
- placeholder_count++;
- } else {
- break;
- }
- }
-
- *reorder_tail_return = reorder_tail;
- *placeholder_buf_return = placeholder_buf;
- *release_count_return = release_count;
- *placeholder_count_return = placeholder_count;
-
- return deq_count;
-}
-
-static inline void reorder_complete(queue_entry_t *origin_qe,
- odp_buffer_hdr_t **reorder_buf_return,
- odp_buffer_hdr_t **placeholder_buf,
- int placeholder_append)
-{
- odp_buffer_hdr_t *reorder_buf = origin_qe->s.reorder_head;
- odp_buffer_hdr_t *next_buf;
-
- *reorder_buf_return = NULL;
- if (!placeholder_append)
- *placeholder_buf = NULL;
-
- while (reorder_buf &&
- reorder_buf->order <= origin_qe->s.order_out) {
- next_buf = reorder_buf->next;
-
- if (!reorder_buf->target_qe) {
- origin_qe->s.reorder_head = next_buf;
- reorder_buf->next = *placeholder_buf;
- *placeholder_buf = reorder_buf;
-
- reorder_buf = next_buf;
- order_release(origin_qe, 1);
- } else if (reorder_buf->flags.sustain) {
- reorder_buf = next_buf;
- } else {
- *reorder_buf_return = origin_qe->s.reorder_head;
- origin_qe->s.reorder_head =
- origin_qe->s.reorder_head->next;
- break;
- }
- }
-}
-
-static inline void get_queue_order(queue_entry_t **origin_qe, uint64_t *order,
- odp_buffer_hdr_t *buf_hdr)
-{
- if (buf_hdr && buf_hdr->origin_qe) {
- *origin_qe = buf_hdr->origin_qe;
- *order = buf_hdr->order;
- } else {
- get_sched_order(origin_qe, order);
- }
-}
-
-int queue_tm_reenq(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr,
- int sustain ODP_UNUSED)
-{
- odp_tm_queue_t tm_queue = MAKE_ODP_TM_QUEUE((uint8_t *)queue -
- offsetof(tm_queue_obj_t,
- tm_qentry));
- odp_packet_t pkt = (odp_packet_t)buf_hdr->handle.handle;
-
- return odp_tm_enq(tm_queue, pkt);
-}
-
-int queue_tm_reenq_multi(queue_entry_t *queue ODP_UNUSED,
- odp_buffer_hdr_t *buf[] ODP_UNUSED,
- int num ODP_UNUSED,
- int sustain ODP_UNUSED)
-{
- ODP_ABORT("Invalid call to queue_tm_reenq_multi()\n");
- return 0;
-}
-
-int queue_tm_reorder(queue_entry_t *queue,
- odp_buffer_hdr_t *buf_hdr)
-{
- queue_entry_t *origin_qe;
- uint64_t order;
-
- get_queue_order(&origin_qe, &order, buf_hdr);
-
- if (!origin_qe)
- return 0;
-
- /* Check if we're in order */
- queue_lock(origin_qe);
- if (odp_unlikely(origin_qe->s.status < QUEUE_STATUS_READY)) {
- queue_unlock(origin_qe);
- ODP_ERR("Bad origin queue status\n");
- return 0;
- }
-
- sched_enq_called();
-
- /* Wait if it's not our turn */
- if (order > origin_qe->s.order_out) {
- reorder_enq(queue, order, origin_qe, buf_hdr, SUSTAIN_ORDER);
- queue_unlock(origin_qe);
- return 1;
- }
-
- /* Back to TM to handle enqueue
- *
- * Note: Order will be resolved by a subsequent call to
- * odp_schedule_release_ordered() or odp_schedule() as odp_tm_enq()
- * calls never resolve order by themselves.
- */
- queue_unlock(origin_qe);
- return 0;
-}
-
-static int queue_enq_internal(odp_buffer_hdr_t *buf_hdr)
-{
- return buf_hdr->target_qe->s.enqueue(buf_hdr->target_qe, buf_hdr,
- buf_hdr->flags.sustain);
-}
-
-static int ordered_queue_enq(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr,
- int sustain, queue_entry_t *origin_qe,
- uint64_t order)
-{
- odp_buffer_hdr_t *reorder_buf;
- odp_buffer_hdr_t *next_buf;
- odp_buffer_hdr_t *reorder_tail;
- odp_buffer_hdr_t *placeholder_buf = NULL;
- int release_count, placeholder_count;
- int sched = 0;
-
- /* Need two locks for enq operations from ordered queues */
- get_qe_locks(origin_qe, queue);
-
- if (odp_unlikely(origin_qe->s.status < QUEUE_STATUS_READY ||
- queue->s.status < QUEUE_STATUS_READY)) {
- free_qe_locks(queue, origin_qe);
- ODP_ERR("Bad queue status\n");
- ODP_ERR("queue = %s, origin q = %s, buf = %p\n",
- queue->s.name, origin_qe->s.name, buf_hdr);
- return -1;
- }
-
- /* Remember that enq was called for this order */
- sched_enq_called();
-
- /* We can only complete this enq if we're in order */
- if (order > origin_qe->s.order_out) {
- reorder_enq(queue, order, origin_qe, buf_hdr, sustain);
-
- /* This enq can't complete until order is restored, so
- * we're done here.
- */
- free_qe_locks(queue, origin_qe);
- return 0;
- }
-
- /* Resolve order if requested */
- if (!sustain) {
- order_release(origin_qe, 1);
- sched_order_resolved(buf_hdr);
- }
-
- /* Update queue status */
- if (queue->s.status == QUEUE_STATUS_NOTSCHED) {
- queue->s.status = QUEUE_STATUS_SCHED;
- sched = 1;
- }
-
- /* We're in order, however the reorder queue may have other buffers
- * sharing this order on it and this buffer must not be enqueued ahead
- * of them. If the reorder queue is empty we can short-cut and
- * simply add to the target queue directly.
- */
-
- if (!origin_qe->s.reorder_head) {
- queue_add_chain(queue, buf_hdr);
- free_qe_locks(queue, origin_qe);
-
- /* Add queue to scheduling */
- if (sched && sched_fn->sched_queue(queue->s.index))
- ODP_ABORT("schedule_queue failed\n");
- return 0;
- }
-
- /* The reorder_queue is non-empty, so sort this buffer into it. Note
- * that we force the sustain bit on here because we'll be removing
- * this immediately and we already accounted for this order earlier.
- */
- reorder_enq(queue, order, origin_qe, buf_hdr, 1);
-
- /* Pick up this element, and all others resolved by this enq,
- * and add them to the target queue.
- */
- reorder_deq(queue, origin_qe, &reorder_tail, &placeholder_buf,
- &release_count, &placeholder_count);
-
- /* Move the list from the reorder queue to the target queue */
- if (queue->s.head)
- queue->s.tail->next = origin_qe->s.reorder_head;
- else
- queue->s.head = origin_qe->s.reorder_head;
- queue->s.tail = reorder_tail;
- origin_qe->s.reorder_head = reorder_tail->next;
- reorder_tail->next = NULL;
-
- /* Reflect resolved orders in the output sequence */
- order_release(origin_qe, release_count + placeholder_count);
-
- /* Now handle any resolved orders for events destined for other
- * queues, appending placeholder bufs as needed.
- */
- if (origin_qe != queue)
- queue_unlock(queue);
-
- /* Add queue to scheduling */
- if (sched && sched_fn->sched_queue(queue->s.index))
- ODP_ABORT("schedule_queue failed\n");
-
- reorder_complete(origin_qe, &reorder_buf, &placeholder_buf, APPEND);
- queue_unlock(origin_qe);
-
- if (reorder_buf)
- queue_enq_internal(reorder_buf);
-
- /* Free all placeholder bufs that are now released */
- while (placeholder_buf) {
- next_buf = placeholder_buf->next;
- odp_buffer_free(placeholder_buf->handle.handle);
- placeholder_buf = next_buf;
- }
-
- return 0;
-}
-
-int schedule_ordered_queue_enq_multi(uint32_t queue_index, void *p_buf_hdr[],
- int num, int sustain, int *ret)
-{
- queue_entry_t *origin_qe;
- uint64_t order;
- int i, rc;
- queue_entry_t *qe = get_qentry(queue_index);
- odp_buffer_hdr_t *first_hdr = p_buf_hdr[0];
- odp_buffer_hdr_t **buf_hdr = (odp_buffer_hdr_t **)p_buf_hdr;
-
- /* Chain input buffers together */
- for (i = 0; i < num - 1; i++) {
- buf_hdr[i]->next = buf_hdr[i + 1];
- buf_hdr[i]->burst_num = 0;
- }
-
- buf_hdr[num - 1]->next = NULL;
-
- /* Handle ordered enqueues commonly via links */
- get_queue_order(&origin_qe, &order, first_hdr);
- if (origin_qe) {
- first_hdr->link = first_hdr->next;
- rc = ordered_queue_enq(qe, first_hdr, sustain,
- origin_qe, order);
- *ret = rc == 0 ? num : rc;
- return 1;
- }
-
- return 0;
-}
-
-int queue_pktout_enq(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr,
- int sustain)
-{
- queue_entry_t *origin_qe;
- uint64_t order;
- int rc;
-
- /* Special processing needed only if we came from an ordered queue */
- get_queue_order(&origin_qe, &order, buf_hdr);
- if (!origin_qe)
- return pktout_enqueue(queue, buf_hdr);
-
- /* Must lock origin_qe for ordered processing */
- queue_lock(origin_qe);
- if (odp_unlikely(origin_qe->s.status < QUEUE_STATUS_READY)) {
- queue_unlock(origin_qe);
- ODP_ERR("Bad origin queue status\n");
- return -1;
- }
-
- /* We can only complete the enq if we're in order */
- sched_enq_called();
- if (order > origin_qe->s.order_out) {
- reorder_enq(queue, order, origin_qe, buf_hdr, sustain);
-
- /* This enq can't complete until order is restored, so
- * we're done here.
- */
- queue_unlock(origin_qe);
- return 0;
- }
-
- /* Perform our enq since we're in order.
- * Note: Don't hold the origin_qe lock across an I/O operation!
- */
- queue_unlock(origin_qe);
-
- /* Handle any chained buffers (internal calls) */
- if (buf_hdr->link) {
- odp_buffer_hdr_t *buf_hdrs[QUEUE_MULTI_MAX];
- odp_buffer_hdr_t *next_buf;
- int num = 0;
-
- next_buf = buf_hdr->link;
- buf_hdr->link = NULL;
-
- while (next_buf) {
- buf_hdrs[num++] = next_buf;
- next_buf = next_buf->next;
- }
-
- rc = pktout_enq_multi(queue, buf_hdrs, num);
- if (rc < num)
- return -1;
- } else {
- rc = pktout_enqueue(queue, buf_hdr);
- if (rc)
- return rc;
- }
-
- /* Reacquire the lock following the I/O send. Note that we're still
- * guaranteed to be in order here since we haven't released
- * order yet.
- */
- queue_lock(origin_qe);
- if (odp_unlikely(origin_qe->s.status < QUEUE_STATUS_READY)) {
- queue_unlock(origin_qe);
- ODP_ERR("Bad origin queue status\n");
- return -1;
- }
-
- /* Account for this ordered enq */
- if (!sustain) {
- order_release(origin_qe, 1);
- sched_order_resolved(NULL);
- }
-
- /* Now check to see if our successful enq has unblocked other buffers
- * in the origin's reorder queue.
- */
- odp_buffer_hdr_t *reorder_buf;
- odp_buffer_hdr_t *next_buf;
- odp_buffer_hdr_t *reorder_tail;
- odp_buffer_hdr_t *xmit_buf;
- odp_buffer_hdr_t *placeholder_buf;
- int release_count, placeholder_count;
-
- /* Send released buffers as well */
- if (reorder_deq(queue, origin_qe, &reorder_tail, &placeholder_buf,
- &release_count, &placeholder_count)) {
- xmit_buf = origin_qe->s.reorder_head;
- origin_qe->s.reorder_head = reorder_tail->next;
- reorder_tail->next = NULL;
- queue_unlock(origin_qe);
-
- do {
- next_buf = xmit_buf->next;
- pktout_enqueue(queue, xmit_buf);
- xmit_buf = next_buf;
- } while (xmit_buf);
-
- /* Reacquire the origin_qe lock to continue */
- queue_lock(origin_qe);
- if (odp_unlikely(origin_qe->s.status < QUEUE_STATUS_READY)) {
- queue_unlock(origin_qe);
- ODP_ERR("Bad origin queue status\n");
- return -1;
- }
- }
-
- /* Update the order sequence to reflect the deq'd elements */
- order_release(origin_qe, release_count + placeholder_count);
-
- /* Now handle sends to other queues that are ready to go */
- reorder_complete(origin_qe, &reorder_buf, &placeholder_buf, APPEND);
-
- /* We're fully done with the origin_qe at last */
- queue_unlock(origin_qe);
-
- /* Now send the next buffer to its target queue */
- if (reorder_buf)
- queue_enq_internal(reorder_buf);
-
- /* Free all placeholder bufs that are now released */
- while (placeholder_buf) {
- next_buf = placeholder_buf->next;
- odp_buffer_free(placeholder_buf->handle.handle);
- placeholder_buf = next_buf;
- }
-
- return 0;
-}
-
-int queue_pktout_enq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[],
- int num, int sustain)
-{
- int i, rc;
- queue_entry_t *origin_qe;
- uint64_t order;
-
- /* If we're not ordered, handle directly */
- get_queue_order(&origin_qe, &order, buf_hdr[0]);
- if (!origin_qe)
- return pktout_enq_multi(queue, buf_hdr, num);
-
- /* Chain input buffers together */
- for (i = 0; i < num - 1; i++)
- buf_hdr[i]->next = buf_hdr[i + 1];
-
- buf_hdr[num - 1]->next = NULL;
-
- /* Handle commonly via links */
- buf_hdr[0]->link = buf_hdr[0]->next;
- rc = queue_pktout_enq(queue, buf_hdr[0], sustain);
- return rc == 0 ? num : rc;
-}
-
-/* These routines exists here rather than in odp_schedule
- * because they operate on queue interenal structures
- */
-int release_order(void *origin_qe_ptr, uint64_t order,
- odp_pool_t pool, int enq_called)
-{
- odp_buffer_t placeholder_buf;
- odp_buffer_hdr_t *placeholder_buf_hdr, *reorder_buf, *next_buf;
- queue_entry_t *origin_qe = origin_qe_ptr;
-
- /* Must lock the origin queue to process the release */
- queue_lock(origin_qe);
-
- /* If we are in order we can release immediately since there can be no
- * confusion about intermediate elements
- */
- if (order <= origin_qe->s.order_out) {
- reorder_buf = origin_qe->s.reorder_head;
-
- /* We're in order, however there may be one or more events on
- * the reorder queue that are part of this order. If that is
- * the case, remove them and let ordered_queue_enq() handle
- * them and resolve the order for us.
- */
- if (reorder_buf && reorder_buf->order == order) {
- odp_buffer_hdr_t *reorder_head = reorder_buf;
-
- next_buf = reorder_buf->next;
-
- while (next_buf && next_buf->order == order) {
- reorder_buf = next_buf;
- next_buf = next_buf->next;
- }
-
- origin_qe->s.reorder_head = reorder_buf->next;
- reorder_buf->next = NULL;
-
- queue_unlock(origin_qe);
- reorder_head->link = reorder_buf->next;
- return ordered_queue_enq(reorder_head->target_qe,
- reorder_head, RESOLVE_ORDER,
- origin_qe, order);
- }
-
- /* Reorder queue has no elements for this order, so it's safe
- * to resolve order here
- */
- order_release(origin_qe, 1);
-
- /* Check if this release allows us to unblock waiters. At the
- * point of this call, the reorder list may contain zero or
- * more placeholders that need to be freed, followed by zero
- * or one complete reorder buffer chain. Note that since we
- * are releasing order, we know no further enqs for this order
- * can occur, so ignore the sustain bit to clear out our
- * element(s) on the reorder queue
- */
- reorder_complete(origin_qe, &reorder_buf,
- &placeholder_buf_hdr, NOAPPEND);
-
- /* Now safe to unlock */
- queue_unlock(origin_qe);
-
- /* If reorder_buf has a target, do the enq now */
- if (reorder_buf)
- queue_enq_internal(reorder_buf);
-
- while (placeholder_buf_hdr) {
- odp_buffer_hdr_t *placeholder_next =
- placeholder_buf_hdr->next;
-
- odp_buffer_free(placeholder_buf_hdr->handle.handle);
- placeholder_buf_hdr = placeholder_next;
- }
-
- return 0;
- }
-
- /* If we are not in order we need a placeholder to represent our
- * "place in line" unless we have issued enqs, in which case we
- * already have a place in the reorder queue. If we need a
- * placeholder, use an element from the same pool we were scheduled
- * with is from, otherwise just ensure that the final element for our
- * order is not marked sustain.
- */
- if (enq_called) {
- reorder_buf = NULL;
- next_buf = origin_qe->s.reorder_head;
-
- while (next_buf && next_buf->order <= order) {
- reorder_buf = next_buf;
- next_buf = next_buf->next;
- }
-
- if (reorder_buf && reorder_buf->order == order) {
- reorder_buf->flags.sustain = 0;
- queue_unlock(origin_qe);
- return 0;
- }
- }
-
- placeholder_buf = odp_buffer_alloc(pool);
-
- /* Can't release if no placeholder is available */
- if (odp_unlikely(placeholder_buf == ODP_BUFFER_INVALID)) {
- queue_unlock(origin_qe);
- return -1;
- }
-
- placeholder_buf_hdr = odp_buf_to_hdr(placeholder_buf);
-
- /* Copy info to placeholder and add it to the reorder queue */
- placeholder_buf_hdr->origin_qe = origin_qe;
- placeholder_buf_hdr->order = order;
- placeholder_buf_hdr->flags.sustain = 0;
-
- reorder_enq(NULL, order, origin_qe, placeholder_buf_hdr, 0);
-
- queue_unlock(origin_qe);
- return 0;
-}
-
-void schedule_order_lock(unsigned lock_index)
-{
- queue_entry_t *origin_qe;
- uint64_t sync, sync_out;
-
- origin_qe = sched_local.origin_qe;
- if (!origin_qe || lock_index >= origin_qe->s.param.sched.lock_count)
- return;
-
- sync = sched_local.sync[lock_index];
- sync_out = odp_atomic_load_u64(&origin_qe->s.sync_out[lock_index]);
- ODP_ASSERT(sync >= sync_out);
-
- /* Wait until we are in order. Note that sync_out will be incremented
- * both by unlocks as well as order resolution, so we're OK if only
- * some events in the ordered flow need to lock.
- */
- while (sync != sync_out) {
- odp_cpu_pause();
- sync_out =
- odp_atomic_load_u64(&origin_qe->s.sync_out[lock_index]);
- }
-}
-
-void schedule_order_unlock(unsigned lock_index)
-{
- queue_entry_t *origin_qe;
-
- origin_qe = sched_local.origin_qe;
- if (!origin_qe || lock_index >= origin_qe->s.param.sched.lock_count)
- return;
- ODP_ASSERT(sched_local.sync[lock_index] ==
- odp_atomic_load_u64(&origin_qe->s.sync_out[lock_index]));
-
- /* Release the ordered lock */
- odp_atomic_fetch_inc_u64(&origin_qe->s.sync_out[lock_index]);
-}
-
-void cache_order_info(uint32_t queue_index)
-{
- uint32_t i;
- queue_entry_t *qe = get_qentry(queue_index);
- odp_event_t ev = sched_local.ev_stash[0];
- odp_buffer_hdr_t *buf_hdr = odp_buf_to_hdr(odp_buffer_from_event(ev));
-
- sched_local.origin_qe = qe;
- sched_local.order = buf_hdr->order;
- sched_local.pool = buf_hdr->pool_hdl;
-
- for (i = 0; i < qe->s.param.sched.lock_count; i++)
- sched_local.sync[i] = buf_hdr->sync[i];
-
- sched_local.enq_called = 0;
-}
diff --git a/platform/linux-generic/odp_schedule_sp.c b/platform/linux-generic/odp_schedule_sp.c
index 879eb5c49..5150d28df 100644
--- a/platform/linux-generic/odp_schedule_sp.c
+++ b/platform/linux-generic/odp_schedule_sp.c
@@ -13,9 +13,12 @@
#include <odp_debug_internal.h>
#include <odp_align_internal.h>
#include <odp_config_internal.h>
+#include <odp_ring_internal.h>
+#define NUM_THREAD ODP_THREAD_COUNT_MAX
#define NUM_QUEUE ODP_CONFIG_QUEUES
#define NUM_PKTIO ODP_CONFIG_PKTIO_ENTRIES
+#define NUM_ORDERED_LOCKS 1
#define NUM_PRIO 3
#define NUM_STATIC_GROUP 3
#define NUM_GROUP (NUM_STATIC_GROUP + 9)
@@ -28,12 +31,25 @@
#define GROUP_ALL ODP_SCHED_GROUP_ALL
#define GROUP_WORKER ODP_SCHED_GROUP_WORKER
#define GROUP_CONTROL ODP_SCHED_GROUP_CONTROL
+#define GROUP_PKTIN GROUP_ALL
+
+/* Maximum number of commands: one priority/group for all queues and pktios */
+#define RING_SIZE (ODP_ROUNDUP_POWER_2(NUM_QUEUE + NUM_PKTIO))
+#define RING_MASK (RING_SIZE - 1)
+
+/* Ring size must be power of two */
+ODP_STATIC_ASSERT(ODP_VAL_IS_POWER_2(RING_SIZE),
+ "Ring_size_is_not_power_of_two");
+
+ODP_STATIC_ASSERT(NUM_ORDERED_LOCKS <= CONFIG_QUEUE_MAX_ORD_LOCKS,
+ "Too_many_ordered_locks");
struct sched_cmd_t;
struct sched_cmd_s {
struct sched_cmd_t *next;
uint32_t index;
+ uint32_t ring_idx;
int type;
int prio;
int group;
@@ -48,38 +64,49 @@ typedef struct sched_cmd_t {
sizeof(struct sched_cmd_s)];
} sched_cmd_t ODP_ALIGNED_CACHE;
-struct prio_queue_s {
- odp_ticketlock_t lock;
- sched_cmd_t *head;
- sched_cmd_t *tail;
-};
+typedef struct {
+ /* Ring header */
+ ring_t ring;
+
+ /* Ring data: queue indexes */
+ uint32_t ring_idx[RING_SIZE];
-typedef struct prio_queue_t {
- struct prio_queue_s s;
- uint8_t pad[ROUNDUP_CACHE(sizeof(struct prio_queue_s)) -
- sizeof(struct prio_queue_s)];
} prio_queue_t ODP_ALIGNED_CACHE;
-struct sched_group_s {
- odp_ticketlock_t lock;
+typedef struct thr_group_t {
+ /* A generation counter for fast comparison if groups have changed */
+ odp_atomic_u32_t gen_cnt;
- struct {
- char name[ODP_SCHED_GROUP_NAME_LEN + 1];
- odp_thrmask_t mask;
- int allocated;
- } group[NUM_GROUP];
-};
+ /* Number of groups the thread belongs to */
+ int num_group;
+
+ /* The groups the thread belongs to */
+ int group[NUM_GROUP];
+
+} thr_group_t;
typedef struct sched_group_t {
- struct sched_group_s s;
- uint8_t pad[ROUNDUP_CACHE(sizeof(struct sched_group_s)) -
- sizeof(struct sched_group_s)];
+ struct {
+ odp_ticketlock_t lock;
+
+ /* All groups */
+ struct {
+ char name[ODP_SCHED_GROUP_NAME_LEN + 1];
+ odp_thrmask_t mask;
+ int allocated;
+ } group[NUM_GROUP];
+
+ /* Per thread group information */
+ thr_group_t thr[NUM_THREAD];
+
+ } s;
+
} sched_group_t ODP_ALIGNED_CACHE;
typedef struct {
sched_cmd_t queue_cmd[NUM_QUEUE];
sched_cmd_t pktio_cmd[NUM_PKTIO];
- prio_queue_t prio_queue[NUM_PRIO];
+ prio_queue_t prio_queue[NUM_GROUP][NUM_PRIO];
sched_group_t sched_group;
} sched_global_t;
@@ -87,14 +114,37 @@ typedef struct {
sched_cmd_t *cmd;
int pause;
int thr_id;
+ uint32_t gen_cnt;
+ int num_group;
+ int group[NUM_GROUP];
} sched_local_t;
static sched_global_t sched_global;
static __thread sched_local_t sched_local;
+static inline uint32_t index_to_ring_idx(int pktio, uint32_t index)
+{
+ if (pktio)
+ return (0x80000000 | index);
+
+ return index;
+}
+
+static inline uint32_t index_from_ring_idx(uint32_t *index, uint32_t ring_idx)
+{
+ uint32_t pktio = ring_idx & 0x80000000;
+
+ if (pktio)
+ *index = ring_idx & (~0x80000000);
+ else
+ *index = ring_idx;
+
+ return pktio;
+}
+
static int init_global(void)
{
- int i;
+ int i, j;
sched_group_t *sched_group = &sched_global.sched_group;
ODP_DBG("Using SP scheduler\n");
@@ -102,21 +152,28 @@ static int init_global(void)
memset(&sched_global, 0, sizeof(sched_global_t));
for (i = 0; i < NUM_QUEUE; i++) {
- sched_global.queue_cmd[i].s.type = CMD_QUEUE;
- sched_global.queue_cmd[i].s.index = i;
+ sched_global.queue_cmd[i].s.type = CMD_QUEUE;
+ sched_global.queue_cmd[i].s.index = i;
+ sched_global.queue_cmd[i].s.ring_idx = index_to_ring_idx(0, i);
}
for (i = 0; i < NUM_PKTIO; i++) {
- sched_global.pktio_cmd[i].s.type = CMD_PKTIO;
- sched_global.pktio_cmd[i].s.index = i;
- sched_global.pktio_cmd[i].s.prio = PKTIN_PRIO;
+ sched_global.pktio_cmd[i].s.type = CMD_PKTIO;
+ sched_global.pktio_cmd[i].s.index = i;
+ sched_global.pktio_cmd[i].s.ring_idx = index_to_ring_idx(1, i);
+ sched_global.pktio_cmd[i].s.prio = PKTIN_PRIO;
+ sched_global.pktio_cmd[i].s.group = GROUP_PKTIN;
}
- for (i = 0; i < NUM_PRIO; i++)
- odp_ticketlock_init(&sched_global.prio_queue[i].s.lock);
+ for (i = 0; i < NUM_GROUP; i++)
+ for (j = 0; j < NUM_PRIO; j++)
+ ring_init(&sched_global.prio_queue[i][j].ring);
odp_ticketlock_init(&sched_group->s.lock);
+ for (i = 0; i < NUM_THREAD; i++)
+ odp_atomic_init_u32(&sched_group->s.thr[i].gen_cnt, 0);
+
strncpy(sched_group->s.group[GROUP_ALL].name, "__group_all",
ODP_SCHED_GROUP_NAME_LEN);
odp_thrmask_zero(&sched_group->s.group[GROUP_ALL].mask);
@@ -162,6 +219,52 @@ static int term_local(void)
return 0;
}
+static unsigned max_ordered_locks(void)
+{
+ return NUM_ORDERED_LOCKS;
+}
+
+static void add_group(sched_group_t *sched_group, int thr, int group)
+{
+ int num;
+ uint32_t gen_cnt;
+ thr_group_t *thr_group = &sched_group->s.thr[thr];
+
+ num = thr_group->num_group;
+ thr_group->group[num] = group;
+ thr_group->num_group = num + 1;
+ gen_cnt = odp_atomic_load_u32(&thr_group->gen_cnt);
+ odp_atomic_store_u32(&thr_group->gen_cnt, gen_cnt + 1);
+}
+
+static void remove_group(sched_group_t *sched_group, int thr, int group)
+{
+ int i, num;
+ int found = 0;
+ thr_group_t *thr_group = &sched_group->s.thr[thr];
+
+ num = thr_group->num_group;
+
+ for (i = 0; i < num; i++) {
+ if (thr_group->group[i] == group) {
+ found = 1;
+
+ for (; i < num - 1; i++)
+ thr_group->group[i] = thr_group->group[i + 1];
+
+ break;
+ }
+ }
+
+ if (found) {
+ uint32_t gen_cnt;
+
+ thr_group->num_group = num - 1;
+ gen_cnt = odp_atomic_load_u32(&thr_group->gen_cnt);
+ odp_atomic_store_u32(&thr_group->gen_cnt, gen_cnt + 1);
+ }
+}
+
static int thr_add(odp_schedule_group_t group, int thr)
{
sched_group_t *sched_group = &sched_global.sched_group;
@@ -169,6 +272,9 @@ static int thr_add(odp_schedule_group_t group, int thr)
if (group < 0 || group >= NUM_GROUP)
return -1;
+ if (thr < 0 || thr >= NUM_THREAD)
+ return -1;
+
odp_ticketlock_lock(&sched_group->s.lock);
if (!sched_group->s.group[group].allocated) {
@@ -177,6 +283,7 @@ static int thr_add(odp_schedule_group_t group, int thr)
}
odp_thrmask_set(&sched_group->s.group[group].mask, thr);
+ add_group(sched_group, thr, group);
odp_ticketlock_unlock(&sched_group->s.lock);
@@ -199,6 +306,8 @@ static int thr_rem(odp_schedule_group_t group, int thr)
odp_thrmask_clr(&sched_group->s.group[group].mask, thr);
+ remove_group(sched_group, thr, group);
+
odp_ticketlock_unlock(&sched_group->s.lock);
return 0;
@@ -241,51 +350,34 @@ static void destroy_queue(uint32_t qi)
static inline void add_tail(sched_cmd_t *cmd)
{
prio_queue_t *prio_queue;
+ int group = cmd->s.group;
+ int prio = cmd->s.prio;
+ uint32_t idx = cmd->s.ring_idx;
- prio_queue = &sched_global.prio_queue[cmd->s.prio];
- cmd->s.next = NULL;
-
- odp_ticketlock_lock(&prio_queue->s.lock);
-
- if (prio_queue->s.head == NULL)
- prio_queue->s.head = cmd;
- else
- prio_queue->s.tail->s.next = cmd;
+ prio_queue = &sched_global.prio_queue[group][prio];
- prio_queue->s.tail = cmd;
-
- odp_ticketlock_unlock(&prio_queue->s.lock);
+ ring_enq(&prio_queue->ring, RING_MASK, idx);
}
-static inline sched_cmd_t *rem_head(int prio)
+static inline sched_cmd_t *rem_head(int group, int prio)
{
prio_queue_t *prio_queue;
- sched_cmd_t *cmd;
-
- prio_queue = &sched_global.prio_queue[prio];
+ uint32_t ring_idx, index;
+ int pktio;
- odp_ticketlock_lock(&prio_queue->s.lock);
+ prio_queue = &sched_global.prio_queue[group][prio];
- if (prio_queue->s.head == NULL) {
- cmd = NULL;
- } else {
- sched_group_t *sched_group = &sched_global.sched_group;
+ ring_idx = ring_deq(&prio_queue->ring, RING_MASK);
- cmd = prio_queue->s.head;
+ if (ring_idx == RING_EMPTY)
+ return NULL;
- /* Remove head cmd only if thread belongs to the
- * scheduler group. Otherwise continue to the next priority
- * queue. */
- if (odp_thrmask_isset(&sched_group->s.group[cmd->s.group].mask,
- sched_local.thr_id))
- prio_queue->s.head = cmd->s.next;
- else
- cmd = NULL;
- }
+ pktio = index_from_ring_idx(&index, ring_idx);
- odp_ticketlock_unlock(&prio_queue->s.lock);
+ if (pktio)
+ return &sched_global.pktio_cmd[index];
- return cmd;
+ return &sched_global.queue_cmd[index];
}
static int sched_queue(uint32_t qi)
@@ -299,12 +391,11 @@ static int sched_queue(uint32_t qi)
}
static int ord_enq_multi(uint32_t queue_index, void *buf_hdr[], int num,
- int sustain, int *ret)
+ int *ret)
{
(void)queue_index;
(void)buf_hdr;
(void)num;
- (void)sustain;
(void)ret;
/* didn't consume the events */
@@ -333,15 +424,43 @@ static void pktio_start(int pktio_index, int num, int pktin_idx[])
add_tail(cmd);
}
-static inline sched_cmd_t *sched_cmd(int num_prio)
+static inline sched_cmd_t *sched_cmd(void)
{
- int prio;
+ int prio, i;
+ int thr = sched_local.thr_id;
+ sched_group_t *sched_group = &sched_global.sched_group;
+ thr_group_t *thr_group = &sched_group->s.thr[thr];
+ uint32_t gen_cnt;
+
+ /* There's no matching store_rel since the value is updated while
+ * holding a lock */
+ gen_cnt = odp_atomic_load_acq_u32(&thr_group->gen_cnt);
+
+ /* Check if groups have changed and need to be read again */
+ if (odp_unlikely(gen_cnt != sched_local.gen_cnt)) {
+ int num_grp;
+
+ odp_ticketlock_lock(&sched_group->s.lock);
+
+ num_grp = thr_group->num_group;
+ gen_cnt = odp_atomic_load_u32(&thr_group->gen_cnt);
- for (prio = 0; prio < num_prio; prio++) {
- sched_cmd_t *cmd = rem_head(prio);
+ for (i = 0; i < num_grp; i++)
+ sched_local.group[i] = thr_group->group[i];
- if (cmd)
- return cmd;
+ odp_ticketlock_unlock(&sched_group->s.lock);
+
+ sched_local.num_group = num_grp;
+ sched_local.gen_cnt = gen_cnt;
+ }
+
+ for (i = 0; i < sched_local.num_group; i++) {
+ for (prio = 0; prio < NUM_PRIO; prio++) {
+ sched_cmd_t *cmd = rem_head(sched_local.group[i], prio);
+
+ if (cmd)
+ return cmd;
+ }
}
return NULL;
@@ -374,7 +493,7 @@ static int schedule_multi(odp_queue_t *from, uint64_t wait,
uint32_t qi;
int num;
- cmd = sched_cmd(NUM_PRIO);
+ cmd = sched_cmd();
if (cmd && cmd->s.type == CMD_PKTIO) {
if (sched_cb_pktin_poll(cmd->s.index, cmd->s.num_pktin,
@@ -490,8 +609,15 @@ static odp_schedule_group_t schedule_group_create(const char *name,
for (i = NUM_STATIC_GROUP; i < NUM_GROUP; i++) {
if (!sched_group->s.group[i].allocated) {
- strncpy(sched_group->s.group[i].name, name,
- ODP_SCHED_GROUP_NAME_LEN);
+ char *grp_name = sched_group->s.group[i].name;
+
+ if (name == NULL) {
+ grp_name[0] = 0;
+ } else {
+ strncpy(grp_name, name,
+ ODP_SCHED_GROUP_NAME_LEN - 1);
+ grp_name[ODP_SCHED_GROUP_NAME_LEN - 1] = 0;
+ }
odp_thrmask_copy(&sched_group->s.group[i].mask,
thrmask);
sched_group->s.group[i].allocated = 1;
@@ -550,11 +676,14 @@ static odp_schedule_group_t schedule_group_lookup(const char *name)
static int schedule_group_join(odp_schedule_group_t group,
const odp_thrmask_t *thrmask)
{
+ int thr;
sched_group_t *sched_group = &sched_global.sched_group;
if (group < 0 || group >= NUM_GROUP)
return -1;
+ thr = odp_thrmask_first(thrmask);
+
odp_ticketlock_lock(&sched_group->s.lock);
if (!sched_group->s.group[group].allocated) {
@@ -566,6 +695,11 @@ static int schedule_group_join(odp_schedule_group_t group,
&sched_group->s.group[group].mask,
thrmask);
+ while (thr >= 0) {
+ add_group(sched_group, thr, group);
+ thr = odp_thrmask_next(thrmask, thr);
+ }
+
odp_ticketlock_unlock(&sched_group->s.lock);
return 0;
@@ -574,6 +708,7 @@ static int schedule_group_join(odp_schedule_group_t group,
static int schedule_group_leave(odp_schedule_group_t group,
const odp_thrmask_t *thrmask)
{
+ int thr;
sched_group_t *sched_group = &sched_global.sched_group;
odp_thrmask_t *all = &sched_group->s.group[GROUP_ALL].mask;
odp_thrmask_t not;
@@ -581,6 +716,8 @@ static int schedule_group_leave(odp_schedule_group_t group,
if (group < 0 || group >= NUM_GROUP)
return -1;
+ thr = odp_thrmask_first(thrmask);
+
odp_ticketlock_lock(&sched_group->s.lock);
if (!sched_group->s.group[group].allocated) {
@@ -593,6 +730,11 @@ static int schedule_group_leave(odp_schedule_group_t group,
&sched_group->s.group[group].mask,
&not);
+ while (thr >= 0) {
+ remove_group(sched_group, thr, group);
+ thr = odp_thrmask_next(thrmask, thr);
+ }
+
odp_ticketlock_unlock(&sched_group->s.lock);
return 0;
@@ -653,6 +795,14 @@ static void schedule_order_unlock(unsigned lock_index)
(void)lock_index;
}
+static void order_lock(void)
+{
+}
+
+static void order_unlock(void)
+{
+}
+
/* Fill in scheduler interface */
const schedule_fn_t schedule_sp_fn = {
.pktio_start = pktio_start,
@@ -666,7 +816,10 @@ const schedule_fn_t schedule_sp_fn = {
.init_global = init_global,
.term_global = term_global,
.init_local = init_local,
- .term_local = term_local
+ .term_local = term_local,
+ .order_lock = order_lock,
+ .order_unlock = order_unlock,
+ .max_ordered_locks = max_ordered_locks
};
/* Fill in scheduler API calls */
diff --git a/platform/linux-generic/odp_shared_memory.c b/platform/linux-generic/odp_shared_memory.c
index 550af2718..ba32deecd 100644
--- a/platform/linux-generic/odp_shared_memory.c
+++ b/platform/linux-generic/odp_shared_memory.c
@@ -4,114 +4,39 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <odp_posix_extensions.h>
-
-#include <odp/api/shared_memory.h>
-#include <odp_internal.h>
-#include <odp/api/spinlock.h>
-#include <odp/api/align.h>
-#include <odp/api/system_info.h>
-#include <odp/api/debug.h>
-#include <odp_shm_internal.h>
-#include <odp_debug_internal.h>
-#include <odp_align_internal.h>
#include <odp_config_internal.h>
-
-#include <unistd.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <asm/mman.h>
-#include <fcntl.h>
-
-#include <stdio.h>
+#include <odp/api/debug.h>
+#include <odp/api/std_types.h>
+#include <odp/api/shared_memory.h>
+#include <_ishm_internal.h>
#include <string.h>
-#include <errno.h>
-#include <inttypes.h>
ODP_STATIC_ASSERT(ODP_CONFIG_SHM_BLOCKS >= ODP_CONFIG_POOLS,
"ODP_CONFIG_SHM_BLOCKS < ODP_CONFIG_POOLS");
-typedef struct {
- char name[ODP_SHM_NAME_LEN];
- uint64_t size;
- uint64_t align;
- uint64_t alloc_size;
- void *addr_orig;
- void *addr;
- int huge;
- odp_shm_t hdl;
- uint32_t flags;
- uint64_t page_sz;
- int fd;
-
-} odp_shm_block_t;
-
-
-typedef struct {
- odp_shm_block_t block[ODP_CONFIG_SHM_BLOCKS];
- odp_spinlock_t lock;
-
-} odp_shm_table_t;
-
-
-#ifndef MAP_ANONYMOUS
-#define MAP_ANONYMOUS MAP_ANON
-#endif
-
-
-/* Global shared memory table */
-static odp_shm_table_t *odp_shm_tbl;
-
-
static inline uint32_t from_handle(odp_shm_t shm)
{
return _odp_typeval(shm) - 1;
}
-
static inline odp_shm_t to_handle(uint32_t index)
{
return _odp_cast_scalar(odp_shm_t, index + 1);
}
-
-int odp_shm_init_global(void)
-{
- void *addr;
-
-#ifndef MAP_HUGETLB
- ODP_DBG("NOTE: mmap does not support huge pages\n");
-#endif
-
- addr = mmap(NULL, sizeof(odp_shm_table_t),
- PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
-
- if (addr == MAP_FAILED)
- return -1;
-
- odp_shm_tbl = addr;
-
- memset(odp_shm_tbl, 0, sizeof(odp_shm_table_t));
- odp_spinlock_init(&odp_shm_tbl->lock);
-
- return 0;
-}
-
-int odp_shm_term_global(void)
+static uint32_t get_ishm_flags(uint32_t flags)
{
- int ret;
+ uint32_t f = 0; /* internal ishm flags */
- ret = munmap(odp_shm_tbl, sizeof(odp_shm_table_t));
- if (ret)
- ODP_ERR("unable to munmap\n.");
+ /* set internal ishm flags according to API flags:
+ * note that both ODP_SHM_PROC and ODP_SHM_EXPORT maps to
+ * _ODP_ISHM_LINK as in the linux-gen implementation there is
+ * no difference between exporting to another ODP instance or
+ * another linux process */
+ f |= (flags & (ODP_SHM_PROC | ODP_SHM_EXPORT)) ? _ODP_ISHM_EXPORT : 0;
+ f |= (flags & ODP_SHM_SINGLE_VA) ? _ODP_ISHM_SINGLE_VA : 0;
- return ret;
-}
-
-
-int odp_shm_init_local(void)
-{
- return 0;
+ return f;
}
int odp_shm_capability(odp_shm_capability_t *capa)
@@ -119,319 +44,71 @@ int odp_shm_capability(odp_shm_capability_t *capa)
memset(capa, 0, sizeof(odp_shm_capability_t));
capa->max_blocks = ODP_CONFIG_SHM_BLOCKS;
- capa->max_size = 0;
- capa->max_align = 0;
+ capa->max_size = 0;
+ capa->max_align = 0;
return 0;
}
-static int find_block(const char *name, uint32_t *index)
+odp_shm_t odp_shm_reserve(const char *name, uint64_t size, uint64_t align,
+ uint32_t flags)
{
- uint32_t i;
+ int block_index;
+ int flgs = 0; /* internal ishm flags */
- for (i = 0; i < ODP_CONFIG_SHM_BLOCKS; i++) {
- if (strcmp(name, odp_shm_tbl->block[i].name) == 0) {
- /* found it */
- if (index != NULL)
- *index = i;
+ flgs = get_ishm_flags(flags);
- return 1;
- }
- }
-
- return 0;
+ block_index = _odp_ishm_reserve(name, size, -1, align, flgs, flags);
+ if (block_index >= 0)
+ return to_handle(block_index);
+ else
+ return ODP_SHM_INVALID;
}
-int odp_shm_free(odp_shm_t shm)
+odp_shm_t odp_shm_import(const char *remote_name,
+ odp_instance_t odp_inst,
+ const char *local_name)
{
- uint32_t i;
int ret;
- odp_shm_block_t *block;
- char shm_devname[SHM_DEVNAME_MAXLEN];
-
- if (shm == ODP_SHM_INVALID) {
- ODP_DBG("odp_shm_free: Invalid handle\n");
- return -1;
- }
- i = from_handle(shm);
+ ret = _odp_ishm_find_exported(remote_name, (pid_t)odp_inst,
+ local_name);
- if (i >= ODP_CONFIG_SHM_BLOCKS) {
- ODP_DBG("odp_shm_free: Bad handle\n");
- return -1;
- }
-
- odp_spinlock_lock(&odp_shm_tbl->lock);
-
- block = &odp_shm_tbl->block[i];
-
- if (block->addr == NULL) {
- ODP_DBG("odp_shm_free: Free block\n");
- odp_spinlock_unlock(&odp_shm_tbl->lock);
- return 0;
- }
-
- ret = munmap(block->addr_orig, block->alloc_size);
- if (0 != ret) {
- ODP_DBG("odp_shm_free: munmap failed: %s, id %u, addr %p\n",
- strerror(errno), i, block->addr_orig);
- odp_spinlock_unlock(&odp_shm_tbl->lock);
- return -1;
- }
-
- if (block->flags & ODP_SHM_PROC || block->flags & _ODP_SHM_PROC_NOCREAT) {
- int shm_ns_id;
-
- if (odp_global_data.ipc_ns)
- shm_ns_id = odp_global_data.ipc_ns;
- else
- shm_ns_id = odp_global_data.main_pid;
-
- snprintf(shm_devname, SHM_DEVNAME_MAXLEN,
- SHM_DEVNAME_FORMAT, shm_ns_id, block->name);
- ret = shm_unlink(shm_devname);
- if (0 != ret) {
- ODP_DBG("odp_shm_free: shm_unlink failed\n");
- odp_spinlock_unlock(&odp_shm_tbl->lock);
- return -1;
- }
- }
- memset(block, 0, sizeof(odp_shm_block_t));
- odp_spinlock_unlock(&odp_shm_tbl->lock);
- return 0;
+ return to_handle(ret);
}
-odp_shm_t odp_shm_reserve(const char *name, uint64_t size, uint64_t align,
- uint32_t flags)
+int odp_shm_free(odp_shm_t shm)
{
- uint32_t i;
- char shm_devname[SHM_DEVNAME_MAXLEN];
- odp_shm_block_t *block;
- void *addr;
- int fd = -1;
- int map_flag = MAP_SHARED;
- /* If already exists: O_EXCL: error, O_TRUNC: truncate to zero */
- int oflag = O_RDWR;
- uint64_t alloc_size;
- uint64_t page_sz, huge_sz;
-#ifdef MAP_HUGETLB
- int need_huge_page = 0;
- uint64_t alloc_hp_size;
-#endif
-
- page_sz = odp_sys_page_size();
- alloc_size = size + align;
-
-#ifdef MAP_HUGETLB
- huge_sz = odp_sys_huge_page_size();
- need_huge_page = (huge_sz && alloc_size > page_sz);
- /* munmap for huge pages requires sizes round up by page */
- alloc_hp_size = (size + align + (huge_sz - 1)) & (-huge_sz);
-#endif
-
- if (flags & ODP_SHM_PROC)
- oflag |= O_CREAT | O_TRUNC;
- if (flags & _ODP_SHM_O_EXCL)
- oflag |= O_EXCL;
-
- if (flags & (ODP_SHM_PROC | _ODP_SHM_PROC_NOCREAT)) {
- int shm_ns_id;
-
- if (odp_global_data.ipc_ns)
- shm_ns_id = odp_global_data.ipc_ns;
- else
- shm_ns_id = odp_global_data.main_pid;
-
- need_huge_page = 0;
-
- /* Creates a file to /dev/shm/odp */
- snprintf(shm_devname, SHM_DEVNAME_MAXLEN,
- SHM_DEVNAME_FORMAT, shm_ns_id, name);
- fd = shm_open(shm_devname, oflag,
- S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
- if (fd == -1) {
- ODP_DBG("%s: shm_open failed.\n", shm_devname);
- return ODP_SHM_INVALID;
- }
- } else {
- map_flag |= MAP_ANONYMOUS;
- }
-
- odp_spinlock_lock(&odp_shm_tbl->lock);
-
- if (find_block(name, NULL)) {
- /* Found a block with the same name */
- odp_spinlock_unlock(&odp_shm_tbl->lock);
- ODP_DBG("name \"%s\" already used.\n", name);
- return ODP_SHM_INVALID;
- }
-
- for (i = 0; i < ODP_CONFIG_SHM_BLOCKS; i++) {
- if (odp_shm_tbl->block[i].addr == NULL) {
- /* Found free block */
- break;
- }
- }
-
- if (i > ODP_CONFIG_SHM_BLOCKS - 1) {
- /* Table full */
- odp_spinlock_unlock(&odp_shm_tbl->lock);
- ODP_DBG("%s: no more blocks.\n", name);
- return ODP_SHM_INVALID;
- }
-
- block = &odp_shm_tbl->block[i];
-
- block->hdl = to_handle(i);
- addr = MAP_FAILED;
-
-#ifdef MAP_HUGETLB
- /* Try first huge pages */
- if (need_huge_page) {
- if ((flags & ODP_SHM_PROC) &&
- (ftruncate(fd, alloc_hp_size) == -1)) {
- odp_spinlock_unlock(&odp_shm_tbl->lock);
- ODP_DBG("%s: ftruncate huge pages failed.\n", name);
- return ODP_SHM_INVALID;
- }
-
- addr = mmap(NULL, alloc_hp_size, PROT_READ | PROT_WRITE,
- map_flag | MAP_HUGETLB, fd, 0);
- if (addr == MAP_FAILED) {
- ODP_DBG(" %s:\n"
- "\tNo huge pages, fall back to normal pages,\n"
- "\tcheck: /proc/sys/vm/nr_hugepages.\n", name);
- } else {
- block->alloc_size = alloc_hp_size;
- block->huge = 1;
- block->page_sz = huge_sz;
- }
- }
-#endif
-
- /* Use normal pages for small or failed huge page allocations */
- if (addr == MAP_FAILED) {
- if ((flags & ODP_SHM_PROC) &&
- (ftruncate(fd, alloc_size) == -1)) {
- odp_spinlock_unlock(&odp_shm_tbl->lock);
- ODP_ERR("%s: ftruncate failed.\n", name);
- return ODP_SHM_INVALID;
- }
-
- addr = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
- map_flag, fd, 0);
- if (addr == MAP_FAILED) {
- odp_spinlock_unlock(&odp_shm_tbl->lock);
- ODP_DBG("%s mmap failed.\n", name);
- return ODP_SHM_INVALID;
- } else {
- block->alloc_size = alloc_size;
- block->huge = 0;
- block->page_sz = page_sz;
- }
- }
-
- block->addr_orig = addr;
-
- /* move to correct alignment */
- addr = ODP_ALIGN_ROUNDUP_PTR(addr, align);
-
- strncpy(block->name, name, ODP_SHM_NAME_LEN - 1);
- block->name[ODP_SHM_NAME_LEN - 1] = 0;
- block->size = size;
- block->align = align;
- block->flags = flags;
- block->fd = fd;
- block->addr = addr;
-
- odp_spinlock_unlock(&odp_shm_tbl->lock);
- return block->hdl;
+ return _odp_ishm_free_by_index(from_handle(shm));
}
odp_shm_t odp_shm_lookup(const char *name)
{
- uint32_t i;
- odp_shm_t hdl;
-
- odp_spinlock_lock(&odp_shm_tbl->lock);
-
- if (find_block(name, &i) == 0) {
- odp_spinlock_unlock(&odp_shm_tbl->lock);
- return ODP_SHM_INVALID;
- }
-
- hdl = odp_shm_tbl->block[i].hdl;
- odp_spinlock_unlock(&odp_shm_tbl->lock);
-
- return hdl;
+ return to_handle(_odp_ishm_lookup_by_name(name));
}
-
void *odp_shm_addr(odp_shm_t shm)
{
- uint32_t i;
-
- i = from_handle(shm);
-
- if (i > (ODP_CONFIG_SHM_BLOCKS - 1))
- return NULL;
-
- return odp_shm_tbl->block[i].addr;
+ return _odp_ishm_address(from_handle(shm));
}
-
int odp_shm_info(odp_shm_t shm, odp_shm_info_t *info)
{
- odp_shm_block_t *block;
- uint32_t i;
+ _odp_ishm_info_t ishm_info;
- i = from_handle(shm);
-
- if (i > (ODP_CONFIG_SHM_BLOCKS - 1))
+ if (_odp_ishm_info(from_handle(shm), &ishm_info))
return -1;
- block = &odp_shm_tbl->block[i];
-
- info->name = block->name;
- info->addr = block->addr;
- info->size = block->size;
- info->page_size = block->page_sz;
- info->flags = block->flags;
+ info->name = ishm_info.name;
+ info->addr = ishm_info.addr;
+ info->size = ishm_info.size;
+ info->page_size = ishm_info.page_size;
+ info->flags = ishm_info.user_flags;
return 0;
}
-
void odp_shm_print_all(void)
{
- int i;
-
- ODP_PRINT("\nShared memory\n");
- ODP_PRINT("--------------\n");
- ODP_PRINT(" page size: %"PRIu64" kB\n",
- odp_sys_page_size() / 1024);
- ODP_PRINT(" huge page size: %"PRIu64" kB\n",
- odp_sys_huge_page_size() / 1024);
- ODP_PRINT("\n");
-
- ODP_PRINT(" id name kB align huge addr\n");
-
- for (i = 0; i < ODP_CONFIG_SHM_BLOCKS; i++) {
- odp_shm_block_t *block;
-
- block = &odp_shm_tbl->block[i];
-
- if (block->addr) {
- ODP_PRINT(" %2i %-24s %4"PRIu64" %4"PRIu64
- " %2c %p\n",
- i,
- block->name,
- block->size/1024,
- block->align,
- (block->huge ? '*' : ' '),
- block->addr);
- }
- }
-
- ODP_PRINT("\n");
+ _odp_ishm_status("Memory allocation status:");
}
diff --git a/platform/linux-generic/odp_spinlock.c b/platform/linux-generic/odp_spinlock.c
index 6fc138b44..cb0f0533c 100644
--- a/platform/linux-generic/odp_spinlock.c
+++ b/platform/linux-generic/odp_spinlock.c
@@ -13,7 +13,6 @@ void odp_spinlock_init(odp_spinlock_t *spinlock)
_odp_atomic_flag_init(&spinlock->lock, 0);
}
-
void odp_spinlock_lock(odp_spinlock_t *spinlock)
{
/* While the lock is already taken... */
@@ -25,19 +24,16 @@ void odp_spinlock_lock(odp_spinlock_t *spinlock)
odp_cpu_pause();
}
-
int odp_spinlock_trylock(odp_spinlock_t *spinlock)
{
return (_odp_atomic_flag_tas(&spinlock->lock) == 0);
}
-
void odp_spinlock_unlock(odp_spinlock_t *spinlock)
{
_odp_atomic_flag_clear(&spinlock->lock);
}
-
int odp_spinlock_is_locked(odp_spinlock_t *spinlock)
{
return _odp_atomic_flag_load(&spinlock->lock) != 0;
diff --git a/platform/linux-generic/odp_system_info.c b/platform/linux-generic/odp_system_info.c
index bbe5358ed..18c61dbe7 100644
--- a/platform/linux-generic/odp_system_info.c
+++ b/platform/linux-generic/odp_system_info.c
@@ -4,6 +4,13 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ */
+
#include <odp_posix_extensions.h>
#include <odp/api/system_info.h>
@@ -11,11 +18,13 @@
#include <odp_debug_internal.h>
#include <odp/api/align.h>
#include <odp/api/cpu.h>
+#include <errno.h>
#include <pthread.h>
#include <sched.h>
#include <string.h>
#include <stdio.h>
#include <inttypes.h>
+#include <ctype.h>
/* sysconf */
#include <unistd.h>
@@ -97,6 +106,158 @@ static uint64_t default_huge_page_size(void)
}
/*
+ * Split a string into tokens. Largely "inspired" by DPDK:
+ * lib/librte_eal/common/eal_common_string_fns.c: rte_strsplit
+ */
+static int strsplit(char *string, int stringlen,
+ char **tokens, int maxtokens, char delim)
+{
+ int i, tok = 0;
+ int tokstart = 1; /* first token is right at start of string */
+
+ if (string == NULL || tokens == NULL)
+ return -1;
+
+ for (i = 0; i < stringlen; i++) {
+ if (string[i] == '\0' || tok >= maxtokens)
+ break;
+ if (tokstart) {
+ tokstart = 0;
+ tokens[tok++] = &string[i];
+ }
+ if (string[i] == delim) {
+ string[i] = '\0';
+ tokstart = 1;
+ }
+ }
+ return tok;
+}
+
+/*
+ * Converts a numeric string to the equivalent uint64_t value.
+ * As well as straight number conversion, also recognises the suffixes
+ * k, m and g for kilobytes, megabytes and gigabytes respectively.
+ *
+ * If a negative number is passed in, i.e. a string with the first non-blank
+ * character being "-", zero is returned. Zero is also returned in the case of
+ * an error with the strtoull call in the function.
+ * largely "inspired" by dpdk:
+ * lib/librte_eal/common/include/rte_common.h: rte_str_to_size
+ *
+ * param str
+ * String containing number to convert.
+ * return
+ * Number.
+ */
+static inline uint64_t str_to_size(const char *str)
+{
+ char *endptr;
+ unsigned long long size;
+
+ while (isspace((int)*str))
+ str++;
+ if (*str == '-')
+ return 0;
+
+ errno = 0;
+ size = strtoull(str, &endptr, 0);
+ if (errno)
+ return 0;
+
+ if (*endptr == ' ')
+ endptr++; /* allow 1 space gap */
+
+ switch (*endptr) {
+ case 'G':
+ case 'g':
+ size *= 1024; /* fall-through */
+ case 'M':
+ case 'm':
+ size *= 1024; /* fall-through */
+ case 'K':
+ case 'k':
+ size *= 1024; /* fall-through */
+ default:
+ break;
+ }
+ return size;
+}
+
+/*
+ * Returns a malloc'd string (caller must free) naming the mount directory for
+ * huge pages of a given size (0 for default)
+ * largely "inspired" by dpdk:
+ * lib/librte_eal/linuxapp/eal/eal_hugepage_info.c: get_hugepage_dir
+ *
+ * Analysis of /proc/mounts
+ */
+static char *get_hugepage_dir(uint64_t hugepage_sz)
+{
+ enum proc_mount_fieldnames {
+ DEVICE = 0,
+ MOUNTPT,
+ FSTYPE,
+ OPTIONS,
+ _FIELDNAME_MAX
+ };
+ static uint64_t default_size;
+ const char proc_mounts[] = "/proc/mounts";
+ const char hugetlbfs_str[] = "hugetlbfs";
+ const size_t htlbfs_str_len = sizeof(hugetlbfs_str) - 1;
+ const char pagesize_opt[] = "pagesize=";
+ const size_t pagesize_opt_len = sizeof(pagesize_opt) - 1;
+ const char split_tok = ' ';
+ char *tokens[_FIELDNAME_MAX];
+ char buf[BUFSIZ];
+ char *retval = NULL;
+ const char *pagesz_str;
+ uint64_t pagesz;
+ FILE *fd = fopen(proc_mounts, "r");
+
+ if (fd == NULL)
+ return NULL;
+
+ if (default_size == 0)
+ default_size = default_huge_page_size();
+
+ if (hugepage_sz == 0)
+ hugepage_sz = default_size;
+
+ while (fgets(buf, sizeof(buf), fd)) {
+ if (strsplit(buf, sizeof(buf), tokens,
+ _FIELDNAME_MAX, split_tok) != _FIELDNAME_MAX) {
+ ODP_ERR("Error parsing %s\n", proc_mounts);
+ break; /* return NULL */
+ }
+
+ /* is this hugetlbfs? */
+ if (!strncmp(tokens[FSTYPE], hugetlbfs_str, htlbfs_str_len)) {
+ pagesz_str = strstr(tokens[OPTIONS], pagesize_opt);
+
+ /* No explicit size, default page size is compared */
+ if (pagesz_str == NULL) {
+ if (hugepage_sz == default_size) {
+ retval = strdup(tokens[MOUNTPT]);
+ break;
+ }
+ }
+ /* there is an explicit page size, so check it */
+ else {
+ pagesz =
+ str_to_size(&pagesz_str[pagesize_opt_len]);
+ if (pagesz == hugepage_sz) {
+ retval = strdup(tokens[MOUNTPT]);
+ break;
+ }
+ }
+ } /* end if strncmp hugetlbfs */
+ } /* end while fgets */
+
+ fclose(fd);
+ return retval;
+}
+
+/*
* Analysis of /sys/devices/system/cpu/ files
*/
static int systemcpu(system_info_t *sysinfo)
@@ -125,11 +286,21 @@ static int systemcpu(system_info_t *sysinfo)
return -1;
}
- sysinfo->default_huge_page_size = default_huge_page_size();
-
return 0;
}
+/*
+ * Huge page information
+ */
+static int system_hp(hugepage_info_t *hugeinfo)
+{
+ hugeinfo->default_huge_page_size = default_huge_page_size();
+
+ /* default_huge_page_dir may be NULL if no huge page support */
+ hugeinfo->default_huge_page_dir = get_hugepage_dir(0);
+
+ return 0;
+}
/*
* System info initialisation
@@ -157,6 +328,8 @@ int odp_system_info_init(void)
return -1;
}
+ system_hp(&odp_global_data.hugepage_info);
+
return 0;
}
@@ -165,6 +338,8 @@ int odp_system_info_init(void)
*/
int odp_system_info_term(void)
{
+ free(odp_global_data.hugepage_info.default_huge_page_dir);
+
return 0;
}
@@ -200,7 +375,7 @@ uint64_t odp_cpu_hz_max_id(int id)
uint64_t odp_sys_huge_page_size(void)
{
- return odp_global_data.system_info.default_huge_page_size;
+ return odp_global_data.hugepage_info.default_huge_page_size;
}
uint64_t odp_sys_page_size(void)
diff --git a/platform/linux-generic/odp_timer.c b/platform/linux-generic/odp_timer.c
index ee4c4c00d..53fec0855 100644
--- a/platform/linux-generic/odp_timer.c
+++ b/platform/linux-generic/odp_timer.c
@@ -29,6 +29,7 @@
#include <unistd.h>
#include <sys/syscall.h>
#include <inttypes.h>
+#include <string.h>
#include <odp/api/align.h>
#include <odp_align_internal.h>
@@ -75,7 +76,7 @@ static _odp_atomic_flag_t locks[NUM_LOCKS]; /* Multiple locks per cache line! */
static odp_timeout_hdr_t *timeout_hdr_from_buf(odp_buffer_t buf)
{
- return (odp_timeout_hdr_t *)(void *)odp_buf_to_hdr(buf);
+ return (odp_timeout_hdr_t *)(void *)buf_hdl_to_hdr(buf);
}
static odp_timeout_hdr_t *timeout_hdr(odp_timeout_t tmo)
@@ -222,7 +223,7 @@ static inline odp_timer_t tp_idx_to_handle(struct odp_timer_pool_s *tp,
static void itimer_init(odp_timer_pool *tp);
static void itimer_fini(odp_timer_pool *tp);
-static odp_timer_pool_t odp_timer_pool_new(const char *_name,
+static odp_timer_pool_t odp_timer_pool_new(const char *name,
const odp_timer_pool_param_t *param)
{
uint32_t tp_idx = odp_atomic_fetch_add_u32(&num_timer_pools, 1);
@@ -238,14 +239,20 @@ static odp_timer_pool_t odp_timer_pool_new(const char *_name,
ODP_CACHE_LINE_SIZE);
size_t sz2 = ODP_ALIGN_ROUNDUP(sizeof(odp_timer) * param->num_timers,
ODP_CACHE_LINE_SIZE);
- odp_shm_t shm = odp_shm_reserve(_name, sz0 + sz1 + sz2,
+ odp_shm_t shm = odp_shm_reserve(name, sz0 + sz1 + sz2,
ODP_CACHE_LINE_SIZE, ODP_SHM_SW_ONLY);
if (odp_unlikely(shm == ODP_SHM_INVALID))
ODP_ABORT("%s: timer pool shm-alloc(%zuKB) failed\n",
- _name, (sz0 + sz1 + sz2) / 1024);
+ name, (sz0 + sz1 + sz2) / 1024);
odp_timer_pool *tp = (odp_timer_pool *)odp_shm_addr(shm);
odp_atomic_init_u64(&tp->cur_tick, 0);
- snprintf(tp->name, sizeof(tp->name), "%s", _name);
+
+ if (name == NULL) {
+ tp->name[0] = 0;
+ } else {
+ strncpy(tp->name, name, ODP_TIMER_POOL_NAME_LEN - 1);
+ tp->name[ODP_TIMER_POOL_NAME_LEN - 1] = 0;
+ }
tp->shm = shm;
tp->param = *param;
tp->min_rel_tck = odp_timer_ns_to_tick(tp, param->min_tmo);
diff --git a/platform/linux-generic/odp_traffic_mngr.c b/platform/linux-generic/odp_traffic_mngr.c
index 62e5c6350..9dc3a8649 100644
--- a/platform/linux-generic/odp_traffic_mngr.c
+++ b/platform/linux-generic/odp_traffic_mngr.c
@@ -99,6 +99,24 @@ static odp_bool_t tm_demote_pkt_desc(tm_system_t *tm_system,
tm_shaper_obj_t *timer_shaper,
pkt_desc_t *demoted_pkt_desc);
+static int queue_tm_reenq(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr)
+{
+ odp_tm_queue_t tm_queue = MAKE_ODP_TM_QUEUE((uint8_t *)queue -
+ offsetof(tm_queue_obj_t,
+ tm_qentry));
+ odp_packet_t pkt = (odp_packet_t)buf_hdr->handle.handle;
+
+ return odp_tm_enq(tm_queue, pkt);
+}
+
+static int queue_tm_reenq_multi(queue_entry_t *queue ODP_UNUSED,
+ odp_buffer_hdr_t *buf[] ODP_UNUSED,
+ int num ODP_UNUSED)
+{
+ ODP_ABORT("Invalid call to queue_tm_reenq_multi()\n");
+ return 0;
+}
+
static tm_queue_obj_t *get_tm_queue_obj(tm_system_t *tm_system,
pkt_desc_t *pkt_desc)
{
@@ -1861,13 +1879,6 @@ static int tm_enqueue(tm_system_t *tm_system,
odp_bool_t drop_eligible, drop;
uint32_t frame_len, pkt_depth;
int rc;
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
-
- /* If we're from an ordered queue and not in order
- * record the event and wait until order is resolved
- */
- if (queue_tm_reorder(&tm_queue_obj->tm_qentry, &pkt_hdr->buf_hdr))
- return 0;
tm_group = GET_TM_GROUP(tm_system->odp_tm_group);
if (tm_group->first_enq == 0) {
@@ -1888,7 +1899,10 @@ static int tm_enqueue(tm_system_t *tm_system,
work_item.queue_num = tm_queue_obj->queue_num;
work_item.pkt = pkt;
+ sched_fn->order_lock();
rc = input_work_queue_append(tm_system, &work_item);
+ sched_fn->order_unlock();
+
if (rc < 0) {
ODP_DBG("%s work queue full\n", __func__);
return rc;
diff --git a/platform/linux-generic/pktio/dpdk.c b/platform/linux-generic/pktio/dpdk.c
index 11f3509d6..0eb025aeb 100644
--- a/platform/linux-generic/pktio/dpdk.c
+++ b/platform/linux-generic/pktio/dpdk.c
@@ -956,10 +956,12 @@ static int dpdk_send(pktio_entry_t *pktio_entry, int index,
rte_pktmbuf_free(tx_mbufs[i]);
}
- odp_packet_free_multi(pkt_table, tx_pkts);
-
- if (odp_unlikely(tx_pkts == 0 && __odp_errno != 0))
- return -1;
+ if (odp_unlikely(tx_pkts == 0)) {
+ if (__odp_errno != 0)
+ return -1;
+ } else {
+ odp_packet_free_multi(pkt_table, tx_pkts);
+ }
return tx_pkts;
}
diff --git a/platform/linux-generic/pktio/ipc.c b/platform/linux-generic/pktio/ipc.c
index c1f28db79..377f20e65 100644
--- a/platform/linux-generic/pktio/ipc.c
+++ b/platform/linux-generic/pktio/ipc.c
@@ -3,150 +3,85 @@
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-
#include <odp_packet_io_ipc_internal.h>
#include <odp_debug_internal.h>
#include <odp_packet_io_internal.h>
#include <odp/api/system_info.h>
#include <odp_shm_internal.h>
+#include <_ishm_internal.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>
+#define IPC_ODP_DEBUG_PRINT 0
+
+#define IPC_ODP_DBG(fmt, ...) \
+ do { \
+ if (IPC_ODP_DEBUG_PRINT == 1) \
+ ODP_DBG(fmt, ##__VA_ARGS__);\
+ } while (0)
+
/* MAC address for the "ipc" interface */
static const char pktio_ipc_mac[] = {0x12, 0x12, 0x12, 0x12, 0x12, 0x12};
-static void *_ipc_map_remote_pool(const char *name, size_t size);
+static odp_shm_t _ipc_map_remote_pool(const char *name, int pid);
static const char *_ipc_odp_buffer_pool_shm_name(odp_pool_t pool_hdl)
{
- pool_entry_t *pool;
- uint32_t pool_id;
+ pool_t *pool;
odp_shm_t shm;
odp_shm_info_t info;
- pool_id = pool_handle_to_index(pool_hdl);
- pool = get_pool_entry(pool_id);
- shm = pool->s.pool_shm;
+ pool = pool_entry_from_hdl(pool_hdl);
+ shm = pool->shm;
odp_shm_info(shm, &info);
return info.name;
}
-/**
-* Look up for shared memory object.
-*
-* @param name name of shm object
-*
-* @return 0 on success, otherwise non-zero
-*/
-static int _ipc_shm_lookup(const char *name)
-{
- int shm;
- char shm_devname[SHM_DEVNAME_MAXLEN];
-
- if (!odp_global_data.ipc_ns)
- ODP_ABORT("ipc_ns not set\n");
-
- snprintf(shm_devname, SHM_DEVNAME_MAXLEN,
- SHM_DEVNAME_FORMAT,
- odp_global_data.ipc_ns, name);
-
- shm = shm_open(shm_devname, O_RDWR, S_IRUSR | S_IWUSR);
- if (shm == -1) {
- if (errno == ENOENT) {
- ODP_DBG("no file %s\n", shm_devname);
- return -1;
- }
- ODP_ABORT("shm_open for %s err %s\n",
- shm_devname, strerror(errno));
- }
- close(shm);
- return 0;
-}
-
-static int _ipc_map_pktio_info(pktio_entry_t *pktio_entry,
- const char *dev,
- int *slave)
-{
- struct pktio_info *pinfo;
- char name[ODP_POOL_NAME_LEN + sizeof("_info")];
- uint32_t flags;
- odp_shm_t shm;
-
- /* Create info about remote pktio */
- snprintf(name, sizeof(name), "%s_info", dev);
-
- flags = ODP_SHM_PROC | _ODP_SHM_O_EXCL;
-
- shm = odp_shm_reserve(name, sizeof(struct pktio_info),
- ODP_CACHE_LINE_SIZE,
- flags);
- if (ODP_SHM_INVALID != shm) {
- pinfo = odp_shm_addr(shm);
- pinfo->master.pool_name[0] = 0;
- *slave = 0;
- } else {
- flags = _ODP_SHM_PROC_NOCREAT | _ODP_SHM_O_EXCL;
- shm = odp_shm_reserve(name, sizeof(struct pktio_info),
- ODP_CACHE_LINE_SIZE,
- flags);
- if (ODP_SHM_INVALID == shm)
- ODP_ABORT("can not connect to shm\n");
-
- pinfo = odp_shm_addr(shm);
- *slave = 1;
- }
-
- pktio_entry->s.ipc.pinfo = pinfo;
- pktio_entry->s.ipc.pinfo_shm = shm;
-
- return 0;
-}
-
static int _ipc_master_start(pktio_entry_t *pktio_entry)
{
struct pktio_info *pinfo = pktio_entry->s.ipc.pinfo;
- int ret;
- void *ipc_pool_base;
+ odp_shm_t shm;
- if (pinfo->slave.mdata_offset == 0)
+ if (pinfo->slave.init_done == 0)
return -1;
- ret = _ipc_shm_lookup(pinfo->slave.pool_name);
- if (ret) {
- ODP_DBG("no pool file %s\n", pinfo->slave.pool_name);
+ shm = _ipc_map_remote_pool(pinfo->slave.pool_name,
+ pinfo->slave.pid);
+ if (shm == ODP_SHM_INVALID) {
+ ODP_DBG("no pool file %s for pid %d\n",
+ pinfo->slave.pool_name, pinfo->slave.pid);
return -1;
}
- ipc_pool_base = _ipc_map_remote_pool(pinfo->slave.pool_name,
- pinfo->master.shm_pkt_pool_size);
- pktio_entry->s.ipc.pool_mdata_base = (char *)ipc_pool_base +
- pinfo->slave.mdata_offset;
+ pktio_entry->s.ipc.remote_pool_shm = shm;
+ pktio_entry->s.ipc.pool_base = odp_shm_addr(shm);
+ pktio_entry->s.ipc.pool_mdata_base = (char *)odp_shm_addr(shm) +
+ pinfo->slave.base_addr_offset;
odp_atomic_store_u32(&pktio_entry->s.ipc.ready, 1);
- ODP_DBG("%s started.\n", pktio_entry->s.name);
+ IPC_ODP_DBG("%s started.\n", pktio_entry->s.name);
return 0;
}
static int _ipc_init_master(pktio_entry_t *pktio_entry,
const char *dev,
- odp_pool_t pool)
+ odp_pool_t pool_hdl)
{
char ipc_shm_name[ODP_POOL_NAME_LEN + sizeof("_m_prod")];
- pool_entry_t *pool_entry;
- uint32_t pool_id;
+ pool_t *pool;
struct pktio_info *pinfo;
const char *pool_name;
- pool_id = pool_handle_to_index(pool);
- pool_entry = get_pool_entry(pool_id);
+ pool = pool_entry_from_hdl(pool_hdl);
+ (void)pool;
if (strlen(dev) > (ODP_POOL_NAME_LEN - sizeof("_m_prod"))) {
- ODP_DBG("too big ipc name\n");
+ ODP_ERR("too big ipc name\n");
return -1;
}
@@ -158,7 +93,7 @@ static int _ipc_init_master(pktio_entry_t *pktio_entry,
PKTIO_IPC_ENTRIES,
_RING_SHM_PROC | _RING_NO_LIST);
if (!pktio_entry->s.ipc.tx.send) {
- ODP_DBG("pid %d unable to create ipc ring %s name\n",
+ ODP_ERR("pid %d unable to create ipc ring %s name\n",
getpid(), ipc_shm_name);
return -1;
}
@@ -174,7 +109,7 @@ static int _ipc_init_master(pktio_entry_t *pktio_entry,
PKTIO_IPC_ENTRIES,
_RING_SHM_PROC | _RING_NO_LIST);
if (!pktio_entry->s.ipc.tx.free) {
- ODP_DBG("pid %d unable to create ipc ring %s name\n",
+ ODP_ERR("pid %d unable to create ipc ring %s name\n",
getpid(), ipc_shm_name);
goto free_m_prod;
}
@@ -187,7 +122,7 @@ static int _ipc_init_master(pktio_entry_t *pktio_entry,
PKTIO_IPC_ENTRIES,
_RING_SHM_PROC | _RING_NO_LIST);
if (!pktio_entry->s.ipc.rx.recv) {
- ODP_DBG("pid %d unable to create ipc ring %s name\n",
+ ODP_ERR("pid %d unable to create ipc ring %s name\n",
getpid(), ipc_shm_name);
goto free_m_cons;
}
@@ -200,7 +135,7 @@ static int _ipc_init_master(pktio_entry_t *pktio_entry,
PKTIO_IPC_ENTRIES,
_RING_SHM_PROC | _RING_NO_LIST);
if (!pktio_entry->s.ipc.rx.free) {
- ODP_DBG("pid %d unable to create ipc ring %s name\n",
+ ODP_ERR("pid %d unable to create ipc ring %s name\n",
getpid(), ipc_shm_name);
goto free_s_prod;
}
@@ -210,24 +145,23 @@ static int _ipc_init_master(pktio_entry_t *pktio_entry,
/* Set up pool name for remote info */
pinfo = pktio_entry->s.ipc.pinfo;
- pool_name = _ipc_odp_buffer_pool_shm_name(pool);
+ pool_name = _ipc_odp_buffer_pool_shm_name(pool_hdl);
if (strlen(pool_name) > ODP_POOL_NAME_LEN) {
- ODP_DBG("pid %d ipc pool name %s is too big %d\n",
+ ODP_ERR("pid %d ipc pool name %s is too big %d\n",
getpid(), pool_name, strlen(pool_name));
goto free_s_prod;
}
memcpy(pinfo->master.pool_name, pool_name, strlen(pool_name));
- pinfo->master.shm_pkt_pool_size = pool_entry->s.pool_size;
- pinfo->master.shm_pool_bufs_num = pool_entry->s.buf_num;
- pinfo->master.shm_pkt_size = pool_entry->s.seg_size;
- pinfo->master.mdata_offset = pool_entry->s.pool_mdata_addr -
- pool_entry->s.pool_base_addr;
- pinfo->slave.mdata_offset = 0;
+ pinfo->slave.base_addr_offset = 0;
+ pinfo->slave.base_addr = 0;
+ pinfo->slave.pid = 0;
+ pinfo->slave.init_done = 0;
- pktio_entry->s.ipc.pool = pool;
+ pktio_entry->s.ipc.pool = pool_hdl;
ODP_DBG("Pre init... DONE.\n");
+ pinfo->master.init_done = 1;
_ipc_master_start(pktio_entry);
@@ -246,55 +180,42 @@ free_m_prod:
}
static void _ipc_export_pool(struct pktio_info *pinfo,
- odp_pool_t pool)
+ odp_pool_t pool_hdl)
{
- pool_entry_t *pool_entry;
-
- pool_entry = odp_pool_to_entry(pool);
- if (pool_entry->s.blk_size != pinfo->master.shm_pkt_size)
- ODP_ABORT("pktio for same name should have the same pool size\n");
- if (pool_entry->s.buf_num != (unsigned)pinfo->master.shm_pool_bufs_num)
- ODP_ABORT("pktio for same name should have the same pool size\n");
+ pool_t *pool = pool_entry_from_hdl(pool_hdl);
snprintf(pinfo->slave.pool_name, ODP_POOL_NAME_LEN, "%s",
- pool_entry->s.name);
- pinfo->slave.mdata_offset = pool_entry->s.pool_mdata_addr -
- pool_entry->s.pool_base_addr;
+ _ipc_odp_buffer_pool_shm_name(pool_hdl));
+ pinfo->slave.pid = odp_global_data.main_pid;
+ pinfo->slave.block_size = pool->block_size;
+ pinfo->slave.base_addr = pool->base_addr;
}
-static void *_ipc_map_remote_pool(const char *name, size_t size)
+static odp_shm_t _ipc_map_remote_pool(const char *name, int pid)
{
odp_shm_t shm;
- void *addr;
-
- ODP_DBG("Mapping remote pool %s, size %ld\n", name, size);
- shm = odp_shm_reserve(name,
- size,
- ODP_CACHE_LINE_SIZE,
- _ODP_SHM_PROC_NOCREAT);
- if (shm == ODP_SHM_INVALID)
- ODP_ABORT("unable map %s\n", name);
-
- addr = odp_shm_addr(shm);
- ODP_DBG("MAP master: %p - %p size %ld, pool %s\n",
- addr, (char *)addr + size, size, name);
- return addr;
+ char rname[ODP_SHM_NAME_LEN];
+
+ snprintf(rname, ODP_SHM_NAME_LEN, "remote-%s", name);
+ shm = odp_shm_import(name, pid, rname);
+ if (shm == ODP_SHM_INVALID) {
+ ODP_ERR("unable map %s\n", name);
+ return ODP_SHM_INVALID;
+ }
+
+ IPC_ODP_DBG("Mapped remote pool %s to local %s\n", name, rname);
+ return shm;
}
-static void *_ipc_shm_map(char *name, size_t size)
+static void *_ipc_shm_map(char *name, int pid)
{
odp_shm_t shm;
- int ret;
- ret = _ipc_shm_lookup(name);
- if (ret == -1)
+ shm = odp_shm_import(name, pid, name);
+ if (ODP_SHM_INVALID == shm) {
+ ODP_ERR("unable to map: %s\n", name);
return NULL;
-
- shm = odp_shm_reserve(name, size,
- ODP_CACHE_LINE_SIZE,
- _ODP_SHM_PROC_NOCREAT);
- if (ODP_SHM_INVALID == shm)
- ODP_ABORT("unable to map: %s\n", name);
+ }
return odp_shm_addr(shm);
}
@@ -313,15 +234,21 @@ static int _ipc_init_slave(const char *dev,
static int _ipc_slave_start(pktio_entry_t *pktio_entry)
{
char ipc_shm_name[ODP_POOL_NAME_LEN + sizeof("_slave_r")];
- size_t ring_size = PKTIO_IPC_ENTRIES * sizeof(void *) +
- sizeof(_ring_t);
struct pktio_info *pinfo;
- void *ipc_pool_base;
odp_shm_t shm;
- const char *dev = pktio_entry->s.name;
+ char tail[ODP_POOL_NAME_LEN];
+ char dev[ODP_POOL_NAME_LEN];
+ int pid;
+
+ if (sscanf(pktio_entry->s.name, "ipc:%d:%s", &pid, tail) != 2) {
+ ODP_ERR("wrong pktio name\n");
+ return -1;
+ }
+
+ sprintf(dev, "ipc:%s", tail);
snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_m_prod", dev);
- pktio_entry->s.ipc.rx.recv = _ipc_shm_map(ipc_shm_name, ring_size);
+ pktio_entry->s.ipc.rx.recv = _ipc_shm_map(ipc_shm_name, pid);
if (!pktio_entry->s.ipc.rx.recv) {
ODP_DBG("pid %d unable to find ipc ring %s name\n",
getpid(), dev);
@@ -333,9 +260,9 @@ static int _ipc_slave_start(pktio_entry_t *pktio_entry)
_ring_free_count(pktio_entry->s.ipc.rx.recv));
snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_m_cons", dev);
- pktio_entry->s.ipc.rx.free = _ipc_shm_map(ipc_shm_name, ring_size);
+ pktio_entry->s.ipc.rx.free = _ipc_shm_map(ipc_shm_name, pid);
if (!pktio_entry->s.ipc.rx.free) {
- ODP_DBG("pid %d unable to find ipc ring %s name\n",
+ ODP_ERR("pid %d unable to find ipc ring %s name\n",
getpid(), dev);
goto free_m_prod;
}
@@ -344,9 +271,9 @@ static int _ipc_slave_start(pktio_entry_t *pktio_entry)
_ring_free_count(pktio_entry->s.ipc.rx.free));
snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_s_prod", dev);
- pktio_entry->s.ipc.tx.send = _ipc_shm_map(ipc_shm_name, ring_size);
+ pktio_entry->s.ipc.tx.send = _ipc_shm_map(ipc_shm_name, pid);
if (!pktio_entry->s.ipc.tx.send) {
- ODP_DBG("pid %d unable to find ipc ring %s name\n",
+ ODP_ERR("pid %d unable to find ipc ring %s name\n",
getpid(), dev);
goto free_m_cons;
}
@@ -355,9 +282,9 @@ static int _ipc_slave_start(pktio_entry_t *pktio_entry)
_ring_free_count(pktio_entry->s.ipc.tx.send));
snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_s_cons", dev);
- pktio_entry->s.ipc.tx.free = _ipc_shm_map(ipc_shm_name, ring_size);
+ pktio_entry->s.ipc.tx.free = _ipc_shm_map(ipc_shm_name, pid);
if (!pktio_entry->s.ipc.tx.free) {
- ODP_DBG("pid %d unable to find ipc ring %s name\n",
+ ODP_ERR("pid %d unable to find ipc ring %s name\n",
getpid(), dev);
goto free_s_prod;
}
@@ -367,15 +294,17 @@ static int _ipc_slave_start(pktio_entry_t *pktio_entry)
/* Get info about remote pool */
pinfo = pktio_entry->s.ipc.pinfo;
- ipc_pool_base = _ipc_map_remote_pool(pinfo->master.pool_name,
- pinfo->master.shm_pkt_pool_size);
- pktio_entry->s.ipc.pool_mdata_base = (char *)ipc_pool_base +
- pinfo->master.mdata_offset;
- pktio_entry->s.ipc.pkt_size = pinfo->master.shm_pkt_size;
+ shm = _ipc_map_remote_pool(pinfo->master.pool_name,
+ pid);
+ pktio_entry->s.ipc.remote_pool_shm = shm;
+ pktio_entry->s.ipc.pool_mdata_base = (char *)odp_shm_addr(shm) +
+ pinfo->master.base_addr_offset;
+ pktio_entry->s.ipc.pkt_size = pinfo->master.block_size;
_ipc_export_pool(pinfo, pktio_entry->s.ipc.pool);
odp_atomic_store_u32(&pktio_entry->s.ipc.ready, 1);
+ pinfo->slave.init_done = 1;
ODP_DBG("%s started.\n", pktio_entry->s.name);
return 0;
@@ -401,7 +330,11 @@ static int ipc_pktio_open(odp_pktio_t id ODP_UNUSED,
odp_pool_t pool)
{
int ret = -1;
- int slave;
+ int pid ODP_UNUSED;
+ struct pktio_info *pinfo;
+ char name[ODP_POOL_NAME_LEN + sizeof("_info")];
+ char tail[ODP_POOL_NAME_LEN];
+ odp_shm_t shm;
ODP_STATIC_ASSERT(ODP_POOL_NAME_LEN == _RING_NAMESIZE,
"mismatch pool and ring name arrays");
@@ -411,65 +344,59 @@ static int ipc_pktio_open(odp_pktio_t id ODP_UNUSED,
odp_atomic_init_u32(&pktio_entry->s.ipc.ready, 0);
- _ipc_map_pktio_info(pktio_entry, dev, &slave);
- pktio_entry->s.ipc.type = (slave == 0) ? PKTIO_TYPE_IPC_MASTER :
- PKTIO_TYPE_IPC_SLAVE;
+ /* Shared info about remote pktio */
+ if (sscanf(dev, "ipc:%d:%s", &pid, tail) == 2) {
+ pktio_entry->s.ipc.type = PKTIO_TYPE_IPC_SLAVE;
+
+ snprintf(name, sizeof(name), "ipc:%s_info", tail);
+ IPC_ODP_DBG("lookup for name %s for pid %d\n", name, pid);
+ shm = odp_shm_import(name, pid, name);
+ if (ODP_SHM_INVALID == shm)
+ return -1;
+ pinfo = odp_shm_addr(shm);
+
+ if (!pinfo->master.init_done) {
+ odp_shm_free(shm);
+ return -1;
+ }
+ pktio_entry->s.ipc.pinfo = pinfo;
+ pktio_entry->s.ipc.pinfo_shm = shm;
+ ODP_DBG("process %d is slave\n", getpid());
+ ret = _ipc_init_slave(name, pktio_entry, pool);
+ } else {
+ pktio_entry->s.ipc.type = PKTIO_TYPE_IPC_MASTER;
+ snprintf(name, sizeof(name), "%s_info", dev);
+ shm = odp_shm_reserve(name, sizeof(struct pktio_info),
+ ODP_CACHE_LINE_SIZE,
+ _ODP_ISHM_EXPORT | _ODP_ISHM_LOCK);
+ if (ODP_SHM_INVALID == shm) {
+ ODP_ERR("can not create shm %s\n", name);
+ return -1;
+ }
- if (pktio_entry->s.ipc.type == PKTIO_TYPE_IPC_MASTER) {
+ pinfo = odp_shm_addr(shm);
+ pinfo->master.init_done = 0;
+ pinfo->master.pool_name[0] = 0;
+ pktio_entry->s.ipc.pinfo = pinfo;
+ pktio_entry->s.ipc.pinfo_shm = shm;
ODP_DBG("process %d is master\n", getpid());
ret = _ipc_init_master(pktio_entry, dev, pool);
- } else {
- ODP_DBG("process %d is slave\n", getpid());
- ret = _ipc_init_slave(dev, pktio_entry, pool);
}
return ret;
}
-static inline void *_ipc_buffer_map(odp_buffer_hdr_t *buf,
- uint32_t offset,
- uint32_t *seglen,
- uint32_t limit)
+static void _ipc_free_ring_packets(pktio_entry_t *pktio_entry, _ring_t *r)
{
- int seg_index = offset / buf->segsize;
- int seg_offset = offset % buf->segsize;
-#ifdef _ODP_PKTIO_IPC
- void *addr = (char *)buf - buf->ipc_addr_offset[seg_index];
-#else
- /** buf_hdr.ipc_addr_offset defined only when ipc is
- * enabled. */
- void *addr = NULL;
-
- (void)seg_index;
-#endif
- if (seglen) {
- uint32_t buf_left = limit - offset;
- *seglen = seg_offset + buf_left <= buf->segsize ?
- buf_left : buf->segsize - seg_offset;
- }
-
- return (void *)(seg_offset + (uint8_t *)addr);
-}
-
-static inline void *_ipc_packet_map(odp_packet_hdr_t *pkt_hdr,
- uint32_t offset, uint32_t *seglen)
-{
- if (offset > pkt_hdr->frame_len)
- return NULL;
-
- return _ipc_buffer_map(&pkt_hdr->buf_hdr,
- pkt_hdr->headroom + offset, seglen,
- pkt_hdr->headroom + pkt_hdr->frame_len);
-}
-
-static void _ipc_free_ring_packets(_ring_t *r)
-{
- odp_packet_t r_p_pkts[PKTIO_IPC_ENTRIES];
+ uintptr_t offsets[PKTIO_IPC_ENTRIES];
int ret;
void **rbuf_p;
int i;
- rbuf_p = (void *)&r_p_pkts;
+ if (!r)
+ return;
+
+ rbuf_p = (void *)&offsets;
while (1) {
ret = _ring_mc_dequeue_burst(r, rbuf_p,
@@ -477,8 +404,13 @@ static void _ipc_free_ring_packets(_ring_t *r)
if (0 == ret)
break;
for (i = 0; i < ret; i++) {
- if (r_p_pkts[i] != ODP_PACKET_INVALID)
- odp_packet_free(r_p_pkts[i]);
+ odp_packet_hdr_t *phdr;
+ odp_packet_t pkt;
+ void *mbase = pktio_entry->s.ipc.pool_mdata_base;
+
+ phdr = (void *)((uint8_t *)mbase + offsets[i]);
+ pkt = (odp_packet_t)phdr->buf_hdr.handle.handle;
+ odp_packet_free(pkt);
}
}
}
@@ -490,22 +422,23 @@ static int ipc_pktio_recv_lockless(pktio_entry_t *pktio_entry,
int i;
_ring_t *r;
_ring_t *r_p;
+ uintptr_t offsets[PKTIO_IPC_ENTRIES];
+ void **ipcbufs_p = (void *)&offsets;
+ uint32_t ready;
+ int pkts_ring;
- odp_packet_t remote_pkts[PKTIO_IPC_ENTRIES];
- void **ipcbufs_p = (void *)&remote_pkts;
- uint32_t ready = odp_atomic_load_u32(&pktio_entry->s.ipc.ready);
-
+ ready = odp_atomic_load_u32(&pktio_entry->s.ipc.ready);
if (odp_unlikely(!ready)) {
- ODP_DBG("start pktio is missing before usage?\n");
- return -1;
+ IPC_ODP_DBG("start pktio is missing before usage?\n");
+ return 0;
}
- _ipc_free_ring_packets(pktio_entry->s.ipc.tx.free);
+ _ipc_free_ring_packets(pktio_entry, pktio_entry->s.ipc.tx.free);
r = pktio_entry->s.ipc.rx.recv;
pkts = _ring_mc_dequeue_burst(r, ipcbufs_p, len);
if (odp_unlikely(pkts < 0))
- ODP_ABORT("error to dequeue no packets\n");
+ ODP_ABORT("internal error dequeue\n");
/* fast path */
if (odp_likely(0 == pkts))
@@ -514,36 +447,21 @@ static int ipc_pktio_recv_lockless(pktio_entry_t *pktio_entry,
for (i = 0; i < pkts; i++) {
odp_pool_t pool;
odp_packet_t pkt;
- odp_packet_hdr_t phdr;
- void *ptr;
- odp_buffer_bits_t handle;
- int idx; /* Remote packet has coded pool and index.
- * We need only index.*/
+ odp_packet_hdr_t *phdr;
void *pkt_data;
- void *remote_pkt_data;
+ uint64_t data_pool_off;
+ void *rmt_data_ptr;
- if (remote_pkts[i] == ODP_PACKET_INVALID)
- continue;
+ phdr = (void *)((uint8_t *)pktio_entry->s.ipc.pool_mdata_base +
+ offsets[i]);
- handle.handle = _odp_packet_to_buffer(remote_pkts[i]);
- idx = handle.index;
-
- /* Link to packed data. To this line we have Zero-Copy between
- * processes, to simplify use packet copy in that version which
- * can be removed later with more advance buffer management
- * (ref counters).
- */
- /* reverse odp_buf_to_hdr() */
- ptr = (char *)pktio_entry->s.ipc.pool_mdata_base +
- (idx * ODP_CACHE_LINE_SIZE);
- memcpy(&phdr, ptr, sizeof(odp_packet_hdr_t));
-
- /* Allocate new packet. Select*/
pool = pktio_entry->s.ipc.pool;
if (odp_unlikely(pool == ODP_POOL_INVALID))
ODP_ABORT("invalid pool");
- pkt = odp_packet_alloc(pool, phdr.frame_len);
+ data_pool_off = phdr->buf_hdr.ipc_data_offset;
+
+ pkt = odp_packet_alloc(pool, phdr->frame_len);
if (odp_unlikely(pkt == ODP_PACKET_INVALID)) {
/* Original pool might be smaller then
* PKTIO_IPC_ENTRIES. If packet can not be
@@ -562,30 +480,40 @@ static int ipc_pktio_recv_lockless(pktio_entry_t *pktio_entry,
(PKTIO_TYPE_IPC_SLAVE ==
pktio_entry->s.ipc.type));
- remote_pkt_data = _ipc_packet_map(ptr, 0, NULL);
- if (odp_unlikely(!remote_pkt_data))
- ODP_ABORT("unable to map remote_pkt_data, ipc_slave %d\n",
- (PKTIO_TYPE_IPC_SLAVE ==
- pktio_entry->s.ipc.type));
-
/* Copy packet data from shared pool to local pool. */
- memcpy(pkt_data, remote_pkt_data, phdr.frame_len);
+ rmt_data_ptr = (uint8_t *)pktio_entry->s.ipc.pool_mdata_base +
+ data_pool_off;
+ memcpy(pkt_data, rmt_data_ptr, phdr->frame_len);
/* Copy packets L2, L3 parsed offsets and size */
- copy_packet_cls_metadata(&phdr, odp_packet_hdr(pkt));
+ copy_packet_cls_metadata(phdr, odp_packet_hdr(pkt));
+
+ odp_packet_hdr(pkt)->frame_len = phdr->frame_len;
+ odp_packet_hdr(pkt)->headroom = phdr->headroom;
+ odp_packet_hdr(pkt)->tailroom = phdr->tailroom;
+
+ /* Take classification fields */
+ odp_packet_hdr(pkt)->p = phdr->p;
- odp_packet_hdr(pkt)->frame_len = phdr.frame_len;
- odp_packet_hdr(pkt)->headroom = phdr.headroom;
- odp_packet_hdr(pkt)->tailroom = phdr.tailroom;
- odp_packet_hdr(pkt)->input = pktio_entry->s.handle;
pkt_table[i] = pkt;
}
/* Now tell other process that we no longer need that buffers.*/
r_p = pktio_entry->s.ipc.rx.free;
- pkts = _ring_mp_enqueue_burst(r_p, ipcbufs_p, i);
+
+repeat:
+ pkts_ring = _ring_mp_enqueue_burst(r_p, ipcbufs_p, pkts);
if (odp_unlikely(pkts < 0))
ODP_ABORT("ipc: odp_ring_mp_enqueue_bulk r_p fail\n");
+ if (odp_unlikely(pkts != pkts_ring)) {
+ IPC_ODP_DBG("odp_ring_full: %d, odp_ring_count %d,"
+ " _ring_free_count %d\n",
+ _ring_full(r_p), _ring_count(r_p),
+ _ring_free_count(r_p));
+ ipcbufs_p = (void *)&offsets[pkts_ring - 1];
+ pkts = pkts - pkts_ring;
+ goto repeat;
+ }
return pkts;
}
@@ -614,26 +542,23 @@ static int ipc_pktio_send_lockless(pktio_entry_t *pktio_entry,
uint32_t ready = odp_atomic_load_u32(&pktio_entry->s.ipc.ready);
odp_packet_t pkt_table_mapped[len]; /**< Ready to send packet has to be
* in memory mapped pool. */
+ uintptr_t offsets[len];
if (odp_unlikely(!ready))
return 0;
- _ipc_free_ring_packets(pktio_entry->s.ipc.tx.free);
+ _ipc_free_ring_packets(pktio_entry, pktio_entry->s.ipc.tx.free);
- /* Prepare packets: calculate offset from address. */
+ /* Copy packets to shm shared pool if they are in different */
for (i = 0; i < len; i++) {
- int j;
odp_packet_t pkt = pkt_table[i];
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
+ pool_t *ipc_pool = pool_entry_from_hdl(pktio_entry->s.ipc.pool);
odp_buffer_bits_t handle;
- uint32_t cur_mapped_pool_id =
- pool_handle_to_index(pktio_entry->s.ipc.pool);
- uint32_t pool_id;
+ uint32_t pkt_pool_id;
- /* do copy if packet was allocated from not mapped pool */
handle.handle = _odp_packet_to_buffer(pkt);
- pool_id = handle.pool_id;
- if (pool_id != cur_mapped_pool_id) {
+ pkt_pool_id = handle.pool_id;
+ if (pkt_pool_id != ipc_pool->pool_idx) {
odp_packet_t newpkt;
newpkt = odp_packet_copy(pkt, pktio_entry->s.ipc.pool);
@@ -645,24 +570,32 @@ static int ipc_pktio_send_lockless(pktio_entry_t *pktio_entry,
} else {
pkt_table_mapped[i] = pkt;
}
+ }
- /* buf_hdr.addr can not be used directly in remote process,
- * convert it to offset
- */
- for (j = 0; j < ODP_BUFFER_MAX_SEG; j++) {
-#ifdef _ODP_PKTIO_IPC
- pkt_hdr->buf_hdr.ipc_addr_offset[j] = (char *)pkt_hdr -
- (char *)pkt_hdr->buf_hdr.addr[j];
-#else
- /** buf_hdr.ipc_addr_offset defined only when ipc is
- * enabled. */
- (void)pkt_hdr;
-#endif
- }
+ /* Set offset to phdr for outgoing packets */
+ for (i = 0; i < len; i++) {
+ uint64_t data_pool_off;
+ odp_packet_t pkt = pkt_table_mapped[i];
+ odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
+ odp_pool_t pool_hdl = odp_packet_pool(pkt);
+ pool_t *pool = pool_entry_from_hdl(pool_hdl);
+
+ offsets[i] = (uint8_t *)pkt_hdr -
+ (uint8_t *)odp_shm_addr(pool->shm);
+ data_pool_off = (uint8_t *)pkt_hdr->buf_hdr.seg[0].data -
+ (uint8_t *)odp_shm_addr(pool->shm);
+
+ /* compile all function code even if ipc disabled with config */
+ pkt_hdr->buf_hdr.ipc_data_offset = data_pool_off;
+ IPC_ODP_DBG("%d/%d send packet %llx, pool %llx,"
+ "phdr = %p, offset %x\n",
+ i, len,
+ odp_packet_to_u64(pkt), odp_pool_to_u64(pool_hdl),
+ pkt_hdr, pkt_hdr->buf_hdr.ipc_data_offset);
}
/* Put packets to ring to be processed by other process. */
- rbuf_p = (void *)&pkt_table_mapped[0];
+ rbuf_p = (void *)&offsets[0];
r = pktio_entry->s.ipc.tx.send;
ret = _ring_mp_enqueue_burst(r, rbuf_p, len);
if (odp_unlikely(ret < 0)) {
@@ -673,6 +606,7 @@ static int ipc_pktio_send_lockless(pktio_entry_t *pktio_entry,
ODP_ERR("odp_ring_full: %d, odp_ring_count %d, _ring_free_count %d\n",
_ring_full(r), _ring_count(r),
_ring_free_count(r));
+ ODP_ABORT("Unexpected!\n");
}
return ret;
@@ -722,22 +656,25 @@ static int ipc_start(pktio_entry_t *pktio_entry)
static int ipc_stop(pktio_entry_t *pktio_entry)
{
- unsigned tx_send, tx_free;
+ unsigned tx_send = 0, tx_free = 0;
odp_atomic_store_u32(&pktio_entry->s.ipc.ready, 0);
- _ipc_free_ring_packets(pktio_entry->s.ipc.tx.send);
+ if (pktio_entry->s.ipc.tx.send)
+ _ipc_free_ring_packets(pktio_entry, pktio_entry->s.ipc.tx.send);
/* other process can transfer packets from one ring to
* other, use delay here to free that packets. */
sleep(1);
- _ipc_free_ring_packets(pktio_entry->s.ipc.tx.free);
+ if (pktio_entry->s.ipc.tx.free)
+ _ipc_free_ring_packets(pktio_entry, pktio_entry->s.ipc.tx.free);
- tx_send = _ring_count(pktio_entry->s.ipc.tx.send);
- tx_free = _ring_count(pktio_entry->s.ipc.tx.free);
+ if (pktio_entry->s.ipc.tx.send)
+ tx_send = _ring_count(pktio_entry->s.ipc.tx.send);
+ if (pktio_entry->s.ipc.tx.free)
+ tx_free = _ring_count(pktio_entry->s.ipc.tx.free);
if (tx_send | tx_free) {
ODP_DBG("IPC rings: tx send %d tx free %d\n",
- _ring_free_count(pktio_entry->s.ipc.tx.send),
- _ring_free_count(pktio_entry->s.ipc.tx.free));
+ tx_send, tx_free);
}
return 0;
@@ -747,23 +684,31 @@ static int ipc_close(pktio_entry_t *pktio_entry)
{
char ipc_shm_name[ODP_POOL_NAME_LEN + sizeof("_m_prod")];
char *dev = pktio_entry->s.name;
+ char name[ODP_POOL_NAME_LEN];
+ char tail[ODP_POOL_NAME_LEN];
+ int pid = 0;
ipc_stop(pktio_entry);
- if (pktio_entry->s.ipc.type == PKTIO_TYPE_IPC_MASTER) {
- /* unlink this pktio info for both master and slave */
- odp_shm_free(pktio_entry->s.ipc.pinfo_shm);
-
- /* destroy rings */
- snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_s_cons", dev);
- _ring_destroy(ipc_shm_name);
- snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_s_prod", dev);
- _ring_destroy(ipc_shm_name);
- snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_m_cons", dev);
- _ring_destroy(ipc_shm_name);
- snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_m_prod", dev);
- _ring_destroy(ipc_shm_name);
- }
+ odp_shm_free(pktio_entry->s.ipc.remote_pool_shm);
+
+ if (sscanf(dev, "ipc:%d:%s", &pid, tail) == 2)
+ snprintf(name, sizeof(name), "ipc:%s", tail);
+ else
+ snprintf(name, sizeof(name), "%s", dev);
+
+ /* unlink this pktio info for both master and slave */
+ odp_shm_free(pktio_entry->s.ipc.pinfo_shm);
+
+ /* destroy rings */
+ snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_s_cons", name);
+ _ring_destroy(ipc_shm_name);
+ snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_s_prod", name);
+ _ring_destroy(ipc_shm_name);
+ snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_m_cons", name);
+ _ring_destroy(ipc_shm_name);
+ snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_m_prod", name);
+ _ring_destroy(ipc_shm_name);
return 0;
}
diff --git a/platform/linux-generic/pktio/loop.c b/platform/linux-generic/pktio/loop.c
index 21d75422a..70962839f 100644
--- a/platform/linux-generic/pktio/loop.c
+++ b/platform/linux-generic/pktio/loop.c
@@ -162,14 +162,14 @@ static int loopback_send(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
len = QUEUE_MULTI_MAX;
for (i = 0; i < len; ++i) {
- hdr_tbl[i] = odp_buf_to_hdr(_odp_packet_to_buffer(pkt_tbl[i]));
+ hdr_tbl[i] = buf_hdl_to_hdr(_odp_packet_to_buffer(pkt_tbl[i]));
bytes += odp_packet_len(pkt_tbl[i]);
}
odp_ticketlock_lock(&pktio_entry->s.txl);
qentry = queue_to_qentry(pktio_entry->s.pkt_loop.loopq);
- ret = queue_enq_multi(qentry, hdr_tbl, len, 0);
+ ret = queue_enq_multi(qentry, hdr_tbl, len);
if (ret > 0) {
pktio_entry->s.stats.out_ucast_pkts += ret;
diff --git a/platform/linux-generic/pktio/netmap.c b/platform/linux-generic/pktio/netmap.c
index 412beec01..8eb81459e 100644
--- a/platform/linux-generic/pktio/netmap.c
+++ b/platform/linux-generic/pktio/netmap.c
@@ -345,9 +345,7 @@ static int netmap_open(odp_pktio_t id ODP_UNUSED, pktio_entry_t *pktio_entry,
pkt_nm->pool = pool;
/* max frame len taking into account the l2-offset */
- pkt_nm->max_frame_len = ODP_CONFIG_PACKET_BUF_LEN_MAX -
- odp_buffer_pool_headroom(pool) -
- odp_buffer_pool_tailroom(pool);
+ pkt_nm->max_frame_len = CONFIG_PACKET_MAX_SEG_LEN;
/* allow interface to be opened with or without the 'netmap:' prefix */
prefix = "netmap:";
@@ -830,10 +828,12 @@ static int netmap_send(pktio_entry_t *pktio_entry, int index,
if (!pkt_nm->lockless_tx)
odp_ticketlock_unlock(&pkt_nm->tx_desc_ring[index].s.lock);
- odp_packet_free_multi(pkt_table, nb_tx);
-
- if (odp_unlikely(nb_tx == 0 && __odp_errno != 0))
- return -1;
+ if (odp_unlikely(nb_tx == 0)) {
+ if (__odp_errno != 0)
+ return -1;
+ } else {
+ odp_packet_free_multi(pkt_table, nb_tx);
+ }
return nb_tx;
}
diff --git a/platform/linux-generic/pktio/ring.c b/platform/linux-generic/pktio/ring.c
index cc84e8af3..aeda04b26 100644
--- a/platform/linux-generic/pktio/ring.c
+++ b/platform/linux-generic/pktio/ring.c
@@ -160,7 +160,7 @@ _ring_create(const char *name, unsigned count, unsigned flags)
odp_shm_t shm;
if (flags & _RING_SHM_PROC)
- shm_flag = ODP_SHM_PROC;
+ shm_flag = ODP_SHM_PROC | ODP_SHM_EXPORT;
else
shm_flag = 0;
diff --git a/platform/linux-generic/pktio/socket.c b/platform/linux-generic/pktio/socket.c
index e01b0a529..7d2396866 100644
--- a/platform/linux-generic/pktio/socket.c
+++ b/platform/linux-generic/pktio/socket.c
@@ -46,6 +46,9 @@
#include <protocols/eth.h>
#include <protocols/ip.h>
+#define MAX_SEGS CONFIG_PACKET_MAX_SEGS
+#define PACKET_JUMBO_LEN (9 * 1024)
+
static int disable_pktio; /** !0 this pktio disabled, 0 enabled */
static int sock_stats_reset(pktio_entry_t *pktio_entry);
@@ -583,20 +586,18 @@ static int sock_mmsg_open(odp_pktio_t id ODP_UNUSED,
}
static uint32_t _rx_pkt_to_iovec(odp_packet_t pkt,
- struct iovec iovecs[ODP_BUFFER_MAX_SEG])
+ struct iovec iovecs[MAX_SEGS])
{
odp_packet_seg_t seg = odp_packet_first_seg(pkt);
uint32_t seg_count = odp_packet_num_segs(pkt);
uint32_t seg_id = 0;
uint32_t iov_count = 0;
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
uint8_t *ptr;
uint32_t seglen;
for (seg_id = 0; seg_id < seg_count; ++seg_id) {
- ptr = segment_map(&pkt_hdr->buf_hdr, (odp_buffer_seg_t)seg,
- &seglen, pkt_hdr->frame_len,
- pkt_hdr->headroom);
+ ptr = odp_packet_seg_data(pkt, seg);
+ seglen = odp_packet_seg_data_len(pkt, seg);
if (ptr) {
iovecs[iov_count].iov_base = ptr;
@@ -673,6 +674,7 @@ static int sock_mmsg_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
if (cls_classify_packet(pktio_entry, base, pkt_len,
pkt_len, &pool, &parsed_hdr))
continue;
+
num = packet_alloc_multi(pool, pkt_len, &pkt, 1);
if (num != 1)
continue;
@@ -692,13 +694,14 @@ static int sock_mmsg_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
}
} else {
struct iovec iovecs[ODP_PACKET_SOCKET_MAX_BURST_RX]
- [ODP_BUFFER_MAX_SEG];
+ [MAX_SEGS];
for (i = 0; i < (int)len; i++) {
int num;
num = packet_alloc_multi(pkt_sock->pool, pkt_sock->mtu,
&pkt_table[i], 1);
+
if (odp_unlikely(num != 1)) {
pkt_table[i] = ODP_PACKET_INVALID;
break;
@@ -723,23 +726,34 @@ static int sock_mmsg_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
void *base = msgvec[i].msg_hdr.msg_iov->iov_base;
struct ethhdr *eth_hdr = base;
odp_packet_hdr_t *pkt_hdr;
+ odp_packet_t pkt;
+ int ret;
+
+ pkt = pkt_table[i];
/* Don't receive packets sent by ourselves */
if (odp_unlikely(ethaddrs_equal(pkt_sock->if_mac,
eth_hdr->h_source))) {
- odp_packet_free(pkt_table[i]);
+ odp_packet_free(pkt);
continue;
}
- pkt_hdr = odp_packet_hdr(pkt_table[i]);
+
/* Parse and set packet header data */
- odp_packet_pull_tail(pkt_table[i],
- odp_packet_len(pkt_table[i]) -
- msgvec[i].msg_len);
+ ret = odp_packet_trunc_tail(&pkt, odp_packet_len(pkt) -
+ msgvec[i].msg_len,
+ NULL, NULL);
+ if (ret < 0) {
+ ODP_ERR("trunk_tail failed");
+ odp_packet_free(pkt);
+ continue;
+ }
+
+ pkt_hdr = odp_packet_hdr(pkt);
packet_parse_l2(&pkt_hdr->p, pkt_hdr->frame_len);
packet_set_ts(pkt_hdr, ts);
pkt_hdr->input = pktio_entry->s.handle;
- pkt_table[nb_rx] = pkt_table[i];
+ pkt_table[nb_rx] = pkt;
nb_rx++;
}
@@ -754,7 +768,7 @@ static int sock_mmsg_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
}
static uint32_t _tx_pkt_to_iovec(odp_packet_t pkt,
- struct iovec iovecs[ODP_BUFFER_MAX_SEG])
+ struct iovec iovecs[MAX_SEGS])
{
uint32_t pkt_len = odp_packet_len(pkt);
uint32_t offset = odp_packet_l2_offset(pkt);
@@ -780,7 +794,7 @@ static int sock_mmsg_send(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
{
pkt_sock_t *pkt_sock = &pktio_entry->s.pkt_sock;
struct mmsghdr msgvec[ODP_PACKET_SOCKET_MAX_BURST_TX];
- struct iovec iovecs[ODP_PACKET_SOCKET_MAX_BURST_TX][ODP_BUFFER_MAX_SEG];
+ struct iovec iovecs[ODP_PACKET_SOCKET_MAX_BURST_TX][MAX_SEGS];
int ret;
int sockfd;
int n, i;
diff --git a/platform/linux-generic/pktio/socket_mmap.c b/platform/linux-generic/pktio/socket_mmap.c
index 96556685c..666aae6af 100644
--- a/platform/linux-generic/pktio/socket_mmap.c
+++ b/platform/linux-generic/pktio/socket_mmap.c
@@ -346,17 +346,15 @@ static inline unsigned pkt_mmap_v2_tx(int sock, struct ring *ring,
static void mmap_fill_ring(struct ring *ring, odp_pool_t pool_hdl, int fanout)
{
int pz = getpagesize();
- uint32_t pool_id;
- pool_entry_t *pool_entry;
+ pool_t *pool;
if (pool_hdl == ODP_POOL_INVALID)
ODP_ABORT("Invalid pool handle\n");
- pool_id = pool_handle_to_index(pool_hdl);
- pool_entry = get_pool_entry(pool_id);
+ pool = pool_entry_from_hdl(pool_hdl);
/* Frame has to capture full packet which can fit to the pool block.*/
- ring->req.tp_frame_size = (pool_entry->s.blk_size +
+ ring->req.tp_frame_size = (pool->data_size +
TPACKET_HDRLEN + TPACKET_ALIGNMENT +
+ (pz - 1)) & (-pz);
@@ -364,7 +362,7 @@ static void mmap_fill_ring(struct ring *ring, odp_pool_t pool_hdl, int fanout)
* and align size to page boundary.
*/
ring->req.tp_block_size = (ring->req.tp_frame_size *
- pool_entry->s.buf_num + (pz - 1)) & (-pz);
+ pool->num + (pz - 1)) & (-pz);
if (!fanout) {
/* Single socket is in use. Use 1 block with buf_num frames. */
diff --git a/scripts/build-pktio-dpdk b/scripts/build-pktio-dpdk
index 280f5185d..36727dd7b 100755
--- a/scripts/build-pktio-dpdk
+++ b/scripts/build-pktio-dpdk
@@ -10,9 +10,9 @@ if [ "$?" != "0" ]; then
echo "Error: pcap is not installed. You may need to install libpcap-dev"
fi
-git clone http://dpdk.org/git/dpdk dpdk
+git -c advice.detachedHead=false clone -q --depth=1 --single-branch --branch=v16.07 http://dpdk.org/git/dpdk dpdk
pushd dpdk
-git checkout -b bv16.07 v16.07
+git log --oneline --decorate
#Make and edit DPDK configuration
make config T=${TARGET} O=${TARGET}
diff --git a/test/common_plat/performance/odp_crypto.c b/test/common_plat/performance/odp_crypto.c
index 49a9f4b6f..993628830 100644
--- a/test/common_plat/performance/odp_crypto.c
+++ b/test/common_plat/performance/odp_crypto.c
@@ -23,15 +23,10 @@
fprintf(stderr, "%s:%d:%s(): Error: " fmt, __FILE__, \
__LINE__, __func__, ##__VA_ARGS__)
-/** @def SHM_PKT_POOL_SIZE
- * @brief Size of the shared memory block
+/** @def POOL_NUM_PKT
+ * Number of packets in the pool
*/
-#define SHM_PKT_POOL_SIZE (512 * 2048 * 2)
-
-/** @def SHM_PKT_POOL_BUF_SIZE
- * @brief Buffer size of the packet pool buffer
- */
-#define SHM_PKT_POOL_BUF_SIZE (1024 * 32)
+#define POOL_NUM_PKT 64
static uint8_t test_iv[8] = "01234567";
@@ -54,7 +49,7 @@ static uint8_t test_key24[24] = { 0x01, 0x02, 0x03, 0x04, 0x05,
*/
typedef struct {
const char *name; /**< Algorithm name */
- odp_crypto_session_params_t session; /**< Prefilled crypto session params */
+ odp_crypto_session_param_t session; /**< Prefilled crypto session params */
unsigned int hash_adjust; /**< Size of hash */
} crypto_alg_config_t;
@@ -165,9 +160,7 @@ static void parse_args(int argc, char *argv[], crypto_args_t *cargs);
static void usage(char *progname);
/**
- * Set of predefined payloads. Make sure that maximum payload
- * size is not bigger than SHM_PKT_POOL_BUF_SIZE. May relax when
- * implementation start support segmented buffers/packets.
+ * Set of predefined payloads.
*/
static unsigned int payloads[] = {
16,
@@ -178,6 +171,9 @@ static unsigned int payloads[] = {
16384
};
+/** Number of payloads used in the test */
+static unsigned num_payloads;
+
/**
* Set of known algorithms to test
*/
@@ -424,12 +420,13 @@ create_session_from_config(odp_crypto_session_t *session,
crypto_alg_config_t *config,
crypto_args_t *cargs)
{
- odp_crypto_session_params_t params;
+ odp_crypto_session_param_t params;
odp_crypto_ses_create_err_t ses_create_rc;
odp_pool_t pkt_pool;
odp_queue_t out_queue;
- memcpy(&params, &config->session, sizeof(odp_crypto_session_params_t));
+ odp_crypto_session_param_init(&params);
+ memcpy(&params, &config->session, sizeof(odp_crypto_session_param_t));
params.op = ODP_CRYPTO_OP_ENCODE;
params.pref_mode = ODP_CRYPTO_SYNC;
@@ -472,7 +469,7 @@ run_measure_one(crypto_args_t *cargs,
unsigned int payload_length,
crypto_run_result_t *result)
{
- odp_crypto_op_params_t params;
+ odp_crypto_op_param_t params;
odp_pool_t pkt_pool;
odp_queue_t out_queue;
@@ -680,12 +677,10 @@ run_measure_one_config(crypto_args_t *cargs,
config, &result);
}
} else {
- unsigned int i;
+ unsigned i;
print_result_header();
- for (i = 0;
- i < (sizeof(payloads) / sizeof(unsigned int));
- i++) {
+ for (i = 0; i < num_payloads; i++) {
rc = run_measure_one(cargs, config, &session,
payloads[i], &result);
if (rc)
@@ -728,6 +723,9 @@ int main(int argc, char *argv[])
int num_workers = 1;
odph_odpthread_t thr[num_workers];
odp_instance_t instance;
+ odp_pool_capability_t capa;
+ uint32_t max_seg_len;
+ unsigned i;
memset(&cargs, 0, sizeof(cargs));
@@ -743,11 +741,25 @@ int main(int argc, char *argv[])
/* Init this thread */
odp_init_local(instance, ODP_THREAD_WORKER);
+ if (odp_pool_capability(&capa)) {
+ app_err("Pool capability request failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ max_seg_len = capa.pkt.max_seg_len;
+
+ for (i = 0; i < sizeof(payloads) / sizeof(unsigned int); i++) {
+ if (payloads[i] > max_seg_len)
+ break;
+ }
+
+ num_payloads = i;
+
/* Create packet pool */
odp_pool_param_init(&params);
- params.pkt.seg_len = SHM_PKT_POOL_BUF_SIZE;
- params.pkt.len = SHM_PKT_POOL_BUF_SIZE;
- params.pkt.num = SHM_PKT_POOL_SIZE / SHM_PKT_POOL_BUF_SIZE;
+ params.pkt.seg_len = max_seg_len;
+ params.pkt.len = max_seg_len;
+ params.pkt.num = POOL_NUM_PKT;
params.type = ODP_POOL_PACKET;
pool = odp_pool_create("packet_pool", &params);
diff --git a/test/common_plat/performance/odp_l2fwd.c b/test/common_plat/performance/odp_l2fwd.c
index 651ed1019..82c3a251f 100644
--- a/test/common_plat/performance/odp_l2fwd.c
+++ b/test/common_plat/performance/odp_l2fwd.c
@@ -20,6 +20,7 @@
#include <unistd.h>
#include <errno.h>
#include <inttypes.h>
+#include <assert.h>
#include <test_debug.h>
@@ -353,6 +354,7 @@ static int run_worker_sched_mode(void *arg)
/* packets from the same queue are from the same interface */
src_idx = odp_packet_input_index(pkt_tbl[0]);
+ assert(src_idx >= 0);
dst_idx = gbl_args->dst_port_from_idx[src_idx];
fill_eth_addrs(pkt_tbl, pkts, dst_idx);
@@ -1507,6 +1509,11 @@ int main(int argc, char *argv[])
exit(EXIT_FAILURE);
}
+ if (odp_shm_free(shm)) {
+ LOG_ERR("Error: shm free\n");
+ exit(EXIT_FAILURE);
+ }
+
if (odp_term_local()) {
LOG_ERR("Error: term local\n");
exit(EXIT_FAILURE);
diff --git a/test/common_plat/performance/odp_pktio_perf.c b/test/common_plat/performance/odp_pktio_perf.c
index 6db02e0fb..92d979d37 100644
--- a/test/common_plat/performance/odp_pktio_perf.c
+++ b/test/common_plat/performance/odp_pktio_perf.c
@@ -36,7 +36,7 @@
#define TEST_SKIP 77
-#define PKT_BUF_NUM 8192
+#define PKT_BUF_NUM (32 * 1024)
#define MAX_NUM_IFACES 2
#define TEST_HDR_MAGIC 0x92749451
#define MAX_WORKERS 32
@@ -891,6 +891,14 @@ static int test_term(void)
LOG_ERR("Failed to free test_globals\n");
ret = -1;
}
+ if (odp_shm_free(odp_shm_lookup("test_globals.rx_stats")) != 0) {
+ LOG_ERR("Failed to free test_globals.rx_stats\n");
+ ret = -1;
+ }
+ if (odp_shm_free(odp_shm_lookup("test_globals.tx_stats")) != 0) {
+ LOG_ERR("Failed to free test_globals.tx_stats\n");
+ ret = -1;
+ }
return ret;
}
diff --git a/test/common_plat/performance/odp_scheduling.c b/test/common_plat/performance/odp_scheduling.c
index 9407636c8..e2a49d34d 100644
--- a/test/common_plat/performance/odp_scheduling.c
+++ b/test/common_plat/performance/odp_scheduling.c
@@ -28,7 +28,7 @@
/* GNU lib C */
#include <getopt.h>
-#define MSG_POOL_SIZE (4 * 1024 * 1024) /**< Message pool size */
+#define NUM_MSG (512 * 1024) /**< Number of msg in pool */
#define MAX_ALLOCS 32 /**< Alloc burst size */
#define QUEUES_PER_PRIO 64 /**< Queue per priority */
#define NUM_PRIOS 2 /**< Number of tested priorities */
@@ -868,7 +868,7 @@ int main(int argc, char *argv[])
odp_pool_param_init(&params);
params.buf.size = sizeof(test_message_t);
params.buf.align = 0;
- params.buf.num = MSG_POOL_SIZE / sizeof(test_message_t);
+ params.buf.num = NUM_MSG;
params.type = ODP_POOL_BUFFER;
pool = odp_pool_create("msg_pool", &params);
@@ -880,8 +880,6 @@ int main(int argc, char *argv[])
globals->pool = pool;
- /* odp_pool_print(pool); */
-
/*
* Create a queue for plain queue test
*/
@@ -940,6 +938,8 @@ int main(int argc, char *argv[])
odp_shm_print_all();
+ odp_pool_print(pool);
+
/* Barrier to sync test case execution */
odp_barrier_init(&globals->barrier, num_workers);
diff --git a/test/common_plat/validation/api/atomic/atomic.c b/test/common_plat/validation/api/atomic/atomic.c
index c4e934525..db9484bc2 100644
--- a/test/common_plat/validation/api/atomic/atomic.c
+++ b/test/common_plat/validation/api/atomic/atomic.c
@@ -583,6 +583,29 @@ int atomic_init(odp_instance_t *inst)
return ret;
}
+int atomic_term(odp_instance_t inst)
+{
+ odp_shm_t shm;
+
+ shm = odp_shm_lookup(GLOBAL_SHM_NAME);
+ if (0 != odp_shm_free(shm)) {
+ fprintf(stderr, "error: odp_shm_free() failed.\n");
+ return -1;
+ }
+
+ if (0 != odp_term_local()) {
+ fprintf(stderr, "error: odp_term_local() failed.\n");
+ return -1;
+ }
+
+ if (0 != odp_term_global(inst)) {
+ fprintf(stderr, "error: odp_term_global() failed.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
/* Atomic tests */
static int test_atomic_inc_dec_thread(void *arg UNUSED)
{
@@ -875,6 +898,7 @@ int atomic_main(int argc, char *argv[])
return -1;
odp_cunit_register_global_init(atomic_init);
+ odp_cunit_register_global_term(atomic_term);
ret = odp_cunit_register(atomic_suites);
diff --git a/test/common_plat/validation/api/atomic/atomic.h b/test/common_plat/validation/api/atomic/atomic.h
index 4ea837b7a..66796c8e3 100644
--- a/test/common_plat/validation/api/atomic/atomic.h
+++ b/test/common_plat/validation/api/atomic/atomic.h
@@ -31,6 +31,7 @@ extern odp_suiteinfo_t atomic_suites[];
/* executable init/term functions: */
int atomic_init(odp_instance_t *inst);
+int atomic_term(odp_instance_t inst);
/* main test program: */
int atomic_main(int argc, char *argv[]);
diff --git a/test/common_plat/validation/api/barrier/barrier.c b/test/common_plat/validation/api/barrier/barrier.c
index d4583884a..79ee82b3b 100644
--- a/test/common_plat/validation/api/barrier/barrier.c
+++ b/test/common_plat/validation/api/barrier/barrier.c
@@ -372,6 +372,29 @@ int barrier_init(odp_instance_t *inst)
return ret;
}
+int barrier_term(odp_instance_t inst)
+{
+ odp_shm_t shm;
+
+ shm = odp_shm_lookup(GLOBAL_SHM_NAME);
+ if (0 != odp_shm_free(shm)) {
+ fprintf(stderr, "error: odp_shm_free() failed.\n");
+ return -1;
+ }
+
+ if (0 != odp_term_local()) {
+ fprintf(stderr, "error: odp_term_local() failed.\n");
+ return -1;
+ }
+
+ if (0 != odp_term_global(inst)) {
+ fprintf(stderr, "error: odp_term_global() failed.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
odp_suiteinfo_t barrier_suites[] = {
{"barrier", NULL, NULL,
barrier_suite_barrier},
@@ -387,6 +410,7 @@ int barrier_main(int argc, char *argv[])
return -1;
odp_cunit_register_global_init(barrier_init);
+ odp_cunit_register_global_term(barrier_term);
ret = odp_cunit_register(barrier_suites);
diff --git a/test/common_plat/validation/api/barrier/barrier.h b/test/common_plat/validation/api/barrier/barrier.h
index e4890e0f4..188bcb8fa 100644
--- a/test/common_plat/validation/api/barrier/barrier.h
+++ b/test/common_plat/validation/api/barrier/barrier.h
@@ -22,6 +22,7 @@ extern odp_suiteinfo_t barrier_suites[];
/* executable init/term functions: */
int barrier_init(odp_instance_t *inst);
+int barrier_term(odp_instance_t inst);
/* main test program: */
int barrier_main(int argc, char *argv[]);
diff --git a/test/common_plat/validation/api/buffer/buffer.c b/test/common_plat/validation/api/buffer/buffer.c
index d26d5e82e..7c723d4f4 100644
--- a/test/common_plat/validation/api/buffer/buffer.c
+++ b/test/common_plat/validation/api/buffer/buffer.c
@@ -8,20 +8,21 @@
#include "odp_cunit_common.h"
#include "buffer.h"
+#define BUF_ALIGN ODP_CACHE_LINE_SIZE
+#define BUF_SIZE 1500
+
static odp_pool_t raw_pool;
static odp_buffer_t raw_buffer = ODP_BUFFER_INVALID;
-static const size_t raw_buffer_size = 1500;
int buffer_suite_init(void)
{
- odp_pool_param_t params = {
- .buf = {
- .size = raw_buffer_size,
- .align = ODP_CACHE_LINE_SIZE,
- .num = 100,
- },
- .type = ODP_POOL_BUFFER,
- };
+ odp_pool_param_t params;
+
+ odp_pool_param_init(&params);
+ params.type = ODP_POOL_BUFFER;
+ params.buf.size = BUF_SIZE;
+ params.buf.align = BUF_ALIGN;
+ params.buf.num = 100;
raw_pool = odp_pool_create("raw_pool", &params);
if (raw_pool == ODP_POOL_INVALID)
@@ -44,25 +45,25 @@ void buffer_test_pool_alloc(void)
{
odp_pool_t pool;
const int num = 3;
- const size_t size = 1500;
odp_buffer_t buffer[num];
odp_event_t ev;
int index;
- char wrong_type = 0, wrong_size = 0;
- odp_pool_param_t params = {
- .buf = {
- .size = size,
- .align = ODP_CACHE_LINE_SIZE,
- .num = num,
- },
- .type = ODP_POOL_BUFFER,
- };
+ char wrong_type = 0, wrong_size = 0, wrong_align = 0;
+ odp_pool_param_t params;
+
+ odp_pool_param_init(&params);
+ params.type = ODP_POOL_BUFFER;
+ params.buf.size = BUF_SIZE;
+ params.buf.align = BUF_ALIGN;
+ params.buf.num = num;
pool = odp_pool_create("buffer_pool_alloc", &params);
odp_pool_print(pool);
/* Try to allocate num items from the pool */
for (index = 0; index < num; index++) {
+ uintptr_t addr;
+
buffer[index] = odp_buffer_alloc(pool);
if (buffer[index] == ODP_BUFFER_INVALID)
@@ -71,9 +72,15 @@ void buffer_test_pool_alloc(void)
ev = odp_buffer_to_event(buffer[index]);
if (odp_event_type(ev) != ODP_EVENT_BUFFER)
wrong_type = 1;
- if (odp_buffer_size(buffer[index]) < size)
+ if (odp_buffer_size(buffer[index]) < BUF_SIZE)
wrong_size = 1;
- if (wrong_type || wrong_size)
+
+ addr = (uintptr_t)odp_buffer_addr(buffer[index]);
+
+ if ((addr % BUF_ALIGN) != 0)
+ wrong_align = 1;
+
+ if (wrong_type || wrong_size || wrong_align)
odp_buffer_print(buffer[index]);
}
@@ -85,6 +92,7 @@ void buffer_test_pool_alloc(void)
/* Check that the pool had correct buffers */
CU_ASSERT(wrong_type == 0);
CU_ASSERT(wrong_size == 0);
+ CU_ASSERT(wrong_align == 0);
for (; index >= 0; index--)
odp_buffer_free(buffer[index]);
@@ -112,19 +120,17 @@ void buffer_test_pool_alloc_multi(void)
{
odp_pool_t pool;
const int num = 3;
- const size_t size = 1500;
odp_buffer_t buffer[num + 1];
odp_event_t ev;
int index;
- char wrong_type = 0, wrong_size = 0;
- odp_pool_param_t params = {
- .buf = {
- .size = size,
- .align = ODP_CACHE_LINE_SIZE,
- .num = num,
- },
- .type = ODP_POOL_BUFFER,
- };
+ char wrong_type = 0, wrong_size = 0, wrong_align = 0;
+ odp_pool_param_t params;
+
+ odp_pool_param_init(&params);
+ params.type = ODP_POOL_BUFFER;
+ params.buf.size = BUF_SIZE;
+ params.buf.align = BUF_ALIGN;
+ params.buf.num = num;
pool = odp_pool_create("buffer_pool_alloc_multi", &params);
odp_pool_print(pool);
@@ -133,15 +139,23 @@ void buffer_test_pool_alloc_multi(void)
CU_ASSERT_FATAL(buffer_alloc_multi(pool, buffer, num + 1) == num);
for (index = 0; index < num; index++) {
+ uintptr_t addr;
+
if (buffer[index] == ODP_BUFFER_INVALID)
break;
ev = odp_buffer_to_event(buffer[index]);
if (odp_event_type(ev) != ODP_EVENT_BUFFER)
wrong_type = 1;
- if (odp_buffer_size(buffer[index]) < size)
+ if (odp_buffer_size(buffer[index]) < BUF_SIZE)
wrong_size = 1;
- if (wrong_type || wrong_size)
+
+ addr = (uintptr_t)odp_buffer_addr(buffer[index]);
+
+ if ((addr % BUF_ALIGN) != 0)
+ wrong_align = 1;
+
+ if (wrong_type || wrong_size || wrong_align)
odp_buffer_print(buffer[index]);
}
@@ -151,6 +165,7 @@ void buffer_test_pool_alloc_multi(void)
/* Check that the pool had correct buffers */
CU_ASSERT(wrong_type == 0);
CU_ASSERT(wrong_size == 0);
+ CU_ASSERT(wrong_align == 0);
odp_buffer_free_multi(buffer, num);
@@ -161,14 +176,13 @@ void buffer_test_pool_free(void)
{
odp_pool_t pool;
odp_buffer_t buffer;
- odp_pool_param_t params = {
- .buf = {
- .size = 64,
- .align = ODP_CACHE_LINE_SIZE,
- .num = 1,
- },
- .type = ODP_POOL_BUFFER,
- };
+ odp_pool_param_t params;
+
+ odp_pool_param_init(&params);
+ params.type = ODP_POOL_BUFFER;
+ params.buf.size = 64;
+ params.buf.align = BUF_ALIGN;
+ params.buf.num = 1;
pool = odp_pool_create("buffer_pool_free", &params);
@@ -194,14 +208,13 @@ void buffer_test_pool_free_multi(void)
odp_pool_t pool[2];
odp_buffer_t buffer[4];
odp_buffer_t buf_inval[2];
- odp_pool_param_t params = {
- .buf = {
- .size = 64,
- .align = ODP_CACHE_LINE_SIZE,
- .num = 2,
- },
- .type = ODP_POOL_BUFFER,
- };
+ odp_pool_param_t params;
+
+ odp_pool_param_init(&params);
+ params.type = ODP_POOL_BUFFER;
+ params.buf.size = 64;
+ params.buf.align = BUF_ALIGN;
+ params.buf.num = 2;
pool[0] = odp_pool_create("buffer_pool_free_multi_0", &params);
pool[1] = odp_pool_create("buffer_pool_free_multi_1", &params);
@@ -235,7 +248,7 @@ void buffer_test_management_basic(void)
CU_ASSERT(odp_buffer_is_valid(raw_buffer) == 1);
CU_ASSERT(odp_buffer_pool(raw_buffer) != ODP_POOL_INVALID);
CU_ASSERT(odp_event_type(ev) == ODP_EVENT_BUFFER);
- CU_ASSERT(odp_buffer_size(raw_buffer) >= raw_buffer_size);
+ CU_ASSERT(odp_buffer_size(raw_buffer) >= BUF_SIZE);
CU_ASSERT(odp_buffer_addr(raw_buffer) != NULL);
odp_buffer_print(raw_buffer);
CU_ASSERT(odp_buffer_to_u64(raw_buffer) !=
diff --git a/test/common_plat/validation/api/classification/odp_classification_basic.c b/test/common_plat/validation/api/classification/odp_classification_basic.c
index 372377d85..9817287e9 100644
--- a/test/common_plat/validation/api/classification/odp_classification_basic.c
+++ b/test/common_plat/validation/api/classification/odp_classification_basic.c
@@ -16,7 +16,6 @@ void classification_test_create_cos(void)
odp_cls_cos_param_t cls_param;
odp_pool_t pool;
odp_queue_t queue;
- char cosname[ODP_COS_NAME_LEN];
pool = pool_create("cls_basic_pool");
CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
@@ -24,13 +23,12 @@ void classification_test_create_cos(void)
queue = queue_create("cls_basic_queue", true);
CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
- sprintf(cosname, "ClassOfService");
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool;
cls_param.queue = queue;
cls_param.drop_policy = ODP_COS_DROP_POOL;
- cos = odp_cls_cos_create(cosname, &cls_param);
+ cos = odp_cls_cos_create(NULL, &cls_param);
CU_ASSERT(odp_cos_to_u64(cos) != odp_cos_to_u64(ODP_COS_INVALID));
odp_cos_destroy(cos);
odp_pool_destroy(pool);
diff --git a/test/common_plat/validation/api/crypto/crypto.c b/test/common_plat/validation/api/crypto/crypto.c
index 8946cde62..208901682 100644
--- a/test/common_plat/validation/api/crypto/crypto.c
+++ b/test/common_plat/validation/api/crypto/crypto.c
@@ -9,11 +9,8 @@
#include "odp_crypto_test_inp.h"
#include "crypto.h"
-#define SHM_PKT_POOL_SIZE (512 * 2048 * 2)
-#define SHM_PKT_POOL_BUF_SIZE (1024 * 32)
-
-#define SHM_COMPL_POOL_SIZE (128 * 1024)
-#define SHM_COMPL_POOL_BUF_SIZE 128
+#define PKT_POOL_NUM 64
+#define PKT_POOL_LEN (1 * 1024)
odp_suiteinfo_t crypto_suites[] = {
{ODP_CRYPTO_SYNC_INP, crypto_suite_sync_init, NULL, crypto_suite},
@@ -43,14 +40,21 @@ int crypto_init(odp_instance_t *inst)
return -1;
}
- memset(&params, 0, sizeof(params));
- params.pkt.seg_len = SHM_PKT_POOL_BUF_SIZE;
- params.pkt.len = SHM_PKT_POOL_BUF_SIZE;
- params.pkt.num = SHM_PKT_POOL_SIZE / SHM_PKT_POOL_BUF_SIZE;
+ odp_pool_param_init(&params);
+ params.pkt.seg_len = PKT_POOL_LEN;
+ params.pkt.len = PKT_POOL_LEN;
+ params.pkt.num = PKT_POOL_NUM;
params.type = ODP_POOL_PACKET;
- if (SHM_PKT_POOL_BUF_SIZE > pool_capa.pkt.max_len)
- params.pkt.len = pool_capa.pkt.max_len;
+ if (PKT_POOL_LEN > pool_capa.pkt.max_seg_len) {
+ fprintf(stderr, "Warning: small packet segment length\n");
+ params.pkt.seg_len = pool_capa.pkt.max_seg_len;
+ }
+
+ if (PKT_POOL_LEN > pool_capa.pkt.max_len) {
+ fprintf(stderr, "Pool max packet length too small\n");
+ return -1;
+ }
pool = odp_pool_create("packet_pool", &params);
diff --git a/test/common_plat/validation/api/crypto/odp_crypto_test_inp.c b/test/common_plat/validation/api/crypto/odp_crypto_test_inp.c
index 4ac4a0700..de9d6e459 100644
--- a/test/common_plat/validation/api/crypto/odp_crypto_test_inp.c
+++ b/test/common_plat/validation/api/crypto/odp_crypto_test_inp.c
@@ -11,6 +11,8 @@
#include "odp_crypto_test_inp.h"
#include "crypto.h"
+#define MAX_ALG_CAPA 32
+
struct suite_context_s {
odp_crypto_op_mode_t pref_mode;
odp_pool_t pool;
@@ -42,8 +44,7 @@ static void alg_test(odp_crypto_op_t op,
const uint8_t *ciphertext,
unsigned int ciphertext_len,
const uint8_t *digest,
- unsigned int digest_len
- )
+ uint32_t digest_len)
{
odp_crypto_session_t session;
odp_crypto_capability_t capability;
@@ -53,10 +54,14 @@ static void alg_test(odp_crypto_op_t op,
odp_event_t event;
odp_crypto_compl_t compl_event;
odp_crypto_op_result_t result;
- odp_crypto_session_params_t ses_params;
- odp_crypto_op_params_t op_params;
+ odp_crypto_session_param_t ses_params;
+ odp_crypto_op_param_t op_params;
uint8_t *data_addr;
int data_off;
+ odp_crypto_cipher_capability_t cipher_capa[MAX_ALG_CAPA];
+ odp_crypto_auth_capability_t auth_capa[MAX_ALG_CAPA];
+ int num, i;
+ int found;
rc = odp_crypto_capability(&capability);
CU_ASSERT(!rc);
@@ -65,36 +70,36 @@ static void alg_test(odp_crypto_op_t op,
if (cipher_alg == ODP_CIPHER_ALG_3DES_CBC &&
!(capability.hw_ciphers.bit.trides_cbc))
rc = -1;
- if (cipher_alg == ODP_CIPHER_ALG_AES128_CBC &&
- !(capability.hw_ciphers.bit.aes128_cbc))
+ if (cipher_alg == ODP_CIPHER_ALG_AES_CBC &&
+ !(capability.hw_ciphers.bit.aes_cbc))
rc = -1;
- if (cipher_alg == ODP_CIPHER_ALG_AES128_GCM &&
- !(capability.hw_ciphers.bit.aes128_gcm))
+ if (cipher_alg == ODP_CIPHER_ALG_AES_GCM &&
+ !(capability.hw_ciphers.bit.aes_gcm))
rc = -1;
} else {
if (cipher_alg == ODP_CIPHER_ALG_3DES_CBC &&
!(capability.ciphers.bit.trides_cbc))
rc = -1;
- if (cipher_alg == ODP_CIPHER_ALG_AES128_CBC &&
- !(capability.ciphers.bit.aes128_cbc))
+ if (cipher_alg == ODP_CIPHER_ALG_AES_CBC &&
+ !(capability.ciphers.bit.aes_cbc))
rc = -1;
- if (cipher_alg == ODP_CIPHER_ALG_AES128_GCM &&
- !(capability.ciphers.bit.aes128_gcm))
+ if (cipher_alg == ODP_CIPHER_ALG_AES_GCM &&
+ !(capability.ciphers.bit.aes_gcm))
rc = -1;
}
CU_ASSERT(!rc);
if (capability.hw_auths.all_bits) {
- if (auth_alg == ODP_AUTH_ALG_AES128_GCM &&
- !(capability.hw_auths.bit.aes128_gcm))
+ if (auth_alg == ODP_AUTH_ALG_AES_GCM &&
+ !(capability.hw_auths.bit.aes_gcm))
rc = -1;
if (auth_alg == ODP_AUTH_ALG_NULL &&
!(capability.hw_auths.bit.null))
rc = -1;
} else {
- if (auth_alg == ODP_AUTH_ALG_AES128_GCM &&
- !(capability.auths.bit.aes128_gcm))
+ if (auth_alg == ODP_AUTH_ALG_AES_GCM &&
+ !(capability.auths.bit.aes_gcm))
rc = -1;
if (auth_alg == ODP_AUTH_ALG_NULL &&
!(capability.auths.bit.null))
@@ -103,8 +108,61 @@ static void alg_test(odp_crypto_op_t op,
CU_ASSERT(!rc);
+ num = odp_crypto_cipher_capability(cipher_alg, cipher_capa,
+ MAX_ALG_CAPA);
+
+ if (cipher_alg != ODP_CIPHER_ALG_NULL) {
+ CU_ASSERT(num > 0);
+ found = 0;
+ } else {
+ CU_ASSERT(num == 0);
+ found = 1;
+ }
+
+ CU_ASSERT(num <= MAX_ALG_CAPA);
+
+ if (num > MAX_ALG_CAPA)
+ num = MAX_ALG_CAPA;
+
+ /* Search for the test case */
+ for (i = 0; i < num; i++) {
+ if (cipher_capa[i].key_len == cipher_key.length &&
+ cipher_capa[i].iv_len == ses_iv.length) {
+ found = 1;
+ break;
+ }
+ }
+
+ CU_ASSERT(found);
+
+ num = odp_crypto_auth_capability(auth_alg, auth_capa, MAX_ALG_CAPA);
+
+ if (auth_alg != ODP_AUTH_ALG_NULL) {
+ CU_ASSERT(num > 0);
+ found = 0;
+ } else {
+ CU_ASSERT(num == 0);
+ found = 1;
+ }
+
+ CU_ASSERT(num <= MAX_ALG_CAPA);
+
+ if (num > MAX_ALG_CAPA)
+ num = MAX_ALG_CAPA;
+
+ /* Search for the test case */
+ for (i = 0; i < num; i++) {
+ if (auth_capa[i].digest_len == digest_len &&
+ auth_capa[i].key_len == auth_key.length) {
+ found = 1;
+ break;
+ }
+ }
+
+ CU_ASSERT(found);
+
/* Create a crypto session */
- memset(&ses_params, 0, sizeof(ses_params));
+ odp_crypto_session_param_init(&ses_params);
ses_params.op = op;
ses_params.auth_cipher_text = false;
ses_params.pref_mode = suite_context.pref_mode;
@@ -345,11 +403,11 @@ void crypto_test_enc_alg_aes128_gcm(void)
iv.length = sizeof(aes128_gcm_reference_iv[i]);
alg_test(ODP_CRYPTO_OP_ENCODE,
- ODP_CIPHER_ALG_AES128_GCM,
+ ODP_CIPHER_ALG_AES_GCM,
iv,
NULL,
cipher_key,
- ODP_AUTH_ALG_AES128_GCM,
+ ODP_AUTH_ALG_AES_GCM,
auth_key,
&aes128_gcm_cipher_range[i],
&aes128_gcm_auth_range[i],
@@ -381,11 +439,11 @@ void crypto_test_enc_alg_aes128_gcm_ovr_iv(void)
cipher_key.length = sizeof(aes128_gcm_reference_key[i]);
alg_test(ODP_CRYPTO_OP_ENCODE,
- ODP_CIPHER_ALG_AES128_GCM,
+ ODP_CIPHER_ALG_AES_GCM,
iv,
aes128_gcm_reference_iv[i],
cipher_key,
- ODP_AUTH_ALG_AES128_GCM,
+ ODP_AUTH_ALG_AES_GCM,
auth_key,
&aes128_gcm_cipher_range[i],
&aes128_gcm_auth_range[i],
@@ -420,11 +478,11 @@ void crypto_test_dec_alg_aes128_gcm(void)
iv.length = sizeof(aes128_gcm_reference_iv[i]);
alg_test(ODP_CRYPTO_OP_DECODE,
- ODP_CIPHER_ALG_AES128_GCM,
+ ODP_CIPHER_ALG_AES_GCM,
iv,
NULL,
cipher_key,
- ODP_AUTH_ALG_AES128_GCM,
+ ODP_AUTH_ALG_AES_GCM,
auth_key,
&aes128_gcm_cipher_range[i],
&aes128_gcm_auth_range[i],
@@ -457,11 +515,11 @@ void crypto_test_dec_alg_aes128_gcm_ovr_iv(void)
cipher_key.length = sizeof(aes128_gcm_reference_key[i]);
alg_test(ODP_CRYPTO_OP_DECODE,
- ODP_CIPHER_ALG_AES128_GCM,
+ ODP_CIPHER_ALG_AES_GCM,
iv,
aes128_gcm_reference_iv[i],
cipher_key,
- ODP_AUTH_ALG_AES128_GCM,
+ ODP_AUTH_ALG_AES_GCM,
auth_key,
&aes128_gcm_cipher_range[i],
&aes128_gcm_auth_range[i],
@@ -495,7 +553,7 @@ void crypto_test_enc_alg_aes128_cbc(void)
iv.length = sizeof(aes128_cbc_reference_iv[i]);
alg_test(ODP_CRYPTO_OP_ENCODE,
- ODP_CIPHER_ALG_AES128_CBC,
+ ODP_CIPHER_ALG_AES_CBC,
iv,
NULL,
cipher_key,
@@ -526,7 +584,7 @@ void crypto_test_enc_alg_aes128_cbc_ovr_iv(void)
cipher_key.length = sizeof(aes128_cbc_reference_key[i]);
alg_test(ODP_CRYPTO_OP_ENCODE,
- ODP_CIPHER_ALG_AES128_CBC,
+ ODP_CIPHER_ALG_AES_CBC,
iv,
aes128_cbc_reference_iv[i],
cipher_key,
@@ -561,7 +619,7 @@ void crypto_test_dec_alg_aes128_cbc(void)
iv.length = sizeof(aes128_cbc_reference_iv[i]);
alg_test(ODP_CRYPTO_OP_DECODE,
- ODP_CIPHER_ALG_AES128_CBC,
+ ODP_CIPHER_ALG_AES_CBC,
iv,
NULL,
cipher_key,
@@ -594,7 +652,7 @@ void crypto_test_dec_alg_aes128_cbc_ovr_iv(void)
cipher_key.length = sizeof(aes128_cbc_reference_key[i]);
alg_test(ODP_CRYPTO_OP_DECODE,
- ODP_CIPHER_ALG_AES128_CBC,
+ ODP_CIPHER_ALG_AES_CBC,
iv,
aes128_cbc_reference_iv[i],
cipher_key,
@@ -634,7 +692,7 @@ void crypto_test_alg_hmac_md5(void)
iv,
iv.data,
cipher_key,
- ODP_AUTH_ALG_MD5_96,
+ ODP_AUTH_ALG_MD5_HMAC,
auth_key,
NULL, NULL,
hmac_md5_reference_plaintext[i],
@@ -672,7 +730,7 @@ void crypto_test_alg_hmac_sha256(void)
iv,
iv.data,
cipher_key,
- ODP_AUTH_ALG_SHA256_128,
+ ODP_AUTH_ALG_SHA256_HMAC,
auth_key,
NULL, NULL,
hmac_sha256_reference_plaintext[i],
diff --git a/test/common_plat/validation/api/lock/lock.c b/test/common_plat/validation/api/lock/lock.c
index a668a3157..bd9a2aad2 100644
--- a/test/common_plat/validation/api/lock/lock.c
+++ b/test/common_plat/validation/api/lock/lock.c
@@ -1189,6 +1189,29 @@ int lock_init(odp_instance_t *inst)
return ret;
}
+int lock_term(odp_instance_t inst)
+{
+ odp_shm_t shm;
+
+ shm = odp_shm_lookup(GLOBAL_SHM_NAME);
+ if (0 != odp_shm_free(shm)) {
+ fprintf(stderr, "error: odp_shm_free() failed.\n");
+ return -1;
+ }
+
+ if (0 != odp_term_local()) {
+ fprintf(stderr, "error: odp_term_local() failed.\n");
+ return -1;
+ }
+
+ if (0 != odp_term_global(inst)) {
+ fprintf(stderr, "error: odp_term_global() failed.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
odp_suiteinfo_t lock_suites[] = {
{"nolocking", lock_suite_init, NULL,
lock_suite_no_locking}, /* must be first */
@@ -1214,6 +1237,7 @@ int lock_main(int argc, char *argv[])
return -1;
odp_cunit_register_global_init(lock_init);
+ odp_cunit_register_global_term(lock_term);
ret = odp_cunit_register(lock_suites);
diff --git a/test/common_plat/validation/api/lock/lock.h b/test/common_plat/validation/api/lock/lock.h
index 5adc63352..e0f49728b 100644
--- a/test/common_plat/validation/api/lock/lock.h
+++ b/test/common_plat/validation/api/lock/lock.h
@@ -38,6 +38,7 @@ extern odp_suiteinfo_t lock_suites[];
/* executable init/term functions: */
int lock_init(odp_instance_t *inst);
+int lock_term(odp_instance_t inst);
/* main test program: */
int lock_main(int argc, char *argv[]);
diff --git a/test/common_plat/validation/api/packet/packet.c b/test/common_plat/validation/api/packet/packet.c
index c75cde9ad..fa5206fe4 100644
--- a/test/common_plat/validation/api/packet/packet.c
+++ b/test/common_plat/validation/api/packet/packet.c
@@ -32,11 +32,19 @@ static struct udata_struct {
"abcdefg",
};
-static void _packet_compare_data(odp_packet_t pkt1, odp_packet_t pkt2)
+#define packet_compare_offset(pkt1, off1, pkt2, off2, len) \
+ _packet_compare_offset((pkt1), (off1), (pkt2), (off2), (len), __LINE__)
+
+#define packet_compare_data(pkt1, pkt2) \
+ _packet_compare_data((pkt1), (pkt2), __LINE__)
+
+static void _packet_compare_data(odp_packet_t pkt1, odp_packet_t pkt2,
+ int line)
{
uint32_t len = odp_packet_len(pkt1);
uint32_t offset = 0;
uint32_t seglen1, seglen2, cmplen;
+ int ret;
CU_ASSERT_FATAL(len == odp_packet_len(pkt2));
@@ -47,13 +55,52 @@ static void _packet_compare_data(odp_packet_t pkt1, odp_packet_t pkt2)
CU_ASSERT_PTR_NOT_NULL_FATAL(pkt1map);
CU_ASSERT_PTR_NOT_NULL_FATAL(pkt2map);
cmplen = seglen1 < seglen2 ? seglen1 : seglen2;
- CU_ASSERT(!memcmp(pkt1map, pkt2map, cmplen));
+ ret = memcmp(pkt1map, pkt2map, cmplen);
+
+ if (ret) {
+ printf("\ncompare_data failed: line %i, offset %"
+ PRIu32 "\n", line, offset);
+ }
+
+ CU_ASSERT(ret == 0);
offset += cmplen;
len -= cmplen;
}
}
+static int fill_data_forward(odp_packet_t pkt, uint32_t offset, uint32_t len,
+ uint32_t *cur_data)
+{
+ uint8_t buf[len];
+ uint32_t i, data;
+
+ data = *cur_data;
+
+ for (i = 0; i < len; i++)
+ buf[i] = data++;
+
+ *cur_data = data;
+
+ return odp_packet_copy_from_mem(pkt, offset, len, buf);
+}
+
+static int fill_data_backward(odp_packet_t pkt, uint32_t offset, uint32_t len,
+ uint32_t *cur_data)
+{
+ uint8_t buf[len];
+ uint32_t i, data;
+
+ data = *cur_data;
+
+ for (i = 0; i < len; i++)
+ buf[len - i - 1] = data++;
+
+ *cur_data = data;
+
+ return odp_packet_copy_from_mem(pkt, offset, len, buf);
+}
+
int packet_suite_init(void)
{
odp_pool_param_t params;
@@ -66,7 +113,12 @@ int packet_suite_init(void)
if (odp_pool_capability(&capa) < 0)
return -1;
- packet_len = capa.pkt.min_seg_len - PACKET_TAILROOM_RESERVE;
+ /* Pick a typical packet size and decrement it to the single segment
+ * limit if needed (min_seg_len maybe equal to max_len
+ * on some systems). */
+ packet_len = 512;
+ while (packet_len > (capa.pkt.min_seg_len - PACKET_TAILROOM_RESERVE))
+ packet_len--;
if (capa.pkt.max_len) {
segmented_packet_len = capa.pkt.max_len;
@@ -137,6 +189,7 @@ int packet_suite_init(void)
udat_size = odp_packet_user_area_size(test_packet);
if (!udat || udat_size != sizeof(struct udata_struct))
return -1;
+
odp_pool_print(packet_pool);
memcpy(udat, &test_packet_udata, sizeof(struct udata_struct));
@@ -289,23 +342,86 @@ void packet_test_alloc_free_multi(void)
void packet_test_alloc_segmented(void)
{
+ const int num = 5;
+ odp_packet_t pkts[num];
odp_packet_t pkt;
- uint32_t len;
+ uint32_t max_len;
+ odp_pool_t pool;
+ odp_pool_param_t params;
odp_pool_capability_t capa;
+ int ret, i, num_alloc;
CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
if (capa.pkt.max_len)
- len = capa.pkt.max_len;
+ max_len = capa.pkt.max_len;
else
- len = capa.pkt.min_seg_len * capa.pkt.max_segs_per_pkt;
+ max_len = capa.pkt.min_seg_len * capa.pkt.max_segs_per_pkt;
+
+ odp_pool_param_init(&params);
+
+ params.type = ODP_POOL_PACKET;
+ params.pkt.seg_len = capa.pkt.min_seg_len;
+ params.pkt.len = max_len;
+
+ /* Ensure that 'num' segmented packets can be allocated */
+ params.pkt.num = num * capa.pkt.max_segs_per_pkt;
- pkt = odp_packet_alloc(packet_pool, len);
+ pool = odp_pool_create("pool_alloc_segmented", &params);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ /* Less than max len allocs */
+ pkt = odp_packet_alloc(pool, max_len / 2);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(odp_packet_len(pkt) == len);
+ CU_ASSERT(odp_packet_len(pkt) == max_len / 2);
+
+ odp_packet_free(pkt);
+
+ num_alloc = 0;
+ for (i = 0; i < num; i++) {
+ ret = odp_packet_alloc_multi(pool, max_len / 2,
+ &pkts[num_alloc], num - num_alloc);
+ CU_ASSERT_FATAL(ret >= 0);
+ num_alloc += ret;
+ if (num_alloc >= num)
+ break;
+ }
+
+ CU_ASSERT(num_alloc == num);
+
+ for (i = 0; i < num_alloc; i++)
+ CU_ASSERT(odp_packet_len(pkts[i]) == max_len / 2);
+
+ odp_packet_free_multi(pkts, num_alloc);
+
+ /* Max len allocs */
+ pkt = odp_packet_alloc(pool, max_len);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(odp_packet_len(pkt) == max_len);
+
if (segmentation_supported)
CU_ASSERT(odp_packet_is_segmented(pkt) == 1);
+
odp_packet_free(pkt);
+
+ num_alloc = 0;
+ for (i = 0; i < num; i++) {
+ ret = odp_packet_alloc_multi(pool, max_len,
+ &pkts[num_alloc], num - num_alloc);
+ CU_ASSERT_FATAL(ret >= 0);
+ num_alloc += ret;
+ if (num_alloc >= num)
+ break;
+ }
+
+ CU_ASSERT(num_alloc == num);
+
+ for (i = 0; i < num_alloc; i++)
+ CU_ASSERT(odp_packet_len(pkts[i]) == max_len);
+
+ odp_packet_free_multi(pkts, num_alloc);
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
}
void packet_test_event_conversion(void)
@@ -321,7 +437,7 @@ void packet_test_event_conversion(void)
tmp_pkt = odp_packet_from_event(ev);
CU_ASSERT_FATAL(tmp_pkt != ODP_PACKET_INVALID);
CU_ASSERT(tmp_pkt == pkt);
- _packet_compare_data(tmp_pkt, pkt);
+ packet_compare_data(tmp_pkt, pkt);
}
void packet_test_basic_metadata(void)
@@ -659,9 +775,10 @@ void packet_test_tailroom(void)
_verify_tailroom_shift(&pkt, 0);
if (segmentation_supported) {
- _verify_tailroom_shift(&pkt, pull_val);
+ push_val = room + 100;
+ _verify_tailroom_shift(&pkt, push_val);
_verify_tailroom_shift(&pkt, 0);
- _verify_tailroom_shift(&pkt, -pull_val);
+ _verify_tailroom_shift(&pkt, -push_val);
}
odp_packet_free(pkt);
@@ -960,9 +1077,10 @@ static void _packet_compare_udata(odp_packet_t pkt1, odp_packet_t pkt2)
static void _packet_compare_offset(odp_packet_t pkt1, uint32_t off1,
odp_packet_t pkt2, uint32_t off2,
- uint32_t len)
+ uint32_t len, int line)
{
uint32_t seglen1, seglen2, cmplen;
+ int ret;
if (off1 + len > odp_packet_len(pkt1) ||
off2 + len > odp_packet_len(pkt2))
@@ -977,7 +1095,15 @@ static void _packet_compare_offset(odp_packet_t pkt1, uint32_t off1,
cmplen = seglen1 < seglen2 ? seglen1 : seglen2;
if (len < cmplen)
cmplen = len;
- CU_ASSERT(!memcmp(pkt1map, pkt2map, cmplen));
+
+ ret = memcmp(pkt1map, pkt2map, cmplen);
+
+ if (ret) {
+ printf("\ncompare_offset failed: line %i, off1 %"
+ PRIu32 ", off2 %" PRIu32 "\n", line, off1, off2);
+ }
+
+ CU_ASSERT(ret == 0);
off1 += cmplen;
off2 += cmplen;
@@ -1000,7 +1126,7 @@ void packet_test_copy(void)
pkt = odp_packet_copy(test_packet, odp_packet_pool(test_packet));
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- _packet_compare_data(pkt, test_packet);
+ packet_compare_data(pkt, test_packet);
pool = odp_packet_pool(pkt);
CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
pkt_copy = odp_packet_copy(pkt, pool);
@@ -1011,7 +1137,7 @@ void packet_test_copy(void)
CU_ASSERT(odp_packet_len(pkt) == odp_packet_len(pkt_copy));
_packet_compare_inflags(pkt, pkt_copy);
- _packet_compare_data(pkt, pkt_copy);
+ packet_compare_data(pkt, pkt_copy);
CU_ASSERT(odp_packet_user_area_size(pkt) ==
odp_packet_user_area_size(test_packet));
_packet_compare_udata(pkt, pkt_copy);
@@ -1020,7 +1146,7 @@ void packet_test_copy(void)
pkt = odp_packet_copy(test_packet, packet_pool_double_uarea);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- _packet_compare_data(pkt, test_packet);
+ packet_compare_data(pkt, test_packet);
pool = odp_packet_pool(pkt);
CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
pkt_copy = odp_packet_copy(pkt, pool);
@@ -1031,7 +1157,7 @@ void packet_test_copy(void)
CU_ASSERT(odp_packet_len(pkt) == odp_packet_len(pkt_copy));
_packet_compare_inflags(pkt, pkt_copy);
- _packet_compare_data(pkt, pkt_copy);
+ packet_compare_data(pkt, pkt_copy);
CU_ASSERT(odp_packet_user_area_size(pkt) ==
2 * odp_packet_user_area_size(test_packet));
_packet_compare_udata(pkt, pkt_copy);
@@ -1050,7 +1176,7 @@ void packet_test_copy(void)
CU_ASSERT(odp_packet_data(pkt) != odp_packet_data(pkt_part));
CU_ASSERT(odp_packet_len(pkt) == odp_packet_len(pkt_part));
- _packet_compare_data(pkt, pkt_part);
+ packet_compare_data(pkt, pkt_part);
odp_packet_free(pkt_part);
plen = odp_packet_len(pkt);
@@ -1058,14 +1184,14 @@ void packet_test_copy(void)
pkt_part = odp_packet_copy_part(pkt, i, plen / 4, pool);
CU_ASSERT_FATAL(pkt_part != ODP_PACKET_INVALID);
CU_ASSERT(odp_packet_len(pkt_part) == plen / 4);
- _packet_compare_offset(pkt_part, 0, pkt, i, plen / 4);
+ packet_compare_offset(pkt_part, 0, pkt, i, plen / 4);
odp_packet_free(pkt_part);
}
/* Test copy and move apis */
CU_ASSERT(odp_packet_copy_data(pkt, 0, plen - plen / 8, plen / 8) == 0);
- _packet_compare_offset(pkt, 0, pkt, plen - plen / 8, plen / 8);
- _packet_compare_offset(pkt, 0, test_packet, plen - plen / 8, plen / 8);
+ packet_compare_offset(pkt, 0, pkt, plen - plen / 8, plen / 8);
+ packet_compare_offset(pkt, 0, test_packet, plen - plen / 8, plen / 8);
/* Test segment crossing if we support segments */
pkt_data = odp_packet_offset(pkt, 0, &seg_len, NULL);
@@ -1081,7 +1207,7 @@ void packet_test_copy(void)
pkt_part = odp_packet_copy_part(pkt, src_offset, 20, pool);
CU_ASSERT(odp_packet_move_data(pkt, dst_offset, src_offset, 20) == 0);
- _packet_compare_offset(pkt, dst_offset, pkt_part, 0, 20);
+ packet_compare_offset(pkt, dst_offset, pkt_part, 0, 20);
odp_packet_free(pkt_part);
odp_packet_free(pkt);
@@ -1130,7 +1256,7 @@ void packet_test_copydata(void)
1) == 0);
}
- _packet_compare_offset(pkt, 0, test_packet, 0, pkt_len / 2);
+ packet_compare_offset(pkt, 0, test_packet, 0, pkt_len / 2);
odp_packet_free(pkt);
pkt = odp_packet_alloc(odp_packet_pool(segmented_test_packet),
@@ -1140,9 +1266,9 @@ void packet_test_copydata(void)
CU_ASSERT(odp_packet_copy_from_pkt(pkt, 0, segmented_test_packet,
odp_packet_len(pkt) / 4,
odp_packet_len(pkt)) == 0);
- _packet_compare_offset(pkt, 0, segmented_test_packet,
- odp_packet_len(pkt) / 4,
- odp_packet_len(pkt));
+ packet_compare_offset(pkt, 0, segmented_test_packet,
+ odp_packet_len(pkt) / 4,
+ odp_packet_len(pkt));
odp_packet_free(pkt);
}
@@ -1151,21 +1277,27 @@ void packet_test_concatsplit(void)
odp_packet_t pkt, pkt2;
uint32_t pkt_len;
odp_packet_t splits[4];
+ odp_pool_t pool;
- pkt = odp_packet_copy(test_packet, odp_packet_pool(test_packet));
+ pool = odp_packet_pool(test_packet);
+ pkt = odp_packet_copy(test_packet, pool);
+ pkt2 = odp_packet_copy(test_packet, pool);
pkt_len = odp_packet_len(test_packet);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT_FATAL(pkt2 != ODP_PACKET_INVALID);
+ CU_ASSERT(pkt_len == odp_packet_len(pkt));
+ CU_ASSERT(pkt_len == odp_packet_len(pkt2));
- CU_ASSERT(odp_packet_concat(&pkt, pkt) == 0);
+ CU_ASSERT(odp_packet_concat(&pkt, pkt2) >= 0);
CU_ASSERT(odp_packet_len(pkt) == pkt_len * 2);
- _packet_compare_offset(pkt, 0, pkt, pkt_len, pkt_len);
+ packet_compare_offset(pkt, 0, pkt, pkt_len, pkt_len);
CU_ASSERT(odp_packet_split(&pkt, pkt_len, &pkt2) == 0);
CU_ASSERT(pkt != pkt2);
CU_ASSERT(odp_packet_data(pkt) != odp_packet_data(pkt2));
CU_ASSERT(odp_packet_len(pkt) == odp_packet_len(pkt2));
- _packet_compare_data(pkt, pkt2);
- _packet_compare_data(pkt, test_packet);
+ packet_compare_data(pkt, pkt2);
+ packet_compare_data(pkt, test_packet);
odp_packet_free(pkt);
odp_packet_free(pkt2);
@@ -1175,26 +1307,26 @@ void packet_test_concatsplit(void)
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
pkt_len = odp_packet_len(pkt);
- _packet_compare_data(pkt, segmented_test_packet);
+ packet_compare_data(pkt, segmented_test_packet);
CU_ASSERT(odp_packet_split(&pkt, pkt_len / 2, &splits[0]) == 0);
CU_ASSERT(pkt != splits[0]);
CU_ASSERT(odp_packet_data(pkt) != odp_packet_data(splits[0]));
CU_ASSERT(odp_packet_len(pkt) == pkt_len / 2);
CU_ASSERT(odp_packet_len(pkt) + odp_packet_len(splits[0]) == pkt_len);
- _packet_compare_offset(pkt, 0, segmented_test_packet, 0, pkt_len / 2);
- _packet_compare_offset(splits[0], 0, segmented_test_packet,
- pkt_len / 2, odp_packet_len(splits[0]));
+ packet_compare_offset(pkt, 0, segmented_test_packet, 0, pkt_len / 2);
+ packet_compare_offset(splits[0], 0, segmented_test_packet,
+ pkt_len / 2, odp_packet_len(splits[0]));
- CU_ASSERT(odp_packet_concat(&pkt, splits[0]) == 0);
- _packet_compare_offset(pkt, 0, segmented_test_packet, 0, pkt_len / 2);
- _packet_compare_offset(pkt, pkt_len / 2, segmented_test_packet,
- pkt_len / 2, pkt_len / 2);
- _packet_compare_offset(pkt, 0, segmented_test_packet, 0,
- pkt_len);
+ CU_ASSERT(odp_packet_concat(&pkt, splits[0]) >= 0);
+ packet_compare_offset(pkt, 0, segmented_test_packet, 0, pkt_len / 2);
+ packet_compare_offset(pkt, pkt_len / 2, segmented_test_packet,
+ pkt_len / 2, pkt_len / 2);
+ packet_compare_offset(pkt, 0, segmented_test_packet, 0,
+ pkt_len);
CU_ASSERT(odp_packet_len(pkt) == odp_packet_len(segmented_test_packet));
- _packet_compare_data(pkt, segmented_test_packet);
+ packet_compare_data(pkt, segmented_test_packet);
CU_ASSERT(odp_packet_split(&pkt, pkt_len / 2, &splits[0]) == 0);
CU_ASSERT(odp_packet_split(&pkt, pkt_len / 4, &splits[1]) == 0);
@@ -1203,16 +1335,493 @@ void packet_test_concatsplit(void)
CU_ASSERT(odp_packet_len(splits[0]) + odp_packet_len(splits[1]) +
odp_packet_len(splits[2]) + odp_packet_len(pkt) == pkt_len);
- CU_ASSERT(odp_packet_concat(&pkt, splits[2]) == 0);
- CU_ASSERT(odp_packet_concat(&pkt, splits[1]) == 0);
- CU_ASSERT(odp_packet_concat(&pkt, splits[0]) == 0);
+ CU_ASSERT(odp_packet_concat(&pkt, splits[2]) >= 0);
+ CU_ASSERT(odp_packet_concat(&pkt, splits[1]) >= 0);
+ CU_ASSERT(odp_packet_concat(&pkt, splits[0]) >= 0);
CU_ASSERT(odp_packet_len(pkt) == odp_packet_len(segmented_test_packet));
- _packet_compare_data(pkt, segmented_test_packet);
+ packet_compare_data(pkt, segmented_test_packet);
odp_packet_free(pkt);
}
+void packet_test_concat_small(void)
+{
+ odp_pool_capability_t capa;
+ odp_pool_t pool;
+ odp_pool_param_t param;
+ odp_packet_t pkt, pkt2;
+ int ret;
+ uint8_t *data;
+ uint32_t i;
+ uint32_t len = 32000;
+ uint8_t buf[len];
+
+ CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
+
+ if (capa.pkt.max_len && capa.pkt.max_len < len)
+ len = capa.pkt.max_len;
+
+ odp_pool_param_init(&param);
+
+ param.type = ODP_POOL_PACKET;
+ param.pkt.len = len;
+ param.pkt.num = 100;
+
+ pool = odp_pool_create("packet_pool_concat", &param);
+ CU_ASSERT(pool != ODP_POOL_INVALID);
+
+ pkt = odp_packet_alloc(pool, 1);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ data = odp_packet_data(pkt);
+ *data = 0;
+
+ for (i = 0; i < len - 1; i++) {
+ pkt2 = odp_packet_alloc(pool, 1);
+ CU_ASSERT_FATAL(pkt2 != ODP_PACKET_INVALID);
+
+ data = odp_packet_data(pkt2);
+ *data = i + 1;
+
+ ret = odp_packet_concat(&pkt, pkt2);
+ CU_ASSERT(ret >= 0);
+
+ if (ret < 0) {
+ odp_packet_free(pkt2);
+ break;
+ }
+ }
+
+ CU_ASSERT(odp_packet_len(pkt) == len);
+
+ len = odp_packet_len(pkt);
+
+ memset(buf, 0, len);
+ CU_ASSERT(odp_packet_copy_to_mem(pkt, 0, len, buf) == 0);
+
+ for (i = 0; i < len; i++)
+ CU_ASSERT(buf[i] == (i % 256));
+
+ odp_packet_free(pkt);
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+void packet_test_concat_extend_trunc(void)
+{
+ odp_pool_capability_t capa;
+ odp_pool_t pool;
+ odp_pool_param_t param;
+ odp_packet_t pkt, pkt2;
+ int i, ret;
+ uint32_t alloc_len, ext_len, trunc_len, cur_len;
+ uint32_t len = 1900;
+
+ CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
+
+ if (capa.pkt.max_len && capa.pkt.max_len < len)
+ len = capa.pkt.max_len;
+
+ alloc_len = len / 8;
+ ext_len = len / 4;
+ trunc_len = len / 3;
+
+ odp_pool_param_init(&param);
+
+ param.type = ODP_POOL_PACKET;
+ param.pkt.len = len;
+ param.pkt.num = 100;
+
+ pool = odp_pool_create("packet_pool_concat", &param);
+ CU_ASSERT(pool != ODP_POOL_INVALID);
+
+ pkt = odp_packet_alloc(pool, alloc_len);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ cur_len = odp_packet_len(pkt);
+
+ for (i = 0; i < 2; i++) {
+ pkt2 = odp_packet_alloc(pool, alloc_len);
+ CU_ASSERT_FATAL(pkt2 != ODP_PACKET_INVALID);
+
+ ret = odp_packet_concat(&pkt, pkt2);
+ CU_ASSERT(ret >= 0);
+
+ if (ret < 0)
+ odp_packet_free(pkt2);
+
+ CU_ASSERT(odp_packet_len(pkt) == (cur_len + alloc_len));
+ cur_len = odp_packet_len(pkt);
+ }
+
+ ret = odp_packet_extend_tail(&pkt, ext_len, NULL, NULL);
+ CU_ASSERT(ret >= 0);
+
+ CU_ASSERT(odp_packet_len(pkt) == (cur_len + ext_len));
+ cur_len = odp_packet_len(pkt);
+
+ ret = odp_packet_extend_head(&pkt, ext_len, NULL, NULL);
+ CU_ASSERT(ret >= 0);
+
+ CU_ASSERT(odp_packet_len(pkt) == (cur_len + ext_len));
+ cur_len = odp_packet_len(pkt);
+
+ pkt2 = odp_packet_alloc(pool, alloc_len);
+ CU_ASSERT_FATAL(pkt2 != ODP_PACKET_INVALID);
+
+ ret = odp_packet_concat(&pkt, pkt2);
+ CU_ASSERT(ret >= 0);
+
+ if (ret < 0)
+ odp_packet_free(pkt2);
+
+ CU_ASSERT(odp_packet_len(pkt) == (cur_len + alloc_len));
+ cur_len = odp_packet_len(pkt);
+
+ ret = odp_packet_trunc_head(&pkt, trunc_len, NULL, NULL);
+ CU_ASSERT(ret >= 0);
+
+ CU_ASSERT(odp_packet_len(pkt) == (cur_len - trunc_len));
+ cur_len = odp_packet_len(pkt);
+
+ ret = odp_packet_trunc_tail(&pkt, trunc_len, NULL, NULL);
+ CU_ASSERT(ret >= 0);
+
+ CU_ASSERT(odp_packet_len(pkt) == (cur_len - trunc_len));
+ cur_len = odp_packet_len(pkt);
+
+ odp_packet_free(pkt);
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+void packet_test_extend_small(void)
+{
+ odp_pool_capability_t capa;
+ odp_pool_t pool;
+ odp_pool_param_t param;
+ odp_packet_t pkt;
+ int ret, round;
+ uint8_t *data;
+ uint32_t i, seg_len;
+ int tail = 1;
+ uint32_t len = 32000;
+ uint8_t buf[len];
+
+ CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
+
+ if (capa.pkt.max_len && capa.pkt.max_len < len)
+ len = capa.pkt.max_len;
+
+ odp_pool_param_init(&param);
+
+ param.type = ODP_POOL_PACKET;
+ param.pkt.len = len;
+ param.pkt.num = 100;
+
+ pool = odp_pool_create("packet_pool_extend", &param);
+ CU_ASSERT(pool != ODP_POOL_INVALID);
+
+ for (round = 0; round < 2; round++) {
+ pkt = odp_packet_alloc(pool, 1);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ data = odp_packet_data(pkt);
+ *data = 0;
+
+ for (i = 0; i < len - 1; i++) {
+ if (tail) {
+ ret = odp_packet_extend_tail(&pkt, 1,
+ (void **)&data,
+ &seg_len);
+ CU_ASSERT(ret >= 0);
+ } else {
+ ret = odp_packet_extend_head(&pkt, 1,
+ (void **)&data,
+ &seg_len);
+ CU_ASSERT(ret >= 0);
+ }
+
+ if (ret < 0)
+ break;
+
+ if (tail) {
+ /* assert needs brackets */
+ CU_ASSERT(seg_len == 1);
+ } else {
+ CU_ASSERT(seg_len > 0);
+ }
+
+ *data = i + 1;
+ }
+
+ CU_ASSERT(odp_packet_len(pkt) == len);
+
+ len = odp_packet_len(pkt);
+
+ memset(buf, 0, len);
+ CU_ASSERT(odp_packet_copy_to_mem(pkt, 0, len, buf) == 0);
+
+ for (i = 0; i < len; i++) {
+ int match;
+
+ if (tail) {
+ match = (buf[i] == (i % 256));
+ CU_ASSERT(match);
+ } else {
+ match = (buf[len - 1 - i] == (i % 256));
+ CU_ASSERT(match);
+ }
+
+ /* Limit the number of failed asserts to
+ one per packet */
+ if (!match)
+ break;
+ }
+
+ odp_packet_free(pkt);
+
+ tail = 0;
+ }
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+void packet_test_extend_large(void)
+{
+ odp_pool_capability_t capa;
+ odp_pool_t pool;
+ odp_pool_param_t param;
+ odp_packet_t pkt;
+ int ret, round;
+ uint8_t *data;
+ uint32_t i, seg_len, ext_len, cur_len, cur_data;
+ int tail = 1;
+ int num_div = 16;
+ int div = 1;
+ uint32_t len = 32000;
+ uint8_t buf[len];
+
+ CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
+
+ if (capa.pkt.max_len && capa.pkt.max_len < len)
+ len = capa.pkt.max_len;
+
+ odp_pool_param_init(&param);
+
+ param.type = ODP_POOL_PACKET;
+ param.pkt.len = len;
+ param.pkt.num = 100;
+
+ pool = odp_pool_create("packet_pool_extend", &param);
+ CU_ASSERT(pool != ODP_POOL_INVALID);
+
+ for (round = 0; round < 2 * num_div; round++) {
+ ext_len = len / div;
+ cur_len = ext_len;
+
+ pkt = odp_packet_alloc(pool, ext_len);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ cur_data = 0;
+
+ if (tail) {
+ ret = fill_data_forward(pkt, 0, ext_len, &cur_data);
+ CU_ASSERT(ret == 0);
+ } else {
+ ret = fill_data_backward(pkt, 0, ext_len, &cur_data);
+ CU_ASSERT(ret == 0);
+ }
+
+ while (cur_len < len) {
+ if ((len - cur_len) < ext_len)
+ ext_len = len - cur_len;
+
+ if (tail) {
+ ret = odp_packet_extend_tail(&pkt, ext_len,
+ (void **)&data,
+ &seg_len);
+ CU_ASSERT(ret >= 0);
+ } else {
+ ret = odp_packet_extend_head(&pkt, ext_len,
+ (void **)&data,
+ &seg_len);
+ CU_ASSERT(ret >= 0);
+ }
+
+ if (ret < 0)
+ break;
+
+ if (tail) {
+ /* assert needs brackets */
+ CU_ASSERT((seg_len > 0) &&
+ (seg_len <= ext_len));
+ ret = fill_data_forward(pkt, cur_len, ext_len,
+ &cur_data);
+ CU_ASSERT(ret == 0);
+ } else {
+ CU_ASSERT(seg_len > 0);
+ CU_ASSERT(data == odp_packet_data(pkt));
+ ret = fill_data_backward(pkt, 0, ext_len,
+ &cur_data);
+ CU_ASSERT(ret == 0);
+ }
+
+ cur_len += ext_len;
+ }
+
+ CU_ASSERT(odp_packet_len(pkt) == len);
+
+ len = odp_packet_len(pkt);
+
+ memset(buf, 0, len);
+ CU_ASSERT(odp_packet_copy_to_mem(pkt, 0, len, buf) == 0);
+
+ for (i = 0; i < len; i++) {
+ int match;
+
+ if (tail) {
+ match = (buf[i] == (i % 256));
+ CU_ASSERT(match);
+ } else {
+ match = (buf[len - 1 - i] == (i % 256));
+ CU_ASSERT(match);
+ }
+
+ /* Limit the number of failed asserts to
+ one per packet */
+ if (!match)
+ break;
+ }
+
+ odp_packet_free(pkt);
+
+ div++;
+ if (div > num_div) {
+ /* test extend head */
+ div = 1;
+ tail = 0;
+ }
+ }
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+void packet_test_extend_mix(void)
+{
+ odp_pool_capability_t capa;
+ odp_pool_t pool;
+ odp_pool_param_t param;
+ odp_packet_t pkt;
+ int ret, round;
+ uint8_t *data;
+ uint32_t i, seg_len, ext_len, cur_len, cur_data;
+ int small_count;
+ int tail = 1;
+ uint32_t len = 32000;
+ uint8_t buf[len];
+
+ CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
+
+ if (capa.pkt.max_len && capa.pkt.max_len < len)
+ len = capa.pkt.max_len;
+
+ odp_pool_param_init(&param);
+
+ param.type = ODP_POOL_PACKET;
+ param.pkt.len = len;
+ param.pkt.num = 100;
+
+ pool = odp_pool_create("packet_pool_extend", &param);
+ CU_ASSERT(pool != ODP_POOL_INVALID);
+
+ for (round = 0; round < 2; round++) {
+ small_count = 30;
+ ext_len = len / 10;
+ cur_len = ext_len;
+
+ pkt = odp_packet_alloc(pool, ext_len);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ cur_data = 0;
+
+ if (tail) {
+ ret = fill_data_forward(pkt, 0, ext_len, &cur_data);
+ CU_ASSERT(ret == 0);
+ } else {
+ ret = fill_data_backward(pkt, 0, ext_len, &cur_data);
+ CU_ASSERT(ret == 0);
+ }
+
+ while (cur_len < len) {
+ if (small_count) {
+ small_count--;
+ ext_len = len / 100;
+ } else {
+ ext_len = len / 4;
+ }
+
+ if ((len - cur_len) < ext_len)
+ ext_len = len - cur_len;
+
+ if (tail) {
+ ret = odp_packet_extend_tail(&pkt, ext_len,
+ (void **)&data,
+ &seg_len);
+ CU_ASSERT(ret >= 0);
+ CU_ASSERT((seg_len > 0) &&
+ (seg_len <= ext_len));
+ ret = fill_data_forward(pkt, cur_len, ext_len,
+ &cur_data);
+ CU_ASSERT(ret == 0);
+ } else {
+ ret = odp_packet_extend_head(&pkt, ext_len,
+ (void **)&data,
+ &seg_len);
+ CU_ASSERT(ret >= 0);
+ CU_ASSERT(seg_len > 0);
+ CU_ASSERT(data == odp_packet_data(pkt));
+ ret = fill_data_backward(pkt, 0, ext_len,
+ &cur_data);
+ CU_ASSERT(ret == 0);
+ }
+
+ cur_len += ext_len;
+ }
+
+ CU_ASSERT(odp_packet_len(pkt) == len);
+
+ len = odp_packet_len(pkt);
+
+ memset(buf, 0, len);
+ CU_ASSERT(odp_packet_copy_to_mem(pkt, 0, len, buf) == 0);
+
+ for (i = 0; i < len; i++) {
+ int match;
+
+ if (tail) {
+ match = (buf[i] == (i % 256));
+ CU_ASSERT(match);
+ } else {
+ match = (buf[len - 1 - i] == (i % 256));
+ CU_ASSERT(match);
+ }
+
+ /* Limit the number of failed asserts to
+ one per packet */
+ if (!match)
+ break;
+ }
+
+ odp_packet_free(pkt);
+
+ tail = 0;
+ }
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
void packet_test_align(void)
{
odp_packet_t pkt;
@@ -1242,9 +1851,9 @@ void packet_test_align(void)
/* Alignment doesn't change packet length or contents */
CU_ASSERT(odp_packet_len(pkt) == pkt_len);
(void)odp_packet_offset(pkt, offset, &aligned_seglen, NULL);
- _packet_compare_offset(pkt, offset,
- segmented_test_packet, offset,
- aligned_seglen);
+ packet_compare_offset(pkt, offset,
+ segmented_test_packet, offset,
+ aligned_seglen);
/* Verify requested contiguous addressabilty */
CU_ASSERT(aligned_seglen >= seg_len + 2);
@@ -1264,8 +1873,8 @@ void packet_test_align(void)
aligned_data = odp_packet_offset(pkt, offset, &aligned_seglen, NULL);
CU_ASSERT(odp_packet_len(pkt) == pkt_len);
- _packet_compare_offset(pkt, offset, segmented_test_packet, offset,
- aligned_seglen);
+ packet_compare_offset(pkt, offset, segmented_test_packet, offset,
+ aligned_seglen);
CU_ASSERT((uintptr_t)aligned_data % max_align == 0);
odp_packet_free(pkt);
@@ -1338,6 +1947,11 @@ odp_testinfo_t packet_suite[] = {
ODP_TEST_INFO(packet_test_copy),
ODP_TEST_INFO(packet_test_copydata),
ODP_TEST_INFO(packet_test_concatsplit),
+ ODP_TEST_INFO(packet_test_concat_small),
+ ODP_TEST_INFO(packet_test_concat_extend_trunc),
+ ODP_TEST_INFO(packet_test_extend_small),
+ ODP_TEST_INFO(packet_test_extend_large),
+ ODP_TEST_INFO(packet_test_extend_mix),
ODP_TEST_INFO(packet_test_align),
ODP_TEST_INFO(packet_test_offset),
ODP_TEST_INFO_NULL,
diff --git a/test/common_plat/validation/api/packet/packet.h b/test/common_plat/validation/api/packet/packet.h
index 10a377cf0..9bc3d6362 100644
--- a/test/common_plat/validation/api/packet/packet.h
+++ b/test/common_plat/validation/api/packet/packet.h
@@ -30,6 +30,11 @@ void packet_test_add_rem_data(void);
void packet_test_copy(void);
void packet_test_copydata(void);
void packet_test_concatsplit(void);
+void packet_test_concat_small(void);
+void packet_test_concat_extend_trunc(void);
+void packet_test_extend_small(void);
+void packet_test_extend_large(void);
+void packet_test_extend_mix(void);
void packet_test_align(void);
void packet_test_offset(void);
diff --git a/test/common_plat/validation/api/pktio/pktio.c b/test/common_plat/validation/api/pktio/pktio.c
index 7c979fbee..c23e2cc76 100644
--- a/test/common_plat/validation/api/pktio/pktio.c
+++ b/test/common_plat/validation/api/pktio/pktio.c
@@ -31,6 +31,8 @@
#define PKTIN_TS_MAX_RES 10000000000
#define PKTIN_TS_CMP_RES 1
+#define PKTIO_SRC_MAC {1, 2, 3, 4, 5, 6}
+#define PKTIO_DST_MAC {6, 5, 4, 3, 2, 1}
#undef DEBUG_STATS
/** interface names used for testing */
@@ -120,8 +122,12 @@ static inline void _pktio_wait_linkup(odp_pktio_t pktio)
}
}
-static void set_pool_len(odp_pool_param_t *params)
+static void set_pool_len(odp_pool_param_t *params, odp_pool_capability_t *capa)
{
+ uint32_t seg_len;
+
+ seg_len = capa->pkt.max_seg_len;
+
switch (pool_segmentation) {
case PKT_POOL_SEGMENTED:
/* Force segment to minimum size */
@@ -130,7 +136,7 @@ static void set_pool_len(odp_pool_param_t *params)
break;
case PKT_POOL_UNSEGMENTED:
default:
- params->pkt.seg_len = PKT_BUF_SIZE;
+ params->pkt.seg_len = seg_len;
params->pkt.len = PKT_BUF_SIZE;
break;
}
@@ -245,7 +251,8 @@ static uint32_t pktio_init_packet(odp_packet_t pkt)
odph_udphdr_t *udp;
char *buf;
uint16_t seq;
- uint8_t mac[ODP_PKTIO_MACADDR_MAXSIZE] = {0};
+ uint8_t src_mac[ODP_PKTIO_MACADDR_MAXSIZE] = PKTIO_SRC_MAC;
+ uint8_t dst_mac[ODP_PKTIO_MACADDR_MAXSIZE] = PKTIO_DST_MAC;
int pkt_len = odp_packet_len(pkt);
buf = odp_packet_data(pkt);
@@ -253,8 +260,8 @@ static uint32_t pktio_init_packet(odp_packet_t pkt)
/* Ethernet */
odp_packet_l2_offset_set(pkt, 0);
eth = (odph_ethhdr_t *)buf;
- memcpy(eth->src.addr, mac, ODPH_ETHADDR_LEN);
- memcpy(eth->dst.addr, mac, ODPH_ETHADDR_LEN);
+ memcpy(eth->src.addr, src_mac, ODPH_ETHADDR_LEN);
+ memcpy(eth->dst.addr, dst_mac, ODPH_ETHADDR_LEN);
eth->type = odp_cpu_to_be_16(ODPH_ETHTYPE_IPV4);
/* IP */
@@ -309,13 +316,17 @@ static int pktio_fixup_checksums(odp_packet_t pkt)
static int default_pool_create(void)
{
odp_pool_param_t params;
+ odp_pool_capability_t pool_capa;
char pool_name[ODP_POOL_NAME_LEN];
+ if (odp_pool_capability(&pool_capa) != 0)
+ return -1;
+
if (default_pkt_pool != ODP_POOL_INVALID)
return -1;
- memset(&params, 0, sizeof(params));
- set_pool_len(&params);
+ odp_pool_param_init(&params);
+ set_pool_len(&params, &pool_capa);
params.pkt.num = PKT_BUF_NUM;
params.type = ODP_POOL_PACKET;
@@ -598,6 +609,7 @@ static void pktio_txrx_multi(pktio_info_t *pktio_a, pktio_info_t *pktio_b,
int i, ret, num_rx;
if (packet_len == USE_MTU) {
+ odp_pool_capability_t pool_capa;
uint32_t mtu;
mtu = odp_pktio_mtu(pktio_a->id);
@@ -607,6 +619,11 @@ static void pktio_txrx_multi(pktio_info_t *pktio_a, pktio_info_t *pktio_b,
packet_len = mtu;
if (packet_len > PKT_LEN_MAX)
packet_len = PKT_LEN_MAX;
+
+ CU_ASSERT_FATAL(odp_pool_capability(&pool_capa) == 0);
+
+ if (packet_len > pool_capa.pkt.max_len)
+ packet_len = pool_capa.pkt.max_len;
}
/* generate test packets to send */
@@ -1673,10 +1690,11 @@ int pktio_check_send_failure(void)
odp_pktio_close(pktio_tx);
- if (mtu <= pool_capa.pkt.max_len - 32)
- return ODP_TEST_ACTIVE;
+ /* Failure test supports only single segment */
+ if (pool_capa.pkt.max_seg_len < mtu + 32)
+ return ODP_TEST_INACTIVE;
- return ODP_TEST_INACTIVE;
+ return ODP_TEST_ACTIVE;
}
void pktio_test_send_failure(void)
@@ -1691,6 +1709,7 @@ void pktio_test_send_failure(void)
int long_pkt_idx = TX_BATCH_LEN / 2;
pktio_info_t info_rx;
odp_pktout_queue_t pktout;
+ odp_pool_capability_t pool_capa;
pktio_tx = create_pktio(0, ODP_PKTIN_MODE_DIRECT,
ODP_PKTOUT_MODE_DIRECT);
@@ -1709,9 +1728,16 @@ void pktio_test_send_failure(void)
_pktio_wait_linkup(pktio_tx);
+ CU_ASSERT_FATAL(odp_pool_capability(&pool_capa) == 0);
+
+ if (pool_capa.pkt.max_seg_len < mtu + 32) {
+ CU_FAIL("Max packet seg length is too small.");
+ return;
+ }
+
/* configure the pool so that we can generate test packets larger
* than the interface MTU */
- memset(&pool_params, 0, sizeof(pool_params));
+ odp_pool_param_init(&pool_params);
pool_params.pkt.len = mtu + 32;
pool_params.pkt.seg_len = pool_params.pkt.len;
pool_params.pkt.num = TX_BATCH_LEN + 1;
@@ -1999,9 +2025,13 @@ static int create_pool(const char *iface, int num)
{
char pool_name[ODP_POOL_NAME_LEN];
odp_pool_param_t params;
+ odp_pool_capability_t pool_capa;
+
+ if (odp_pool_capability(&pool_capa) != 0)
+ return -1;
- memset(&params, 0, sizeof(params));
- set_pool_len(&params);
+ odp_pool_param_init(&params);
+ set_pool_len(&params, &pool_capa);
params.pkt.num = PKT_BUF_NUM;
params.type = ODP_POOL_PACKET;
diff --git a/test/common_plat/validation/api/pool/pool.c b/test/common_plat/validation/api/pool/pool.c
index d48ac2a34..8687941f7 100644
--- a/test/common_plat/validation/api/pool/pool.c
+++ b/test/common_plat/validation/api/pool/pool.c
@@ -8,19 +8,14 @@
#include "odp_cunit_common.h"
#include "pool.h"
-static int pool_name_number = 1;
static const int default_buffer_size = 1500;
static const int default_buffer_num = 1000;
static void pool_create_destroy(odp_pool_param_t *params)
{
odp_pool_t pool;
- char pool_name[ODP_POOL_NAME_LEN];
- snprintf(pool_name, sizeof(pool_name),
- "test_pool-%d", pool_name_number++);
-
- pool = odp_pool_create(pool_name, params);
+ pool = odp_pool_create(NULL, params);
CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
CU_ASSERT(odp_pool_to_u64(pool) !=
odp_pool_to_u64(ODP_POOL_INVALID));
diff --git a/test/common_plat/validation/api/queue/queue.c b/test/common_plat/validation/api/queue/queue.c
index dc3a977cb..1f7913a12 100644
--- a/test/common_plat/validation/api/queue/queue.c
+++ b/test/common_plat/validation/api/queue/queue.c
@@ -137,7 +137,7 @@ void queue_test_mode(void)
void queue_test_param(void)
{
- odp_queue_t queue;
+ odp_queue_t queue, null_queue;
odp_event_t enev[MAX_BUFFER_QUEUE];
odp_event_t deev[MAX_BUFFER_QUEUE];
odp_buffer_t buf;
@@ -173,6 +173,11 @@ void queue_test_param(void)
CU_ASSERT(&queue_context == odp_queue_context(queue));
CU_ASSERT(odp_queue_destroy(queue) == 0);
+ /* Create queue with no name */
+ odp_queue_param_init(&qparams);
+ null_queue = odp_queue_create(NULL, &qparams);
+ CU_ASSERT(ODP_QUEUE_INVALID != null_queue);
+
/* Plain type queue */
odp_queue_param_init(&qparams);
qparams.type = ODP_QUEUE_TYPE_PLAIN;
@@ -185,6 +190,9 @@ void queue_test_param(void)
CU_ASSERT(ODP_QUEUE_TYPE_PLAIN == odp_queue_type(queue));
CU_ASSERT(&queue_context == odp_queue_context(queue));
+ /* Destroy queue with no name */
+ CU_ASSERT(odp_queue_destroy(null_queue) == 0);
+
msg_pool = odp_pool_lookup("msg_pool");
buf = odp_buffer_alloc(msg_pool);
CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
diff --git a/test/common_plat/validation/api/random/random.c b/test/common_plat/validation/api/random/random.c
index 7572366c2..a0e2ef72f 100644
--- a/test/common_plat/validation/api/random/random.c
+++ b/test/common_plat/validation/api/random/random.c
@@ -13,12 +13,58 @@ void random_test_get_size(void)
int32_t ret;
uint8_t buf[32];
- ret = odp_random_data(buf, sizeof(buf), false);
+ ret = odp_random_data(buf, sizeof(buf), ODP_RANDOM_BASIC);
CU_ASSERT(ret == sizeof(buf));
}
+void random_test_kind(void)
+{
+ int32_t rc;
+ uint8_t buf[4096];
+ uint32_t buf_size = sizeof(buf);
+ odp_random_kind_t max_kind = odp_random_max_kind();
+
+ rc = odp_random_data(buf, buf_size, max_kind);
+ CU_ASSERT(rc > 0);
+
+ switch (max_kind) {
+ case ODP_RANDOM_BASIC:
+ rc = odp_random_data(buf, 4, ODP_RANDOM_CRYPTO);
+ CU_ASSERT(rc < 0);
+ /* Fall through */
+
+ case ODP_RANDOM_CRYPTO:
+ rc = odp_random_data(buf, 4, ODP_RANDOM_TRUE);
+ CU_ASSERT(rc < 0);
+ break;
+
+ default:
+ break;
+ }
+}
+
+void random_test_repeat(void)
+{
+ uint8_t buf1[1024];
+ uint8_t buf2[1024];
+ int32_t rc;
+ uint64_t seed1 = 12345897;
+ uint64_t seed2 = seed1;
+
+ rc = odp_random_test_data(buf1, sizeof(buf1), &seed1);
+ CU_ASSERT(rc == sizeof(buf1));
+
+ rc = odp_random_test_data(buf2, sizeof(buf2), &seed2);
+ CU_ASSERT(rc == sizeof(buf2));
+
+ CU_ASSERT(seed1 == seed2);
+ CU_ASSERT(memcmp(buf1, buf2, sizeof(buf1)) == 0);
+}
+
odp_testinfo_t random_suite[] = {
ODP_TEST_INFO(random_test_get_size),
+ ODP_TEST_INFO(random_test_kind),
+ ODP_TEST_INFO(random_test_repeat),
ODP_TEST_INFO_NULL,
};
diff --git a/test/common_plat/validation/api/random/random.h b/test/common_plat/validation/api/random/random.h
index 26202cc37..c4bca7827 100644
--- a/test/common_plat/validation/api/random/random.h
+++ b/test/common_plat/validation/api/random/random.h
@@ -11,6 +11,8 @@
/* test functions: */
void random_test_get_size(void);
+void random_test_kind(void);
+void random_test_repeat(void);
/* test arrays: */
extern odp_testinfo_t random_suite[];
diff --git a/test/common_plat/validation/api/scheduler/scheduler.c b/test/common_plat/validation/api/scheduler/scheduler.c
index 919cfb6ce..952561cd3 100644
--- a/test/common_plat/validation/api/scheduler/scheduler.c
+++ b/test/common_plat/validation/api/scheduler/scheduler.c
@@ -273,7 +273,7 @@ void scheduler_test_groups(void)
ODP_SCHED_SYNC_ORDERED};
int thr_id = odp_thread_id();
odp_thrmask_t zeromask, mymask, testmask;
- odp_schedule_group_t mygrp1, mygrp2, lookup;
+ odp_schedule_group_t mygrp1, mygrp2, null_grp, lookup;
odp_schedule_group_info_t info;
odp_thrmask_zero(&zeromask);
@@ -327,6 +327,10 @@ void scheduler_test_groups(void)
CU_ASSERT(rc == 0);
CU_ASSERT(!odp_thrmask_isset(&testmask, thr_id));
+ /* Create group with no name */
+ null_grp = odp_schedule_group_create(NULL, &zeromask);
+ CU_ASSERT(null_grp != ODP_SCHED_GROUP_INVALID);
+
/* We shouldn't be able to find our second group before creating it */
lookup = odp_schedule_group_lookup("Test Group 2");
CU_ASSERT(lookup == ODP_SCHED_GROUP_INVALID);
@@ -338,6 +342,9 @@ void scheduler_test_groups(void)
lookup = odp_schedule_group_lookup("Test Group 2");
CU_ASSERT(lookup == mygrp2);
+ /* Destroy group with no name */
+ CU_ASSERT_FATAL(odp_schedule_group_destroy(null_grp) == 0);
+
/* Verify we're not part of it */
rc = odp_schedule_group_thrmask(mygrp2, &testmask);
CU_ASSERT(rc == 0);
@@ -1577,6 +1584,7 @@ static int destroy_queues(void)
int scheduler_suite_term(void)
{
odp_pool_t pool;
+ odp_shm_t shm;
if (destroy_queues() != 0) {
fprintf(stderr, "error: failed to destroy queues\n");
@@ -1587,6 +1595,14 @@ int scheduler_suite_term(void)
if (odp_pool_destroy(pool) != 0)
fprintf(stderr, "error: failed to destroy pool\n");
+ shm = odp_shm_lookup(SHM_THR_ARGS_NAME);
+ if (odp_shm_free(shm) != 0)
+ fprintf(stderr, "error: failed to free shm\n");
+
+ shm = odp_shm_lookup(GLOBALS_SHM_NAME);
+ if (odp_shm_free(shm) != 0)
+ fprintf(stderr, "error: failed to free shm\n");
+
return 0;
}
diff --git a/test/common_plat/validation/api/shmem/shmem.c b/test/common_plat/validation/api/shmem/shmem.c
index cbff6738c..0e757a708 100644
--- a/test/common_plat/validation/api/shmem/shmem.c
+++ b/test/common_plat/validation/api/shmem/shmem.c
@@ -7,82 +7,712 @@
#include <odp_api.h>
#include <odp_cunit_common.h>
#include "shmem.h"
+#include <stdlib.h>
-#define ALIGE_SIZE (128)
-#define TESTNAME "cunit_test_shared_data"
+#define ALIGN_SIZE (128)
+#define MEM_NAME "test_shmem"
+#define NAME_LEN (sizeof(MEM_NAME) + 20)
#define TEST_SHARE_FOO (0xf0f0f0f0)
#define TEST_SHARE_BAR (0xf0f0f0f)
+#define SMALL_MEM 10
+#define MEDIUM_MEM 4096
+#define BIG_MEM 65536
+#define STRESS_SIZE 32 /* power of 2 and <=256 */
+#define STRESS_RANDOM_SZ 5
+#define STRESS_ITERATION 5000
-static odp_barrier_t test_barrier;
+typedef enum {
+ STRESS_FREE, /* entry is free and can be allocated */
+ STRESS_BUSY, /* entry is being processed: don't touch */
+ STRESS_ALLOC /* entry is allocated and can be freed */
+} stress_state_t;
-static int run_shm_thread(void *arg ODP_UNUSED)
+typedef struct {
+ stress_state_t state;
+ odp_shm_t shm;
+ char name[NAME_LEN];
+ void *address;
+ uint32_t flags;
+ uint32_t size;
+ uint64_t align;
+ uint8_t data_val;
+} stress_data_t;
+
+typedef struct {
+ odp_barrier_t test_barrier1;
+ odp_barrier_t test_barrier2;
+ odp_barrier_t test_barrier3;
+ odp_barrier_t test_barrier4;
+ uint32_t foo;
+ uint32_t bar;
+ odp_atomic_u32_t index;
+ uint32_t nb_threads;
+ odp_shm_t shm[MAX_WORKERS];
+ void *address[MAX_WORKERS];
+ char name[MAX_WORKERS][NAME_LEN];
+ odp_spinlock_t stress_lock;
+ stress_data_t stress[STRESS_SIZE];
+} shared_test_data_t;
+
+/* memory stuff expected to fit in a single page */
+typedef struct {
+ int data[SMALL_MEM];
+} shared_test_data_small_t;
+
+/* memory stuff expected to fit in a huge page */
+typedef struct {
+ int data[MEDIUM_MEM];
+} shared_test_data_medium_t;
+
+/* memory stuff expected to fit in many huge pages */
+typedef struct {
+ int data[BIG_MEM];
+} shared_test_data_big_t;
+
+/*
+ * thread part for the shmem_test_basic test
+ */
+static int run_test_basic_thread(void *arg ODP_UNUSED)
{
odp_shm_info_t info;
odp_shm_t shm;
- test_shared_data_t *test_shared_data;
+ shared_test_data_t *shared_test_data;
int thr;
- odp_barrier_wait(&test_barrier);
thr = odp_thread_id();
printf("Thread %i starts\n", thr);
- shm = odp_shm_lookup(TESTNAME);
+ shm = odp_shm_lookup(MEM_NAME);
CU_ASSERT(ODP_SHM_INVALID != shm);
- test_shared_data = odp_shm_addr(shm);
- CU_ASSERT(TEST_SHARE_FOO == test_shared_data->foo);
- CU_ASSERT(TEST_SHARE_BAR == test_shared_data->bar);
+ shared_test_data = odp_shm_addr(shm);
+ CU_ASSERT(NULL != shared_test_data);
+
+ odp_barrier_wait(&shared_test_data->test_barrier1);
+ odp_shm_print_all();
+ CU_ASSERT(TEST_SHARE_FOO == shared_test_data->foo);
+ CU_ASSERT(TEST_SHARE_BAR == shared_test_data->bar);
CU_ASSERT(0 == odp_shm_info(shm, &info));
- CU_ASSERT(0 == strcmp(TESTNAME, info.name));
+ CU_ASSERT(0 == strcmp(MEM_NAME, info.name));
CU_ASSERT(0 == info.flags);
- CU_ASSERT(test_shared_data == info.addr);
- CU_ASSERT(sizeof(test_shared_data_t) <= info.size);
-#ifdef MAP_HUGETLB
- CU_ASSERT(odp_sys_huge_page_size() == info.page_size);
-#else
- CU_ASSERT(odp_sys_page_size() == info.page_size);
-#endif
+ CU_ASSERT(shared_test_data == info.addr);
+ CU_ASSERT(sizeof(shared_test_data_t) <= info.size);
+ CU_ASSERT((info.page_size == odp_sys_huge_page_size()) ||
+ (info.page_size == odp_sys_page_size()))
odp_shm_print_all();
fflush(stdout);
return CU_get_number_of_failures();
}
-void shmem_test_odp_shm_sunnyday(void)
+/*
+ * test basic things: shmem creation, info, share, and free
+ */
+void shmem_test_basic(void)
{
pthrd_arg thrdarg;
odp_shm_t shm;
- test_shared_data_t *test_shared_data;
+ odp_shm_t shm2;
+ shared_test_data_t *shared_test_data;
odp_cpumask_t unused;
- shm = odp_shm_reserve(TESTNAME,
- sizeof(test_shared_data_t), ALIGE_SIZE, 0);
+ shm = odp_shm_reserve(MEM_NAME,
+ sizeof(shared_test_data_t), ALIGN_SIZE, 0);
CU_ASSERT(ODP_SHM_INVALID != shm);
- CU_ASSERT(odp_shm_to_u64(shm) != odp_shm_to_u64(ODP_SHM_INVALID));
+ CU_ASSERT(odp_shm_to_u64(shm) !=
+ odp_shm_to_u64(ODP_SHM_INVALID));
+
+ /* also check that another reserve with same name is accepted: */
+ shm2 = odp_shm_reserve(MEM_NAME,
+ sizeof(shared_test_data_t), ALIGN_SIZE, 0);
+ CU_ASSERT(ODP_SHM_INVALID != shm2);
+ CU_ASSERT(odp_shm_to_u64(shm2) !=
+ odp_shm_to_u64(ODP_SHM_INVALID));
CU_ASSERT(0 == odp_shm_free(shm));
- CU_ASSERT(ODP_SHM_INVALID == odp_shm_lookup(TESTNAME));
+ CU_ASSERT(0 == odp_shm_free(shm2));
+ CU_ASSERT(ODP_SHM_INVALID == odp_shm_lookup(MEM_NAME));
- shm = odp_shm_reserve(TESTNAME,
- sizeof(test_shared_data_t), ALIGE_SIZE, 0);
+ shm = odp_shm_reserve(MEM_NAME,
+ sizeof(shared_test_data_t), ALIGN_SIZE, 0);
CU_ASSERT(ODP_SHM_INVALID != shm);
- test_shared_data = odp_shm_addr(shm);
- CU_ASSERT_FATAL(NULL != test_shared_data);
- test_shared_data->foo = TEST_SHARE_FOO;
- test_shared_data->bar = TEST_SHARE_BAR;
+ shared_test_data = odp_shm_addr(shm);
+ CU_ASSERT_FATAL(NULL != shared_test_data);
+ shared_test_data->foo = TEST_SHARE_FOO;
+ shared_test_data->bar = TEST_SHARE_BAR;
+
+ thrdarg.numthrds = odp_cpumask_default_worker(&unused, 0);
+
+ if (thrdarg.numthrds > MAX_WORKERS)
+ thrdarg.numthrds = MAX_WORKERS;
+
+ odp_barrier_init(&shared_test_data->test_barrier1, thrdarg.numthrds);
+ odp_cunit_thread_create(run_test_basic_thread, &thrdarg);
+ CU_ASSERT(odp_cunit_thread_exit(&thrdarg) >= 0);
+
+ CU_ASSERT(0 == odp_shm_free(shm));
+}
+
+/*
+ * thread part for the shmem_test_reserve_after_fork
+ */
+static int run_test_reserve_after_fork(void *arg ODP_UNUSED)
+{
+ odp_shm_t shm;
+ shared_test_data_t *glob_data;
+ int thr;
+ int thr_index;
+ int size;
+ shared_test_data_small_t *pattern_small;
+ shared_test_data_medium_t *pattern_medium;
+ shared_test_data_big_t *pattern_big;
+ int i;
+
+ thr = odp_thread_id();
+ printf("Thread %i starts\n", thr);
+
+ shm = odp_shm_lookup(MEM_NAME);
+ glob_data = odp_shm_addr(shm);
+
+ /*
+ * odp_thread_id values are not guaranteed to be consecutive, so we
+ * create a consecutive ID
+ */
+ thr_index = odp_atomic_fetch_inc_u32(&glob_data->index);
+
+ /* allocate some memory (of different sizes) and fill with pattern */
+ snprintf(glob_data->name[thr_index], NAME_LEN, "%s-%09d",
+ MEM_NAME, thr_index);
+ switch (thr_index % 3) {
+ case 0:
+ size = sizeof(shared_test_data_small_t);
+ shm = odp_shm_reserve(glob_data->name[thr_index], size, 0, 0);
+ CU_ASSERT(ODP_SHM_INVALID != shm);
+ glob_data->shm[thr_index] = shm;
+ pattern_small = odp_shm_addr(shm);
+ CU_ASSERT_PTR_NOT_NULL(pattern_small);
+ for (i = 0; i < SMALL_MEM; i++)
+ pattern_small->data[i] = i;
+ break;
+ case 1:
+ size = sizeof(shared_test_data_medium_t);
+ shm = odp_shm_reserve(glob_data->name[thr_index], size, 0, 0);
+ CU_ASSERT(ODP_SHM_INVALID != shm);
+ glob_data->shm[thr_index] = shm;
+ pattern_medium = odp_shm_addr(shm);
+ CU_ASSERT_PTR_NOT_NULL(pattern_medium);
+ for (i = 0; i < MEDIUM_MEM; i++)
+ pattern_medium->data[i] = (i << 2);
+ break;
+ case 2:
+ size = sizeof(shared_test_data_big_t);
+ shm = odp_shm_reserve(glob_data->name[thr_index], size, 0, 0);
+ CU_ASSERT(ODP_SHM_INVALID != shm);
+ glob_data->shm[thr_index] = shm;
+ pattern_big = odp_shm_addr(shm);
+ CU_ASSERT_PTR_NOT_NULL(pattern_big);
+ for (i = 0; i < BIG_MEM; i++)
+ pattern_big->data[i] = (i >> 2);
+ break;
+ }
+
+ /* print block address */
+ printf("In thread: Block index: %d mapped at %lx\n",
+ thr_index, (long int)odp_shm_addr(shm));
+
+ odp_barrier_wait(&glob_data->test_barrier1);
+ odp_barrier_wait(&glob_data->test_barrier2);
+
+ fflush(stdout);
+ return CU_get_number_of_failures();
+}
+
+/*
+ * test sharing memory reserved after odp_thread creation (e.g. fork()):
+ */
+void shmem_test_reserve_after_fork(void)
+{
+ pthrd_arg thrdarg;
+ odp_shm_t shm;
+ odp_shm_t thr_shm;
+ shared_test_data_t *glob_data;
+ odp_cpumask_t unused;
+ int thr_index;
+ int i;
+ void *address;
+ shared_test_data_small_t *pattern_small;
+ shared_test_data_medium_t *pattern_medium;
+ shared_test_data_big_t *pattern_big;
+
+ shm = odp_shm_reserve(MEM_NAME, sizeof(shared_test_data_t), 0, 0);
+ CU_ASSERT(ODP_SHM_INVALID != shm);
+ glob_data = odp_shm_addr(shm);
+ CU_ASSERT_PTR_NOT_NULL(glob_data);
+
+ thrdarg.numthrds = odp_cpumask_default_worker(&unused, 0);
+ if (thrdarg.numthrds > MAX_WORKERS)
+ thrdarg.numthrds = MAX_WORKERS;
+
+ odp_barrier_init(&glob_data->test_barrier1, thrdarg.numthrds + 1);
+ odp_barrier_init(&glob_data->test_barrier2, thrdarg.numthrds + 1);
+ odp_atomic_store_u32(&glob_data->index, 0);
+
+ odp_cunit_thread_create(run_test_reserve_after_fork, &thrdarg);
+
+ /* wait until all threads have made their shm_reserve: */
+ odp_barrier_wait(&glob_data->test_barrier1);
+
+ /* perform a lookup of all memories: */
+ for (thr_index = 0; thr_index < thrdarg.numthrds; thr_index++) {
+ thr_shm = odp_shm_lookup(glob_data->name[thr_index]);
+ CU_ASSERT(thr_shm == glob_data->shm[thr_index]);
+ }
+
+ /* check that the patterns are correct: */
+ for (thr_index = 0; thr_index < thrdarg.numthrds; thr_index++) {
+ switch (thr_index % 3) {
+ case 0:
+ pattern_small =
+ odp_shm_addr(glob_data->shm[thr_index]);
+ CU_ASSERT_PTR_NOT_NULL(pattern_small);
+ for (i = 0; i < SMALL_MEM; i++)
+ CU_ASSERT(pattern_small->data[i] == i);
+ break;
+ case 1:
+ pattern_medium =
+ odp_shm_addr(glob_data->shm[thr_index]);
+ CU_ASSERT_PTR_NOT_NULL(pattern_medium);
+ for (i = 0; i < MEDIUM_MEM; i++)
+ CU_ASSERT(pattern_medium->data[i] == (i << 2));
+ break;
+ case 2:
+ pattern_big =
+ odp_shm_addr(glob_data->shm[thr_index]);
+ CU_ASSERT_PTR_NOT_NULL(pattern_big);
+ for (i = 0; i < BIG_MEM; i++)
+ CU_ASSERT(pattern_big->data[i] == (i >> 2));
+ break;
+ }
+ }
+
+ /*
+ * print the mapping address of the blocks
+ */
+ for (thr_index = 0; thr_index < thrdarg.numthrds; thr_index++) {
+ address = odp_shm_addr(glob_data->shm[thr_index]);
+ printf("In main Block index: %d mapped at %lx\n",
+ thr_index, (long int)address);
+ }
+
+ /* unblock the threads and let them terminate (no free is done): */
+ odp_barrier_wait(&glob_data->test_barrier2);
+
+ /* at the same time, (race), free all memories: */
+ for (thr_index = 0; thr_index < thrdarg.numthrds; thr_index++) {
+ thr_shm = glob_data->shm[thr_index];
+ CU_ASSERT(odp_shm_free(thr_shm) == 0);
+ }
+
+ /* wait for all thread endings: */
+ CU_ASSERT(odp_cunit_thread_exit(&thrdarg) >= 0);
+
+ /* just glob_data should remain: */
+
+ CU_ASSERT(0 == odp_shm_free(shm));
+}
+
+/*
+ * thread part for the shmem_test_singleva_after_fork
+ */
+static int run_test_singleva_after_fork(void *arg ODP_UNUSED)
+{
+ odp_shm_t shm;
+ shared_test_data_t *glob_data;
+ int thr;
+ int thr_index;
+ int size;
+ shared_test_data_small_t *pattern_small;
+ shared_test_data_medium_t *pattern_medium;
+ shared_test_data_big_t *pattern_big;
+ uint32_t i;
+ int ret;
+
+ thr = odp_thread_id();
+ printf("Thread %i starts\n", thr);
+
+ shm = odp_shm_lookup(MEM_NAME);
+ glob_data = odp_shm_addr(shm);
+
+ /*
+ * odp_thread_id values are not guaranteed to be consecutive, so we
+ * create a consecutive ID
+ */
+ thr_index = odp_atomic_fetch_inc_u32(&glob_data->index);
+
+ /* allocate some memory (of different sizes) and fill with pattern */
+ snprintf(glob_data->name[thr_index], NAME_LEN, "%s-%09d",
+ MEM_NAME, thr_index);
+ switch (thr_index % 3) {
+ case 0:
+ size = sizeof(shared_test_data_small_t);
+ shm = odp_shm_reserve(glob_data->name[thr_index], size,
+ 0, ODP_SHM_SINGLE_VA);
+ CU_ASSERT(ODP_SHM_INVALID != shm);
+ glob_data->shm[thr_index] = shm;
+ pattern_small = odp_shm_addr(shm);
+ CU_ASSERT_PTR_NOT_NULL(pattern_small);
+ glob_data->address[thr_index] = (void *)pattern_small;
+ for (i = 0; i < SMALL_MEM; i++)
+ pattern_small->data[i] = i;
+ break;
+ case 1:
+ size = sizeof(shared_test_data_medium_t);
+ shm = odp_shm_reserve(glob_data->name[thr_index], size,
+ 0, ODP_SHM_SINGLE_VA);
+ CU_ASSERT(ODP_SHM_INVALID != shm);
+ glob_data->shm[thr_index] = shm;
+ pattern_medium = odp_shm_addr(shm);
+ CU_ASSERT_PTR_NOT_NULL(pattern_medium);
+ glob_data->address[thr_index] = (void *)pattern_medium;
+ for (i = 0; i < MEDIUM_MEM; i++)
+ pattern_medium->data[i] = (i << 2);
+ break;
+ case 2:
+ size = sizeof(shared_test_data_big_t);
+ shm = odp_shm_reserve(glob_data->name[thr_index], size,
+ 0, ODP_SHM_SINGLE_VA);
+ CU_ASSERT(ODP_SHM_INVALID != shm);
+ glob_data->shm[thr_index] = shm;
+ pattern_big = odp_shm_addr(shm);
+ CU_ASSERT_PTR_NOT_NULL(pattern_big);
+ glob_data->address[thr_index] = (void *)pattern_big;
+ for (i = 0; i < BIG_MEM; i++)
+ pattern_big->data[i] = (i >> 2);
+ break;
+ }
+
+ /* print block address */
+ printf("In thread: Block index: %d mapped at %lx\n",
+ thr_index, (long int)odp_shm_addr(shm));
+
+ odp_barrier_wait(&glob_data->test_barrier1);
+ odp_barrier_wait(&glob_data->test_barrier2);
+
+ /* map each other's blocks, checking common address: */
+ for (i = 0; i < glob_data->nb_threads; i++) {
+ shm = odp_shm_lookup(glob_data->name[i]);
+ CU_ASSERT(shm == glob_data->shm[i]);
+ CU_ASSERT(odp_shm_addr(shm) == glob_data->address[i]);
+ }
+
+ /* wait for main control task and free the allocated block */
+ odp_barrier_wait(&glob_data->test_barrier3);
+ odp_barrier_wait(&glob_data->test_barrier4);
+ ret = odp_shm_free(glob_data->shm[thr_index]);
+ CU_ASSERT(ret == 0);
+
+ fflush(stdout);
+ return CU_get_number_of_failures();
+}
+
+/*
+ * test sharing memory reserved after odp_thread creation (e.g. fork()):
+ * with single VA flag.
+ */
+void shmem_test_singleva_after_fork(void)
+{
+ pthrd_arg thrdarg;
+ odp_shm_t shm;
+ odp_shm_t thr_shm;
+ shared_test_data_t *glob_data;
+ odp_cpumask_t unused;
+ int thr_index;
+ int i;
+ void *address;
+ shared_test_data_small_t *pattern_small;
+ shared_test_data_medium_t *pattern_medium;
+ shared_test_data_big_t *pattern_big;
+
+ shm = odp_shm_reserve(MEM_NAME, sizeof(shared_test_data_t),
+ 0, 0);
+ CU_ASSERT(ODP_SHM_INVALID != shm);
+ glob_data = odp_shm_addr(shm);
+ CU_ASSERT_PTR_NOT_NULL(glob_data);
thrdarg.numthrds = odp_cpumask_default_worker(&unused, 0);
+ if (thrdarg.numthrds > MAX_WORKERS)
+ thrdarg.numthrds = MAX_WORKERS;
+
+ glob_data->nb_threads = thrdarg.numthrds;
+ odp_barrier_init(&glob_data->test_barrier1, thrdarg.numthrds + 1);
+ odp_barrier_init(&glob_data->test_barrier2, thrdarg.numthrds + 1);
+ odp_barrier_init(&glob_data->test_barrier3, thrdarg.numthrds + 1);
+ odp_barrier_init(&glob_data->test_barrier4, thrdarg.numthrds + 1);
+ odp_atomic_store_u32(&glob_data->index, 0);
+
+ odp_cunit_thread_create(run_test_singleva_after_fork, &thrdarg);
+
+ /* wait until all threads have made their shm_reserve: */
+ odp_barrier_wait(&glob_data->test_barrier1);
+
+ /* perform a lookup of all memories: */
+ for (thr_index = 0; thr_index < thrdarg.numthrds; thr_index++) {
+ thr_shm = odp_shm_lookup(glob_data->name[thr_index]);
+ CU_ASSERT(thr_shm == glob_data->shm[thr_index]);
+ }
+
+ /* check that the patterns are correct: */
+ for (thr_index = 0; thr_index < thrdarg.numthrds; thr_index++) {
+ switch (thr_index % 3) {
+ case 0:
+ pattern_small =
+ odp_shm_addr(glob_data->shm[thr_index]);
+ CU_ASSERT_PTR_NOT_NULL(pattern_small);
+ for (i = 0; i < SMALL_MEM; i++)
+ CU_ASSERT(pattern_small->data[i] == i);
+ break;
+ case 1:
+ pattern_medium =
+ odp_shm_addr(glob_data->shm[thr_index]);
+ CU_ASSERT_PTR_NOT_NULL(pattern_medium);
+ for (i = 0; i < MEDIUM_MEM; i++)
+ CU_ASSERT(pattern_medium->data[i] == (i << 2));
+ break;
+ case 2:
+ pattern_big =
+ odp_shm_addr(glob_data->shm[thr_index]);
+ CU_ASSERT_PTR_NOT_NULL(pattern_big);
+ for (i = 0; i < BIG_MEM; i++)
+ CU_ASSERT(pattern_big->data[i] == (i >> 2));
+ break;
+ }
+ }
+
+ /*
+ * check that the mapping address is common to all (SINGLE_VA):
+ */
+ for (thr_index = 0; thr_index < thrdarg.numthrds; thr_index++) {
+ address = odp_shm_addr(glob_data->shm[thr_index]);
+ CU_ASSERT(glob_data->address[thr_index] == address);
+ }
+
+ /* unblock the threads and let them map each other's blocks: */
+ odp_barrier_wait(&glob_data->test_barrier2);
+
+ /* then check mem status */
+ odp_barrier_wait(&glob_data->test_barrier3);
+
+ /* unblock the threads and let them free all thread blocks: */
+ odp_barrier_wait(&glob_data->test_barrier4);
+
+ /* wait for all thread endings: */
+ CU_ASSERT(odp_cunit_thread_exit(&thrdarg) >= 0);
+
+ /* just glob_data should remain: */
+
+ CU_ASSERT(0 == odp_shm_free(shm));
+}
+
+/*
+ * thread part for the shmem_test_stress
+ */
+static int run_test_stress(void *arg ODP_UNUSED)
+{
+ odp_shm_t shm;
+ uint8_t *address;
+ shared_test_data_t *glob_data;
+ uint8_t random_bytes[STRESS_RANDOM_SZ];
+ uint32_t index;
+ uint32_t size;
+ uint64_t align;
+ uint32_t flags;
+ uint8_t data;
+ uint32_t iter;
+ uint32_t i;
+
+ shm = odp_shm_lookup(MEM_NAME);
+ glob_data = odp_shm_addr(shm);
+ CU_ASSERT_PTR_NOT_NULL(glob_data);
+
+ /* wait for general GO! */
+ odp_barrier_wait(&glob_data->test_barrier1);
+
+ /*
+ * at each iteration: pick up a random index for
+ * glob_data->stress[index]: If the entry is free, allocate mem
+ * randomly. If it is already allocated, make checks and free it:
+ * Note that a different thread can allocate or free a given block
+ */
+ for (iter = 0; iter < STRESS_ITERATION; iter++) {
+ /* get 5 random bytes from which index, size, align, flags
+ * and data will be derived:
+ */
+ odp_random_data(random_bytes, STRESS_RANDOM_SZ, 0);
+ index = random_bytes[0] & (STRESS_SIZE - 1);
+ odp_spinlock_lock(&glob_data->stress_lock);
+
+ switch (glob_data->stress[index].state) {
+ case STRESS_FREE:
+ /* allocate a new block for this entry */
+
+ glob_data->stress[index].state = STRESS_BUSY;
+ odp_spinlock_unlock(&glob_data->stress_lock);
+
+ size = (random_bytes[1] + 1) << 6; /* up to 16Kb */
+ /* we just play with the VA flag. randomly setting
+ * the mlock flag may exceed user ulimit -l
+ */
+ flags = random_bytes[2] & ODP_SHM_SINGLE_VA;
+ align = (random_bytes[3] + 1) << 6;/* up to 16Kb */
+ data = random_bytes[4];
+
+ snprintf(glob_data->stress[index].name, NAME_LEN,
+ "%s-%09d", MEM_NAME, index);
+ shm = odp_shm_reserve(glob_data->stress[index].name,
+ size, align, flags);
+ glob_data->stress[index].shm = shm;
+ if (shm == ODP_SHM_INVALID) { /* out of mem ? */
+ odp_spinlock_lock(&glob_data->stress_lock);
+ glob_data->stress[index].state = STRESS_ALLOC;
+ odp_spinlock_unlock(&glob_data->stress_lock);
+ continue;
+ }
+
+ address = odp_shm_addr(shm);
+ CU_ASSERT_PTR_NOT_NULL(address);
+ glob_data->stress[index].address = address;
+ glob_data->stress[index].flags = flags;
+ glob_data->stress[index].size = size;
+ glob_data->stress[index].align = align;
+ glob_data->stress[index].data_val = data;
+
+ /* write some data: writing each byte would be a
+ * waste of time: just make sure each page is reached */
+ for (i = 0; i < size; i += 256)
+ address[i] = (data++) & 0xFF;
+ odp_spinlock_lock(&glob_data->stress_lock);
+ glob_data->stress[index].state = STRESS_ALLOC;
+ odp_spinlock_unlock(&glob_data->stress_lock);
+
+ break;
+
+ case STRESS_ALLOC:
+ /* free the block for this entry */
+
+ glob_data->stress[index].state = STRESS_BUSY;
+ odp_spinlock_unlock(&glob_data->stress_lock);
+ shm = glob_data->stress[index].shm;
+
+ if (shm == ODP_SHM_INVALID) { /* out of mem ? */
+ odp_spinlock_lock(&glob_data->stress_lock);
+ glob_data->stress[index].state = STRESS_FREE;
+ odp_spinlock_unlock(&glob_data->stress_lock);
+ continue;
+ }
+
+ CU_ASSERT(odp_shm_lookup(glob_data->stress[index].name)
+ != 0);
+
+ address = odp_shm_addr(shm);
+ CU_ASSERT_PTR_NOT_NULL(address);
+
+ align = glob_data->stress[index].align;
+ if (align) {
+ align = glob_data->stress[index].align;
+ CU_ASSERT(((uintptr_t)address & (align - 1))
+ == 0)
+ }
+
+ flags = glob_data->stress[index].flags;
+ if (flags & ODP_SHM_SINGLE_VA)
+ CU_ASSERT(glob_data->stress[index].address ==
+ address)
+
+ /* check that data is reachable and correct: */
+ data = glob_data->stress[index].data_val;
+ size = glob_data->stress[index].size;
+ for (i = 0; i < size; i += 256) {
+ CU_ASSERT(address[i] == (data & 0xFF));
+ data++;
+ }
+
+ CU_ASSERT(!odp_shm_free(glob_data->stress[index].shm));
+
+ odp_spinlock_lock(&glob_data->stress_lock);
+ glob_data->stress[index].state = STRESS_FREE;
+ odp_spinlock_unlock(&glob_data->stress_lock);
+
+ break;
+
+ case STRESS_BUSY:
+ default:
+ odp_spinlock_unlock(&glob_data->stress_lock);
+ break;
+ }
+ }
+
+ fflush(stdout);
+ return CU_get_number_of_failures();
+}
+
+/*
+ * stress tests
+ */
+void shmem_test_stress(void)
+{
+ pthrd_arg thrdarg;
+ odp_shm_t shm;
+ odp_shm_t globshm;
+ shared_test_data_t *glob_data;
+ odp_cpumask_t unused;
+ uint32_t i;
+
+ globshm = odp_shm_reserve(MEM_NAME, sizeof(shared_test_data_t),
+ 0, 0);
+ CU_ASSERT(ODP_SHM_INVALID != globshm);
+ glob_data = odp_shm_addr(globshm);
+ CU_ASSERT_PTR_NOT_NULL(glob_data);
+
+ thrdarg.numthrds = odp_cpumask_default_worker(&unused, 0);
if (thrdarg.numthrds > MAX_WORKERS)
thrdarg.numthrds = MAX_WORKERS;
- odp_barrier_init(&test_barrier, thrdarg.numthrds);
- odp_cunit_thread_create(run_shm_thread, &thrdarg);
+ glob_data->nb_threads = thrdarg.numthrds;
+ odp_barrier_init(&glob_data->test_barrier1, thrdarg.numthrds);
+ odp_spinlock_init(&glob_data->stress_lock);
+
+ /* before starting the threads, mark all entries as free: */
+ for (i = 0; i < STRESS_SIZE; i++)
+ glob_data->stress[i].state = STRESS_FREE;
+
+ /* create threads */
+ odp_cunit_thread_create(run_test_stress, &thrdarg);
+
+ /* wait for all thread endings: */
CU_ASSERT(odp_cunit_thread_exit(&thrdarg) >= 0);
+
+ /* release left overs: */
+ for (i = 0; i < STRESS_SIZE; i++) {
+ shm = glob_data->stress[i].shm;
+ if ((glob_data->stress[i].state == STRESS_ALLOC) &&
+ (glob_data->stress[i].shm != ODP_SHM_INVALID)) {
+ CU_ASSERT(odp_shm_lookup(glob_data->stress[i].name) ==
+ shm);
+ CU_ASSERT(!odp_shm_free(shm));
+ }
+ }
+
+ CU_ASSERT(0 == odp_shm_free(globshm));
+
+ /* check that no memory is left over: */
}
odp_testinfo_t shmem_suite[] = {
- ODP_TEST_INFO(shmem_test_odp_shm_sunnyday),
+ ODP_TEST_INFO(shmem_test_basic),
+ ODP_TEST_INFO(shmem_test_reserve_after_fork),
+ ODP_TEST_INFO(shmem_test_singleva_after_fork),
+ ODP_TEST_INFO(shmem_test_stress),
ODP_TEST_INFO_NULL,
};
diff --git a/test/common_plat/validation/api/shmem/shmem.h b/test/common_plat/validation/api/shmem/shmem.h
index a5893d931..092aa8005 100644
--- a/test/common_plat/validation/api/shmem/shmem.h
+++ b/test/common_plat/validation/api/shmem/shmem.h
@@ -10,7 +10,10 @@
#include <odp_cunit_common.h>
/* test functions: */
-void shmem_test_odp_shm_sunnyday(void);
+void shmem_test_basic(void);
+void shmem_test_reserve_after_fork(void);
+void shmem_test_singleva_after_fork(void);
+void shmem_test_stress(void);
/* test arrays: */
extern odp_testinfo_t shmem_suite[];
diff --git a/test/common_plat/validation/api/timer/timer.c b/test/common_plat/validation/api/timer/timer.c
index 0007639cc..1945afaa3 100644
--- a/test/common_plat/validation/api/timer/timer.c
+++ b/test/common_plat/validation/api/timer/timer.c
@@ -156,7 +156,7 @@ void timer_test_odp_timer_cancel(void)
tparam.num_timers = 1;
tparam.priv = 0;
tparam.clk_src = ODP_CLOCK_CPU;
- tp = odp_timer_pool_create("timer_pool0", &tparam);
+ tp = odp_timer_pool_create(NULL, &tparam);
if (tp == ODP_TIMER_POOL_INVALID)
CU_FAIL_FATAL("Timer pool create failed");
diff --git a/test/common_plat/validation/api/traffic_mngr/traffic_mngr.c b/test/common_plat/validation/api/traffic_mngr/traffic_mngr.c
index fcc71876c..027175807 100644
--- a/test/common_plat/validation/api/traffic_mngr/traffic_mngr.c
+++ b/test/common_plat/validation/api/traffic_mngr/traffic_mngr.c
@@ -944,8 +944,8 @@ static void dump_rcvd_pkts(uint32_t first_rcv_idx, uint32_t last_rcv_idx)
odp_packet_t rcv_pkt;
uint32_t rcv_idx;
int32_t xmt_idx;
- uint16_t unique_id;
- uint8_t is_ipv4;
+ uint16_t unique_id = 0;
+ uint8_t is_ipv4 = 0;
int rc;
for (rcv_idx = first_rcv_idx; rcv_idx <= last_rcv_idx; rcv_idx++) {
diff --git a/test/linux-generic/pktio_ipc/ipc_common.c b/test/linux-generic/pktio_ipc/ipc_common.c
index 387c92141..85cbc8b41 100644
--- a/test/linux-generic/pktio_ipc/ipc_common.c
+++ b/test/linux-generic/pktio_ipc/ipc_common.c
@@ -8,7 +8,8 @@
/** Run time in seconds */
int run_time_sec;
-int ipc_name_space;
+/** Pid of the master process */
+int master_pid;
int ipc_odp_packet_send_or_free(odp_pktio_t pktio,
odp_packet_t pkt_tbl[], int num)
@@ -33,6 +34,7 @@ int ipc_odp_packet_send_or_free(odp_pktio_t pktio,
while (sent != num) {
ret = odp_pktout_send(pktout, &pkt_tbl[sent], num - sent);
if (ret < 0) {
+ EXAMPLE_ERR("odp_pktout_send return %d\n", ret);
for (i = sent; i < num; i++)
odp_packet_free(pkt_tbl[i]);
return -1;
@@ -43,6 +45,7 @@ int ipc_odp_packet_send_or_free(odp_pktio_t pktio,
if (odp_time_cmp(end_time, odp_time_local()) < 0) {
for (i = sent; i < num; i++)
odp_packet_free(pkt_tbl[i]);
+ EXAMPLE_ERR("Send Timeout!\n");
return -1;
}
}
@@ -50,17 +53,25 @@ int ipc_odp_packet_send_or_free(odp_pktio_t pktio,
return 0;
}
-odp_pktio_t create_pktio(odp_pool_t pool)
+odp_pktio_t create_pktio(odp_pool_t pool, int master_pid)
{
odp_pktio_param_t pktio_param;
odp_pktio_t ipc_pktio;
+ char name[30];
odp_pktio_param_init(&pktio_param);
- printf("pid: %d, create IPC pktio\n", getpid());
- ipc_pktio = odp_pktio_open("ipc_pktio", pool, &pktio_param);
- if (ipc_pktio == ODP_PKTIO_INVALID)
- EXAMPLE_ABORT("Error: ipc pktio create failed.\n");
+ if (master_pid)
+ sprintf(name, TEST_IPC_PKTIO_PID_NAME, master_pid);
+ else
+ sprintf(name, TEST_IPC_PKTIO_NAME);
+
+ printf("pid: %d, create IPC pktio %s\n", getpid(), name);
+ ipc_pktio = odp_pktio_open(name, pool, &pktio_param);
+ if (ipc_pktio == ODP_PKTIO_INVALID) {
+ EXAMPLE_ERR("Error: ipc pktio %s create failed.\n", name);
+ return ODP_PKTIO_INVALID;
+ }
if (odp_pktin_queue_config(ipc_pktio, NULL)) {
EXAMPLE_ERR("Input queue config failed\n");
@@ -88,16 +99,16 @@ void parse_args(int argc, char *argv[])
int long_index;
static struct option longopts[] = {
{"time", required_argument, NULL, 't'},
- {"ns", required_argument, NULL, 'n'}, /* ipc name space */
+ {"pid", required_argument, NULL, 'p'}, /* master process pid */
{"help", no_argument, NULL, 'h'}, /* return 'h' */
{NULL, 0, NULL, 0}
};
run_time_sec = 0; /* loop forever if time to run is 0 */
- ipc_name_space = 0;
+ master_pid = 0;
while (1) {
- opt = getopt_long(argc, argv, "+t:n:h",
+ opt = getopt_long(argc, argv, "+t:p:h",
longopts, &long_index);
if (opt == -1)
@@ -107,24 +118,18 @@ void parse_args(int argc, char *argv[])
case 't':
run_time_sec = atoi(optarg);
break;
- case 'n':
- ipc_name_space = atoi(optarg);
+ case 'p':
+ master_pid = atoi(optarg);
break;
case 'h':
+ default:
usage(argv[0]);
exit(EXIT_SUCCESS);
break;
- default:
- break;
}
}
optind = 1; /* reset 'extern optind' from the getopt lib */
-
- if (!ipc_name_space) {
- usage(argv[0]);
- exit(1);
- }
}
/**
diff --git a/test/linux-generic/pktio_ipc/ipc_common.h b/test/linux-generic/pktio_ipc/ipc_common.h
index 99276b599..8804994e1 100644
--- a/test/linux-generic/pktio_ipc/ipc_common.h
+++ b/test/linux-generic/pktio_ipc/ipc_common.h
@@ -30,7 +30,7 @@
/** @def SHM_PKT_POOL_BUF_SIZE
* @brief Buffer size of the packet pool buffer
*/
-#define SHM_PKT_POOL_BUF_SIZE 1856
+#define SHM_PKT_POOL_BUF_SIZE 100
/** @def MAX_PKT_BURST
* @brief Maximum number of packet bursts
@@ -46,6 +46,12 @@
#define TEST_ALLOC_MAGIC 0x1234adcd
+#define TEST_IPC_PKTIO_NAME "ipc:ipktio"
+#define TEST_IPC_PKTIO_PID_NAME "ipc:%d:ipktio"
+
+/** Can be any name, same or not the same. */
+#define TEST_IPC_POOL_NAME "ipc_packet_pool"
+
/** magic number and sequence at start of packet payload */
typedef struct ODP_PACKED {
odp_u32be_t magic;
@@ -63,8 +69,8 @@ char *pktio_name;
/** Run time in seconds */
int run_time_sec;
-/** IPC name space id /dev/shm/odp-nsid-objname */
-int ipc_name_space;
+/** PID of the master process */
+int master_pid;
/* helper funcs */
void parse_args(int argc, char *argv[]);
@@ -75,11 +81,12 @@ void usage(char *progname);
* Create a ipc pktio handle.
*
* @param pool Pool to associate with device for packet RX/TX
+ * @param master_pid Pid of master process
*
* @return The handle of the created pktio object.
* @retval ODP_PKTIO_INVALID if the create fails.
*/
-odp_pktio_t create_pktio(odp_pool_t pool);
+odp_pktio_t create_pktio(odp_pool_t pool, int master_pid);
/** Spin and send all packet from table
*
diff --git a/test/linux-generic/pktio_ipc/pktio_ipc1.c b/test/linux-generic/pktio_ipc/pktio_ipc1.c
index 5c1da2368..705c205db 100644
--- a/test/linux-generic/pktio_ipc/pktio_ipc1.c
+++ b/test/linux-generic/pktio_ipc/pktio_ipc1.c
@@ -23,9 +23,8 @@
*/
static int pktio_run_loop(odp_pool_t pool)
{
- int thr;
int pkts;
- odp_pktio_t ipc_pktio;
+ odp_pktio_t ipc_pktio = ODP_PKTIO_INVALID;
odp_packet_t pkt_tbl[MAX_PKT_BURST];
uint64_t cnt = 0; /* increasing counter on each send packet */
uint64_t cnt_recv = 0; /* increasing counter to validate
@@ -42,22 +41,38 @@ static int pktio_run_loop(odp_pool_t pool)
odp_time_t wait;
int ret;
odp_pktin_queue_t pktin;
+ char name[30];
- thr = odp_thread_id();
-
- ipc_pktio = odp_pktio_lookup("ipc_pktio");
- if (ipc_pktio == ODP_PKTIO_INVALID) {
- EXAMPLE_ERR(" [%02i] Error: lookup of pktio %s failed\n",
- thr, "ipc_pktio");
- return -2;
- }
- printf(" [%02i] looked up ipc_pktio:%02" PRIu64 ", burst mode\n",
- thr, odp_pktio_to_u64(ipc_pktio));
+ if (master_pid)
+ sprintf(name, TEST_IPC_PKTIO_PID_NAME, master_pid);
+ else
+ sprintf(name, TEST_IPC_PKTIO_NAME);
wait = odp_time_local_from_ns(run_time_sec * ODP_TIME_SEC_IN_NS);
start_cycle = odp_time_local();
current_cycle = start_cycle;
+ for (;;) {
+ if (run_time_sec) {
+ cycle = odp_time_local();
+ diff = odp_time_diff(cycle, start_cycle);
+ if (odp_time_cmp(wait, diff) < 0) {
+ printf("timeout exit, run_time_sec %d\n",
+ run_time_sec);
+ return -1;
+ }
+ }
+
+ ipc_pktio = create_pktio(pool, master_pid);
+ if (ipc_pktio != ODP_PKTIO_INVALID)
+ break;
+ if (!master_pid)
+ break;
+ }
+
+ if (ipc_pktio == ODP_PKTIO_INVALID)
+ return -1;
+
if (odp_pktin_queue(ipc_pktio, &pktin, 1) != 1) {
EXAMPLE_ERR("no input queue\n");
return -1;
@@ -110,8 +125,12 @@ static int pktio_run_loop(odp_pool_t pool)
size_t off;
off = odp_packet_l4_offset(pkt);
- if (off == ODP_PACKET_OFFSET_INVALID)
- EXAMPLE_ABORT("invalid l4 offset\n");
+ if (off == ODP_PACKET_OFFSET_INVALID) {
+ stat_errors++;
+ stat_free++;
+ odp_packet_free(pkt);
+ EXAMPLE_ERR("invalid l4 offset\n");
+ }
off += ODPH_UDPHDR_LEN;
ret = odp_packet_copy_to_mem(pkt, off,
@@ -279,17 +298,13 @@ int main(int argc, char *argv[])
odp_pool_t pool;
odp_pool_param_t params;
odp_instance_t instance;
- odp_platform_init_t plat_idata;
int ret;
/* Parse and store the application arguments */
parse_args(argc, argv);
- memset(&plat_idata, 0, sizeof(odp_platform_init_t));
- plat_idata.ipc_ns = ipc_name_space;
-
/* Init ODP before calling anything else */
- if (odp_init_global(&instance, NULL, &plat_idata)) {
+ if (odp_init_global(&instance, NULL, NULL)) {
EXAMPLE_ERR("Error: ODP global init failed.\n");
exit(EXIT_FAILURE);
}
@@ -310,7 +325,7 @@ int main(int argc, char *argv[])
params.pkt.num = SHM_PKT_POOL_SIZE;
params.type = ODP_POOL_PACKET;
- pool = odp_pool_create("packet_pool1", &params);
+ pool = odp_pool_create(TEST_IPC_POOL_NAME, &params);
if (pool == ODP_POOL_INVALID) {
EXAMPLE_ERR("Error: packet pool create failed.\n");
exit(EXIT_FAILURE);
@@ -318,8 +333,6 @@ int main(int argc, char *argv[])
odp_pool_print(pool);
- create_pktio(pool);
-
ret = pktio_run_loop(pool);
if (odp_pool_destroy(pool)) {
diff --git a/test/linux-generic/pktio_ipc/pktio_ipc2.c b/test/linux-generic/pktio_ipc/pktio_ipc2.c
index 5c1f142b6..daf384137 100644
--- a/test/linux-generic/pktio_ipc/pktio_ipc2.c
+++ b/test/linux-generic/pktio_ipc/pktio_ipc2.c
@@ -16,9 +16,9 @@
#include "ipc_common.h"
-static int ipc_second_process(void)
+static int ipc_second_process(int master_pid)
{
- odp_pktio_t ipc_pktio;
+ odp_pktio_t ipc_pktio = ODP_PKTIO_INVALID;
odp_pool_param_t params;
odp_pool_t pool;
odp_packet_t pkt_tbl[MAX_PKT_BURST];
@@ -40,18 +40,41 @@ static int ipc_second_process(void)
params.pkt.num = SHM_PKT_POOL_SIZE;
params.type = ODP_POOL_PACKET;
- pool = odp_pool_create("packet_pool2", &params);
+ pool = odp_pool_create(TEST_IPC_POOL_NAME, &params);
if (pool == ODP_POOL_INVALID) {
EXAMPLE_ERR("Error: packet pool create failed.\n");
exit(EXIT_FAILURE);
}
- ipc_pktio = create_pktio(pool);
-
wait = odp_time_local_from_ns(run_time_sec * ODP_TIME_SEC_IN_NS);
start_cycle = odp_time_local();
+ for (;;) {
+ /* exit loop if time specified */
+ if (run_time_sec) {
+ cycle = odp_time_local();
+ diff = odp_time_diff(cycle, start_cycle);
+ if (odp_time_cmp(wait, diff) < 0) {
+ printf("timeout exit, run_time_sec %d\n",
+ run_time_sec);
+ goto not_started;
+ }
+ }
+
+ ipc_pktio = create_pktio(pool, master_pid);
+ if (ipc_pktio != ODP_PKTIO_INVALID)
+ break;
+ if (!master_pid)
+ break;
+ }
+
+ if (ipc_pktio == ODP_PKTIO_INVALID) {
+ odp_pool_destroy(pool);
+ return -1;
+ }
+
if (odp_pktin_queue(ipc_pktio, &pktin, 1) != 1) {
+ odp_pool_destroy(pool);
EXAMPLE_ERR("no input queue\n");
return -1;
}
@@ -97,8 +120,12 @@ static int ipc_second_process(void)
size_t off;
off = odp_packet_l4_offset(pkt);
- if (off == ODP_PACKET_OFFSET_INVALID)
- EXAMPLE_ABORT("invalid l4 offset\n");
+ if (off == ODP_PACKET_OFFSET_INVALID) {
+ EXAMPLE_ERR("invalid l4 offset\n");
+ for (int j = i; j < pkts; j++)
+ odp_packet_free(pkt_tbl[j]);
+ break;
+ }
off += ODPH_UDPHDR_LEN;
ret = odp_packet_copy_to_mem(pkt, off, sizeof(head),
@@ -106,8 +133,12 @@ static int ipc_second_process(void)
if (ret)
EXAMPLE_ABORT("unable copy out head data");
- if (head.magic != TEST_SEQ_MAGIC)
- EXAMPLE_ABORT("Wrong head magic!");
+ if (head.magic != TEST_SEQ_MAGIC) {
+ EXAMPLE_ERR("Wrong head magic! %x", head.magic);
+ for (int j = i; j < pkts; j++)
+ odp_packet_free(pkt_tbl[j]);
+ break;
+ }
/* Modify magic number in packet */
head.magic = TEST_SEQ_MAGIC_2;
@@ -118,7 +149,7 @@ static int ipc_second_process(void)
}
/* send all packets back */
- ret = ipc_odp_packet_send_or_free(ipc_pktio, pkt_tbl, pkts);
+ ret = ipc_odp_packet_send_or_free(ipc_pktio, pkt_tbl, i);
if (ret < 0)
EXAMPLE_ABORT("can not send packets\n");
@@ -176,16 +207,12 @@ not_started:
int main(int argc, char *argv[])
{
odp_instance_t instance;
- odp_platform_init_t plat_idata;
int ret;
/* Parse and store the application arguments */
parse_args(argc, argv);
- memset(&plat_idata, 0, sizeof(odp_platform_init_t));
- plat_idata.ipc_ns = ipc_name_space;
-
- if (odp_init_global(&instance, NULL, &plat_idata)) {
+ if (odp_init_global(&instance, NULL, NULL)) {
EXAMPLE_ERR("Error: ODP global init failed.\n");
exit(EXIT_FAILURE);
}
@@ -196,7 +223,7 @@ int main(int argc, char *argv[])
exit(EXIT_FAILURE);
}
- ret = ipc_second_process();
+ ret = ipc_second_process(master_pid);
if (odp_term_local()) {
EXAMPLE_ERR("Error: odp_term_local() failed.\n");
diff --git a/test/linux-generic/pktio_ipc/pktio_ipc_run.sh b/test/linux-generic/pktio_ipc/pktio_ipc_run.sh
index bd64bafc2..52e8d42a0 100755
--- a/test/linux-generic/pktio_ipc/pktio_ipc_run.sh
+++ b/test/linux-generic/pktio_ipc/pktio_ipc_run.sh
@@ -20,55 +20,63 @@ PATH=.:$PATH
run()
{
local ret=0
- IPC_NS=`expr $$ + 5000`
- IPC_NS=`expr ${IPC_NS} % 65000`
- IPC_NS=`expr ${IPC_NS} + 2`
- echo "Using ns ${IPC_NS}"
-
#if test was interrupted with CTRL+c than files
#might remain in shm. Needed cleanely delete them.
- rm -rf /dev/shm/odp-${IPC_NS}* 2>&1 > /dev/null
+ rm -rf /tmp/odp-* 2>&1 > /dev/null
echo "==== run pktio_ipc1 then pktio_ipc2 ===="
- pktio_ipc1${EXEEXT} -n ${IPC_NS} -t 30 &
+ pktio_ipc1${EXEEXT} -t 10 &
IPC_PID=$!
- pktio_ipc2${EXEEXT} -n ${IPC_NS} -t 10
+ pktio_ipc2${EXEEXT} -p ${IPC_PID} -t 5
ret=$?
# pktio_ipc1 should do clean up and exit just
# after pktio_ipc2 exited. If it does not happen
# kill him in test.
- sleep 1
- kill ${IPC_PID} 2>&1 > /dev/null
+ sleep 13
+ (kill ${IPC_PID} 2>&1 > /dev/null ) > /dev/null
if [ $? -eq 0 ]; then
- rm -rf /dev/shm/odp-${IPC_NS}* 2>&1 > /dev/null
+ echo "pktio_ipc1${EXEEXT} was killed"
+ ls -l /tmp/odp* 2> /dev/null
+ rm -rf /tmp/odp-${IPC_PID}* 2>&1 > /dev/null
+ else
+ echo "normal exit of 2 application"
+ ls -l /tmp/odp* 2> /dev/null
fi
if [ $ret -ne 0 ]; then
echo "!!!First stage FAILED $ret!!!"
- ls -l /dev/shm/
exit $ret
else
echo "First stage PASSED"
fi
-
echo "==== run pktio_ipc2 then pktio_ipc1 ===="
- IPC_NS=`expr $IPC_NS - 1`
- echo "Using ns ${IPC_NS}"
-
- pktio_ipc2${EXEEXT} -n ${IPC_NS} -t 10 &
+ pktio_ipc2${EXEEXT} -t 10 &
IPC_PID=$!
- pktio_ipc1${EXEEXT} -n ${IPC_NS} -t 20
+ pktio_ipc1${EXEEXT} -p ${IPC_PID} -t 5
ret=$?
- (kill ${IPC_PID} 2>&1 > /dev/null) > /dev/null || true
+ # pktio_ipc2 do not exit on pktio_ipc1 disconnect
+ # wait until it exits cleanly
+ sleep 13
+ (kill ${IPC_PID} 2>&1 > /dev/null ) > /dev/null
+ if [ $? -eq 0 ]; then
+ echo "pktio_ipc2${EXEEXT} was killed"
+ ls -l /tmp/odp* 2> /dev/null
+ rm -rf /tmp/odp-${IPC_PID}* 2>&1 > /dev/null
+ else
+ echo "normal exit of 2 application"
+ ls -l /tmp/odp* 2> /dev/null
+ fi
if [ $ret -ne 0 ]; then
echo "!!! FAILED !!!"
- ls -l /dev/shm/
+ ls -l /tmp/odp* 2> /dev/null
+ rm -rf /tmp/odp-${IPC_PID}* 2>&1 > /dev/null
exit $ret
else
+ ls -l /tmp/odp* 2> /dev/null
echo "Second stage PASSED"
fi
diff --git a/test/linux-generic/validation/api/shmem/.gitignore b/test/linux-generic/validation/api/shmem/.gitignore
index 76270794c..74195f576 100644
--- a/test/linux-generic/validation/api/shmem/.gitignore
+++ b/test/linux-generic/validation/api/shmem/.gitignore
@@ -1,2 +1,3 @@
shmem_linux
-shmem_odp
+shmem_odp1
+shmem_odp2
diff --git a/test/linux-generic/validation/api/shmem/Makefile.am b/test/linux-generic/validation/api/shmem/Makefile.am
index 341747f81..b0ae62738 100644
--- a/test/linux-generic/validation/api/shmem/Makefile.am
+++ b/test/linux-generic/validation/api/shmem/Makefile.am
@@ -2,19 +2,27 @@ include ../Makefile.inc
#the main test program is shmem_linux, which, in turn, starts a shmem_odp:
test_PROGRAMS = shmem_linux$(EXEEXT)
-test_extra_PROGRAMS = shmem_odp$(EXEEXT)
+test_extra_PROGRAMS = shmem_odp1$(EXEEXT) shmem_odp2$(EXEEXT)
test_extradir = $(testdir)
#shmem_linux is stand alone, pure linux (no ODP):
dist_shmem_linux_SOURCES = shmem_linux.c
shmem_linux_LDFLAGS = $(AM_LDFLAGS) -lrt
-#shmem_odp is the odp part:
-dist_shmem_odp_SOURCES = shmem_odp.c
-shmem_odp_CFLAGS = $(AM_CFLAGS) \
+#shmem_odp1 and shmem_odp2 are the 2 ODP processes:
+dist_shmem_odp1_SOURCES = shmem_odp1.c
+shmem_odp1_CFLAGS = $(AM_CFLAGS) \
$(INCCUNIT_COMMON) \
$(INCODP)
-shmem_odp_LDFLAGS = $(AM_LDFLAGS)
-shmem_odp_LDADD = $(LIBCUNIT_COMMON) $(LIBODP)
+shmem_odp1_LDFLAGS = $(AM_LDFLAGS)
+shmem_odp1_LDADD = $(LIBCUNIT_COMMON) $(LIBODP)
-noinst_HEADERS = shmem_common.h shmem_linux.h shmem_odp.h
+dist_shmem_odp2_SOURCES = shmem_odp2.c
+shmem_odp2_CFLAGS = $(AM_CFLAGS) \
+ $(INCCUNIT_COMMON) \
+ $(INCODP)
+shmem_odp2_LDFLAGS = $(AM_LDFLAGS)
+shmem_odp2_LDADD = $(LIBCUNIT_COMMON) $(LIBODP)
+
+
+noinst_HEADERS = shmem_common.h shmem_linux.h shmem_odp1.h shmem_odp2.h
diff --git a/test/linux-generic/validation/api/shmem/shmem_linux.c b/test/linux-generic/validation/api/shmem/shmem_linux.c
index 212a6c13a..2f4c7628d 100644
--- a/test/linux-generic/validation/api/shmem/shmem_linux.c
+++ b/test/linux-generic/validation/api/shmem/shmem_linux.c
@@ -5,8 +5,10 @@
*/
/* this test makes sure that odp shared memory created with the ODP_SHM_PROC
- * flag is visible under linux. It therefore checks both that the device
- * name under /dev/shm is correct, and also checks that the memory contents
+ * flag is visible under linux, and checks that memory created with the
+ * ODP_SHM_EXPORT flag is visible by other ODP instances.
+ * It therefore checks both that the link
+ * name under /tmp is correct, and also checks that the memory contents
* is indeed shared.
* we want:
* -the odp test to run using C UNIT
@@ -15,18 +17,47 @@
*
* To achieve this, the flow of operations is as follows:
*
- * linux process (main, non odp) | ODP process
- * (shmem_linux.c) | (shmem_odp.c)
+ * linux process (main, non odp) |
+ * (shmem_linux.c) |
+ * |
+ * |
* |
* main() |
- * forks odp process | allocate shmem
- * wait for named pipe creation | populate shmem
+ * forks odp_app1 process |
+ * wait for named pipe creation |
+ * |
+ * | ODP_APP1 process
+ * | (shmem_odp1.c)
+ * |
+ * | allocate shmem
+ * | populate shmem
* | create named pipe
- * read shared memory | wait for test report in fifo
+ * | wait for test report in fifo...
+ * read shared memory |
* check if memory contents is OK |
- * if OK, write "S" in fifo, else "F" | report success or failure to C-Unit
- * wait for child terminaison & status| terminate with usual F/S status
+ * If not OK, write "F" in fifo and |
+ * exit with failure code. | -------------------
+ * |
+ * forks odp app2 process | ODP APP2 process
+ * wait for child terminaison & status| (shmem_odp2.c)
+ * | lookup ODP_APP1 shared memory,
+ * | check if memory contents is OK
+ * | Exit(0) on success, exit(1) on fail
+ * If child failed, write "F" in fifo |
+ * exit with failure code. | -------------------
+ * |
+ * OK, write "S" in fifo, |
+ * wait for child termination & status|
* terminate with same status as child|
+ * | ODP APP1 process
+ * | (shmem_odp1.c)
+ * |
+ * | ...(continued)
+ * | read S(success) or F(fail) from fifo
+ * | report success or failure to C-Unit
+ * | Exit(0) on success, exit(1) on fail
+ * wait for child termination & status |
+ * terminate with same status as child |
* |
* \|/
* time
@@ -45,12 +76,77 @@
#include <sys/mman.h>
#include <libgen.h>
#include <linux/limits.h>
+#include <inttypes.h>
#include "shmem_linux.h"
#include "shmem_common.h"
-#define ODP_APP_NAME "shmem_odp" /* name of the odp program, in this dir */
-#define DEVNAME_FMT "odp-%d-%s" /* shm device format: odp-<pid>-<name> */
-#define MAX_FIFO_WAIT 30 /* Max time waiting for the fifo (sec) */
+#define ODP_APP1_NAME "shmem_odp1" /* name of the odp1 program, in this dir */
+#define ODP_APP2_NAME "shmem_odp2" /* name of the odp2 program, in this dir */
+#define DEVNAME_FMT "/tmp/odp-%" PRIu64 "-shm-%s" /* odp-<pid>-shm-<name> */
+#define MAX_FIFO_WAIT 30 /* Max time waiting for the fifo (sec) */
+
+/*
+ * read the attributes of an externally shared mem object:
+ * input: ext_odp_pid, blockname: the remote ODP instance and the exported
+ * block name to be searched.
+ * Output: filename: the memory block underlying file to be opened
+ * (the given buffer should be big enough i.e. at
+ * least ISHM_FILENAME_MAXLEN bytes)
+ * The 3 following parameters are really here for debug
+ * as they are really meaningless in a non-odp process:
+ * len: the block real length (bytes, multiple of page sz)
+ * flags: the _ishm flags setting the block was created with
+ * align: the alignment setting the block was created with
+ *
+ * return 0 on success, non zero on error
+ */
+static int read_shmem_attribues(uint64_t ext_odp_pid, const char *blockname,
+ char *filename, uint64_t *len,
+ uint32_t *flags, uint64_t *user_len,
+ uint32_t *user_flags, uint32_t *align)
+{
+ char shm_attr_filename[PATH_MAX];
+ FILE *export_file;
+
+ sprintf(shm_attr_filename, DEVNAME_FMT, ext_odp_pid, blockname);
+
+ /* O_CREAT flag not given => failure if shm_attr_filename does not
+ * already exist */
+ export_file = fopen(shm_attr_filename, "r");
+ if (export_file == NULL)
+ return -1;
+
+ if (fscanf(export_file, "ODP exported shm block info: ") != 0)
+ goto export_file_read_err;
+
+ if (fscanf(export_file, "ishm_blockname: %*s ") != 0)
+ goto export_file_read_err;
+
+ if (fscanf(export_file, "file: %s ", filename) != 1)
+ goto export_file_read_err;
+
+ if (fscanf(export_file, "length: %" PRIu64 " ", len) != 1)
+ goto export_file_read_err;
+
+ if (fscanf(export_file, "flags: %" PRIu32 " ", flags) != 1)
+ goto export_file_read_err;
+
+ if (fscanf(export_file, "user_length: %" PRIu64 " ", user_len) != 1)
+ goto export_file_read_err;
+
+ if (fscanf(export_file, "user_flags: %" PRIu32 " ", user_flags) != 1)
+ goto export_file_read_err;
+
+ if (fscanf(export_file, "align: %" PRIu32 " ", align) != 1)
+ goto export_file_read_err;
+
+ fclose(export_file);
+ return 0;
+
+export_file_read_err:
+ fclose(export_file);
+ return -1;
+}
void test_success(char *fifo_name, int fd, pid_t odp_app)
{
@@ -60,7 +156,7 @@ void test_success(char *fifo_name, int fd, pid_t odp_app)
/* write "Success" to the FIFO */
nb_char = write(fd, &result, sizeof(char));
close(fd);
- /* wait for the odp app to terminate */
+ /* wait for the odp app1 to terminate */
waitpid(odp_app, &status, 0);
/* if the write failed, report an error anyway */
if (nb_char != 1)
@@ -77,10 +173,10 @@ void test_failure(char *fifo_name, int fd, pid_t odp_app)
int nb_char __attribute__((unused)); /*ignored: we fail anyway */
result = TEST_FAILURE;
- /* write "Success" to the FIFO */
+ /* write "Failure" to the FIFO */
nb_char = write(fd, &result, sizeof(char));
close(fd);
- /* wait for the odp app to terminate */
+ /* wait for the odp app1 to terminate */
waitpid(odp_app, &status, 0);
unlink(fifo_name);
exit(1); /* error */
@@ -89,33 +185,45 @@ void test_failure(char *fifo_name, int fd, pid_t odp_app)
int main(int argc __attribute__((unused)), char *argv[])
{
char prg_name[PATH_MAX];
- char odp_name[PATH_MAX];
+ char odp_name1[PATH_MAX];
+ char odp_name2[PATH_MAX];
int nb_sec;
int size;
- pid_t odp_app;
- char *odp_params = NULL;
+ pid_t odp_app1;
+ pid_t odp_app2;
+ char *odp_params1 = NULL;
+ char *odp_params2[3];
+ char pid1[10];
char fifo_name[PATH_MAX]; /* fifo for linux->odp feedback */
int fifo_fd = -1;
- char shm_devname[PATH_MAX];/* shared mem device name, under /dev/shm */
+ char shm_filename[PATH_MAX];/* shared mem file name, under /tmp */
+ uint64_t len;
+ uint32_t flags;
+ uint64_t user_len;
+ uint32_t user_flags;
+ uint32_t align;
int shm_fd;
test_shared_linux_data_t *addr;
+ int app2_status;
- /* odp app is in the same directory as this file: */
+ /* odp_app1 is in the same directory as this file: */
strncpy(prg_name, argv[0], PATH_MAX - 1);
- sprintf(odp_name, "%s/%s", dirname(prg_name), ODP_APP_NAME);
+ sprintf(odp_name1, "%s/%s", dirname(prg_name), ODP_APP1_NAME);
/* start the ODP application: */
- odp_app = fork();
- if (odp_app < 0) /* error */
+ odp_app1 = fork();
+ if (odp_app1 < 0) /* error */
exit(1);
- if (odp_app == 0) /* child */
- execv(odp_name, &odp_params);
+ if (odp_app1 == 0) { /* child */
+ execv(odp_name1, &odp_params1); /* no return unless error */
+ fprintf(stderr, "execv failed: %s\n", strerror(errno));
+ }
/* wait max 30 sec for the fifo to be created by the ODP side.
* Just die if time expire as there is no fifo to communicate
* through... */
- sprintf(fifo_name, FIFO_NAME_FMT, odp_app);
+ sprintf(fifo_name, FIFO_NAME_FMT, odp_app1);
for (nb_sec = 0; nb_sec < MAX_FIFO_WAIT; nb_sec++) {
fifo_fd = open(fifo_name, O_WRONLY);
if (fifo_fd >= 0)
@@ -130,30 +238,62 @@ int main(int argc __attribute__((unused)), char *argv[])
* ODP application is up and running, and has allocated shmem.
* check to see if linux can see the created shared memory: */
- sprintf(shm_devname, DEVNAME_FMT, odp_app, ODP_SHM_NAME);
+ /* read the shared memory attributes (includes the shm filename): */
+ if (read_shmem_attribues(odp_app1, ODP_SHM_NAME,
+ shm_filename, &len, &flags,
+ &user_len, &user_flags, &align) != 0)
+ test_failure(fifo_name, fifo_fd, odp_app1);
- /* O_CREAT flag not given => failure if shm_devname does not already
+ /* open the shm filename (which is either on /tmp or on hugetlbfs)
+ * O_CREAT flag not given => failure if shm_devname does not already
* exist */
- shm_fd = shm_open(shm_devname, O_RDONLY,
- S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+ shm_fd = open(shm_filename, O_RDONLY,
+ S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
if (shm_fd == -1)
- test_failure(fifo_name, shm_fd, odp_app);
+ test_failure(fifo_name, fifo_fd, odp_app1); /* no return */
- /* we know that the linux generic ODP actually allocates the required
- * size + alignment and aligns the returned address after.
- * we must do the same here: */
- size = sizeof(test_shared_linux_data_t) + ALIGN_SIZE;
- addr = mmap(NULL, size, PROT_READ, MAP_SHARED, shm_fd, 0);
- if (addr == MAP_FAILED)
- test_failure(fifo_name, shm_fd, odp_app);
+ /* linux ODP guarantees page size alignment. Larger alignment may
+ * fail as 2 different processes will have fully unrelated
+ * virtual spaces.
+ */
+ size = sizeof(test_shared_linux_data_t);
- /* perform manual alignment */
- addr = (test_shared_linux_data_t *)((((unsigned long int)addr +
- ALIGN_SIZE - 1) / ALIGN_SIZE) * ALIGN_SIZE);
+ addr = mmap(NULL, size, PROT_READ, MAP_SHARED, shm_fd, 0);
+ if (addr == MAP_FAILED) {
+ printf("shmem_linux: map failed!\n");
+ test_failure(fifo_name, fifo_fd, odp_app1);
+ }
/* check that we see what the ODP application wrote in the memory */
- if ((addr->foo == TEST_SHARE_FOO) && (addr->bar == TEST_SHARE_BAR))
- test_success(fifo_name, fifo_fd, odp_app);
- else
- test_failure(fifo_name, fifo_fd, odp_app);
+ if ((addr->foo != TEST_SHARE_FOO) || (addr->bar != TEST_SHARE_BAR))
+ test_failure(fifo_name, fifo_fd, odp_app1); /* no return */
+
+ /* odp_app2 is in the same directory as this file: */
+ strncpy(prg_name, argv[0], PATH_MAX - 1);
+ sprintf(odp_name2, "%s/%s", dirname(prg_name), ODP_APP2_NAME);
+
+ /* start the second ODP application with pid of ODP_APP1 as parameter:*/
+ sprintf(pid1, "%d", odp_app1);
+ odp_params2[0] = odp_name2;
+ odp_params2[1] = pid1;
+ odp_params2[2] = NULL;
+ odp_app2 = fork();
+ if (odp_app2 < 0) /* error */
+ exit(1);
+
+ if (odp_app2 == 0) { /* child */
+ execv(odp_name2, odp_params2); /* no return unless error */
+ fprintf(stderr, "execv failed: %s\n", strerror(errno));
+ }
+
+ /* wait for the second ODP application to terminate:
+ * status is OK if that second ODP application could see the
+ * memory shared by the first one. */
+ waitpid(odp_app2, &app2_status, 0);
+
+ if (app2_status)
+ test_failure(fifo_name, fifo_fd, odp_app1); /* no return */
+
+ /* everything looked good: */
+ test_success(fifo_name, fifo_fd, odp_app1);
}
diff --git a/test/linux-generic/validation/api/shmem/shmem_odp.c b/test/linux-generic/validation/api/shmem/shmem_odp1.c
index a1f750f89..3869c2e1c 100644
--- a/test/linux-generic/validation/api/shmem/shmem_odp.c
+++ b/test/linux-generic/validation/api/shmem/shmem_odp1.c
@@ -13,7 +13,7 @@
#include <fcntl.h>
#include <odp_cunit_common.h>
-#include "shmem_odp.h"
+#include "shmem_odp1.h"
#include "shmem_common.h"
#define TEST_SHARE_FOO (0xf0f0f0f0)
@@ -27,9 +27,10 @@ void shmem_test_odp_shm_proc(void)
test_shared_data_t *test_shared_data;
char test_result;
+ /* reminder: ODP_SHM_PROC => export to linux, ODP_SHM_EXPORT=>to odp */
shm = odp_shm_reserve(ODP_SHM_NAME,
sizeof(test_shared_data_t),
- ALIGN_SIZE, ODP_SHM_PROC);
+ ALIGN_SIZE, ODP_SHM_PROC | ODP_SHM_EXPORT);
CU_ASSERT_FATAL(ODP_SHM_INVALID != shm);
test_shared_data = odp_shm_addr(shm);
CU_ASSERT_FATAL(NULL != test_shared_data);
@@ -39,15 +40,18 @@ void shmem_test_odp_shm_proc(void)
odp_mb_full();
/* open the fifo: this will indicate to linux process that it can
- * start the shmem lookup and check if it sees the data */
+ * start the shmem lookups and check if it sees the data */
sprintf(fifo_name, FIFO_NAME_FMT, getpid());
CU_ASSERT_FATAL(mkfifo(fifo_name, 0666) == 0);
/* read from the fifo: the linux process result: */
+ printf("shmem_odp1: opening fifo: %s\n", fifo_name);
fd = open(fifo_name, O_RDONLY);
CU_ASSERT_FATAL(fd >= 0);
+ printf("shmem_odp1: reading fifo: %s\n", fifo_name);
CU_ASSERT(read(fd, &test_result, sizeof(char)) == 1);
+ printf("shmem_odp1: closing fifo: %s\n", fifo_name);
close(fd);
CU_ASSERT_FATAL(test_result == TEST_SUCCESS);
diff --git a/test/linux-generic/validation/api/shmem/shmem_odp.h b/test/linux-generic/validation/api/shmem/shmem_odp1.h
index 614bbf805..614bbf805 100644
--- a/test/linux-generic/validation/api/shmem/shmem_odp.h
+++ b/test/linux-generic/validation/api/shmem/shmem_odp1.h
diff --git a/test/linux-generic/validation/api/shmem/shmem_odp2.c b/test/linux-generic/validation/api/shmem/shmem_odp2.c
new file mode 100644
index 000000000..7d8c682b1
--- /dev/null
+++ b/test/linux-generic/validation/api/shmem/shmem_odp2.c
@@ -0,0 +1,103 @@
+/* Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp.h>
+#include <linux/limits.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdlib.h>
+
+#include <odp_cunit_common.h>
+#include "shmem_odp2.h"
+#include "shmem_common.h"
+
+#define TEST_SHARE_FOO (0xf0f0f0f0)
+#define TEST_SHARE_BAR (0xf0f0f0f)
+
+/* The C unit test harness is run by ODP1 app which will be told the return
+ * status of this process. See top of shmem_linux.c for the flow chart of events
+ */
+int main(int argc, char *argv[])
+{
+ odp_instance_t odp1;
+ odp_instance_t odp2;
+ odp_shm_t shm;
+ odp_shm_info_t info;
+ test_shared_data_t *test_shared_data;
+
+ /* odp init: */
+ if (0 != odp_init_global(&odp2, NULL, NULL)) {
+ fprintf(stderr, "error: odp_init_global() failed.\n");
+ return 1;
+ }
+ if (0 != odp_init_local(odp2, ODP_THREAD_CONTROL)) {
+ fprintf(stderr, "error: odp_init_local() failed.\n");
+ return 1;
+ }
+
+ /* test: map ODP1 memory and check its contents:
+ * The pid of the ODP instantiation process sharing its memory
+ * is given as first arg. In linux-generic ODP, this pid is actually
+ * the ODP instance */
+ if (argc != 2) {
+ fprintf(stderr, "One single parameter expected, %d found.\n",
+ argc);
+ return 1;
+ }
+ odp1 = (odp_instance_t)atoi(argv[1]);
+
+ printf("shmem_odp2: trying to grab %s from pid %d\n",
+ ODP_SHM_NAME, (int)odp1);
+ shm = odp_shm_import(ODP_SHM_NAME, odp1, ODP_SHM_NAME);
+ if (shm == ODP_SHM_INVALID) {
+ fprintf(stderr, "error: odp_shm_lookup_external failed.\n");
+ return 1;
+ }
+
+ /* check that the read size matches the allocated size (in other ODP):*/
+ if ((odp_shm_info(shm, &info)) ||
+ (info.size != sizeof(*test_shared_data))) {
+ fprintf(stderr, "error: odp_shm_info failed.\n");
+ return 1;
+ }
+
+ test_shared_data = odp_shm_addr(shm);
+ if (test_shared_data == NULL) {
+ fprintf(stderr, "error: odp_shm_addr failed.\n");
+ return 1;
+ }
+
+ if (test_shared_data->foo != TEST_SHARE_FOO) {
+ fprintf(stderr, "error: Invalid data TEST_SHARE_FOO.\n");
+ return 1;
+ }
+
+ if (test_shared_data->bar != TEST_SHARE_BAR) {
+ fprintf(stderr, "error: Invalid data TEST_SHARE_BAR.\n");
+ return 1;
+ }
+
+ if (odp_shm_free(shm) != 0) {
+ fprintf(stderr, "error: odp_shm_free() failed.\n");
+ return 1;
+ }
+
+ /* odp term: */
+ if (0 != odp_term_local()) {
+ fprintf(stderr, "error: odp_term_local() failed.\n");
+ return 1;
+ }
+
+ if (0 != odp_term_global(odp2)) {
+ fprintf(stderr, "error: odp_term_global() failed.\n");
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/test/linux-generic/validation/api/shmem/shmem_odp2.h b/test/linux-generic/validation/api/shmem/shmem_odp2.h
new file mode 100644
index 000000000..a8db909a8
--- /dev/null
+++ b/test/linux-generic/validation/api/shmem/shmem_odp2.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+int main(int argc, char *argv[]);