path: root/platform/linux-generic
author    Matias Elo <matias.elo@nokia.com>    2023-02-24 09:39:16 +0200
committer GitHub <noreply@github.com>          2023-02-24 09:39:16 +0200
commit    0f2f3271f64b25d7ce101b7a5c82b7fc17ba2d32 (patch)
tree      5d4df86a452c52b1421b8039835ec793ac4019c7 /platform/linux-generic
parent    282188e33695bb6863e289df61e0dfbd87ca205c (diff)
parent    c7c99cfd5635da8c0afc7f3803ce6e1c82c3465b (diff)
Merge ODP v1.40.0.0 (tag: v1.40.0.0_DPDK_19.11)
Merge ODP linux-generic v1.40.0.0 into linux-dpdk.
Diffstat (limited to 'platform/linux-generic')
-rw-r--r--  platform/linux-generic/Makefile.am | 15
-rw-r--r--  platform/linux-generic/README | 26
-rw-r--r--  platform/linux-generic/arch/aarch64/odp_crypto_armv8.c | 128
-rw-r--r--  platform/linux-generic/include-abi/odp/api/abi/buffer.h | 20
-rw-r--r--  platform/linux-generic/include-abi/odp/api/abi/buffer_types.h | 40
-rw-r--r--  platform/linux-generic/include-abi/odp/api/abi/crypto.h | 15
-rw-r--r--  platform/linux-generic/include-abi/odp/api/abi/crypto_types.h | 42
-rw-r--r--  platform/linux-generic/include/odp/api/plat/buffer_inlines.h | 5
-rw-r--r--  platform/linux-generic/include/odp/api/plat/crypto_inlines.h | 65
-rw-r--r--  platform/linux-generic/include/odp/api/plat/event_validation_external.h | 111
-rw-r--r--  platform/linux-generic/include/odp/api/plat/packet_inline_types.h | 1
-rw-r--r--  platform/linux-generic/include/odp/api/plat/packet_inlines.h | 3
-rw-r--r--  platform/linux-generic/include/odp/api/plat/pool_inline_types.h | 1
-rw-r--r--  platform/linux-generic/include/odp/api/plat/queue_inlines.h | 10
-rw-r--r--  platform/linux-generic/include/odp_config_internal.h | 10
-rw-r--r--  platform/linux-generic/include/odp_event_internal.h | 7
-rw-r--r--  platform/linux-generic/include/odp_event_validation_internal.h | 52
-rw-r--r--  platform/linux-generic/include/odp_init_internal.h | 3
-rw-r--r--  platform/linux-generic/include/odp_packet_io_internal.h | 8
-rw-r--r--  platform/linux-generic/include/odp_pool_internal.h | 1
-rw-r--r--  platform/linux-generic/include/odp_queue_basic_internal.h | 5
-rw-r--r--  platform/linux-generic/include/odp_ring_mpmc_internal.h | 210
-rw-r--r--  platform/linux-generic/include/odp_ring_mpmc_u32_internal.h | 25
-rw-r--r--  platform/linux-generic/include/odp_ring_mpmc_u64_internal.h | 25
-rw-r--r--  platform/linux-generic/libodp-linux.pc.in | 2
-rw-r--r--  platform/linux-generic/m4/configure.m4 | 6
-rw-r--r--  platform/linux-generic/m4/odp_crypto.m4 | 12
-rw-r--r--  platform/linux-generic/m4/odp_event_validation.m4 | 23
-rw-r--r--  platform/linux-generic/m4/odp_ipsec_mb.m4 | 19
-rw-r--r--  platform/linux-generic/m4/odp_libconfig.m4 | 2
-rw-r--r--  platform/linux-generic/odp_crypto_api.c | 11
-rw-r--r--  platform/linux-generic/odp_crypto_ipsecmb.c | 881
-rw-r--r--  platform/linux-generic/odp_crypto_null.c | 68
-rw-r--r--  platform/linux-generic/odp_crypto_openssl.c | 395
-rw-r--r--  platform/linux-generic/odp_event.c | 25
-rw-r--r--  platform/linux-generic/odp_event_validation.c | 260
-rw-r--r--  platform/linux-generic/odp_init.c | 16
-rw-r--r--  platform/linux-generic/odp_ipsec_sad.c | 39
-rw-r--r--  platform/linux-generic/odp_packet.c | 21
-rw-r--r--  platform/linux-generic/odp_packet_flags.c | 2
-rw-r--r--  platform/linux-generic/odp_packet_io.c | 29
-rw-r--r--  platform/linux-generic/odp_pool.c | 31
-rw-r--r--  platform/linux-generic/odp_queue_basic.c | 67
-rw-r--r--  platform/linux-generic/odp_schedule_basic.c | 22
-rw-r--r--  platform/linux-generic/odp_shared_memory.c | 9
-rw-r--r--  platform/linux-generic/odp_stash.c | 665
-rw-r--r--  platform/linux-generic/odp_timer.c | 9
-rw-r--r--  platform/linux-generic/odp_traffic_mngr.c | 6
-rw-r--r--  platform/linux-generic/pktio/dpdk.c | 10
-rw-r--r--  platform/linux-generic/pktio/loop.c | 323
-rw-r--r--  platform/linux-generic/pktio/stats/packet_io_stats.c | 5
-rw-r--r--  platform/linux-generic/test/inline-timer.conf | 2
-rw-r--r--  platform/linux-generic/test/packet_align.conf | 2
-rw-r--r--  platform/linux-generic/test/process-mode.conf | 2
-rw-r--r--  platform/linux-generic/test/sched-basic.conf | 3
-rw-r--r--  platform/linux-generic/test/stash-custom.conf | 8
56 files changed, 2916 insertions, 887 deletions
diff --git a/platform/linux-generic/Makefile.am b/platform/linux-generic/Makefile.am
index e762148aa..d49a2138b 100644
--- a/platform/linux-generic/Makefile.am
+++ b/platform/linux-generic/Makefile.am
@@ -34,9 +34,11 @@ odpapiplatinclude_HEADERS = \
include/odp/api/plat/buffer_inline_types.h \
include/odp/api/plat/byteorder_inlines.h \
include/odp/api/plat/cpu_inlines.h \
+ include/odp/api/plat/crypto_inlines.h \
include/odp/api/plat/debug_inlines.h \
include/odp/api/plat/event_inlines.h \
include/odp/api/plat/event_inline_types.h \
+ include/odp/api/plat/event_validation_external.h \
include/odp/api/plat/event_vector_inline_types.h \
include/odp/api/plat/hash_inlines.h \
include/odp/api/plat/ipsec_inlines.h \
@@ -70,11 +72,13 @@ odpapiabiarchinclude_HEADERS += \
include-abi/odp/api/abi/atomic.h \
include-abi/odp/api/abi/barrier.h \
include-abi/odp/api/abi/buffer.h \
+ include-abi/odp/api/abi/buffer_types.h \
include-abi/odp/api/abi/byteorder.h \
include-abi/odp/api/abi/classification.h \
include-abi/odp/api/abi/comp.h \
include-abi/odp/api/abi/cpumask.h \
include-abi/odp/api/abi/crypto.h \
+ include-abi/odp/api/abi/crypto_types.h \
include-abi/odp/api/abi/debug.h \
include-abi/odp/api/abi/dma_types.h \
include-abi/odp/api/abi/errno.h \
@@ -129,6 +133,7 @@ noinst_HEADERS = \
include/odp_debug_internal.h \
include/odp_errno_define.h \
include/odp_event_internal.h \
+ include/odp_event_validation_internal.h \
include/odp_fdserver_internal.h \
include/odp_forward_typedefs_internal.h \
include/odp_global_data.h \
@@ -164,6 +169,8 @@ noinst_HEADERS = \
include/odp_ring_common.h \
include/odp_ring_internal.h \
include/odp_ring_mpmc_internal.h \
+ include/odp_ring_mpmc_u32_internal.h \
+ include/odp_ring_mpmc_u64_internal.h \
include/odp_ring_ptr_internal.h \
include/odp_ring_spsc_internal.h \
include/odp_ring_st_internal.h \
@@ -205,6 +212,7 @@ __LIB__libodp_linux_la_SOURCES = \
odp_dma.c \
odp_errno.c \
odp_event.c \
+ odp_event_validation.c \
odp_fdserver.c \
odp_hash_crc_gen.c \
odp_impl.c \
@@ -278,16 +286,22 @@ if WITH_ARMV8_CRYPTO
__LIB__libodp_linux_la_SOURCES += \
arch/aarch64/odp_crypto_armv8.c
else
+if WITH_IPSECMB_CRYPTO
+__LIB__libodp_linux_la_SOURCES += \
+ odp_crypto_ipsecmb.c
+else
__LIB__libodp_linux_la_SOURCES += \
odp_crypto_null.c
endif
endif
+endif
if ODP_ABI_COMPAT
__LIB__libodp_linux_la_SOURCES += \
odp_atomic_api.c \
odp_buffer_api.c \
odp_byteorder_api.c \
odp_cpu_api.c \
+ odp_crypto_api.c \
odp_event_api.c \
odp_hash_api.c \
odp_ipsec_api.c \
@@ -438,6 +452,7 @@ __LIB__libodp_linux_la_LIBADD += $(DPDK_LIBS_LIBODP)
__LIB__libodp_linux_la_LIBADD += $(PTHREAD_LIBS)
__LIB__libodp_linux_la_LIBADD += $(TIMER_LIBS)
__LIB__libodp_linux_la_LIBADD += $(LIBXDP_LIBS)
+__LIB__libodp_linux_la_LIBADD += $(IPSEC_MB_LIBS)
if ODP_PKTIO_PCAP
__LIB__libodp_linux_la_LIBADD += $(PCAP_LIBS)
diff --git a/platform/linux-generic/README b/platform/linux-generic/README
index 8f41d1d45..138e6040c 100644
--- a/platform/linux-generic/README
+++ b/platform/linux-generic/README
@@ -1,5 +1,5 @@
Copyright (c) 2014-2018, Linaro Limited
-Copyright (c) 2019, Nokia
+Copyright (c) 2019-2023, Nokia
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
@@ -52,7 +52,29 @@ SPDX-License-Identifier: BSD-3-Clause
Note that there may be issues with the quality or security of rdrand and
rdseed. [2]
-6. References
+6. Event validation
+ ODP linux-generic implementation supports additional fast path event
+ validity checks which are disabled by default to minimize overhead. These
+ checks can be enabled with --enable-event-validation [abort/warn] or
+ --enable-debug=full configuration options.
+
+ Event validation adds additional endmark data to ODP buffers and packets,
+ which is used to detect data writes outside allowed areas. Endmarks are
+ checked by the implementation each time application calls one the following
+ API functions:
+ - odp_buffer_free() / odp_buffer_free_multi()
+ - odp_buffer_is_valid()
+ - odp_event_free() / odp_event_free_multi() / odp_event_free_sp()
+ - odp_event_is_valid()
+ - odp_packet_free() / odp_packet_free_multi() / odp_packet_free_sp()
+ - odp_packet_is_valid()
+ - odp_queue_enq() / odp_queue_enq_multi()
+
+ Event validation can function in two modes: abort (default) and warn. In
+ abort mode the application is terminated immediately if an event validity
+ check fails. In warn mode only an error log message is printed.
+
+7. References
[1] Intel Digital Random Number Generator (DRNG) Software Implementation
Guide. John P Mechalas, 17 October 2018.
https://www.intel.com/content/www/us/en/developer/articles/guide/intel-digital-random-number-generator-drng-software-implementation-guide.html
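For illustration, a minimal sketch of the kind of error these endmark checks are meant to catch; the pool parameters and names below are arbitrary and not part of this commit:

    #include <odp_api.h>

    /* Sketch only: with --enable-event-validation=abort, writing past the
     * requested buffer size can clobber the endmark and is then detected
     * when the buffer is freed or validated. */
    static void endmark_demo(void)
    {
            odp_pool_param_t params;
            odp_pool_t pool;
            odp_buffer_t buf;
            uint8_t *data;

            odp_pool_param_init(&params);
            params.type     = ODP_POOL_BUFFER;
            params.buf.size = 64;
            params.buf.num  = 32;
            pool = odp_pool_create("endmark_demo", &params);

            buf  = odp_buffer_alloc(pool);
            data = odp_buffer_addr(buf);
            data[64] = 0xff; /* one byte past the requested size */

            /* Abort mode terminates here; warn mode only logs an error. */
            odp_buffer_free(buf);
            odp_pool_destroy(pool);
    }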
diff --git a/platform/linux-generic/arch/aarch64/odp_crypto_armv8.c b/platform/linux-generic/arch/aarch64/odp_crypto_armv8.c
index 11fadc971..67ae6a389 100644
--- a/platform/linux-generic/arch/aarch64/odp_crypto_armv8.c
+++ b/platform/linux-generic/arch/aarch64/odp_crypto_armv8.c
@@ -1,6 +1,6 @@
/* Copyright (c) 2014-2018, Linaro Limited
* Copyright (c) 2021, ARM Limited
- * Copyright (c) 2022, Nokia
+ * Copyright (c) 2022-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -38,6 +38,10 @@
#define ARM_CRYPTO_MAX_DATA_LENGTH 65536
#define ARM_CRYPTO_MAX_DIGEST_LENGTH 16
+#define AES_GCM_IV_LEN 12
+ODP_STATIC_ASSERT(AES_GCM_IV_LEN <= ARM_CRYPTO_MAX_IV_LENGTH,
+ "AES_GCM_IV_LEN exceeds ARM_CRYPTO_MAX_IV_LENGTH");
+
/*
* ARM crypto library may read up to 15 bytes past the end of input
* data and AAD and write up to 15 bytes past the end of output data.
@@ -70,9 +74,9 @@ static const odp_crypto_cipher_capability_t cipher_capa_null[] = {
#ifdef __ARM_FEATURE_AES
static const odp_crypto_cipher_capability_t cipher_capa_aes_gcm[] = {
-{.key_len = 16, .iv_len = 12},
-{.key_len = 24, .iv_len = 12},
-{.key_len = 32, .iv_len = 12} };
+{.key_len = 16, .iv_len = AES_GCM_IV_LEN},
+{.key_len = 24, .iv_len = AES_GCM_IV_LEN},
+{.key_len = 32, .iv_len = AES_GCM_IV_LEN} };
#endif
/*
@@ -111,18 +115,11 @@ struct odp_crypto_generic_session_t {
odp_crypto_session_param_t p;
struct {
-#if ODP_DEPRECATED_API
- /* Copy of session IV data */
- uint8_t iv_data[ARM_CRYPTO_MAX_IV_LENGTH];
-#endif
uint8_t key_data[ARM_CRYPTO_MAX_CIPHER_KEY_LENGTH];
} cipher;
struct {
uint8_t key[ARM_CRYPTO_MAX_AUTH_KEY_LENGTH];
-#if ODP_DEPRECATED_API
- uint8_t iv_data[ARM_CRYPTO_MAX_IV_LENGTH];
-#endif
} auth;
crypto_func_t func;
@@ -176,21 +173,13 @@ void free_session(odp_crypto_generic_session_t *session)
odp_spinlock_unlock(&global->lock);
}
-static
-odp_crypto_packet_result_t *get_op_result_from_packet(odp_packet_t pkt)
-{
- odp_packet_hdr_t *hdr = packet_hdr(pkt);
-
- return &hdr->crypto_op_result;
-}
-
static inline void set_crypto_op_result(odp_packet_t pkt,
odp_crypto_alg_err_t cipher_err,
odp_crypto_alg_err_t auth_err)
{
odp_crypto_packet_result_t *op_result;
- op_result = get_op_result_from_packet(pkt);
+ op_result = &packet_hdr(pkt)->crypto_op_result;
op_result->cipher_status.alg_err = cipher_err;
op_result->cipher_status.hw_err = ODP_CRYPTO_HW_ERR_NONE;
op_result->auth_status.alg_err = auth_err;
@@ -235,8 +224,8 @@ void aes_gcm_encrypt(odp_packet_t pkt,
.d = {0, 0}
}
};
- uint8_t *iv_ptr;
- uint64_t iv_bit_length = session->p.cipher_iv_len * 8;
+ uint8_t iv_data[ARM_CRYPTO_MAX_IV_LENGTH];
+ uint64_t iv_bit_length = AES_GCM_IV_LEN * 8;
uint64_t plaintext_bit_length = param->cipher_range.length * 8;
uint64_t aad_bit_length = session->p.auth_aad_len * 8;
uint32_t in_pos = param->cipher_range.offset;
@@ -252,21 +241,13 @@ void aes_gcm_encrypt(odp_packet_t pkt,
goto err;
}
-#if ODP_DEPRECATED_API
- if (param->cipher_iv_ptr)
- iv_ptr = param->cipher_iv_ptr;
- else if (session->p.cipher_iv.data)
- iv_ptr = session->cipher.iv_data;
- else
- goto err;
-#else
- iv_ptr = param->cipher_iv_ptr;
- _ODP_ASSERT(session->p.cipher_iv_len == 0 || iv_ptr != NULL);
-#endif
+ /* The crypto lib may read 16 bytes. Copy to a big enough buffer */
+ _ODP_ASSERT(param->cipher_iv_ptr != NULL);
+ memcpy(iv_data, param->cipher_iv_ptr, AES_GCM_IV_LEN);
cs.constants = &session->cc;
- rc = armv8_aes_gcm_set_counter(iv_ptr, iv_bit_length, &cs);
+ rc = armv8_aes_gcm_set_counter(iv_data, iv_bit_length, &cs);
if (odp_unlikely(rc)) {
_ODP_DBG("ARM Crypto: Failure while setting nonce\n");
goto err;
@@ -335,9 +316,9 @@ void aes_gcm_decrypt(odp_packet_t pkt,
.d = {0, 0}
}
};
- uint8_t *iv_ptr;
+ uint8_t iv_data[ARM_CRYPTO_MAX_IV_LENGTH];
uint8_t tag[AES_GCM_TAG_LEN];
- uint64_t iv_bit_length = session->p.cipher_iv_len * 8;
+ uint64_t iv_bit_length = AES_GCM_IV_LEN * 8;
uint64_t plaintext_bit_length = param->cipher_range.length * 8;
uint64_t aad_bit_length = session->p.auth_aad_len * 8;
uint32_t in_pos = param->cipher_range.offset;
@@ -352,21 +333,13 @@ void aes_gcm_decrypt(odp_packet_t pkt,
goto err;
}
-#if ODP_DEPRECATED_API
- if (param->cipher_iv_ptr)
- iv_ptr = param->cipher_iv_ptr;
- else if (session->p.cipher_iv.data)
- iv_ptr = session->cipher.iv_data;
- else
- goto err;
-#else
- iv_ptr = param->cipher_iv_ptr;
- _ODP_ASSERT(session->p.cipher_iv_len == 0 || iv_ptr != NULL);
-#endif
+ /* The crypto lib may read 16 bytes. Copy to a big enough buffer */
+ _ODP_ASSERT(param->cipher_iv_ptr != NULL);
+ memcpy(iv_data, param->cipher_iv_ptr, AES_GCM_IV_LEN);
cs.constants = &session->cc;
- rc = armv8_aes_gcm_set_counter(iv_ptr, iv_bit_length, &cs);
+ rc = armv8_aes_gcm_set_counter(iv_data, iv_bit_length, &cs);
if (odp_unlikely(rc)) {
_ODP_DBG("ARM Crypto: Failure while setting nonce\n");
goto err;
@@ -433,7 +406,7 @@ static int process_aes_gcm_param(odp_crypto_generic_session_t *session)
return -1;
/* Verify IV len is correct */
- if (12 != session->p.cipher_iv_len)
+ if (session->p.cipher_iv_len != AES_GCM_IV_LEN)
return -1;
if (ARM_CRYPTO_MAX_CIPHER_KEY_LENGTH < session->p.cipher_key.length)
@@ -555,6 +528,12 @@ odp_crypto_session_create(const odp_crypto_session_param_t *param,
return -1;
}
+ if (param->op_type == ODP_CRYPTO_OP_TYPE_OOP) {
+ *status = ODP_CRYPTO_SES_ERR_PARAMS;
+ *session_out = ODP_CRYPTO_SESSION_INVALID;
+ return -1;
+ }
+
/* Allocate memory for this session */
session = alloc_session();
if (NULL == session) {
@@ -577,17 +556,6 @@ odp_crypto_session_create(const odp_crypto_session_param_t *param,
goto err;
}
-#if ODP_DEPRECATED_API
- /* Copy IV data */
- if (session->p.cipher_iv.data)
- memcpy(session->cipher.iv_data, session->p.cipher_iv.data,
- session->p.cipher_iv.length);
-
- if (session->p.auth_iv.data)
- memcpy(session->auth.iv_data, session->p.auth_iv.data,
- session->p.auth_iv.length);
-#endif
-
/* Process based on cipher */
switch (param->cipher_alg) {
case ODP_CIPHER_ALG_NULL:
@@ -705,6 +673,10 @@ odp_crypto_operation(odp_crypto_op_param_t *param,
odp_crypto_op_result_t local_result;
int rc;
+ if (((odp_crypto_generic_session_t *)(intptr_t)param->session)->p.op_type !=
+ ODP_CRYPTO_OP_TYPE_LEGACY)
+ return -1;
+
packet_param.session = param->session;
packet_param.cipher_iv_ptr = param->cipher_iv_ptr;
packet_param.auth_iv_ptr = param->auth_iv_ptr;
@@ -877,35 +849,6 @@ uint64_t odp_crypto_session_to_u64(odp_crypto_session_t hdl)
return (uint64_t)hdl;
}
-odp_packet_t odp_crypto_packet_from_event(odp_event_t ev)
-{
- /* This check not mandated by the API specification */
- _ODP_ASSERT(odp_event_type(ev) == ODP_EVENT_PACKET);
- _ODP_ASSERT(odp_event_subtype(ev) == ODP_EVENT_PACKET_CRYPTO);
-
- return odp_packet_from_event(ev);
-}
-
-odp_event_t odp_crypto_packet_to_event(odp_packet_t pkt)
-{
- return odp_packet_to_event(pkt);
-}
-
-int odp_crypto_result(odp_crypto_packet_result_t *result,
- odp_packet_t packet)
-{
- odp_crypto_packet_result_t *op_result;
-
- _ODP_ASSERT(odp_event_subtype(odp_packet_to_event(packet)) ==
- ODP_EVENT_PACKET_CRYPTO);
-
- op_result = get_op_result_from_packet(packet);
-
- memcpy(result, op_result, sizeof(*result));
-
- return 0;
-}
-
static int copy_data_and_metadata(odp_packet_t dst, odp_packet_t src)
{
int md_copy;
@@ -934,6 +877,9 @@ static odp_packet_t get_output_packet(const odp_crypto_generic_session_t *sessio
{
int rc;
+ if (odp_likely(session->p.op_type == ODP_CRYPTO_OP_TYPE_BASIC))
+ return pkt_in;
+
if (odp_likely(pkt_in == pkt_out))
return pkt_out;
@@ -1018,7 +964,9 @@ int odp_crypto_op_enq(const odp_packet_t pkt_in[],
_ODP_ASSERT(ODP_CRYPTO_ASYNC == session->p.op_mode);
_ODP_ASSERT(ODP_QUEUE_INVALID != session->p.compl_queue);
- pkt = pkt_out[i];
+ if (session->p.op_type != ODP_CRYPTO_OP_TYPE_BASIC)
+ pkt = pkt_out[i];
+
rc = crypto_int(pkt_in[i], &pkt, &param[i]);
if (rc < 0)
break;
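With the deprecated session-IV fallback removed above, the IV must now always be supplied per operation. A hedged sketch of a basic-mode call against this backend; session creation (AES-GCM, cipher_iv_len = 12, ODP_CRYPTO_OP_TYPE_BASIC), AAD and digest placement (hash_result_offset) are assumed to be handled elsewhere:

    #include <odp_api.h>
    #include <string.h>

    /* Sketch only: field names follow the odp_crypto_packet_op_param_t
     * usage visible in the diff above. */
    static int encrypt_one(odp_crypto_session_t session, odp_packet_t pkt,
                           uint8_t iv[12])
    {
            odp_crypto_packet_op_param_t op_param;
            odp_packet_t out = ODP_PACKET_INVALID;

            memset(&op_param, 0, sizeof(op_param));
            op_param.session       = session;
            op_param.cipher_iv_ptr = iv; /* mandatory: no session IV fallback */
            op_param.cipher_range.offset = 0;
            op_param.cipher_range.length = odp_packet_len(pkt);

            /* Synchronous operation; in basic mode the output equals the input. */
            return odp_crypto_op(&pkt, &out, &op_param, 1) == 1 ? 0 : -1;
    }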
diff --git a/platform/linux-generic/include-abi/odp/api/abi/buffer.h b/platform/linux-generic/include-abi/odp/api/abi/buffer.h
index 8239e15da..a6309fe39 100644
--- a/platform/linux-generic/include-abi/odp/api/abi/buffer.h
+++ b/platform/linux-generic/include-abi/odp/api/abi/buffer.h
@@ -1,4 +1,5 @@
/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -7,7 +8,7 @@
/**
* @file
*
- * ODP buffer descriptor
+ * ODP buffer
*/
#ifndef ODP_API_ABI_BUFFER_H_
@@ -17,24 +18,9 @@
extern "C" {
#endif
-#include <odp/api/std_types.h>
-#include <odp/api/plat/strong_types.h>
-
-/** @ingroup odp_buffer
- * @{
- */
-
-typedef ODP_HANDLE_T(odp_buffer_t);
-
-#define ODP_BUFFER_INVALID _odp_cast_scalar(odp_buffer_t, 0)
-
-/* Inlined functions for non-ABI compat mode */
+/* Inlined API functions */
#include <odp/api/plat/buffer_inlines.h>
-/**
- * @}
- */
-
#ifdef __cplusplus
}
#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/buffer_types.h b/platform/linux-generic/include-abi/odp/api/abi/buffer_types.h
new file mode 100644
index 000000000..1d54bab07
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/buffer_types.h
@@ -0,0 +1,40 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP buffer types
+ */
+
+#ifndef ODP_API_ABI_BUFFER_TYPES_H_
+#define ODP_API_ABI_BUFFER_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/std_types.h>
+#include <odp/api/plat/strong_types.h>
+
+/** @ingroup odp_buffer
+ * @{
+ */
+
+typedef ODP_HANDLE_T(odp_buffer_t);
+
+#define ODP_BUFFER_INVALID _odp_cast_scalar(odp_buffer_t, 0)
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/crypto.h b/platform/linux-generic/include-abi/odp/api/abi/crypto.h
index b57667e76..bef725c28 100644
--- a/platform/linux-generic/include-abi/odp/api/abi/crypto.h
+++ b/platform/linux-generic/include-abi/odp/api/abi/crypto.h
@@ -1,4 +1,5 @@
/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2022, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -18,18 +19,8 @@
extern "C" {
#endif
-/** @ingroup odp_crypto
- * @{
- */
-
-#define ODP_CRYPTO_SESSION_INVALID (0xffffffffffffffffULL)
-
-typedef uint64_t odp_crypto_session_t;
-typedef ODP_HANDLE_T(odp_crypto_compl_t);
-
-/**
- * @}
- */
+/* Inlined API functions */
+#include <odp/api/plat/crypto_inlines.h>
#ifdef __cplusplus
}
diff --git a/platform/linux-generic/include-abi/odp/api/abi/crypto_types.h b/platform/linux-generic/include-abi/odp/api/abi/crypto_types.h
new file mode 100644
index 000000000..a5cb43c5d
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/crypto_types.h
@@ -0,0 +1,42 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP crypto
+ */
+
+#ifndef ODP_API_ABI_CRYPTO_TYPES_H_
+#define ODP_API_ABI_CRYPTO_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/std_types.h>
+
+#include <odp/api/plat/strong_types.h>
+
+/** @ingroup odp_crypto
+ * @{
+ */
+
+#define ODP_CRYPTO_SESSION_INVALID (0xffffffffffffffffULL)
+
+typedef uint64_t odp_crypto_session_t;
+typedef ODP_HANDLE_T(odp_crypto_compl_t);
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp/api/plat/buffer_inlines.h b/platform/linux-generic/include/odp/api/plat/buffer_inlines.h
index 34d4b5675..75ef36cf3 100644
--- a/platform/linux-generic/include/odp/api/plat/buffer_inlines.h
+++ b/platform/linux-generic/include/odp/api/plat/buffer_inlines.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2019-2022, Nokia
+/* Copyright (c) 2019-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -7,11 +7,10 @@
#ifndef ODP_PLAT_BUFFER_INLINES_H_
#define ODP_PLAT_BUFFER_INLINES_H_
+#include <odp/api/buffer_types.h>
#include <odp/api/event.h>
#include <odp/api/pool_types.h>
-#include <odp/api/abi/buffer.h>
-
#include <odp/api/plat/buffer_inline_types.h>
#include <odp/api/plat/debug_inlines.h>
#include <odp/api/plat/event_inline_types.h>
diff --git a/platform/linux-generic/include/odp/api/plat/crypto_inlines.h b/platform/linux-generic/include/odp/api/plat/crypto_inlines.h
new file mode 100644
index 000000000..8e98d8580
--- /dev/null
+++ b/platform/linux-generic/include/odp/api/plat/crypto_inlines.h
@@ -0,0 +1,65 @@
+/* Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_CRYPTO_INLINES_H_
+#define ODP_PLAT_CRYPTO_INLINES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/crypto_types.h>
+#include <odp/api/event.h>
+#include <odp/api/packet.h>
+
+#include <odp/api/plat/debug_inlines.h>
+#include <odp/api/plat/packet_inline_types.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+#ifndef _ODP_NO_INLINE
+ /* Inline functions by default */
+ #define _ODP_INLINE static inline
+ #define odp_crypto_packet_from_event __odp_crypto_packet_from_event
+ #define odp_crypto_packet_to_event __odp_crypto_packet_to_event
+ #define odp_crypto_result __odp_crypto_result
+#else
+ #define _ODP_INLINE
+#endif
+
+_ODP_INLINE odp_packet_t odp_crypto_packet_from_event(odp_event_t ev)
+{
+ _ODP_ASSERT(odp_event_type(ev) == ODP_EVENT_PACKET);
+ _ODP_ASSERT(odp_event_subtype(ev) == ODP_EVENT_PACKET_CRYPTO);
+
+ return odp_packet_from_event(ev);
+}
+
+_ODP_INLINE odp_event_t odp_crypto_packet_to_event(odp_packet_t pkt)
+{
+ return odp_packet_to_event(pkt);
+}
+
+_ODP_INLINE int odp_crypto_result(odp_crypto_packet_result_t *result, odp_packet_t pkt)
+{
+ odp_crypto_packet_result_t *op_result;
+
+ _ODP_ASSERT(odp_packet_subtype(pkt) == ODP_EVENT_PACKET_CRYPTO);
+
+ op_result = _odp_pkt_get_ptr(pkt, odp_crypto_packet_result_t, crypto_op);
+
+ *result = *op_result;
+
+ return 0;
+}
+
+/** @endcond */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
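A hedged sketch of how these inlined functions are typically consumed on the completion path of an async session (queue polling and packet disposal details assumed):

    #include <odp_api.h>

    /* Sketch only: process one completion event from the session's
     * completion queue. */
    static void handle_crypto_completion(odp_event_t ev)
    {
            odp_packet_t pkt;
            odp_crypto_packet_result_t result;

            if (odp_event_subtype(ev) != ODP_EVENT_PACKET_CRYPTO)
                    return; /* not a crypto completion */

            pkt = odp_crypto_packet_from_event(ev);

            if (odp_crypto_result(&result, pkt) == 0 &&
                result.cipher_status.alg_err == ODP_CRYPTO_ALG_ERR_NONE &&
                result.auth_status.alg_err == ODP_CRYPTO_ALG_ERR_NONE) {
                    /* crypto succeeded, consume the packet */
            }

            odp_packet_free(pkt);
    }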
diff --git a/platform/linux-generic/include/odp/api/plat/event_validation_external.h b/platform/linux-generic/include/odp/api/plat/event_validation_external.h
new file mode 100644
index 000000000..7f5c0364f
--- /dev/null
+++ b/platform/linux-generic/include/odp/api/plat/event_validation_external.h
@@ -0,0 +1,111 @@
+/* Copyright (c) 2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP event validation
+ *
+ * @warning These definitions are not part of ODP API, they are for
+ * implementation internal use only.
+ */
+
+#ifndef ODP_EVENT_VALIDATION_EXTERNAL_H_
+#define ODP_EVENT_VALIDATION_EXTERNAL_H_
+
+#include <odp/autoheader_external.h>
+
+#include <odp/api/buffer_types.h>
+#include <odp/api/event_types.h>
+#include <odp/api/hints.h>
+#include <odp/api/packet_types.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** Enumerations for identifying ODP API functions */
+typedef enum {
+ _ODP_EV_BUFFER_FREE = 0,
+ _ODP_EV_BUFFER_FREE_MULTI,
+ _ODP_EV_BUFFER_IS_VALID,
+ _ODP_EV_EVENT_FREE,
+ _ODP_EV_EVENT_FREE_MULTI,
+ _ODP_EV_EVENT_FREE_SP,
+ _ODP_EV_EVENT_IS_VALID,
+ _ODP_EV_PACKET_FREE,
+ _ODP_EV_PACKET_FREE_MULTI,
+ _ODP_EV_PACKET_FREE_SP,
+ _ODP_EV_PACKET_IS_VALID,
+ _ODP_EV_QUEUE_ENQ,
+ _ODP_EV_QUEUE_ENQ_MULTI,
+ _ODP_EV_MAX
+} _odp_ev_id_t;
+
+/* Implementation internal event validation functions */
+#if _ODP_EVENT_VALIDATION
+
+int _odp_event_validate(odp_event_t event, _odp_ev_id_t id);
+
+int _odp_event_validate_multi(const odp_event_t event[], int num, _odp_ev_id_t id);
+
+int _odp_buffer_validate(odp_buffer_t buf, _odp_ev_id_t ev_id);
+
+int _odp_buffer_validate_multi(const odp_buffer_t buf[], int num, _odp_ev_id_t ev_id);
+
+int _odp_packet_validate(odp_packet_t pkt, _odp_ev_id_t ev_id);
+
+int _odp_packet_validate_multi(const odp_packet_t pkt[], int num, _odp_ev_id_t ev_id);
+
+#else
+
+static inline int _odp_event_validate(odp_event_t event ODP_UNUSED, _odp_ev_id_t ev_id ODP_UNUSED)
+{
+ return 0;
+}
+
+static inline int _odp_event_validate_multi(const odp_event_t event[] ODP_UNUSED,
+ int num ODP_UNUSED,
+ _odp_ev_id_t ev_id ODP_UNUSED)
+{
+ return 0;
+}
+
+static inline int _odp_buffer_validate(odp_buffer_t buf ODP_UNUSED, _odp_ev_id_t ev_id ODP_UNUSED)
+{
+ return 0;
+}
+
+static inline int _odp_buffer_validate_multi(const odp_buffer_t buf[] ODP_UNUSED,
+ int num ODP_UNUSED,
+ _odp_ev_id_t ev_id ODP_UNUSED)
+{
+ return 0;
+}
+
+static inline int _odp_packet_validate(odp_packet_t pkt ODP_UNUSED, _odp_ev_id_t ev_id ODP_UNUSED)
+{
+ return 0;
+}
+
+static inline int _odp_packet_validate_multi(const odp_packet_t pkt[] ODP_UNUSED,
+ int num ODP_UNUSED,
+ _odp_ev_id_t ev_id ODP_UNUSED)
+{
+ return 0;
+}
+
+#endif /* _ODP_EVENT_VALIDATION */
+
+#ifdef __cplusplus
+}
+#endif
+
+/** @endcond */
+
+#endif
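Because the disabled variants are empty static inlines returning 0, callers can invoke these hooks unconditionally on the fast path. A hedged sketch in the style of the inline wrappers that use them (the wrapper name is illustrative, not part of the commit; ODP API and hints headers assumed included):

    static inline int checked_buffer_free(odp_buffer_t buf)
    {
            /* Compiles down to plain odp_buffer_free() when
             * _ODP_EVENT_VALIDATION is 0. */
            if (odp_unlikely(_odp_buffer_validate(buf, _ODP_EV_BUFFER_FREE)))
                    return -1;

            odp_buffer_free(buf);
            return 0;
    }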
diff --git a/platform/linux-generic/include/odp/api/plat/packet_inline_types.h b/platform/linux-generic/include/odp/api/plat/packet_inline_types.h
index ae03457f9..6773b73ad 100644
--- a/platform/linux-generic/include/odp/api/plat/packet_inline_types.h
+++ b/platform/linux-generic/include/odp/api/plat/packet_inline_types.h
@@ -53,6 +53,7 @@ typedef struct _odp_packet_inline_offset_t {
uint16_t subtype;
uint16_t cls_mark;
uint16_t ipsec_ctx;
+ uint16_t crypto_op;
} _odp_packet_inline_offset_t;
diff --git a/platform/linux-generic/include/odp/api/plat/packet_inlines.h b/platform/linux-generic/include/odp/api/plat/packet_inlines.h
index 01d47d837..93e95e21c 100644
--- a/platform/linux-generic/include/odp/api/plat/packet_inlines.h
+++ b/platform/linux-generic/include/odp/api/plat/packet_inlines.h
@@ -632,7 +632,8 @@ _ODP_INLINE uint32_t odp_packet_buf_size(odp_packet_buf_t pkt_buf)
odp_pool_t pool = _odp_pkt_get(pkt_buf, odp_pool_t, pool);
return _odp_pool_get(pool, uint32_t, ext_pkt_buf_size) -
- _odp_pool_get(pool, uint32_t, ext_head_offset);
+ _odp_pool_get(pool, uint32_t, ext_head_offset) -
+ _odp_pool_get(pool, uint32_t, trailer_size);
}
_ODP_INLINE void *odp_packet_buf_head(odp_packet_buf_t pkt_buf)
diff --git a/platform/linux-generic/include/odp/api/plat/pool_inline_types.h b/platform/linux-generic/include/odp/api/plat/pool_inline_types.h
index 02f59f982..fbff7eda7 100644
--- a/platform/linux-generic/include/odp/api/plat/pool_inline_types.h
+++ b/platform/linux-generic/include/odp/api/plat/pool_inline_types.h
@@ -30,6 +30,7 @@ typedef struct _odp_pool_inline_offset_t {
uint16_t index;
uint16_t seg_len;
uint16_t uarea_size;
+ uint16_t trailer_size;
uint16_t ext_head_offset;
uint16_t ext_pkt_buf_size;
diff --git a/platform/linux-generic/include/odp/api/plat/queue_inlines.h b/platform/linux-generic/include/odp/api/plat/queue_inlines.h
index 22673a887..609c0c9e4 100644
--- a/platform/linux-generic/include/odp/api/plat/queue_inlines.h
+++ b/platform/linux-generic/include/odp/api/plat/queue_inlines.h
@@ -1,4 +1,5 @@
/* Copyright (c) 2018, Linaro Limited
+ * Copyright (c) 2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -7,6 +8,9 @@
#ifndef ODP_PLAT_QUEUE_INLINES_H_
#define ODP_PLAT_QUEUE_INLINES_H_
+#include <odp/api/hints.h>
+
+#include <odp/api/plat/event_validation_external.h>
#include <odp/api/plat/queue_inline_types.h>
/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
@@ -37,12 +41,18 @@ _ODP_INLINE void *odp_queue_context(odp_queue_t handle)
_ODP_INLINE int odp_queue_enq(odp_queue_t queue, odp_event_t ev)
{
+ if (odp_unlikely(_odp_event_validate(ev, _ODP_EV_QUEUE_ENQ)))
+ return -1;
+
return _odp_queue_api->queue_enq(queue, ev);
}
_ODP_INLINE int odp_queue_enq_multi(odp_queue_t queue,
const odp_event_t events[], int num)
{
+ if (odp_unlikely(_odp_event_validate_multi(events, num, _ODP_EV_QUEUE_ENQ_MULTI)))
+ return -1;
+
return _odp_queue_api->queue_enq_multi(queue, events, num);
}
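From the application's point of view a validation failure simply surfaces as an enqueue error. A hedged sketch of a multi-enqueue loop that treats it as such (the retry policy is illustrative):

    static int enq_all(odp_queue_t queue, odp_event_t ev[], int num)
    {
            int sent = 0;

            while (sent < num) {
                    int ret = odp_queue_enq_multi(queue, &ev[sent], num - sent);

                    if (ret < 0)
                            return -1; /* validation failure or other error */

                    sent += ret; /* a real application would bound retries */
            }
            return 0;
    }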
diff --git a/platform/linux-generic/include/odp_config_internal.h b/platform/linux-generic/include/odp_config_internal.h
index d3d09abf4..e4f8d6d6d 100644
--- a/platform/linux-generic/include/odp_config_internal.h
+++ b/platform/linux-generic/include/odp_config_internal.h
@@ -1,5 +1,5 @@
/* Copyright (c) 2016-2018, Linaro Limited
- * Copyright (c) 2019-2021, Nokia
+ * Copyright (c) 2019-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -66,7 +66,7 @@ extern "C" {
/*
* Maximum number of stashes
*/
-#define CONFIG_MAX_STASHES 128
+#define CONFIG_MAX_STASHES 2048
/*
* Maximum number of packet IO resources
@@ -134,10 +134,10 @@ extern "C" {
/*
* Number of shared memory blocks reserved for implementation internal use.
*
- * Each stash requires one SHM block, each pool requires three blocks (buffers,
- * ring, user area), and 20 blocks are reserved for per ODP module global data.
+ * Each pool requires three blocks (buffers, ring, user area), and 20 blocks
+ * are reserved for per ODP module global data.
*/
-#define CONFIG_INTERNAL_SHM_BLOCKS (CONFIG_MAX_STASHES + (ODP_CONFIG_POOLS * 3) + 20)
+#define CONFIG_INTERNAL_SHM_BLOCKS ((ODP_CONFIG_POOLS * 3) + 20)
/*
* Maximum number of shared memory blocks.
diff --git a/platform/linux-generic/include/odp_event_internal.h b/platform/linux-generic/include/odp_event_internal.h
index 5a29e926e..4bc28d708 100644
--- a/platform/linux-generic/include/odp_event_internal.h
+++ b/platform/linux-generic/include/odp_event_internal.h
@@ -53,7 +53,7 @@ typedef struct _odp_event_hdr_t {
/* --- Mostly read only data --- */
- /* Initial buffer tail pointer */
+ /* Initial buffer tail pointer and endmark location (if enabled) */
uint8_t *buf_end;
/* Combined pool and event index */
@@ -85,6 +85,11 @@ static inline void _odp_event_type_set(odp_event_t event, int ev)
_odp_event_hdr(event)->event_type = ev;
}
+static inline uint64_t *_odp_event_endmark_get_ptr(odp_event_t event)
+{
+ return (uint64_t *)(uintptr_t)_odp_event_hdr(event)->buf_end;
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/platform/linux-generic/include/odp_event_validation_internal.h b/platform/linux-generic/include/odp_event_validation_internal.h
new file mode 100644
index 000000000..f4ac16f31
--- /dev/null
+++ b/platform/linux-generic/include/odp_event_validation_internal.h
@@ -0,0 +1,52 @@
+/* Copyright (c) 2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_EVENT_VALIDATION_INTERNAL_H_
+#define ODP_EVENT_VALIDATION_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/autoheader_external.h>
+
+#include <odp/api/event.h>
+#include <odp/api/hints.h>
+
+#include <odp/api/plat/event_validation_external.h>
+
+#include <odp_event_internal.h>
+
+#include <stdint.h>
+
+#if _ODP_EVENT_VALIDATION
+
+#define _ODP_EV_ENDMARK_VAL 0xDEADBEEFDEADBEEF
+#define _ODP_EV_ENDMARK_SIZE (sizeof(uint64_t))
+
+static inline void _odp_event_endmark_set(odp_event_t event)
+{
+ uint64_t *endmark_ptr;
+
+ endmark_ptr = _odp_event_endmark_get_ptr(event);
+ *endmark_ptr = _ODP_EV_ENDMARK_VAL;
+}
+
+#else
+
+#define _ODP_EV_ENDMARK_VAL 0
+#define _ODP_EV_ENDMARK_SIZE 0
+
+static inline void _odp_event_endmark_set(odp_event_t event ODP_UNUSED)
+{
+}
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+#endif
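The corresponding check (implemented in odp_event_validation.c later in this diff) compares the stored word against _ODP_EV_ENDMARK_VAL. A hedged sketch of the core comparison when validation is enabled (the function name below is illustrative):

    static inline int endmark_intact(odp_event_t event)
    {
            uint64_t *endmark_ptr = _odp_event_endmark_get_ptr(event);

            /* The real checks also identify the calling API function via
             * _odp_ev_id_t and honor the warn/abort mode. */
            return *endmark_ptr == _ODP_EV_ENDMARK_VAL;
    }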
diff --git a/platform/linux-generic/include/odp_init_internal.h b/platform/linux-generic/include/odp_init_internal.h
index 2a1039854..24e8346ad 100644
--- a/platform/linux-generic/include/odp_init_internal.h
+++ b/platform/linux-generic/include/odp_init_internal.h
@@ -33,6 +33,9 @@ int _odp_pool_init_local(void);
int _odp_pool_term_global(void);
int _odp_pool_term_local(void);
+int _odp_event_validation_init_global(void);
+int _odp_event_validation_term_global(void);
+
int _odp_queue_init_global(void);
int _odp_queue_term_global(void);
diff --git a/platform/linux-generic/include/odp_packet_io_internal.h b/platform/linux-generic/include/odp_packet_io_internal.h
index 954602959..187a3a76f 100644
--- a/platform/linux-generic/include/odp_packet_io_internal.h
+++ b/platform/linux-generic/include/odp_packet_io_internal.h
@@ -1,5 +1,5 @@
/* Copyright (c) 2013-2018, Linaro Limited
- * Copyright (c) 2019-2022, Nokia
+ * Copyright (c) 2019-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -68,12 +68,8 @@ struct pktio_if_ops;
#define PKTIO_PRIVATE_SIZE 33792
#elif defined(_ODP_PKTIO_XDP)
#define PKTIO_PRIVATE_SIZE 29696
-#elif defined(_ODP_PKTIO_DPDK) && ODP_CACHE_LINE_SIZE == 128
-#define PKTIO_PRIVATE_SIZE 4160
-#elif defined(_ODP_PKTIO_DPDK)
-#define PKTIO_PRIVATE_SIZE 3968
#else
-#define PKTIO_PRIVATE_SIZE 384
+#define PKTIO_PRIVATE_SIZE 9216
#endif
typedef struct ODP_ALIGNED_CACHE {
diff --git a/platform/linux-generic/include/odp_pool_internal.h b/platform/linux-generic/include/odp_pool_internal.h
index 1c5b51c3d..c8d2168f3 100644
--- a/platform/linux-generic/include/odp_pool_internal.h
+++ b/platform/linux-generic/include/odp_pool_internal.h
@@ -87,6 +87,7 @@ typedef struct pool_t {
uint32_t block_size;
uint32_t block_offset;
uint32_t num_populated;
+ uint32_t trailer_size;
uint8_t *base_addr;
uint8_t *max_addr;
uint8_t *uarea_base_addr;
diff --git a/platform/linux-generic/include/odp_queue_basic_internal.h b/platform/linux-generic/include/odp_queue_basic_internal.h
index 830f50a9d..3cdcf8600 100644
--- a/platform/linux-generic/include/odp_queue_basic_internal.h
+++ b/platform/linux-generic/include/odp_queue_basic_internal.h
@@ -1,4 +1,5 @@
/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -23,7 +24,7 @@ extern "C" {
#include <odp/api/ticketlock.h>
#include <odp_config_internal.h>
#include <odp_macros_internal.h>
-#include <odp_ring_mpmc_internal.h>
+#include <odp_ring_mpmc_u32_internal.h>
#include <odp_ring_st_internal.h>
#include <odp_ring_spsc_internal.h>
#include <odp_queue_lf.h>
@@ -47,7 +48,7 @@ typedef struct ODP_ALIGNED_CACHE queue_entry_s {
odp_queue_type_t type;
/* MPMC ring (2 cache lines). */
- ring_mpmc_t ring_mpmc;
+ ring_mpmc_u32_t ring_mpmc;
odp_ticketlock_t lock;
union {
diff --git a/platform/linux-generic/include/odp_ring_mpmc_internal.h b/platform/linux-generic/include/odp_ring_mpmc_internal.h
index 6ed4dd4d1..e35179267 100644
--- a/platform/linux-generic/include/odp_ring_mpmc_internal.h
+++ b/platform/linux-generic/include/odp_ring_mpmc_internal.h
@@ -1,4 +1,5 @@
/* Copyright (c) 2018, Linaro Limited
+ * Copyright (c) 2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -19,6 +20,8 @@ extern "C" {
#include <odp/api/plat/atomic_inlines.h>
#include <odp/api/plat/cpu_inlines.h>
+#include <odp_ring_common.h>
+
/* Ring of uint32_t data
*
* Ring stores head and tail counters. Ring indexes are formed from these
@@ -34,14 +37,22 @@ extern "C" {
* r_tail r_head w_tail w_head
*
*/
-typedef struct {
+
+struct ring_mpmc_common {
odp_atomic_u32_t r_head ODP_ALIGNED_CACHE;
odp_atomic_u32_t r_tail;
odp_atomic_u32_t w_head ODP_ALIGNED_CACHE;
odp_atomic_u32_t w_tail;
+};
+
+typedef struct ODP_ALIGNED_CACHE {
+ struct ring_mpmc_common r;
+} ring_mpmc_u32_t;
-} ring_mpmc_t;
+typedef struct ODP_ALIGNED_CACHE {
+ struct ring_mpmc_common r;
+} ring_mpmc_u64_t;
static inline int ring_mpmc_cas_u32(odp_atomic_u32_t *atom,
uint32_t *old_val, uint32_t new_val)
@@ -52,21 +63,63 @@ static inline int ring_mpmc_cas_u32(odp_atomic_u32_t *atom,
__ATOMIC_RELAXED);
}
+#endif /* End of include guards */
+
+#undef _ring_mpmc_gen_t
+#undef _ring_mpmc_data_t
+#undef _RING_MPMC_INIT
+#undef _RING_MPMC_DEQ_MULTI
+#undef _RING_MPMC_ENQ_MULTI
+#undef _RING_MPMC_DEQ_BATCH
+#undef _RING_MPMC_ENQ_BATCH
+#undef _RING_MPMC_IS_EMPTY
+#undef _RING_MPMC_LEN
+
+/* This header should NOT be included directly. There are no include guards for
+ * the following types and function definitions! */
+#ifndef _ODP_RING_TYPE
+#error Include type specific (u32/u64) ring header instead of this common file.
+#endif
+
+#if _ODP_RING_TYPE == _ODP_RING_TYPE_U32
+ #define _ring_mpmc_gen_t ring_mpmc_u32_t
+ #define _ring_mpmc_data_t uint32_t
+
+ #define _RING_MPMC_INIT ring_mpmc_u32_init
+ #define _RING_MPMC_DEQ_MULTI ring_mpmc_u32_deq_multi
+ #define _RING_MPMC_ENQ_MULTI ring_mpmc_u32_enq_multi
+ #define _RING_MPMC_DEQ_BATCH ring_mpmc_u32_deq_batch
+ #define _RING_MPMC_ENQ_BATCH ring_mpmc_u32_enq_batch
+ #define _RING_MPMC_IS_EMPTY ring_mpmc_u32_is_empty
+ #define _RING_MPMC_LEN ring_mpmc_u32_len
+#elif _ODP_RING_TYPE == _ODP_RING_TYPE_U64
+ #define _ring_mpmc_gen_t ring_mpmc_u64_t
+ #define _ring_mpmc_data_t uint64_t
+
+ #define _RING_MPMC_INIT ring_mpmc_u64_init
+ #define _RING_MPMC_DEQ_MULTI ring_mpmc_u64_deq_multi
+ #define _RING_MPMC_ENQ_MULTI ring_mpmc_u64_enq_multi
+ #define _RING_MPMC_DEQ_BATCH ring_mpmc_u64_deq_batch
+ #define _RING_MPMC_ENQ_BATCH ring_mpmc_u64_enq_batch
+ #define _RING_MPMC_IS_EMPTY ring_mpmc_u64_is_empty
+ #define _RING_MPMC_LEN ring_mpmc_u64_len
+#endif
+
/* Initialize ring */
-static inline void ring_mpmc_init(ring_mpmc_t *ring)
+static inline void _RING_MPMC_INIT(_ring_mpmc_gen_t *ring)
{
- odp_atomic_init_u32(&ring->w_head, 0);
- odp_atomic_init_u32(&ring->w_tail, 0);
- odp_atomic_init_u32(&ring->r_head, 0);
- odp_atomic_init_u32(&ring->r_tail, 0);
+ odp_atomic_init_u32(&ring->r.w_head, 0);
+ odp_atomic_init_u32(&ring->r.w_tail, 0);
+ odp_atomic_init_u32(&ring->r.r_head, 0);
+ odp_atomic_init_u32(&ring->r.r_tail, 0);
}
-/* Dequeue data from the ring head. Num is smaller than ring size. */
-static inline uint32_t ring_mpmc_deq_multi(ring_mpmc_t *ring,
- uint32_t *ring_data,
- uint32_t ring_mask,
- uint32_t data[],
- uint32_t num)
+/* Dequeue data from the ring head */
+static inline uint32_t _RING_MPMC_DEQ_MULTI(_ring_mpmc_gen_t *ring,
+ _ring_mpmc_data_t *ring_data,
+ uint32_t ring_mask,
+ _ring_mpmc_data_t data[],
+ uint32_t num)
{
uint32_t old_head, new_head, w_tail, num_data, i;
@@ -75,9 +128,9 @@ static inline uint32_t ring_mpmc_deq_multi(ring_mpmc_t *ring,
* When CAS operation succeeds, this thread owns data between old
* and new r_head. */
do {
- old_head = odp_atomic_load_acq_u32(&ring->r_head);
+ old_head = odp_atomic_load_acq_u32(&ring->r.r_head);
odp_prefetch(&ring_data[(old_head + 1) & ring_mask]);
- w_tail = odp_atomic_load_acq_u32(&ring->w_tail);
+ w_tail = odp_atomic_load_acq_u32(&ring->r.w_tail);
num_data = w_tail - old_head;
/* Ring is empty */
@@ -90,7 +143,49 @@ static inline uint32_t ring_mpmc_deq_multi(ring_mpmc_t *ring,
new_head = old_head + num;
- } while (odp_unlikely(ring_mpmc_cas_u32(&ring->r_head, &old_head,
+ } while (odp_unlikely(ring_mpmc_cas_u32(&ring->r.r_head, &old_head,
+ new_head) == 0));
+
+ /* Read data. This will not move above load acquire of r_head. */
+ for (i = 0; i < num; i++)
+ data[i] = ring_data[(old_head + 1 + i) & ring_mask];
+
+ /* Wait until other readers have updated the tail */
+ while (odp_unlikely(odp_atomic_load_u32(&ring->r.r_tail) != old_head))
+ odp_cpu_pause();
+
+ /* Release the new reader tail, writers acquire it. */
+ odp_atomic_store_rel_u32(&ring->r.r_tail, new_head);
+
+ return num;
+}
+
+/* Dequeue num or 0 data from the ring head */
+static inline uint32_t _RING_MPMC_DEQ_BATCH(_ring_mpmc_gen_t *ring,
+ _ring_mpmc_data_t *ring_data,
+ uint32_t ring_mask,
+ _ring_mpmc_data_t data[],
+ uint32_t num)
+{
+ uint32_t old_head, new_head, w_tail, num_data, i;
+
+ /* Load acquires ensure that w_tail load happens after r_head load,
+ * and thus r_head value is always behind or equal to w_tail value.
+ * When CAS operation succeeds, this thread owns data between old
+ * and new r_head. */
+ do {
+ old_head = odp_atomic_load_acq_u32(&ring->r.r_head);
+ odp_prefetch(&ring_data[(old_head + 1) & ring_mask]);
+ w_tail = odp_atomic_load_acq_u32(&ring->r.w_tail);
+ num_data = w_tail - old_head;
+
+ /* Not enough data available */
+ if (num_data < num)
+ return 0;
+
+ new_head = old_head + num;
+
+ } while (odp_unlikely(ring_mpmc_cas_u32(&ring->r.r_head, &old_head,
new_head) == 0));
/* Read data. This will not move above load acquire of r_head. */
@@ -98,21 +193,21 @@ static inline uint32_t ring_mpmc_deq_multi(ring_mpmc_t *ring,
data[i] = ring_data[(old_head + 1 + i) & ring_mask];
/* Wait until other readers have updated the tail */
- while (odp_unlikely(odp_atomic_load_u32(&ring->r_tail) != old_head))
+ while (odp_unlikely(odp_atomic_load_u32(&ring->r.r_tail) != old_head))
odp_cpu_pause();
/* Release the new reader tail, writers acquire it. */
- odp_atomic_store_rel_u32(&ring->r_tail, new_head);
+ odp_atomic_store_rel_u32(&ring->r.r_tail, new_head);
return num;
}
-/* Enqueue multiple data into the ring tail. Num is smaller than ring size. */
-static inline uint32_t ring_mpmc_enq_multi(ring_mpmc_t *ring,
- uint32_t *ring_data,
- uint32_t ring_mask,
- const uint32_t data[],
- uint32_t num)
+/* Enqueue multiple data into the ring tail */
+static inline uint32_t _RING_MPMC_ENQ_MULTI(_ring_mpmc_gen_t *ring,
+ _ring_mpmc_data_t *ring_data,
+ uint32_t ring_mask,
+ const _ring_mpmc_data_t data[],
+ uint32_t num)
{
uint32_t old_head, new_head, r_tail, num_free, i;
uint32_t size = ring_mask + 1;
@@ -122,8 +217,8 @@ static inline uint32_t ring_mpmc_enq_multi(ring_mpmc_t *ring,
* When CAS operation succeeds, this thread owns data between old
* and new w_head. */
do {
- r_tail = odp_atomic_load_acq_u32(&ring->r_tail);
- old_head = odp_atomic_load_acq_u32(&ring->w_head);
+ r_tail = odp_atomic_load_acq_u32(&ring->r.r_tail);
+ old_head = odp_atomic_load_acq_u32(&ring->r.w_head);
num_free = size - (old_head - r_tail);
@@ -137,7 +232,50 @@ static inline uint32_t ring_mpmc_enq_multi(ring_mpmc_t *ring,
new_head = old_head + num;
- } while (odp_unlikely(ring_mpmc_cas_u32(&ring->w_head, &old_head,
+ } while (odp_unlikely(ring_mpmc_cas_u32(&ring->r.w_head, &old_head,
+ new_head) == 0));
+
+ /* Write data. This will not move above load acquire of w_head. */
+ for (i = 0; i < num; i++)
+ ring_data[(old_head + 1 + i) & ring_mask] = data[i];
+
+ /* Wait until other writers have updated the tail */
+ while (odp_unlikely(odp_atomic_load_u32(&ring->r.w_tail) != old_head))
+ odp_cpu_pause();
+
+ /* Release the new writer tail, readers acquire it. */
+ odp_atomic_store_rel_u32(&ring->r.w_tail, new_head);
+
+ return num;
+}
+
+/* Enqueue num or 0 data into the ring tail */
+static inline uint32_t _RING_MPMC_ENQ_BATCH(_ring_mpmc_gen_t *ring,
+ _ring_mpmc_data_t *ring_data,
+ uint32_t ring_mask,
+ const _ring_mpmc_data_t data[],
+ uint32_t num)
+{
+ uint32_t old_head, new_head, r_tail, num_free, i;
+ uint32_t size = ring_mask + 1;
+
+ /* Load acquires ensure that w_head load happens after r_tail load,
+ * and thus r_tail value is always behind or equal to w_head value.
+ * When CAS operation succeeds, this thread owns data between old
+ * and new w_head. */
+ do {
+ r_tail = odp_atomic_load_acq_u32(&ring->r.r_tail);
+ old_head = odp_atomic_load_acq_u32(&ring->r.w_head);
+
+ num_free = size - (old_head - r_tail);
+
+ /* Not enough free space available */
+ if (num_free < num)
+ return 0;
+
+ new_head = old_head + num;
+
+ } while (odp_unlikely(ring_mpmc_cas_u32(&ring->r.w_head, &old_head,
new_head) == 0));
/* Write data. This will not move above load acquire of w_head. */
@@ -145,29 +283,29 @@ static inline uint32_t ring_mpmc_enq_multi(ring_mpmc_t *ring,
ring_data[(old_head + 1 + i) & ring_mask] = data[i];
/* Wait until other writers have updated the tail */
- while (odp_unlikely(odp_atomic_load_u32(&ring->w_tail) != old_head))
+ while (odp_unlikely(odp_atomic_load_u32(&ring->r.w_tail) != old_head))
odp_cpu_pause();
/* Release the new writer tail, readers acquire it. */
- odp_atomic_store_rel_u32(&ring->w_tail, new_head);
+ odp_atomic_store_rel_u32(&ring->r.w_tail, new_head);
return num;
}
/* Check if ring is empty */
-static inline int ring_mpmc_is_empty(ring_mpmc_t *ring)
+static inline int _RING_MPMC_IS_EMPTY(_ring_mpmc_gen_t *ring)
{
- uint32_t head = odp_atomic_load_u32(&ring->r_head);
- uint32_t tail = odp_atomic_load_u32(&ring->w_tail);
+ uint32_t head = odp_atomic_load_u32(&ring->r.r_head);
+ uint32_t tail = odp_atomic_load_u32(&ring->r.w_tail);
return head == tail;
}
/* Return current ring length */
-static inline uint32_t ring_mpmc_length(ring_mpmc_t *ring)
+static inline uint32_t _RING_MPMC_LEN(_ring_mpmc_gen_t *ring)
{
- uint32_t head = odp_atomic_load_u32(&ring->r_head);
- uint32_t tail = odp_atomic_load_u32(&ring->w_tail);
+ uint32_t head = odp_atomic_load_u32(&ring->r.r_head);
+ uint32_t tail = odp_atomic_load_u32(&ring->r.w_tail);
return tail - head;
}
@@ -175,5 +313,3 @@ static inline uint32_t ring_mpmc_length(ring_mpmc_t *ring)
#ifdef __cplusplus
}
#endif
-
-#endif
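Once a type-specific header is included, the generated functions are used much like the previous u32-only ring. A hedged sketch with the new u64 variant (the ring size must be a power of two; ring memory setup is simplified):

    #include <odp_ring_mpmc_u64_internal.h>

    #define RING_SIZE 1024 /* power of two */

    static ring_mpmc_u64_t ring;
    static uint64_t ring_data[RING_SIZE];

    static void ring_example(void)
    {
            uint64_t vals[4] = {1, 2, 3, 4};
            uint64_t out[4];
            uint32_t num;

            ring_mpmc_u64_init(&ring);

            /* Enqueue up to 4 values, then dequeue whatever was stored. */
            num = ring_mpmc_u64_enq_multi(&ring, ring_data, RING_SIZE - 1, vals, 4);
            num = ring_mpmc_u64_deq_multi(&ring, ring_data, RING_SIZE - 1, out, num);
            (void)num;
    }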
diff --git a/platform/linux-generic/include/odp_ring_mpmc_u32_internal.h b/platform/linux-generic/include/odp_ring_mpmc_u32_internal.h
new file mode 100644
index 000000000..4699b5b47
--- /dev/null
+++ b/platform/linux-generic/include/odp_ring_mpmc_u32_internal.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_RING_MPMC_U32_INTERNAL_H_
+#define ODP_RING_MPMC_U32_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp_ring_common.h>
+
+#undef _ODP_RING_TYPE
+#define _ODP_RING_TYPE _ODP_RING_TYPE_U32
+
+#include <odp_ring_mpmc_internal.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp_ring_mpmc_u64_internal.h b/platform/linux-generic/include/odp_ring_mpmc_u64_internal.h
new file mode 100644
index 000000000..e7bf31a94
--- /dev/null
+++ b/platform/linux-generic/include/odp_ring_mpmc_u64_internal.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_RING_MPMC_U64_INTERNAL_H_
+#define ODP_RING_MPMC_U64_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp_ring_common.h>
+
+#undef _ODP_RING_TYPE
+#define _ODP_RING_TYPE _ODP_RING_TYPE_U64
+
+#include <odp_ring_mpmc_internal.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/libodp-linux.pc.in b/platform/linux-generic/libodp-linux.pc.in
index f9a339fb8..05ba5b9d6 100644
--- a/platform/linux-generic/libodp-linux.pc.in
+++ b/platform/linux-generic/libodp-linux.pc.in
@@ -8,5 +8,5 @@ Description: The ODP packet processing engine
Version: @PKGCONFIG_VERSION@
Requires.private: libconfig@AARCH64CRYPTO_PKG@
Libs: -L${libdir} -l@ODP_LIB_NAME@ @ATOMIC_LIBS_NON_ABI_COMPAT@
-Libs.private: @OPENSSL_STATIC_LIBS@ @DPDK_LIBS@ @PCAP_LIBS@ @PTHREAD_LIBS@ @TIMER_LIBS@ @LIBXDP_LIBS@ -lpthread @ATOMIC_LIBS_ABI_COMPAT@
+Libs.private: @OPENSSL_STATIC_LIBS@ @DPDK_LIBS@ @PCAP_LIBS@ @PTHREAD_LIBS@ @TIMER_LIBS@ @LIBXDP_LIBS@ -lpthread @ATOMIC_LIBS_ABI_COMPAT@ @IPSEC_MB_LIBS@
Cflags: -I${includedir}
diff --git a/platform/linux-generic/m4/configure.m4 b/platform/linux-generic/m4/configure.m4
index 70a393f56..515c85239 100644
--- a/platform/linux-generic/m4/configure.m4
+++ b/platform/linux-generic/m4/configure.m4
@@ -7,6 +7,7 @@ ODP_ATOMIC
ODP_PTHREAD
ODP_TIMER
m4_include([platform/linux-generic/m4/odp_cpu.m4])
+m4_include([platform/linux-generic/m4/odp_event_validation.m4])
m4_include([platform/linux-generic/m4/odp_pcap.m4])
m4_include([platform/linux-generic/m4/odp_scheduler.m4])
@@ -23,17 +24,20 @@ AM_CONDITIONAL([ODP_PKTIO_PCAP], [test x$have_pcap = xyes])
m4_include([platform/linux-generic/m4/odp_libconfig.m4])
m4_include([platform/linux-generic/m4/odp_openssl.m4])
m4_include([platform/linux-generic/m4/odp_crypto.m4])
+m4_include([platform/linux-generic/m4/odp_ipsec_mb.m4])
m4_include([platform/linux-generic/m4/odp_pcapng.m4])
m4_include([platform/linux-generic/m4/odp_netmap.m4])
m4_include([platform/linux-generic/m4/odp_dpdk.m4])
m4_include([platform/linux-generic/m4/odp_xdp.m4])
+ODP_EVENT_VALIDATION
ODP_SCHEDULER
-AS_VAR_APPEND([PLAT_DEP_LIBS], ["${ATOMIC_LIBS} ${AARCH64CRYPTO_LIBS} ${LIBCONFIG_LIBS} ${OPENSSL_LIBS} ${DPDK_LIBS_LT} ${LIBCLI_LIBS} ${LIBXDP_LIBS}"])
+AS_VAR_APPEND([PLAT_DEP_LIBS], ["${ATOMIC_LIBS} ${AARCH64CRYPTO_LIBS} ${LIBCONFIG_LIBS} ${OPENSSL_LIBS} ${IPSEC_MB_LIBS} ${DPDK_LIBS_LT} ${LIBCLI_LIBS} ${LIBXDP_LIBS}"])
# Add text to the end of configure with platform specific settings.
# Make sure it's aligned same as other lines in configure.ac.
AS_VAR_APPEND([PLAT_CFG_TEXT], ["
+ event_validation: ${enable_event_validation}
openssl: ${with_openssl}
openssl_rand: ${openssl_rand}
crypto: ${with_crypto}
diff --git a/platform/linux-generic/m4/odp_crypto.m4 b/platform/linux-generic/m4/odp_crypto.m4
index 9bb99f7dd..1cec6edb4 100644
--- a/platform/linux-generic/m4/odp_crypto.m4
+++ b/platform/linux-generic/m4/odp_crypto.m4
@@ -3,7 +3,7 @@
# Select default crypto implementation
AC_ARG_WITH([crypto],
[AS_HELP_STRING([--with-crypto],
- [Choose crypto implementation (openssl/armv8crypto/null)]
+ [Choose crypto implementation (openssl/armv8crypto/ipsecmb/null)]
[[default=openssl] (linux-generic)])],
[], [with_crypto=openssl])
@@ -14,7 +14,7 @@ AS_IF([test "x$with_crypto" = "xyes"], [with_crypto=openssl])
AS_IF([test "x$with_crypto" = "xno"], [with_crypto=null])
AS_IF([test "x$with_crypto" = "xopenssl" -a "x$with_openssl" = "xno"], [with_crypto=null])
-AS_IF([test "x$with_crypto" != "xopenssl" -a "x$with_crypto" != "xarmv8crypto" -a "x$with_crypto" != "xnull"],
+AS_IF([test "x$with_crypto" != "xopenssl" -a "x$with_crypto" != "xarmv8crypto" -a "x$with_crypto" != "xipsecmb" -a "x$with_crypto" != "xnull"],
[AC_MSG_ERROR([Invalid crypto implementation name])])
##########################################################################
@@ -31,11 +31,19 @@ AS_IF([test "x$with_crypto" == "xarmv8crypto"],
[PKG_CHECK_MODULES([AARCH64CRYPTO], [libAArch64crypto])
AARCH64CRYPTO_PKG=", libAArch64crypto"
AC_SUBST([AARCH64CRYPTO_PKG])])
+
AC_CONFIG_COMMANDS_PRE([dnl
AM_CONDITIONAL([WITH_ARMV8_CRYPTO], [test "x$with_crypto" == "xarmv8crypto"])
])
##########################################################################
+# Multi-buffer IPSec library implementation
+##########################################################################
+AC_CONFIG_COMMANDS_PRE([dnl
+AM_CONDITIONAL([WITH_IPSECMB_CRYPTO], [test "x$with_crypto" == "xipsecmb"])
+])
+
+##########################################################################
# Null implementation
##########################################################################
AS_IF([test "x$with_crypto" == "xnull"],
diff --git a/platform/linux-generic/m4/odp_event_validation.m4 b/platform/linux-generic/m4/odp_event_validation.m4
new file mode 100644
index 000000000..08bb8902e
--- /dev/null
+++ b/platform/linux-generic/m4/odp_event_validation.m4
@@ -0,0 +1,23 @@
+# ODP_EVENT_VALIDATION
+# --------------------
+# Select event validation level
+AC_DEFUN([ODP_EVENT_VALIDATION], [dnl
+AC_ARG_ENABLE([event-validation],
+ [AS_HELP_STRING([--enable-event-validation],
+ [enable event validation (warn/abort)
+ [default=disabled] (linux-generic)])],
+ [], [AS_IF([test "x$enable_debug" = "xfull"],
+ [enable_event_validation=yes], [enable_event_validation=no])])
+
+# Default to abort mode if validation is enabled
+AS_IF([test "x$enable_event_validation" = "xyes"],
+ [enable_event_validation="abort"])
+
+validation_level=0
+AS_IF([test "x$enable_event_validation" = "xwarn"], [validation_level=1])
+AS_IF([test "x$enable_event_validation" = "xyes" -o "x$enable_event_validation" = "xabort"],
+ [validation_level=2])
+
+AC_DEFINE_UNQUOTED([_ODP_EVENT_VALIDATION], [$validation_level],
+ [Define to 1 or 2 to enable event validation])
+]) # ODP_EVENT_VALIDATION
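The resulting _ODP_EVENT_VALIDATION value is consumed as a plain integer (0 = disabled, 1 = warn, 2 = abort). A hedged sketch of how code might branch on it; EV_FAIL and its actions are illustrative, not the implementation's own helpers:

    #include <stdio.h>
    #include <stdlib.h>

    #if _ODP_EVENT_VALIDATION == 2
    #define EV_FAIL(msg) do { fprintf(stderr, "%s\n", msg); abort(); } while (0)
    #elif _ODP_EVENT_VALIDATION == 1
    #define EV_FAIL(msg) fprintf(stderr, "%s\n", msg)
    #else
    #define EV_FAIL(msg) ((void)0)
    #endif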
diff --git a/platform/linux-generic/m4/odp_ipsec_mb.m4 b/platform/linux-generic/m4/odp_ipsec_mb.m4
new file mode 100644
index 000000000..3268d94c0
--- /dev/null
+++ b/platform/linux-generic/m4/odp_ipsec_mb.m4
@@ -0,0 +1,19 @@
+#########################################################################
+# Check for libIPSec_MB availability
+#########################################################################
+ipsecmb_support=no
+AC_CHECK_HEADERS([ipsec-mb.h],
+ [AC_CHECK_LIB([IPSec_MB], [init_mb_mgr_auto], [ipsecmb_support=yes],
+ [ipsecmb_support=no])],
+ [ipsecmb_support=no])
+
+AS_IF([test "x$with_crypto" = "xipsecmb" -a "x$ipsecmb_support" = "xno"],
+ [AC_MSG_ERROR([IPSec MB library not found on this platform])])
+
+if test "x$with_crypto" = "xipsecmb"; then
+ IPSEC_MB_LIBS="-lIPSec_MB"
+else
+ IPSEC_MB_LIBS=""
+fi
+
+AC_SUBST([IPSEC_MB_LIBS])
diff --git a/platform/linux-generic/m4/odp_libconfig.m4 b/platform/linux-generic/m4/odp_libconfig.m4
index 03dbc929d..0d268c935 100644
--- a/platform/linux-generic/m4/odp_libconfig.m4
+++ b/platform/linux-generic/m4/odp_libconfig.m4
@@ -3,7 +3,7 @@
##########################################################################
m4_define([_odp_config_version_generation], [0])
m4_define([_odp_config_version_major], [1])
-m4_define([_odp_config_version_minor], [22])
+m4_define([_odp_config_version_minor], [25])
m4_define([_odp_config_version],
[_odp_config_version_generation._odp_config_version_major._odp_config_version_minor])
diff --git a/platform/linux-generic/odp_crypto_api.c b/platform/linux-generic/odp_crypto_api.c
new file mode 100644
index 000000000..646472e2e
--- /dev/null
+++ b/platform/linux-generic/odp_crypto_api.c
@@ -0,0 +1,11 @@
+/* Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/crypto.h>
+
+/* Non-inlined versions of API functions */
+#define _ODP_NO_INLINE
+#include <odp/api/plat/crypto_inlines.h>
diff --git a/platform/linux-generic/odp_crypto_ipsecmb.c b/platform/linux-generic/odp_crypto_ipsecmb.c
new file mode 100644
index 000000000..a7ce6077a
--- /dev/null
+++ b/platform/linux-generic/odp_crypto_ipsecmb.c
@@ -0,0 +1,881 @@
+/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2021, ARM Limited
+ * Copyright (c) 2022-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_posix_extensions.h>
+#include <odp/autoheader_internal.h>
+
+#include <odp/api/crypto.h>
+#include <odp/api/spinlock.h>
+#include <odp/api/debug.h>
+#include <odp/api/align.h>
+#include <odp/api/shared_memory.h>
+#include <odp/api/hints.h>
+
+#include <odp/api/plat/event_inlines.h>
+#include <odp/api/plat/packet_inlines.h>
+#include <odp/api/plat/queue_inlines.h>
+#include <odp/api/plat/thread_inlines.h>
+
+#include <odp_debug_internal.h>
+#include <odp_global_data.h>
+#include <odp_init_internal.h>
+#include <odp_packet_internal.h>
+
+#include <ipsec-mb.h>
+
+#define MAX_SESSIONS 4000
+/* Length in bytes */
+#define IPSEC_MB_CRYPTO_MAX_CIPHER_KEY_LENGTH 32
+#define IPSEC_MB_CRYPTO_MAX_AUTH_KEY_LENGTH 32
+#define IPSEC_MB_CRYPTO_MAX_DATA_LENGTH 65536
+#define ZUC_DIGEST_LENGTH 4
+
+#define ODP_CRYPTO_IPSEC_MB_SHM_NAME "_odp_crypto_ipsecmb"
+/*
+ * Cipher algorithm capabilities
+ *
+ * Keep sorted: first by key length, then by IV length
+ */
+static const odp_crypto_cipher_capability_t cipher_capa_null[] = {
+{.key_len = 0, .iv_len = 0} };
+
+static const odp_crypto_cipher_capability_t cipher_capa_zuc_eea3[] = {
+{.key_len = 16, .iv_len = 16},
+{.key_len = 32, .iv_len = 25} };
+
+/*
+ * Authentication algorithm capabilities
+ *
+ * Keep sorted: first by digest length, then by key length
+ */
+static const odp_crypto_auth_capability_t auth_capa_null[] = {
+{.digest_len = 0, .key_len = 0, .aad_len = {.min = 0, .max = 0, .inc = 0} } };
+
+static const odp_crypto_auth_capability_t auth_capa_zuc_eia3[] = {
+{.digest_len = 4, .key_len = 16, .aad_len = {.min = 0, .max = 0, .inc = 0},
+ .iv_len = 16},
+{.digest_len = 4, .key_len = 32, .aad_len = {.min = 0, .max = 0, .inc = 0},
+ .iv_len = 25} };
+
+/** Forward declaration of session structure */
+typedef struct odp_crypto_generic_session_t odp_crypto_generic_session_t;
+
+/**
+ * Algorithm handler function prototype
+ */
+typedef odp_crypto_alg_err_t (*crypto_func_t)(odp_packet_t pkt,
+ const odp_crypto_packet_op_param_t *param,
+ odp_crypto_generic_session_t *session);
+
+/**
+ * Per crypto session data structure
+ */
+struct odp_crypto_generic_session_t {
+ odp_crypto_generic_session_t *next;
+
+ /* Session creation parameters */
+ odp_crypto_session_param_t p;
+
+ odp_bool_t do_cipher_first;
+
+ struct {
+ uint8_t key_data[IPSEC_MB_CRYPTO_MAX_CIPHER_KEY_LENGTH];
+ crypto_func_t func;
+ } cipher;
+
+ struct {
+ uint8_t key[IPSEC_MB_CRYPTO_MAX_AUTH_KEY_LENGTH];
+ crypto_func_t func;
+ } auth;
+
+ unsigned int idx;
+};
+
+typedef struct odp_crypto_global_s odp_crypto_global_t;
+
+struct odp_crypto_global_s {
+ odp_spinlock_t lock;
+ odp_crypto_generic_session_t *free;
+ odp_crypto_generic_session_t sessions[MAX_SESSIONS];
+};
+
+static odp_crypto_global_t *global;
+
+typedef struct crypto_local_t {
+ uint8_t buffer[IPSEC_MB_CRYPTO_MAX_DATA_LENGTH];
+ IMB_MGR *mb_mgr;
+} crypto_local_t;
+
+static __thread crypto_local_t local;
+
+static
+odp_crypto_generic_session_t *alloc_session(void)
+{
+ odp_crypto_generic_session_t *session = NULL;
+
+ odp_spinlock_lock(&global->lock);
+ session = global->free;
+ if (session) {
+ global->free = session->next;
+ session->next = NULL;
+ }
+ odp_spinlock_unlock(&global->lock);
+
+ if (!session)
+ return NULL;
+
+ session->idx = session - global->sessions;
+
+ return session;
+}
+
+static
+void free_session(odp_crypto_generic_session_t *session)
+{
+ odp_spinlock_lock(&global->lock);
+ session->next = global->free;
+ global->free = session;
+ odp_spinlock_unlock(&global->lock);
+}
+
+static odp_crypto_alg_err_t
+null_crypto_routine(odp_packet_t pkt ODP_UNUSED,
+ const odp_crypto_packet_op_param_t *param ODP_UNUSED,
+ odp_crypto_generic_session_t *session ODP_UNUSED)
+{
+ return ODP_CRYPTO_ALG_ERR_NONE;
+}
+
+static
+odp_crypto_alg_err_t zuc_eea3_cipher_op(odp_packet_t pkt,
+ const odp_crypto_packet_op_param_t *param,
+ odp_crypto_generic_session_t *session)
+{
+ IMB_MGR *mb_mgr = local.mb_mgr;
+ uint8_t *iv_ptr = param->cipher_iv_ptr;
+ uint32_t in_pos = param->cipher_range.offset;
+ uint32_t in_len = param->cipher_range.length;
+
+ _ODP_ASSERT(iv_ptr != NULL);
+
+ uint32_t seg_len = 0;
+ uint8_t *data = odp_packet_offset(pkt, in_pos, &seg_len, NULL);
+
+ if (odp_unlikely(seg_len < in_len)) {
+ if (odp_unlikely(in_len > IPSEC_MB_CRYPTO_MAX_DATA_LENGTH))
+ return ODP_CRYPTO_ALG_ERR_DATA_SIZE;
+
+ /* Packet is segmented within the cipher range. Copy the cipher
+ * range to a contiguous buffer. */
+ odp_packet_copy_to_mem(pkt, in_pos, in_len, local.buffer);
+
+ data = local.buffer;
+ }
+
+ if (session->p.cipher_key.length == 16) {
+ /* ZUC128 EEA3 */
+ IMB_ZUC_EEA3_1_BUFFER(mb_mgr, session->cipher.key_data,
+ iv_ptr,
+ data,
+ data,
+ in_len);
+ } else {
+		/* Only 16 and 32 byte keys are supported,
+		 * so this is ZUC256 EEA3 */
+ IMB_ZUC256_EEA3_1_BUFFER(mb_mgr, session->cipher.key_data,
+ iv_ptr,
+ data,
+ data,
+ in_len);
+ }
+ if (odp_unlikely(imb_get_errno(mb_mgr) != 0))
+ return ODP_CRYPTO_ALG_ERR_DATA_SIZE;
+
+ if (odp_unlikely(seg_len < in_len))
+ odp_packet_copy_from_mem(pkt, in_pos, in_len, data);
+
+ return ODP_CRYPTO_ALG_ERR_NONE;
+}
+
+static int process_zuc_eea3_param(odp_crypto_generic_session_t *session)
+{
+ if (!((16 == session->p.cipher_key.length &&
+ 16 == session->p.cipher_iv_len) ||
+ (32 == session->p.cipher_key.length &&
+ 25 == session->p.cipher_iv_len)))
+ return -1;
+
+ memcpy(session->cipher.key_data, session->p.cipher_key.data,
+ session->p.cipher_key.length);
+
+ session->cipher.func = zuc_eea3_cipher_op;
+
+ return 0;
+}
+
+static
+odp_crypto_alg_err_t auth_zuc_eia3_gen(odp_packet_t pkt,
+ const odp_crypto_packet_op_param_t *param,
+ odp_crypto_generic_session_t *session)
+{
+ IMB_MGR *mb_mgr = local.mb_mgr;
+ uint8_t *iv_ptr = param->auth_iv_ptr;
+ uint32_t in_pos = param->auth_range.offset;
+ uint32_t in_len = param->auth_range.length;
+ uint32_t auth_tag;
+
+ _ODP_ASSERT(iv_ptr != NULL);
+
+ uint32_t seg_len = 0;
+ uint8_t *data = odp_packet_offset(pkt, in_pos, &seg_len, NULL);
+
+ if (odp_unlikely(seg_len < in_len)) {
+ if (odp_unlikely(in_len > IPSEC_MB_CRYPTO_MAX_DATA_LENGTH))
+ return ODP_CRYPTO_ALG_ERR_DATA_SIZE;
+
+ /* Packet is segmented within the auth range. Copy the auth
+ * range to a contiguous buffer. */
+ odp_packet_copy_to_mem(pkt, in_pos, in_len, local.buffer);
+
+ data = local.buffer;
+ }
+
+ if (session->p.auth_key.length == 16) {
+ /* ZUC128 EIA3 */
+ IMB_ZUC_EIA3_1_BUFFER(mb_mgr, session->auth.key,
+ iv_ptr,
+ data,
+ param->auth_range.length * 8,
+ &auth_tag);
+ } else {
+		/* Only 16 and 32 byte keys are supported,
+		 * so this is ZUC256 EIA3 */
+ IMB_ZUC256_EIA3_1_BUFFER(mb_mgr, session->auth.key,
+ iv_ptr,
+ data,
+ param->auth_range.length * 8,
+ &auth_tag);
+ }
+ if (odp_unlikely(imb_get_errno(mb_mgr) != 0))
+ return ODP_CRYPTO_ALG_ERR_DATA_SIZE;
+
+ /* Copy to the output location */
+ odp_packet_copy_from_mem(pkt, param->hash_result_offset,
+ session->p.auth_digest_len,
+ &auth_tag);
+
+ return ODP_CRYPTO_ALG_ERR_NONE;
+}
+
+static
+odp_crypto_alg_err_t auth_zuc_eia3_check(odp_packet_t pkt,
+ const odp_crypto_packet_op_param_t *param,
+ odp_crypto_generic_session_t *session)
+{
+ IMB_MGR *mb_mgr = local.mb_mgr;
+ uint8_t *iv_ptr = param->auth_iv_ptr;
+ uint32_t in_pos = param->auth_range.offset;
+ uint32_t in_len = param->auth_range.length;
+ uint32_t bytes = ZUC_DIGEST_LENGTH;
+ uint32_t hash_in;
+ uint32_t hash_out;
+
+ /* Copy current value out and clear it before authentication */
+ odp_packet_copy_to_mem(pkt, param->hash_result_offset,
+ bytes, &hash_in);
+
+ if (odp_unlikely(session->p.hash_result_in_auth_range))
+ _odp_packet_set_data(pkt, param->hash_result_offset, 0, bytes);
+
+ _ODP_ASSERT(iv_ptr != NULL);
+
+ uint32_t seg_len = 0;
+ uint8_t *data = odp_packet_offset(pkt, in_pos, &seg_len, NULL);
+
+ if (odp_unlikely(seg_len < in_len)) {
+ if (odp_unlikely(in_len > IPSEC_MB_CRYPTO_MAX_DATA_LENGTH))
+ return ODP_CRYPTO_ALG_ERR_DATA_SIZE;
+
+ /* Packet is segmented within the auth range. Copy the auth
+ * range to a contiguous buffer. */
+ odp_packet_copy_to_mem(pkt, in_pos, in_len, local.buffer);
+
+ data = local.buffer;
+ }
+
+ if (session->p.auth_key.length == 16) {
+ /* ZUC128 EIA3 */
+ IMB_ZUC_EIA3_1_BUFFER(mb_mgr, session->auth.key,
+ iv_ptr,
+ data,
+ param->auth_range.length * 8,
+ &hash_out);
+ } else {
+		/* Only 16 and 32 byte keys are supported,
+		 * so this is ZUC256 EIA3 */
+ IMB_ZUC256_EIA3_1_BUFFER(mb_mgr, session->auth.key,
+ iv_ptr,
+ data,
+ param->auth_range.length * 8,
+ &hash_out);
+ }
+ if (odp_unlikely(imb_get_errno(mb_mgr) != 0))
+ return ODP_CRYPTO_ALG_ERR_DATA_SIZE;
+
+ /* Verify match */
+ if (hash_in != hash_out)
+ return ODP_CRYPTO_ALG_ERR_ICV_CHECK;
+
+ return ODP_CRYPTO_ALG_ERR_NONE;
+}
+
+static int process_auth_zuc_eia3_param(odp_crypto_generic_session_t *session)
+{
+ if (!((16 == session->p.auth_key.length &&
+ 16 == session->p.auth_iv_len) ||
+ (32 == session->p.auth_key.length &&
+ 25 == session->p.auth_iv_len)))
+ return -1;
+
+ if (ODP_CRYPTO_OP_ENCODE == session->p.op)
+ session->auth.func = auth_zuc_eia3_gen;
+ else
+ session->auth.func = auth_zuc_eia3_check;
+
+ if (session->p.auth_digest_len != ZUC_DIGEST_LENGTH)
+ return -1;
+
+ memcpy(session->auth.key, session->p.auth_key.data,
+ session->p.auth_key.length);
+
+ return 0;
+}
+
+int odp_crypto_capability(odp_crypto_capability_t *capa)
+{
+ if (NULL == capa)
+ return -1;
+
+ memset(capa, 0, sizeof(odp_crypto_capability_t));
+
+ capa->sync_mode = ODP_SUPPORT_PREFERRED;
+ capa->async_mode = ODP_SUPPORT_YES;
+ capa->queue_type_plain = 1;
+ capa->queue_type_sched = 1;
+
+ capa->ciphers.bit.null = 1;
+ capa->auths.bit.null = 1;
+
+ capa->ciphers.bit.zuc_eea3 = 1;
+ capa->auths.bit.zuc_eia3 = 1;
+
+ capa->max_sessions = MAX_SESSIONS;
+
+ return 0;
+}
+
+int odp_crypto_cipher_capability(odp_cipher_alg_t cipher,
+ odp_crypto_cipher_capability_t dst[],
+ int num_copy)
+{
+ const odp_crypto_cipher_capability_t *src;
+ int num;
+ int size = sizeof(odp_crypto_cipher_capability_t);
+
+ switch (cipher) {
+ case ODP_CIPHER_ALG_NULL:
+ src = cipher_capa_null;
+ num = sizeof(cipher_capa_null) / size;
+ break;
+ case ODP_CIPHER_ALG_ZUC_EEA3:
+ src = cipher_capa_zuc_eea3;
+ num = sizeof(cipher_capa_zuc_eea3) / size;
+ break;
+ default:
+ return -1;
+ }
+
+ if (num < num_copy)
+ num_copy = num;
+
+ memcpy(dst, src, num_copy * size);
+
+ return num;
+}
+
+int odp_crypto_auth_capability(odp_auth_alg_t auth,
+ odp_crypto_auth_capability_t dst[], int num_copy)
+{
+ const odp_crypto_auth_capability_t *src;
+ int num;
+ int size = sizeof(odp_crypto_auth_capability_t);
+
+ switch (auth) {
+ case ODP_AUTH_ALG_NULL:
+ src = auth_capa_null;
+ num = sizeof(auth_capa_null) / size;
+ break;
+ case ODP_AUTH_ALG_ZUC_EIA3:
+ src = auth_capa_zuc_eia3;
+ num = sizeof(auth_capa_zuc_eia3) / size;
+ break;
+ default:
+ return -1;
+ }
+
+ if (num < num_copy)
+ num_copy = num;
+
+ memcpy(dst, src, num_copy * size);
+
+ return num;
+}
+
+int
+odp_crypto_session_create(const odp_crypto_session_param_t *param,
+ odp_crypto_session_t *session_out,
+ odp_crypto_ses_create_err_t *status)
+{
+ int rc = 0;
+ odp_crypto_generic_session_t *session;
+
+ if (odp_global_ro.disable.crypto) {
+ _ODP_ERR("Crypto is disabled\n");
+ /* Dummy output to avoid compiler warning about uninitialized
+ * variables */
+ *status = ODP_CRYPTO_SES_ERR_ENOMEM;
+ *session_out = ODP_CRYPTO_SESSION_INVALID;
+ return -1;
+ }
+
+ if (param->op_type == ODP_CRYPTO_OP_TYPE_OOP) {
+ *status = ODP_CRYPTO_SES_ERR_PARAMS;
+ *session_out = ODP_CRYPTO_SESSION_INVALID;
+ return -1;
+ }
+
+ session = alloc_session();
+ if (NULL == session) {
+ *status = ODP_CRYPTO_SES_ERR_ENOMEM;
+ goto err;
+ }
+
+ session->p = *param;
+
+ /* Derive order */
+ if (ODP_CRYPTO_OP_ENCODE == param->op)
+ session->do_cipher_first = param->auth_cipher_text;
+ else
+ session->do_cipher_first = !param->auth_cipher_text;
+
+ /* Process based on cipher */
+ switch (param->cipher_alg) {
+ case ODP_CIPHER_ALG_NULL:
+ session->cipher.func = null_crypto_routine;
+ rc = 0;
+ break;
+ case ODP_CIPHER_ALG_ZUC_EEA3:
+ rc = process_zuc_eea3_param(session);
+ break;
+ default:
+ rc = -1;
+ }
+
+ if (rc) {
+ *status = ODP_CRYPTO_SES_ERR_CIPHER;
+ goto err;
+ }
+
+ /* Process based on auth */
+ switch (param->auth_alg) {
+ case ODP_AUTH_ALG_NULL:
+ session->auth.func = null_crypto_routine;
+ rc = 0;
+ break;
+ case ODP_AUTH_ALG_ZUC_EIA3:
+ rc = process_auth_zuc_eia3_param(session);
+ break;
+ default:
+ rc = -1;
+ }
+
+ if (rc) {
+ *status = ODP_CRYPTO_SES_ERR_AUTH;
+ goto err;
+ }
+
+ *session_out = (intptr_t)session;
+ *status = ODP_CRYPTO_SES_ERR_NONE;
+ return 0;
+
+err:
+ /* error status should be set at this moment */
+ if (session != NULL)
+ free_session(session);
+ *session_out = ODP_CRYPTO_SESSION_INVALID;
+ return -1;
+}
+
+int odp_crypto_session_destroy(odp_crypto_session_t session)
+{
+ odp_crypto_generic_session_t *generic;
+
+ generic = (odp_crypto_generic_session_t *)(intptr_t)session;
+ memset(generic, 0, sizeof(*generic));
+ free_session(generic);
+ return 0;
+}
+
+#if ODP_DEPRECATED_API
+int
+odp_crypto_operation(odp_crypto_op_param_t *param,
+ odp_bool_t *posted,
+ odp_crypto_op_result_t *result)
+{
+ odp_crypto_packet_op_param_t packet_param;
+ odp_packet_t out_pkt = param->out_pkt;
+ odp_crypto_packet_result_t packet_result;
+ odp_crypto_op_result_t local_result;
+ int rc;
+
+ if (((odp_crypto_generic_session_t *)(intptr_t)param->session)->p.op_type !=
+ ODP_CRYPTO_OP_TYPE_LEGACY)
+ return -1;
+
+ packet_param.session = param->session;
+ packet_param.cipher_iv_ptr = param->cipher_iv_ptr;
+ packet_param.auth_iv_ptr = param->auth_iv_ptr;
+ packet_param.hash_result_offset = param->hash_result_offset;
+ packet_param.aad_ptr = param->aad_ptr;
+ packet_param.cipher_range = param->cipher_range;
+ packet_param.auth_range = param->auth_range;
+
+ rc = odp_crypto_op(&param->pkt, &out_pkt, &packet_param, 1);
+ if (rc <= 0)
+ return -1;
+
+ rc = odp_crypto_result(&packet_result, out_pkt);
+ if (rc < 0) {
+ /*
+ * We cannot fail since odp_crypto_op() has already processed
+ * the packet. Let's indicate error in the result instead.
+ */
+ packet_result.ok = false;
+ }
+
+	/* Indicate to the caller that the operation was synchronous */
+ *posted = 0;
+
+ packet_subtype_set(out_pkt, ODP_EVENT_PACKET_BASIC);
+
+ /* Fill in result */
+ local_result.ctx = param->ctx;
+ local_result.pkt = out_pkt;
+ local_result.cipher_status = packet_result.cipher_status;
+ local_result.auth_status = packet_result.auth_status;
+ local_result.ok = packet_result.ok;
+
+ /*
+ * Be bug-to-bug compatible. Return output packet also through params.
+ */
+ param->out_pkt = out_pkt;
+
+ *result = local_result;
+
+ return 0;
+}
+#endif
+
+int _odp_crypto_init_global(void)
+{
+ size_t mem_size;
+ odp_shm_t shm;
+ int idx;
+
+ if (odp_global_ro.disable.crypto) {
+ _ODP_PRINT("\nODP crypto is DISABLED\n");
+ return 0;
+ }
+
+ /* Calculate the memory size we need */
+ mem_size = sizeof(odp_crypto_global_t);
+
+ /* Allocate our globally shared memory */
+ shm = odp_shm_reserve(ODP_CRYPTO_IPSEC_MB_SHM_NAME, mem_size,
+ ODP_CACHE_LINE_SIZE,
+ 0);
+ if (ODP_SHM_INVALID == shm) {
+ _ODP_ERR("unable to allocate crypto pool\n");
+ return -1;
+ }
+
+ global = odp_shm_addr(shm);
+
+ /* Clear it out */
+ memset(global, 0, mem_size);
+
+ /* Initialize free list and lock */
+ for (idx = 0; idx < MAX_SESSIONS; idx++) {
+ global->sessions[idx].next = global->free;
+ global->free = &global->sessions[idx];
+ }
+ odp_spinlock_init(&global->lock);
+
+ return 0;
+}
+
+int _odp_crypto_term_global(void)
+{
+ int rc = 0;
+ int ret;
+ int count = 0;
+ odp_crypto_generic_session_t *session;
+
+ if (odp_global_ro.disable.crypto)
+ return 0;
+
+ for (session = global->free; session != NULL; session = session->next)
+ count++;
+ if (count != MAX_SESSIONS) {
+ _ODP_ERR("crypto sessions still active\n");
+ rc = -1;
+ }
+
+ ret = odp_shm_free(odp_shm_lookup(ODP_CRYPTO_IPSEC_MB_SHM_NAME));
+ if (ret < 0) {
+ _ODP_ERR("shm free failed for %s\n", ODP_CRYPTO_IPSEC_MB_SHM_NAME);
+ rc = -1;
+ }
+
+ return rc;
+}
+
+int _odp_crypto_init_local(void)
+{
+ uint64_t flags = 0;
+
+ if (odp_global_ro.disable.crypto)
+ return 0;
+
+ memset(&local, 0, sizeof(local));
+
+ local.mb_mgr = alloc_mb_mgr(flags);
+ if (local.mb_mgr == NULL)
+ return -1;
+
+ init_mb_mgr_auto(local.mb_mgr, NULL);
+
+ return 0;
+}
+
+int _odp_crypto_term_local(void)
+{
+ if (odp_global_ro.disable.crypto)
+ return 0;
+
+ free_mb_mgr(local.mb_mgr);
+ return 0;
+}
+
+#if ODP_DEPRECATED_API
+odp_crypto_compl_t odp_crypto_compl_from_event(odp_event_t ev)
+{
+	/* This check is not mandated by the API specification */
+ if (odp_event_type(ev) != ODP_EVENT_CRYPTO_COMPL)
+ _ODP_ABORT("Event not a crypto completion");
+ return (odp_crypto_compl_t)ev;
+}
+
+odp_event_t odp_crypto_compl_to_event(odp_crypto_compl_t completion_event)
+{
+ return (odp_event_t)completion_event;
+}
+
+void
+odp_crypto_compl_result(odp_crypto_compl_t completion_event,
+ odp_crypto_op_result_t *result)
+{
+ (void)completion_event;
+ (void)result;
+
+ /* We won't get such events anyway, so there can be no result */
+ _ODP_ASSERT(0);
+}
+
+void
+odp_crypto_compl_free(odp_crypto_compl_t completion_event)
+{
+ odp_event_t ev = odp_crypto_compl_to_event(completion_event);
+
+ odp_buffer_free(odp_buffer_from_event(ev));
+}
+
+uint64_t odp_crypto_compl_to_u64(odp_crypto_compl_t hdl)
+{
+ return _odp_pri(hdl);
+}
+#endif /* ODP_DEPRECATED_API */
+
+void odp_crypto_session_param_init(odp_crypto_session_param_t *param)
+{
+ memset(param, 0, sizeof(odp_crypto_session_param_t));
+}
+
+uint64_t odp_crypto_session_to_u64(odp_crypto_session_t hdl)
+{
+ return (uint64_t)hdl;
+}
+
+static int copy_data_and_metadata(odp_packet_t dst, odp_packet_t src)
+{
+ int md_copy;
+ int rc;
+
+ md_copy = _odp_packet_copy_md_possible(odp_packet_pool(dst),
+ odp_packet_pool(src));
+ if (odp_unlikely(md_copy < 0)) {
+ _ODP_ERR("Unable to copy packet metadata\n");
+ return -1;
+ }
+
+ rc = odp_packet_copy_from_pkt(dst, 0, src, 0, odp_packet_len(src));
+ if (odp_unlikely(rc < 0)) {
+ _ODP_ERR("Unable to copy packet data\n");
+ return -1;
+ }
+
+ _odp_packet_copy_md(packet_hdr(dst), packet_hdr(src), md_copy);
+ return 0;
+}
+
+static odp_packet_t get_output_packet(const odp_crypto_generic_session_t *session,
+ odp_packet_t pkt_in,
+ odp_packet_t pkt_out)
+{
+ int rc;
+
+ if (odp_likely(session->p.op_type == ODP_CRYPTO_OP_TYPE_BASIC))
+ return pkt_in;
+
+ if (odp_likely(pkt_in == pkt_out))
+ return pkt_out;
+
+ if (pkt_out == ODP_PACKET_INVALID) {
+ odp_pool_t pool = session->p.output_pool;
+
+ _ODP_ASSERT(pool != ODP_POOL_INVALID);
+ if (pool == odp_packet_pool(pkt_in)) {
+ pkt_out = pkt_in;
+ } else {
+ pkt_out = odp_packet_copy(pkt_in, pool);
+ if (odp_likely(pkt_out != ODP_PACKET_INVALID))
+ odp_packet_free(pkt_in);
+ }
+ return pkt_out;
+ }
+ rc = copy_data_and_metadata(pkt_out, pkt_in);
+ if (odp_unlikely(rc < 0))
+ return ODP_PACKET_INVALID;
+
+ odp_packet_free(pkt_in);
+ return pkt_out;
+}
+
+static
+int crypto_int(odp_packet_t pkt_in,
+ odp_packet_t *pkt_out,
+ const odp_crypto_packet_op_param_t *param)
+{
+ odp_crypto_alg_err_t rc_cipher = ODP_CRYPTO_ALG_ERR_NONE;
+ odp_crypto_alg_err_t rc_auth = ODP_CRYPTO_ALG_ERR_NONE;
+ odp_crypto_generic_session_t *session;
+ odp_packet_t out_pkt;
+ odp_crypto_packet_result_t *op_result;
+
+ session = (odp_crypto_generic_session_t *)(intptr_t)param->session;
+
+ out_pkt = get_output_packet(session, pkt_in, *pkt_out);
+ if (odp_unlikely(out_pkt == ODP_PACKET_INVALID))
+ return -1;
+
+ /* Invoke the crypto function */
+ if (session->do_cipher_first) {
+ rc_cipher = session->cipher.func(out_pkt, param, session);
+ rc_auth = session->auth.func(out_pkt, param, session);
+ } else {
+ rc_auth = session->auth.func(out_pkt, param, session);
+ rc_cipher = session->cipher.func(out_pkt, param, session);
+ }
+
+ packet_subtype_set(out_pkt, ODP_EVENT_PACKET_CRYPTO);
+ op_result = &packet_hdr(out_pkt)->crypto_op_result;
+ op_result->cipher_status.alg_err = rc_cipher;
+ op_result->cipher_status.hw_err = ODP_CRYPTO_HW_ERR_NONE;
+ op_result->auth_status.alg_err = rc_auth;
+ op_result->auth_status.hw_err = ODP_CRYPTO_HW_ERR_NONE;
+ op_result->ok =
+ (rc_cipher == ODP_CRYPTO_ALG_ERR_NONE) &&
+ (rc_auth == ODP_CRYPTO_ALG_ERR_NONE);
+
+ /* Synchronous, simply return results */
+ *pkt_out = out_pkt;
+
+ return 0;
+}
+
+int odp_crypto_op(const odp_packet_t pkt_in[],
+ odp_packet_t pkt_out[],
+ const odp_crypto_packet_op_param_t param[],
+ int num_pkt)
+{
+ int i, rc;
+ odp_crypto_generic_session_t *session;
+
+ for (i = 0; i < num_pkt; i++) {
+ session = (odp_crypto_generic_session_t *)(intptr_t)param[i].session;
+ _ODP_ASSERT(ODP_CRYPTO_SYNC == session->p.op_mode);
+
+ rc = crypto_int(pkt_in[i], &pkt_out[i], &param[i]);
+ if (rc < 0)
+ break;
+ }
+
+ return i;
+}
+
+int odp_crypto_op_enq(const odp_packet_t pkt_in[],
+ const odp_packet_t pkt_out[],
+ const odp_crypto_packet_op_param_t param[],
+ int num_pkt)
+{
+ odp_packet_t pkt;
+ odp_event_t event;
+ odp_crypto_generic_session_t *session;
+ int i, rc;
+
+ for (i = 0; i < num_pkt; i++) {
+ session = (odp_crypto_generic_session_t *)(intptr_t)param[i].session;
+ _ODP_ASSERT(ODP_CRYPTO_ASYNC == session->p.op_mode);
+ _ODP_ASSERT(ODP_QUEUE_INVALID != session->p.compl_queue);
+
+ if (session->p.op_type != ODP_CRYPTO_OP_TYPE_BASIC)
+ pkt = pkt_out[i];
+
+ rc = crypto_int(pkt_in[i], &pkt, &param[i]);
+ if (rc < 0)
+ break;
+
+ event = odp_packet_to_event(pkt);
+ if (odp_queue_enq(session->p.compl_queue, event)) {
+ odp_event_free(event);
+ break;
+ }
+ }
+
+ return i;
+}
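
The new ipsec-mb backend plugs ZUC EEA3/EIA3 (plus the null algorithms) into the standard ODP crypto session API. The sketch below shows one way an application could drive it synchronously, using only types and enums that appear in this file; the pool, packet, key and IV variables are placeholders and error handling is reduced to the minimum:

    /* Placeholder inputs: pkt holds the plaintext, key16/iv16 are 16 bytes each */
    odp_crypto_session_param_t sp;
    odp_crypto_session_t ses;
    odp_crypto_ses_create_err_t err;
    odp_crypto_packet_op_param_t op;
    odp_crypto_packet_result_t res;

    odp_crypto_session_param_init(&sp);
    sp.op                = ODP_CRYPTO_OP_ENCODE;
    sp.op_mode           = ODP_CRYPTO_SYNC;
    sp.op_type           = ODP_CRYPTO_OP_TYPE_BASIC;
    sp.cipher_alg        = ODP_CIPHER_ALG_ZUC_EEA3;
    sp.cipher_key.data   = key16;
    sp.cipher_key.length = 16;
    sp.cipher_iv_len     = 16;      /* per cipher_capa_zuc_eea3 above */
    sp.auth_alg          = ODP_AUTH_ALG_NULL;

    if (odp_crypto_session_create(&sp, &ses, &err) < 0)
        return -1;

    memset(&op, 0, sizeof(op));
    op.session             = ses;
    op.cipher_iv_ptr       = iv16;
    op.cipher_range.offset = 0;
    op.cipher_range.length = odp_packet_len(pkt);

    if (odp_crypto_op(&pkt, &pkt, &op, 1) != 1)   /* in-place, basic op type */
        return -1;

    odp_crypto_result(&res, pkt);
    if (!res.ok)
        return -1;

Key and IV lengths other than the 16/16 and 32/25 byte combinations listed in the capability tables are rejected at session creation time.
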
diff --git a/platform/linux-generic/odp_crypto_null.c b/platform/linux-generic/odp_crypto_null.c
index b9d319861..3ca27ca00 100644
--- a/platform/linux-generic/odp_crypto_null.c
+++ b/platform/linux-generic/odp_crypto_null.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2014-2018, Linaro Limited
- * Copyright (c) 2021-2022, Nokia
+ * Copyright (c) 2021-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -201,7 +201,13 @@ odp_crypto_session_create(const odp_crypto_session_param_t *param,
_ODP_ERR("Crypto is disabled\n");
/* Dummy output to avoid compiler warning about uninitialized
* variables */
- *status = ODP_CRYPTO_SES_CREATE_ERR_ENOMEM;
+ *status = ODP_CRYPTO_SES_ERR_ENOMEM;
+ *session_out = ODP_CRYPTO_SESSION_INVALID;
+ return -1;
+ }
+
+ if (param->op_type == ODP_CRYPTO_OP_TYPE_OOP) {
+ *status = ODP_CRYPTO_SES_ERR_PARAMS;
*session_out = ODP_CRYPTO_SESSION_INVALID;
return -1;
}
@@ -209,7 +215,7 @@ odp_crypto_session_create(const odp_crypto_session_param_t *param,
/* Allocate memory for this session */
session = alloc_session();
if (NULL == session) {
- *status = ODP_CRYPTO_SES_CREATE_ERR_ENOMEM;
+ *status = ODP_CRYPTO_SES_ERR_ENOMEM;
goto err;
}
@@ -227,7 +233,7 @@ odp_crypto_session_create(const odp_crypto_session_param_t *param,
/* Check result */
if (rc) {
- *status = ODP_CRYPTO_SES_CREATE_ERR_INV_CIPHER;
+ *status = ODP_CRYPTO_SES_ERR_CIPHER;
goto err;
}
@@ -242,13 +248,13 @@ odp_crypto_session_create(const odp_crypto_session_param_t *param,
/* Check result */
if (rc) {
- *status = ODP_CRYPTO_SES_CREATE_ERR_INV_AUTH;
+ *status = ODP_CRYPTO_SES_ERR_AUTH;
goto err;
}
/* We're happy */
*session_out = (intptr_t)session;
- *status = ODP_CRYPTO_SES_CREATE_ERR_NONE;
+ *status = ODP_CRYPTO_SES_ERR_NONE;
return 0;
err:
@@ -281,6 +287,10 @@ odp_crypto_operation(odp_crypto_op_param_t *param,
odp_crypto_op_result_t local_result;
int rc;
+ if (((odp_crypto_generic_session_t *)(intptr_t)param->session)->p.op_type !=
+ ODP_CRYPTO_OP_TYPE_LEGACY)
+ return -1;
+
packet_param.session = param->session;
packet_param.cipher_iv_ptr = param->cipher_iv_ptr;
packet_param.auth_iv_ptr = param->auth_iv_ptr;
@@ -449,43 +459,6 @@ uint64_t odp_crypto_session_to_u64(odp_crypto_session_t hdl)
return (uint64_t)hdl;
}
-odp_packet_t odp_crypto_packet_from_event(odp_event_t ev)
-{
- /* This check not mandated by the API specification */
- _ODP_ASSERT(odp_event_type(ev) == ODP_EVENT_PACKET);
- _ODP_ASSERT(odp_event_subtype(ev) == ODP_EVENT_PACKET_CRYPTO);
-
- return odp_packet_from_event(ev);
-}
-
-odp_event_t odp_crypto_packet_to_event(odp_packet_t pkt)
-{
- return odp_packet_to_event(pkt);
-}
-
-static
-odp_crypto_packet_result_t *get_op_result_from_packet(odp_packet_t pkt)
-{
- odp_packet_hdr_t *hdr = packet_hdr(pkt);
-
- return &hdr->crypto_op_result;
-}
-
-int odp_crypto_result(odp_crypto_packet_result_t *result,
- odp_packet_t packet)
-{
- odp_crypto_packet_result_t *op_result;
-
- _ODP_ASSERT(odp_event_subtype(odp_packet_to_event(packet)) ==
- ODP_EVENT_PACKET_CRYPTO);
-
- op_result = get_op_result_from_packet(packet);
-
- memcpy(result, op_result, sizeof(*result));
-
- return 0;
-}
-
static int copy_data_and_metadata(odp_packet_t dst, odp_packet_t src)
{
int md_copy;
@@ -514,6 +487,9 @@ static odp_packet_t get_output_packet(const odp_crypto_generic_session_t *sessio
{
int rc;
+ if (odp_likely(session->p.op_type == ODP_CRYPTO_OP_TYPE_BASIC))
+ return pkt_in;
+
if (odp_likely(pkt_in == pkt_out))
return pkt_out;
@@ -555,7 +531,7 @@ int crypto_int(odp_packet_t pkt_in,
/* Fill in result */
packet_subtype_set(out_pkt, ODP_EVENT_PACKET_CRYPTO);
- op_result = get_op_result_from_packet(out_pkt);
+ op_result = &packet_hdr(out_pkt)->crypto_op_result;
op_result->cipher_status.alg_err = ODP_CRYPTO_ALG_ERR_NONE;
op_result->cipher_status.hw_err = ODP_CRYPTO_HW_ERR_NONE;
op_result->auth_status.alg_err = ODP_CRYPTO_ALG_ERR_NONE;
@@ -603,7 +579,9 @@ int odp_crypto_op_enq(const odp_packet_t pkt_in[],
_ODP_ASSERT(ODP_CRYPTO_ASYNC == session->p.op_mode);
_ODP_ASSERT(ODP_QUEUE_INVALID != session->p.compl_queue);
- pkt = pkt_out[i];
+ if (session->p.op_type != ODP_CRYPTO_OP_TYPE_BASIC)
+ pkt = pkt_out[i];
+
rc = crypto_int(pkt_in[i], &pkt, &param[i]);
if (rc < 0)
break;
diff --git a/platform/linux-generic/odp_crypto_openssl.c b/platform/linux-generic/odp_crypto_openssl.c
index 0f637850f..19925fc09 100644
--- a/platform/linux-generic/odp_crypto_openssl.c
+++ b/platform/linux-generic/odp_crypto_openssl.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2014-2018, Linaro Limited
- * Copyright (c) 2021-2022, Nokia
+ * Copyright (c) 2021-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -217,14 +217,12 @@ struct odp_crypto_generic_session_t {
odp_crypto_session_param_t p;
odp_bool_t do_cipher_first;
+ uint8_t cipher_bit_mode : 1;
+ uint8_t cipher_range_used : 1;
+ uint8_t auth_range_used : 1;
struct {
-#if ODP_DEPRECATED_API
- /* Copy of session IV data */
- uint8_t iv_data[EVP_MAX_IV_LENGTH];
-#endif
uint8_t key_data[EVP_MAX_KEY_LENGTH];
-
const EVP_CIPHER *evp_cipher;
crypto_func_t func;
crypto_init_func_t init;
@@ -232,9 +230,6 @@ struct odp_crypto_generic_session_t {
struct {
uint8_t key[EVP_MAX_KEY_LENGTH];
-#if ODP_DEPRECATED_API
- uint8_t iv_data[EVP_MAX_IV_LENGTH];
-#endif
union {
const EVP_MD *evp_md;
const EVP_CIPHER *evp_cipher;
@@ -710,23 +705,11 @@ int packet_cmac_eia2(odp_packet_t pkt,
uint8_t *hash)
{
CMAC_CTX *ctx = local.cmac_ctx[session->idx];
- void *iv_ptr;
+ void *iv_ptr = param->auth_iv_ptr;
uint32_t offset = param->auth_range.offset;
uint32_t len = param->auth_range.length;
size_t outlen;
-#if ODP_DEPRECATED_API
- if (param->auth_iv_ptr)
- iv_ptr = param->auth_iv_ptr;
- else if (session->p.auth_iv.data)
- iv_ptr = session->auth.iv_data;
- else
- return ODP_CRYPTO_ALG_ERR_IV_INVALID;
-#else
- iv_ptr = param->auth_iv_ptr;
- _ODP_ASSERT(session->p.auth_iv_len == 0 || iv_ptr != NULL);
-#endif
-
_ODP_ASSERT(offset + len <= odp_packet_len(pkt));
/* Reinitialize CMAC calculation without resetting the key */
@@ -1073,22 +1056,9 @@ odp_crypto_alg_err_t cipher_encrypt(odp_packet_t pkt,
odp_crypto_generic_session_t *session)
{
EVP_CIPHER_CTX *ctx = local.cipher_ctx[session->idx];
- void *iv_ptr;
int ret;
-#if ODP_DEPRECATED_API
- if (param->cipher_iv_ptr)
- iv_ptr = param->cipher_iv_ptr;
- else if (session->p.cipher_iv.data)
- iv_ptr = session->cipher.iv_data;
- else
- return ODP_CRYPTO_ALG_ERR_IV_INVALID;
-#else
- iv_ptr = param->cipher_iv_ptr;
- _ODP_ASSERT(session->p.cipher_iv_len == 0 || iv_ptr != NULL);
-#endif
-
- EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, iv_ptr);
+ EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, param->cipher_iv_ptr);
ret = internal_encrypt(ctx, pkt, param);
@@ -1112,22 +1082,9 @@ odp_crypto_alg_err_t cipher_decrypt(odp_packet_t pkt,
odp_crypto_generic_session_t *session)
{
EVP_CIPHER_CTX *ctx = local.cipher_ctx[session->idx];
- void *iv_ptr;
int ret;
-#if ODP_DEPRECATED_API
- if (param->cipher_iv_ptr)
- iv_ptr = param->cipher_iv_ptr;
- else if (session->p.cipher_iv.data)
- iv_ptr = session->cipher.iv_data;
- else
- return ODP_CRYPTO_ALG_ERR_IV_INVALID;
-#else
- iv_ptr = param->cipher_iv_ptr;
- _ODP_ASSERT(session->p.cipher_iv_len == 0 || iv_ptr != NULL);
-#endif
-
- EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, iv_ptr);
+ EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, param->cipher_iv_ptr);
ret = internal_decrypt(ctx, pkt, param);
@@ -1172,7 +1129,6 @@ odp_crypto_alg_err_t cipher_encrypt_bits(odp_packet_t pkt,
odp_crypto_generic_session_t *session)
{
EVP_CIPHER_CTX *ctx = local.cipher_ctx[session->idx];
- void *iv_ptr;
int dummy_len = 0;
int cipher_len;
uint32_t in_len = (param->cipher_range.length + 7) / 8;
@@ -1183,18 +1139,7 @@ odp_crypto_alg_err_t cipher_encrypt_bits(odp_packet_t pkt,
/* Range offset is in bits in bit mode but must be divisible by 8. */
offset = param->cipher_range.offset / 8;
-#if ODP_DEPRECATED_API
- if (param->cipher_iv_ptr)
- iv_ptr = param->cipher_iv_ptr;
- else if (session->p.cipher_iv.data)
- iv_ptr = session->cipher.iv_data;
- else
- return ODP_CRYPTO_ALG_ERR_IV_INVALID;
-#else
- iv_ptr = param->cipher_iv_ptr;
- _ODP_ASSERT(session->p.cipher_iv_len == 0 || iv_ptr != NULL);
-#endif
- EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, iv_ptr);
+ EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, param->cipher_iv_ptr);
odp_packet_copy_to_mem(pkt, offset, in_len, data);
@@ -1216,7 +1161,6 @@ odp_crypto_alg_err_t cipher_decrypt_bits(odp_packet_t pkt,
odp_crypto_generic_session_t *session)
{
EVP_CIPHER_CTX *ctx = local.cipher_ctx[session->idx];
- void *iv_ptr;
int dummy_len = 0;
int cipher_len;
uint32_t in_len = (param->cipher_range.length + 7) / 8;
@@ -1227,18 +1171,7 @@ odp_crypto_alg_err_t cipher_decrypt_bits(odp_packet_t pkt,
/* Range offset is in bits in bit mode but must be divisible by 8. */
offset = param->cipher_range.offset / 8;
-#if ODP_DEPRECATED_API
- if (param->cipher_iv_ptr)
- iv_ptr = param->cipher_iv_ptr;
- else if (session->p.cipher_iv.data)
- iv_ptr = session->cipher.iv_data;
- else
- return ODP_CRYPTO_ALG_ERR_IV_INVALID;
-#else
- iv_ptr = param->cipher_iv_ptr;
- _ODP_ASSERT(session->p.cipher_iv_len == 0 || iv_ptr != NULL);
-#endif
- EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, iv_ptr);
+ EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, param->cipher_iv_ptr);
odp_packet_copy_to_mem(pkt, offset, in_len, data);
@@ -1266,6 +1199,7 @@ static int process_cipher_param_bits(odp_crypto_generic_session_t *session,
session->p.cipher_iv_len)
return -1;
+ session->cipher_bit_mode = 1;
session->cipher.evp_cipher = cipher;
memcpy(session->cipher.key_data, session->p.cipher_key.data,
@@ -1303,24 +1237,11 @@ odp_crypto_alg_err_t aes_gcm_encrypt(odp_packet_t pkt,
EVP_CIPHER_CTX *ctx = local.cipher_ctx[session->idx];
const uint8_t *aad_head = param->aad_ptr;
uint32_t aad_len = session->p.auth_aad_len;
- void *iv_ptr;
int dummy_len = 0;
uint8_t block[EVP_MAX_MD_SIZE];
int ret;
-#if ODP_DEPRECATED_API
- if (param->cipher_iv_ptr)
- iv_ptr = param->cipher_iv_ptr;
- else if (session->p.cipher_iv.data)
- iv_ptr = session->cipher.iv_data;
- else
- return ODP_CRYPTO_ALG_ERR_IV_INVALID;
-#else
- iv_ptr = param->cipher_iv_ptr;
- _ODP_ASSERT(session->p.cipher_iv_len == 0 || iv_ptr != NULL);
-#endif
-
- EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, iv_ptr);
+ EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, param->cipher_iv_ptr);
/* Authenticate header data (if any) without encrypting them */
if (aad_len > 0)
@@ -1359,23 +1280,10 @@ odp_crypto_alg_err_t aes_gcm_decrypt(odp_packet_t pkt,
const uint8_t *aad_head = param->aad_ptr;
uint32_t aad_len = session->p.auth_aad_len;
int dummy_len = 0;
- void *iv_ptr;
uint8_t block[EVP_MAX_MD_SIZE];
int ret;
-#if ODP_DEPRECATED_API
- if (param->cipher_iv_ptr)
- iv_ptr = param->cipher_iv_ptr;
- else if (session->p.cipher_iv.data)
- iv_ptr = session->cipher.iv_data;
- else
- return ODP_CRYPTO_ALG_ERR_IV_INVALID;
-#else
- iv_ptr = param->cipher_iv_ptr;
- _ODP_ASSERT(session->p.cipher_iv_len == 0 || iv_ptr != NULL);
-#endif
-
- EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, iv_ptr);
+ EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, param->cipher_iv_ptr);
odp_packet_copy_to_mem(pkt, param->hash_result_offset,
session->p.auth_digest_len, block);
@@ -1440,23 +1348,10 @@ odp_crypto_alg_err_t aes_gmac_gen(odp_packet_t pkt,
odp_crypto_generic_session_t *session)
{
EVP_CIPHER_CTX *ctx = local.mac_cipher_ctx[session->idx];
- void *iv_ptr;
uint8_t block[EVP_MAX_MD_SIZE];
int ret;
-#if ODP_DEPRECATED_API
- if (param->auth_iv_ptr)
- iv_ptr = param->auth_iv_ptr;
- else if (session->p.auth_iv.data)
- iv_ptr = session->auth.iv_data;
- else
- return ODP_CRYPTO_ALG_ERR_IV_INVALID;
-#else
- iv_ptr = param->auth_iv_ptr;
- _ODP_ASSERT(session->p.auth_iv_len == 0 || iv_ptr != NULL);
-#endif
-
- EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, iv_ptr);
+ EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, param->auth_iv_ptr);
ret = internal_aad(ctx, pkt, param, true);
@@ -1487,23 +1382,10 @@ odp_crypto_alg_err_t aes_gmac_check(odp_packet_t pkt,
odp_crypto_generic_session_t *session)
{
EVP_CIPHER_CTX *ctx = local.mac_cipher_ctx[session->idx];
- void *iv_ptr;
uint8_t block[EVP_MAX_MD_SIZE];
int ret;
-#if ODP_DEPRECATED_API
- if (param->auth_iv_ptr)
- iv_ptr = param->auth_iv_ptr;
- else if (session->p.auth_iv.data)
- iv_ptr = session->auth.iv_data;
- else
- return ODP_CRYPTO_ALG_ERR_IV_INVALID;
-#else
- iv_ptr = param->auth_iv_ptr;
- _ODP_ASSERT(session->p.auth_iv_len == 0 || iv_ptr != NULL);
-#endif
-
- EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, iv_ptr);
+ EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, param->auth_iv_ptr);
odp_packet_copy_to_mem(pkt, param->hash_result_offset,
session->p.auth_digest_len, block);
@@ -1568,7 +1450,6 @@ odp_crypto_alg_err_t aes_ccm_encrypt(odp_packet_t pkt,
EVP_CIPHER_CTX *ctx = local.cipher_ctx[session->idx];
const uint8_t *aad_head = param->aad_ptr;
uint32_t aad_len = session->p.auth_aad_len;
- void *iv_ptr;
int dummy_len = 0;
int cipher_len;
uint32_t in_len = param->cipher_range.length;
@@ -1576,21 +1457,9 @@ odp_crypto_alg_err_t aes_ccm_encrypt(odp_packet_t pkt,
uint8_t block[EVP_MAX_MD_SIZE];
int ret;
-#if ODP_DEPRECATED_API
- if (param->cipher_iv_ptr)
- iv_ptr = param->cipher_iv_ptr;
- else if (session->p.cipher_iv.data)
- iv_ptr = session->cipher.iv_data;
- else
- return ODP_CRYPTO_ALG_ERR_IV_INVALID;
-#else
- iv_ptr = param->cipher_iv_ptr;
- _ODP_ASSERT(session->p.cipher_iv_len == 0 || iv_ptr != NULL);
-#endif
-
EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_CCM_SET_TAG,
session->p.auth_digest_len, NULL);
- EVP_EncryptInit_ex(ctx, NULL, NULL, session->cipher.key_data, iv_ptr);
+ EVP_EncryptInit_ex(ctx, NULL, NULL, session->cipher.key_data, param->cipher_iv_ptr);
/* Set len */
EVP_EncryptUpdate(ctx, NULL, &dummy_len, NULL, in_len);
@@ -1640,7 +1509,6 @@ odp_crypto_alg_err_t aes_ccm_decrypt(odp_packet_t pkt,
EVP_CIPHER_CTX *ctx = local.cipher_ctx[session->idx];
const uint8_t *aad_head = param->aad_ptr;
uint32_t aad_len = session->p.auth_aad_len;
- void *iv_ptr;
int dummy_len = 0;
int cipher_len;
uint32_t in_len = param->cipher_range.length;
@@ -1648,23 +1516,11 @@ odp_crypto_alg_err_t aes_ccm_decrypt(odp_packet_t pkt,
uint8_t block[EVP_MAX_MD_SIZE];
int ret;
-#if ODP_DEPRECATED_API
- if (param->cipher_iv_ptr)
- iv_ptr = param->cipher_iv_ptr;
- else if (session->p.cipher_iv.data)
- iv_ptr = session->cipher.iv_data;
- else
- return ODP_CRYPTO_ALG_ERR_IV_INVALID;
-#else
- iv_ptr = param->cipher_iv_ptr;
- _ODP_ASSERT(session->p.cipher_iv_len == 0 || iv_ptr != NULL);
-#endif
-
odp_packet_copy_to_mem(pkt, param->hash_result_offset,
session->p.auth_digest_len, block);
EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_CCM_SET_TAG,
session->p.auth_digest_len, block);
- EVP_DecryptInit_ex(ctx, NULL, NULL, session->cipher.key_data, iv_ptr);
+ EVP_DecryptInit_ex(ctx, NULL, NULL, session->cipher.key_data, param->cipher_iv_ptr);
/* Set len */
EVP_DecryptUpdate(ctx, NULL, &dummy_len, NULL, in_len);
@@ -1725,26 +1581,13 @@ odp_crypto_alg_err_t xts_encrypt(odp_packet_t pkt,
odp_crypto_generic_session_t *session)
{
EVP_CIPHER_CTX *ctx = local.cipher_ctx[session->idx];
- void *iv_ptr;
int dummy_len = 0;
int cipher_len;
uint32_t in_len = param->cipher_range.length;
uint8_t data[in_len];
int ret;
-#if ODP_DEPRECATED_API
- if (param->cipher_iv_ptr)
- iv_ptr = param->cipher_iv_ptr;
- else if (session->p.cipher_iv.data)
- iv_ptr = session->cipher.iv_data;
- else
- return ODP_CRYPTO_ALG_ERR_IV_INVALID;
-#else
- iv_ptr = param->cipher_iv_ptr;
- _ODP_ASSERT(session->p.cipher_iv_len == 0 || iv_ptr != NULL);
-#endif
-
- EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, iv_ptr);
+ EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, param->cipher_iv_ptr);
odp_packet_copy_to_mem(pkt, param->cipher_range.offset, in_len,
data);
@@ -1767,26 +1610,13 @@ odp_crypto_alg_err_t xts_decrypt(odp_packet_t pkt,
odp_crypto_generic_session_t *session)
{
EVP_CIPHER_CTX *ctx = local.cipher_ctx[session->idx];
- void *iv_ptr;
int dummy_len = 0;
int cipher_len;
uint32_t in_len = param->cipher_range.length;
uint8_t data[in_len];
int ret;
-#if ODP_DEPRECATED_API
- if (param->cipher_iv_ptr)
- iv_ptr = param->cipher_iv_ptr;
- else if (session->p.cipher_iv.data)
- iv_ptr = session->cipher.iv_data;
- else
- return ODP_CRYPTO_ALG_ERR_IV_INVALID;
-#else
- iv_ptr = param->cipher_iv_ptr;
- _ODP_ASSERT(session->p.cipher_iv_len == 0 || iv_ptr != NULL);
-#endif
-
- EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, iv_ptr);
+ EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, param->cipher_iv_ptr);
odp_packet_copy_to_mem(pkt, param->cipher_range.offset, in_len,
data);
@@ -2222,6 +2052,10 @@ odp_crypto_session_create(const odp_crypto_session_param_t *param,
/* Copy parameters */
session->p = *param;
+ session->cipher_bit_mode = 0;
+ session->auth_range_used = 1;
+ session->cipher_range_used = 1;
+
if (session->p.cipher_iv_len > EVP_MAX_IV_LENGTH) {
_ODP_DBG("Maximum IV length exceeded\n");
*status = ODP_CRYPTO_SES_ERR_CIPHER;
@@ -2234,17 +2068,6 @@ odp_crypto_session_create(const odp_crypto_session_param_t *param,
goto err;
}
-#if ODP_DEPRECATED_API
- /* Copy IV data */
- if (session->p.cipher_iv.data)
- memcpy(session->cipher.iv_data, session->p.cipher_iv.data,
- session->p.cipher_iv.length);
-
- if (session->p.auth_iv.data)
- memcpy(session->auth.iv_data, session->p.auth_iv.data,
- session->p.auth_iv.length);
-#endif
-
/* Derive order */
if (ODP_CRYPTO_OP_ENCODE == param->op)
session->do_cipher_first = param->auth_cipher_text;
@@ -2256,6 +2079,7 @@ odp_crypto_session_create(const odp_crypto_session_param_t *param,
case ODP_CIPHER_ALG_NULL:
session->cipher.func = null_crypto_routine;
session->cipher.init = null_crypto_init_routine;
+ session->cipher_range_used = 0;
rc = 0;
break;
case ODP_CIPHER_ALG_3DES_CBC:
@@ -2376,6 +2200,7 @@ odp_crypto_session_create(const odp_crypto_session_param_t *param,
case ODP_AUTH_ALG_NULL:
session->auth.func = null_crypto_routine;
session->auth.init = null_crypto_init_routine;
+ session->auth_range_used = 0;
rc = 0;
break;
case ODP_AUTH_ALG_MD5_HMAC:
@@ -2409,6 +2234,7 @@ odp_crypto_session_create(const odp_crypto_session_param_t *param,
} else {
rc = -1;
}
+ session->auth_range_used = 0;
break;
case ODP_AUTH_ALG_AES_GMAC:
if (param->auth_key.length == 16)
@@ -2430,6 +2256,7 @@ odp_crypto_session_create(const odp_crypto_session_param_t *param,
} else {
rc = -1;
}
+ session->auth_range_used = 0;
break;
case ODP_AUTH_ALG_AES_CMAC:
if (param->auth_key.length == 16)
@@ -2455,6 +2282,7 @@ odp_crypto_session_create(const odp_crypto_session_param_t *param,
} else {
rc = -1;
}
+ session->auth_range_used = 0;
break;
#endif
case ODP_AUTH_ALG_AES_EIA2:
@@ -2530,6 +2358,10 @@ odp_crypto_operation(odp_crypto_op_param_t *param,
odp_crypto_op_result_t local_result;
int rc;
+ if (((odp_crypto_generic_session_t *)(intptr_t)param->session)->p.op_type !=
+ ODP_CRYPTO_OP_TYPE_LEGACY)
+ return -1;
+
packet_param.session = param->session;
packet_param.cipher_iv_ptr = param->cipher_iv_ptr;
packet_param.auth_iv_ptr = param->auth_iv_ptr;
@@ -2778,43 +2610,6 @@ uint64_t odp_crypto_session_to_u64(odp_crypto_session_t hdl)
return (uint64_t)hdl;
}
-odp_packet_t odp_crypto_packet_from_event(odp_event_t ev)
-{
- /* This check not mandated by the API specification */
- _ODP_ASSERT(odp_event_type(ev) == ODP_EVENT_PACKET);
- _ODP_ASSERT(odp_event_subtype(ev) == ODP_EVENT_PACKET_CRYPTO);
-
- return odp_packet_from_event(ev);
-}
-
-odp_event_t odp_crypto_packet_to_event(odp_packet_t pkt)
-{
- return odp_packet_to_event(pkt);
-}
-
-static
-odp_crypto_packet_result_t *get_op_result_from_packet(odp_packet_t pkt)
-{
- odp_packet_hdr_t *hdr = packet_hdr(pkt);
-
- return &hdr->crypto_op_result;
-}
-
-int odp_crypto_result(odp_crypto_packet_result_t *result,
- odp_packet_t packet)
-{
- odp_crypto_packet_result_t *op_result;
-
- _ODP_ASSERT(odp_event_subtype(odp_packet_to_event(packet)) ==
- ODP_EVENT_PACKET_CRYPTO);
-
- op_result = get_op_result_from_packet(packet);
-
- memcpy(result, op_result, sizeof(*result));
-
- return 0;
-}
-
static int copy_data_and_metadata(odp_packet_t dst, odp_packet_t src)
{
int md_copy;
@@ -2843,6 +2638,9 @@ static odp_packet_t get_output_packet(const odp_crypto_generic_session_t *sessio
{
int rc;
+ if (odp_likely(session->p.op_type == ODP_CRYPTO_OP_TYPE_BASIC))
+ return pkt_in;
+
if (odp_likely(pkt_in == pkt_out))
return pkt_out;
@@ -2894,6 +2692,8 @@ int crypto_int(odp_packet_t pkt_in,
goto out;
}
}
+ _ODP_ASSERT(session->p.cipher_iv_len == 0 || param->cipher_iv_ptr != NULL);
+ _ODP_ASSERT(session->p.auth_iv_len == 0 || param->auth_iv_ptr != NULL);
crypto_init(session);
@@ -2909,7 +2709,7 @@ int crypto_int(odp_packet_t pkt_in,
out:
/* Fill in result */
packet_subtype_set(out_pkt, ODP_EVENT_PACKET_CRYPTO);
- op_result = get_op_result_from_packet(out_pkt);
+ op_result = &packet_hdr(out_pkt)->crypto_op_result;
op_result->cipher_status.alg_err = rc_cipher;
op_result->cipher_status.hw_err = ODP_CRYPTO_HW_ERR_NONE;
op_result->auth_status.alg_err = rc_auth;
@@ -2924,6 +2724,113 @@ out:
return 0;
}
+/*
+ * Copy the cipher range and the auth range from src to dst, shifting the
+ * destination offsets by dst_offset_shift.
+ */
+static void copy_ranges(odp_packet_t dst,
+ odp_packet_t src,
+ const odp_crypto_generic_session_t *session,
+ const odp_crypto_packet_op_param_t *param)
+{
+ odp_packet_data_range_t c_range = param->cipher_range;
+ odp_packet_data_range_t a_range = param->auth_range;
+ int32_t shift = param->dst_offset_shift;
+ int rc;
+
+ if (session->cipher_bit_mode) {
+ c_range.offset /= 8;
+ c_range.length = (c_range.length + 7) / 8;
+ }
+
+ if (session->cipher_range_used) {
+ rc = odp_packet_copy_from_pkt(dst, c_range.offset + shift,
+ src, c_range.offset,
+ c_range.length);
+ if (rc) {
+ _ODP_ERR("cipher range copying failed\n");
+ return;
+ }
+ }
+ if (session->auth_range_used) {
+ rc = odp_packet_copy_from_pkt(dst, a_range.offset + shift,
+ src, a_range.offset,
+ a_range.length);
+ if (rc) {
+ _ODP_ERR("auth range copying failed\n");
+ return;
+ }
+ }
+}
+
+static int crypto_int_oop_encode(odp_packet_t pkt_in,
+ odp_packet_t *pkt_out,
+ const odp_crypto_generic_session_t *session,
+ const odp_crypto_packet_op_param_t *param)
+{
+ odp_crypto_packet_op_param_t new_param = *param;
+ const uint32_t scale = session->cipher_bit_mode ? 8 : 1;
+
+ copy_ranges(*pkt_out, pkt_in, session, param);
+
+ new_param.cipher_range.offset += param->dst_offset_shift * scale;
+ new_param.auth_range.offset += param->dst_offset_shift;
+
+ return crypto_int(*pkt_out, pkt_out, &new_param);
+}
+
+static int crypto_int_oop_decode(odp_packet_t pkt_in,
+ odp_packet_t *pkt_out,
+ const odp_crypto_generic_session_t *session,
+ const odp_crypto_packet_op_param_t *param)
+{
+ odp_packet_t copy;
+ int rc;
+
+ copy = odp_packet_copy(pkt_in, odp_packet_pool(pkt_in));
+ if (copy == ODP_PACKET_INVALID)
+ return -1;
+
+ rc = crypto_int(copy, &copy, param);
+ if (rc < 0) {
+ odp_packet_free(copy);
+ return rc;
+ }
+
+ copy_ranges(*pkt_out, copy, session, param);
+
+ packet_subtype_set(*pkt_out, ODP_EVENT_PACKET_CRYPTO);
+ packet_hdr(*pkt_out)->crypto_op_result = packet_hdr(copy)->crypto_op_result;
+ odp_packet_free(copy);
+
+ return 0;
+}
+
+/*
+ * Slow out-of-place operation, implemented with data copies and the
+ * in-place operation
+ */
+static int crypto_int_oop(odp_packet_t pkt_in,
+ odp_packet_t *pkt_out,
+ const odp_crypto_packet_op_param_t *param)
+{
+ odp_crypto_generic_session_t *session;
+ int rc;
+
+ session = (odp_crypto_generic_session_t *)(intptr_t)param->session;
+
+ if (session->p.op == ODP_CRYPTO_OP_ENCODE)
+ rc = crypto_int_oop_encode(pkt_in, pkt_out, session, param);
+ else
+ rc = crypto_int_oop_decode(pkt_in, pkt_out, session, param);
+ if (rc)
+ return rc;
+
+ if (session->p.op_mode == ODP_CRYPTO_ASYNC)
+ packet_hdr(*pkt_out)->crypto_op_result.pkt_in = pkt_in;
+
+ return 0;
+}
+
int odp_crypto_op(const odp_packet_t pkt_in[],
odp_packet_t pkt_out[],
const odp_crypto_packet_op_param_t param[],
@@ -2936,7 +2843,10 @@ int odp_crypto_op(const odp_packet_t pkt_in[],
session = (odp_crypto_generic_session_t *)(intptr_t)param[i].session;
_ODP_ASSERT(ODP_CRYPTO_SYNC == session->p.op_mode);
- rc = crypto_int(pkt_in[i], &pkt_out[i], &param[i]);
+ if (odp_unlikely(session->p.op_type == ODP_CRYPTO_OP_TYPE_OOP))
+ rc = crypto_int_oop(pkt_in[i], &pkt_out[i], &param[i]);
+ else
+ rc = crypto_int(pkt_in[i], &pkt_out[i], &param[i]);
if (rc < 0)
break;
}
@@ -2959,8 +2869,13 @@ int odp_crypto_op_enq(const odp_packet_t pkt_in[],
_ODP_ASSERT(ODP_CRYPTO_ASYNC == session->p.op_mode);
_ODP_ASSERT(ODP_QUEUE_INVALID != session->p.compl_queue);
- pkt = pkt_out[i];
- rc = crypto_int(pkt_in[i], &pkt, &param[i]);
+ if (session->p.op_type != ODP_CRYPTO_OP_TYPE_BASIC)
+ pkt = pkt_out[i];
+
+ if (odp_unlikely(session->p.op_type == ODP_CRYPTO_OP_TYPE_OOP))
+ rc = crypto_int_oop(pkt_in[i], &pkt, &param[i]);
+ else
+ rc = crypto_int(pkt_in[i], &pkt, &param[i]);
if (rc < 0)
break;
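
Beyond the IV-handling cleanup, the OpenSSL backend gains support for the ODP_CRYPTO_OP_TYPE_OOP operation type: copy_ranges() and crypto_int_oop() implement it by copying the cipher/auth ranges between the input and output packets and reusing the in-place path. A rough usage sketch, assuming a session already created with op_type = ODP_CRYPTO_OP_TYPE_OOP and an output packet long enough to hold the shifted ranges (in, out, ses, iv and the length variables are placeholders):

    odp_crypto_packet_op_param_t op;

    memset(&op, 0, sizeof(op));
    op.session             = ses;
    op.cipher_iv_ptr       = iv;
    op.cipher_range.offset = in_hdr_len;    /* range in the *input* packet */
    op.cipher_range.length = payload_len;
    op.auth_range          = op.cipher_range;
    /* How far the ranges move when written into the output packet */
    op.dst_offset_shift    = out_hdr_len - in_hdr_len;

    if (odp_crypto_op(&in, &out, &op, 1) != 1)
        return -1;    /* handle error */

As the comment in crypto_int_oop() says, this is a slow path: the decode case even takes a full packet copy so that the ICV check can run before anything is written to the output packet.
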
diff --git a/platform/linux-generic/odp_event.c b/platform/linux-generic/odp_event.c
index c4e0f2c9d..edf77e2dc 100644
--- a/platform/linux-generic/odp_event.c
+++ b/platform/linux-generic/odp_event.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2015-2018, Linaro Limited
- * Copyright (c) 2020-2022, Nokia
+ * Copyright (c) 2020-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -18,6 +18,7 @@
#include <odp_debug_internal.h>
#include <odp_packet_internal.h>
#include <odp_event_internal.h>
+#include <odp_event_validation_internal.h>
#include <odp_event_vector_internal.h>
/* Inlined API functions */
@@ -41,13 +42,15 @@ _odp_event_inline_offset ODP_ALIGNED_CACHE = {
#include <odp/visibility_end.h>
-void odp_event_free(odp_event_t event)
+static inline void event_free(odp_event_t event, _odp_ev_id_t id)
{
switch (odp_event_type(event)) {
case ODP_EVENT_BUFFER:
+ _odp_buffer_validate(odp_buffer_from_event(event), id);
odp_buffer_free(odp_buffer_from_event(event));
break;
case ODP_EVENT_PACKET:
+ _odp_packet_validate(odp_packet_from_event(event), id);
odp_packet_free(odp_packet_from_event(event));
break;
case ODP_EVENT_PACKET_VECTOR:
@@ -75,17 +78,21 @@ void odp_event_free(odp_event_t event)
}
}
-void odp_event_free_multi(const odp_event_t event[], int num)
+void odp_event_free(odp_event_t event)
{
- int i;
+ event_free(event, _ODP_EV_EVENT_FREE);
+}
- for (i = 0; i < num; i++)
- odp_event_free(event[i]);
+void odp_event_free_multi(const odp_event_t event[], int num)
+{
+ for (int i = 0; i < num; i++)
+ event_free(event[i], _ODP_EV_EVENT_FREE_MULTI);
}
void odp_event_free_sp(const odp_event_t event[], int num)
{
- odp_event_free_multi(event, num);
+ for (int i = 0; i < num; i++)
+ event_free(event[i], _ODP_EV_EVENT_FREE_SP);
}
uint64_t odp_event_to_u64(odp_event_t hdl)
@@ -103,9 +110,9 @@ int odp_event_is_valid(odp_event_t event)
switch (odp_event_type(event)) {
case ODP_EVENT_BUFFER:
- /* Fall through */
+ return !_odp_buffer_validate(odp_buffer_from_event(event), _ODP_EV_EVENT_IS_VALID);
case ODP_EVENT_PACKET:
- /* Fall through */
+ return !_odp_packet_validate(odp_packet_from_event(event), _ODP_EV_EVENT_IS_VALID);
case ODP_EVENT_TIMEOUT:
/* Fall through */
#if ODP_DEPRECATED_API
diff --git a/platform/linux-generic/odp_event_validation.c b/platform/linux-generic/odp_event_validation.c
new file mode 100644
index 000000000..c2d430f1a
--- /dev/null
+++ b/platform/linux-generic/odp_event_validation.c
@@ -0,0 +1,260 @@
+/* Copyright (c) 2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/atomic.h>
+#include <odp/api/buffer.h>
+#include <odp/api/debug.h>
+#include <odp/api/event.h>
+#include <odp/api/hints.h>
+#include <odp/api/packet.h>
+#include <odp/api/shared_memory.h>
+
+#include <odp_buffer_internal.h>
+#include <odp_debug_internal.h>
+#include <odp_event_internal.h>
+#include <odp_event_validation_internal.h>
+#include <odp_global_data.h>
+#include <odp_init_internal.h>
+#include <odp_libconfig_internal.h>
+#include <odp_macros_internal.h>
+#include <odp_print_internal.h>
+
+#include <inttypes.h>
+#include <string.h>
+
+#define EVENT_VALIDATION_NONE 0
+#define EVENT_VALIDATION_WARN 1
+#define EVENT_VALIDATION_ABORT 2
+
+#define EVENT_DATA_PRINT_MAX_LEN 128
+
+typedef struct {
+ odp_atomic_u64_t err_count[_ODP_EV_MAX];
+ odp_shm_t shm;
+
+} event_validation_global_t;
+
+typedef struct {
+ const char *str;
+} _odp_ev_info_t;
+
+static event_validation_global_t *_odp_ev_glb;
+
+#if _ODP_EVENT_VALIDATION
+
+/* Table for mapping function IDs to API function names */
+static const _odp_ev_info_t ev_info_tbl[] = {
+ [_ODP_EV_BUFFER_FREE] = {.str = "odp_buffer_free()"},
+ [_ODP_EV_BUFFER_FREE_MULTI] = {.str = "odp_buffer_free_multi()"},
+ [_ODP_EV_BUFFER_IS_VALID] = {.str = "odp_buffer_is_valid()"},
+ [_ODP_EV_EVENT_FREE] = {.str = "odp_event_free()"},
+ [_ODP_EV_EVENT_FREE_MULTI] = {.str = "odp_event_free_multi()"},
+	[_ODP_EV_EVENT_FREE_SP] = {.str = "odp_event_free_sp()"},
+ [_ODP_EV_EVENT_IS_VALID] = {.str = "odp_event_is_valid()"},
+ [_ODP_EV_PACKET_FREE] = {.str = "odp_packet_free()"},
+ [_ODP_EV_PACKET_FREE_MULTI] = {.str = "odp_packet_free_multi()"},
+ [_ODP_EV_PACKET_FREE_SP] = {.str = "odp_packet_free_sp()"},
+ [_ODP_EV_PACKET_IS_VALID] = {.str = "odp_packet_is_valid()"},
+ [_ODP_EV_QUEUE_ENQ] = {.str = "odp_queue_enq()"},
+ [_ODP_EV_QUEUE_ENQ_MULTI] = {.str = "odp_queue_enq_multi()"}
+};
+
+ODP_STATIC_ASSERT(_ODP_ARRAY_SIZE(ev_info_tbl) == _ODP_EV_MAX, "ev_info_tbl missing entries");
+
+static void print_event_data(odp_event_t event, odp_event_type_t type)
+{
+ const char *type_str;
+ const uint32_t bytes_per_row = 16;
+ uint32_t byte_len;
+ int num_rows, max_len, n;
+ int len = 0;
+ uint8_t *data;
+
+ if (type == ODP_EVENT_PACKET) {
+ odp_packet_t pkt = odp_packet_from_event(event);
+
+ data = odp_packet_data(pkt);
+ byte_len = odp_packet_seg_len(pkt);
+ type_str = "Packet";
+ } else {
+ odp_buffer_t buf = odp_buffer_from_event(event);
+
+ data = odp_buffer_addr(buf);
+ byte_len = odp_buffer_size(buf);
+ type_str = "Buffer";
+ }
+
+ if (byte_len > EVENT_DATA_PRINT_MAX_LEN)
+ byte_len = EVENT_DATA_PRINT_MAX_LEN;
+
+ num_rows = (byte_len + bytes_per_row - 1) / bytes_per_row;
+ max_len = 256 + (3 * byte_len) + (3 * num_rows);
+ n = max_len - 1;
+
+ char str[max_len];
+
+ len += _odp_snprint(&str[len], n - len, "%s %p data %p:\n", type_str, event, data);
+ while (byte_len) {
+ uint32_t row_len = byte_len > bytes_per_row ? bytes_per_row : byte_len;
+
+ len += _odp_snprint(&str[len], n - len, " ");
+
+ for (uint32_t i = 0; i < row_len; i++)
+ len += _odp_snprint(&str[len], n - len, " %02x", data[i]);
+
+ len += _odp_snprint(&str[len], n - len, "\n");
+
+ byte_len -= row_len;
+ data += row_len;
+ }
+
+ _ODP_PRINT("%s\n", str);
+}
+
+static inline int validate_event_endmark(odp_event_t event, _odp_ev_id_t id, odp_event_type_t type)
+{
+ uint64_t err_count;
+ uint64_t *endmark_ptr = _odp_event_endmark_get_ptr(event);
+
+ if (odp_likely(*endmark_ptr == _ODP_EV_ENDMARK_VAL))
+ return 0;
+
+ err_count = odp_atomic_fetch_inc_u64(&_odp_ev_glb->err_count[id]) + 1;
+
+ _ODP_ERR("Event %p endmark mismatch in %s: endmark=0x%" PRIx64 " (expected 0x%" PRIx64 ") "
+ "err_count=%" PRIu64 "\n", event, ev_info_tbl[id].str, *endmark_ptr,
+ _ODP_EV_ENDMARK_VAL, err_count);
+
+ print_event_data(event, type);
+
+ if (_ODP_EVENT_VALIDATION == EVENT_VALIDATION_ABORT)
+ _ODP_ABORT("Abort due to event %p endmark mismatch\n", event);
+
+ /* Fix endmark value */
+ _odp_event_endmark_set(event);
+
+ return -1;
+}
+
+static inline int buffer_validate(odp_buffer_t buf, _odp_ev_id_t id)
+{
+ return validate_event_endmark(odp_buffer_to_event(buf), id, ODP_EVENT_BUFFER);
+}
+
+static inline int packet_validate(odp_packet_t pkt, _odp_ev_id_t id)
+{
+ return validate_event_endmark(odp_packet_to_event(pkt), id, ODP_EVENT_PACKET);
+}
+
+static inline int event_validate(odp_event_t event, int id)
+{
+ if (odp_event_type(event) == ODP_EVENT_BUFFER)
+ return buffer_validate(odp_buffer_from_event(event), id);
+ if (odp_event_type(event) == ODP_EVENT_PACKET)
+ return packet_validate(odp_packet_from_event(event), id);
+ return 0;
+}
+
+/* Enable usage from API inline files */
+#include <odp/visibility_begin.h>
+
+int _odp_buffer_validate(odp_buffer_t buf, _odp_ev_id_t id)
+{
+ return buffer_validate(buf, id);
+}
+
+int _odp_buffer_validate_multi(const odp_buffer_t buf[], int num,
+ _odp_ev_id_t id)
+{
+ for (int i = 0; i < num; i++) {
+ if (odp_unlikely(buffer_validate(buf[i], id)))
+ return -1;
+ }
+ return 0;
+}
+
+int _odp_packet_validate(odp_packet_t pkt, _odp_ev_id_t id)
+{
+ return packet_validate(pkt, id);
+}
+
+int _odp_packet_validate_multi(const odp_packet_t pkt[], int num,
+ _odp_ev_id_t id)
+{
+ for (int i = 0; i < num; i++) {
+ if (odp_unlikely(packet_validate(pkt[i], id)))
+ return -1;
+ }
+ return 0;
+}
+
+int _odp_event_validate(odp_event_t event, _odp_ev_id_t id)
+{
+ return event_validate(event, id);
+}
+
+int _odp_event_validate_multi(const odp_event_t event[], int num,
+ _odp_ev_id_t id)
+{
+ for (int i = 0; i < num; i++) {
+ if (odp_unlikely(event_validate(event[i], id)))
+ return -1;
+ }
+ return 0;
+}
+
+#include <odp/visibility_end.h>
+
+#endif /* _ODP_EVENT_VALIDATION */
+
+int _odp_event_validation_init_global(void)
+{
+ odp_shm_t shm;
+
+ _ODP_PRINT("\nEvent validation mode: %s\n\n",
+ _ODP_EVENT_VALIDATION == EVENT_VALIDATION_NONE ? "none" :
+ _ODP_EVENT_VALIDATION == EVENT_VALIDATION_WARN ? "warn" : "abort");
+
+ if (_ODP_EVENT_VALIDATION == EVENT_VALIDATION_NONE)
+ return 0;
+
+ shm = odp_shm_reserve("_odp_event_validation_global",
+ sizeof(event_validation_global_t),
+ ODP_CACHE_LINE_SIZE, ODP_SHM_EXPORT);
+ if (shm == ODP_SHM_INVALID)
+ return -1;
+
+ _odp_ev_glb = odp_shm_addr(shm);
+ if (_odp_ev_glb == NULL)
+ return -1;
+
+ memset(_odp_ev_glb, 0, sizeof(event_validation_global_t));
+ _odp_ev_glb->shm = shm;
+
+ for (int i = 0; i < _ODP_EV_MAX; i++)
+ odp_atomic_init_u64(&_odp_ev_glb->err_count[i], 0);
+
+ return 0;
+}
+
+int _odp_event_validation_term_global(void)
+{
+ int ret;
+
+ if (_ODP_EVENT_VALIDATION == EVENT_VALIDATION_NONE)
+ return 0;
+
+ if (_odp_ev_glb == NULL)
+ return 0;
+
+ ret = odp_shm_free(_odp_ev_glb->shm);
+ if (ret) {
+ _ODP_ERR("SHM free failed: %d\n", ret);
+ return -1;
+ }
+
+ return 0;
+}
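
The endmark check fires when a buffer or packet event is freed (or passed to odp_event_is_valid()/odp_buffer_is_valid()/odp_packet_is_valid()) after something has written past the end of its data area. The sketch below shows the kind of bug it is meant to catch; it assumes validation was enabled at configure time (the new m4/odp_event_validation.m4 in the diffstat adds the switch) and that the endmark sits right after the user-visible area, which the pool trailer_size changes elsewhere in this merge suggest:

    /* Deliberate off-by-one overrun, for illustration only; pool is a placeholder */
    odp_buffer_t buf  = odp_buffer_alloc(pool);
    uint8_t *data     = odp_buffer_addr(buf);
    uint32_t size     = odp_buffer_size(buf);

    data[size] = 0xab;    /* clobbers the endmark behind the buffer */

    /* Warn mode: the free logs "endmark mismatch", bumps the per-API error
     * counter and repairs the endmark.
     * Abort mode: _ODP_ABORT() terminates the process instead. */
    odp_buffer_free(buf);
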
diff --git a/platform/linux-generic/odp_init.c b/platform/linux-generic/odp_init.c
index 284b3e566..bd27641aa 100644
--- a/platform/linux-generic/odp_init.c
+++ b/platform/linux-generic/odp_init.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2013-2018, Linaro Limited
- * Copyright (c) 2021, Nokia
+ * Copyright (c) 2021-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -35,6 +35,7 @@ enum init_stage {
HASH_INIT,
THREAD_INIT,
POOL_INIT,
+ EVENT_VALIDATION_INIT,
STASH_INIT,
QUEUE_INIT,
SCHED_INIT,
@@ -242,6 +243,13 @@ static int term_global(enum init_stage stage)
}
/* Fall through */
+ case EVENT_VALIDATION_INIT:
+ if (_odp_event_validation_term_global()) {
+ _ODP_ERR("ODP event validation term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
case POOL_INIT:
if (_odp_pool_term_global()) {
_ODP_ERR("ODP buffer pool term failed.\n");
@@ -412,6 +420,12 @@ int odp_init_global(odp_instance_t *instance,
}
stage = POOL_INIT;
+ if (_odp_event_validation_init_global()) {
+ _ODP_ERR("ODP event validation init failed.\n");
+ goto init_failed;
+ }
+ stage = EVENT_VALIDATION_INIT;
+
if (_odp_stash_init_global()) {
_ODP_ERR("ODP stash init failed.\n");
goto init_failed;
diff --git a/platform/linux-generic/odp_ipsec_sad.c b/platform/linux-generic/odp_ipsec_sad.c
index 1b3a90e6a..4cf265694 100644
--- a/platform/linux-generic/odp_ipsec_sad.c
+++ b/platform/linux-generic/odp_ipsec_sad.c
@@ -1,11 +1,12 @@
/* Copyright (c) 2017-2018, Linaro Limited
- * Copyright (c) 2018-2022, Nokia
+ * Copyright (c) 2018-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <odp/api/atomic.h>
+#include <odp/api/crypto.h>
#include <odp/api/ipsec.h>
#include <odp/api/random.h>
#include <odp/api/shared_memory.h>
@@ -18,7 +19,7 @@
#include <odp_debug_internal.h>
#include <odp_ipsec_internal.h>
#include <odp_macros_internal.h>
-#include <odp_ring_mpmc_internal.h>
+#include <odp_ring_mpmc_u32_internal.h>
#include <odp_global_data.h>
#include <string.h>
@@ -102,7 +103,7 @@ typedef struct ODP_ALIGNED_CACHE ipsec_thread_local_s {
typedef struct ipsec_sa_table_t {
ipsec_sa_t ipsec_sa[CONFIG_IPSEC_MAX_NUM_SA];
struct ODP_ALIGNED_CACHE {
- ring_mpmc_t ipv4_id_ring;
+ ring_mpmc_u32_t ipv4_id_ring;
uint32_t ipv4_id_data[IPV4_ID_RING_SIZE] ODP_ALIGNED_CACHE;
} hot;
struct {
@@ -192,7 +193,7 @@ int _odp_ipsec_sad_init_global(void)
ipsec_sa_tbl->shm = shm;
ipsec_sa_tbl->max_num_sa = max_num_sa;
- ring_mpmc_init(&ipsec_sa_tbl->hot.ipv4_id_ring);
+ ring_mpmc_u32_init(&ipsec_sa_tbl->hot.ipv4_id_ring);
for (i = 0; i < thread_count_max; i++) {
/*
* Make the current ID block fully used, forcing allocation
@@ -210,11 +211,11 @@ int _odp_ipsec_sad_init_global(void)
for (i = 0; i < IPV4_ID_RING_SIZE - 1; i++) {
uint32_t data = i * IPV4_ID_BLOCK_SIZE;
- ring_mpmc_enq_multi(&ipsec_sa_tbl->hot.ipv4_id_ring,
- ipsec_sa_tbl->hot.ipv4_id_data,
- IPV4_ID_RING_MASK,
- &data,
- 1);
+ ring_mpmc_u32_enq_multi(&ipsec_sa_tbl->hot.ipv4_id_ring,
+ ipsec_sa_tbl->hot.ipv4_id_data,
+ IPV4_ID_RING_MASK,
+ &data,
+ 1);
}
for (i = 0; i < ipsec_sa_tbl->max_num_sa; i++) {
@@ -1118,17 +1119,17 @@ uint16_t _odp_ipsec_sa_alloc_ipv4_id(ipsec_sa_t *ipsec_sa)
tl->first_ipv4_id + IPV4_ID_BLOCK_SIZE)) {
/* Return used ID block to the ring */
data = tl->first_ipv4_id;
- ring_mpmc_enq_multi(&ipsec_sa_tbl->hot.ipv4_id_ring,
- ipsec_sa_tbl->hot.ipv4_id_data,
- IPV4_ID_RING_MASK,
- &data,
- 1);
+ ring_mpmc_u32_enq_multi(&ipsec_sa_tbl->hot.ipv4_id_ring,
+ ipsec_sa_tbl->hot.ipv4_id_data,
+ IPV4_ID_RING_MASK,
+ &data,
+ 1);
/* Get new ID block */
- ring_mpmc_deq_multi(&ipsec_sa_tbl->hot.ipv4_id_ring,
- ipsec_sa_tbl->hot.ipv4_id_data,
- IPV4_ID_RING_MASK,
- &data,
- 1);
+ ring_mpmc_u32_deq_multi(&ipsec_sa_tbl->hot.ipv4_id_ring,
+ ipsec_sa_tbl->hot.ipv4_id_data,
+ IPV4_ID_RING_MASK,
+ &data,
+ 1);
tl->first_ipv4_id = data;
tl->next_ipv4_id = data;
}
diff --git a/platform/linux-generic/odp_packet.c b/platform/linux-generic/odp_packet.c
index 212e48de0..639a74e0c 100644
--- a/platform/linux-generic/odp_packet.c
+++ b/platform/linux-generic/odp_packet.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2013-2018, Linaro Limited
- * Copyright (c) 2019-2022, Nokia
+ * Copyright (c) 2019-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -21,6 +21,7 @@
#include <odp_debug_internal.h>
#include <odp_errno_define.h>
#include <odp_event_internal.h>
+#include <odp_event_validation_internal.h>
#include <odp_macros_internal.h>
#include <odp_packet_internal.h>
#include <odp_packet_io_internal.h>
@@ -69,6 +70,7 @@ const _odp_packet_inline_offset_t _odp_packet_inline ODP_ALIGNED_CACHE = {
.subtype = offsetof(odp_packet_hdr_t, subtype),
.cls_mark = offsetof(odp_packet_hdr_t, cls_mark),
.ipsec_ctx = offsetof(odp_packet_hdr_t, ipsec_ctx),
+ .crypto_op = offsetof(odp_packet_hdr_t, crypto_op_result),
};
#include <odp/visibility_end.h>
@@ -695,6 +697,8 @@ void odp_packet_free(odp_packet_t pkt)
odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
int num_seg = pkt_hdr->seg_count;
+ _odp_packet_validate(pkt, _ODP_EV_PACKET_FREE);
+
_ODP_ASSERT(segment_ref(pkt_hdr) > 0);
if (odp_likely(num_seg == 1))
@@ -703,12 +707,14 @@ void odp_packet_free(odp_packet_t pkt)
free_all_segments(pkt_hdr, num_seg);
}
-void odp_packet_free_multi(const odp_packet_t pkt[], int num)
+static inline void packet_free_multi_ev(const odp_packet_t pkt[], int num, _odp_ev_id_t id)
{
odp_packet_hdr_t *pkt_hdrs[num];
int i;
int num_freed = 0;
+ _odp_packet_validate_multi(pkt, num, id);
+
for (i = 0; i < num; i++) {
odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt[i]);
int num_seg = pkt_hdr->seg_count;
@@ -728,9 +734,14 @@ void odp_packet_free_multi(const odp_packet_t pkt[], int num)
packet_free_multi(pkt_hdrs, num - num_freed);
}
+void odp_packet_free_multi(const odp_packet_t pkt[], int num)
+{
+ packet_free_multi_ev(pkt, num, _ODP_EV_PACKET_FREE_MULTI);
+}
+
void odp_packet_free_sp(const odp_packet_t pkt[], int num)
{
- odp_packet_free_multi(pkt, num);
+ packet_free_multi_ev(pkt, num, _ODP_EV_PACKET_FREE_SP);
}
int odp_packet_reset(odp_packet_t pkt, uint32_t len)
@@ -1587,6 +1598,9 @@ int odp_packet_is_valid(odp_packet_t pkt)
if (odp_event_type(ev) != ODP_EVENT_PACKET)
return 0;
+ if (odp_unlikely(_odp_packet_validate(pkt, _ODP_EV_PACKET_IS_VALID)))
+ return 0;
+
switch (odp_event_subtype(ev)) {
case ODP_EVENT_PACKET_BASIC:
/* Fall through */
@@ -2343,6 +2357,7 @@ odp_packet_t odp_packet_reassemble(odp_pool_t pool_hdl, odp_packet_buf_t pkt_buf
tailroom = pool->ext_param.pkt.buf_size - sizeof(odp_packet_hdr_t);
tailroom -= pool->ext_param.pkt.app_header_size;
tailroom -= odp_packet_buf_data_len(pkt_buf[num - 1]);
+ tailroom -= pool->trailer_size;
pkt_hdr->seg_count = num;
pkt_hdr->frame_len = data_len;
diff --git a/platform/linux-generic/odp_packet_flags.c b/platform/linux-generic/odp_packet_flags.c
index ef542a2cd..777da12ae 100644
--- a/platform/linux-generic/odp_packet_flags.c
+++ b/platform/linux-generic/odp_packet_flags.c
@@ -72,10 +72,12 @@ void odp_packet_has_jumbo_set(odp_packet_t pkt, int val)
void odp_packet_has_vlan_set(odp_packet_t pkt, int val)
{
setflag(pkt, input_flags.vlan, val);
+ setflag(pkt, input_flags.vlan_qinq, 0);
}
void odp_packet_has_vlan_qinq_set(odp_packet_t pkt, int val)
{
+ setflag(pkt, input_flags.vlan, val);
setflag(pkt, input_flags.vlan_qinq, val);
}
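
For reference, a minimal usage sketch (illustrative only; assumes odp.h and assert.h are included and pkt is a valid odp_packet_t) of the flag coupling introduced above: setting the QinQ flag also raises the plain VLAN flag, while setting the plain VLAN flag clears QinQ.

	odp_packet_has_vlan_qinq_set(pkt, 1);
	assert(odp_packet_has_vlan(pkt));       /* QinQ implies the outer VLAN flag */
	assert(odp_packet_has_vlan_qinq(pkt));

	odp_packet_has_vlan_set(pkt, 1);
	assert(odp_packet_has_vlan(pkt));
	assert(!odp_packet_has_vlan_qinq(pkt)); /* plain VLAN clears the QinQ flag */
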
diff --git a/platform/linux-generic/odp_packet_io.c b/platform/linux-generic/odp_packet_io.c
index 22b6bc916..5a6c0f460 100644
--- a/platform/linux-generic/odp_packet_io.c
+++ b/platform/linux-generic/odp_packet_io.c
@@ -1613,16 +1613,6 @@ odp_time_t odp_pktio_time(odp_pktio_t hdl, odp_time_t *global_ts)
return ts;
}
-uint64_t ODP_DEPRECATE(odp_pktin_ts_res)(odp_pktio_t hdl)
-{
- return odp_pktio_ts_res(hdl);
-}
-
-odp_time_t ODP_DEPRECATE(odp_pktin_ts_from_ns)(odp_pktio_t hdl, uint64_t ns)
-{
- return odp_pktio_ts_from_ns(hdl, ns);
-}
-
void odp_pktio_print(odp_pktio_t hdl)
{
pktio_entry_t *entry;
@@ -2166,8 +2156,7 @@ void odp_pktio_extra_stats_print(odp_pktio_t pktio)
_ODP_PRINT("\n");
}
-int odp_pktin_queue_config(odp_pktio_t pktio,
- const odp_pktin_queue_param_t *param)
+int odp_pktin_queue_config(odp_pktio_t pktio, const odp_pktin_queue_param_t *param)
{
pktio_entry_t *entry;
odp_pktin_mode_t mode;
@@ -2175,7 +2164,7 @@ int odp_pktin_queue_config(odp_pktio_t pktio,
uint32_t num_queues, i;
int rc;
odp_queue_t queue;
- odp_pktin_queue_param_t default_param;
+ odp_pktin_queue_param_t default_param, local_param;
if (param == NULL) {
odp_pktin_queue_param_init(&default_param);
@@ -2204,7 +2193,19 @@ int odp_pktin_queue_config(odp_pktio_t pktio,
return -1;
}
- num_queues = param->classifier_enable ? 1 : param->num_queues;
+ if (param->classifier_enable) {
+ num_queues = 1;
+
+ if (param->num_queues != num_queues) {
+ /* When classifier is enabled, ensure that only one input queue will be
+			 * configured by the driver. */
+ memcpy(&local_param, param, sizeof(odp_pktin_queue_param_t));
+ local_param.num_queues = num_queues;
+ param = &local_param;
+ }
+ } else {
+ num_queues = param->num_queues;
+ }
rc = odp_pktio_capability(pktio, &capa);
if (rc) {
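
From the application's point of view, the change above means that a request for multiple input queues is silently reduced to one whenever the classifier is enabled; the driver only ever sees the adjusted copy. A minimal sketch, assuming pktio is a valid handle whose input mode supports classification:

	odp_pktin_queue_param_t pktin_param;

	odp_pktin_queue_param_init(&pktin_param);
	pktin_param.classifier_enable = 1;
	pktin_param.num_queues = 4;  /* driver is handed a copy with num_queues = 1 */

	if (odp_pktin_queue_config(pktio, &pktin_param))
		/* handle error */;
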
diff --git a/platform/linux-generic/odp_pool.c b/platform/linux-generic/odp_pool.c
index f414b0626..59c007ee2 100644
--- a/platform/linux-generic/odp_pool.c
+++ b/platform/linux-generic/odp_pool.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2013-2018, Linaro Limited
- * Copyright (c) 2019-2022, Nokia
+ * Copyright (c) 2019-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -7,6 +7,7 @@
#include <odp/api/align.h>
#include <odp/api/atomic.h>
+#include <odp/api/hints.h>
#include <odp/api/pool.h>
#include <odp/api/shared_memory.h>
#include <odp/api/system_info.h>
@@ -21,6 +22,8 @@
#include <odp_packet_internal.h>
#include <odp_config_internal.h>
#include <odp_debug_internal.h>
+#include <odp_event_internal.h>
+#include <odp_event_validation_internal.h>
#include <odp_macros_internal.h>
#include <odp_ring_ptr_internal.h>
#include <odp_global_data.h>
@@ -77,6 +80,7 @@ const _odp_pool_inline_offset_t _odp_pool_inline ODP_ALIGNED_CACHE = {
.index = offsetof(pool_t, pool_idx),
.seg_len = offsetof(pool_t, seg_len),
.uarea_size = offsetof(pool_t, param_uarea_size),
+ .trailer_size = offsetof(pool_t, trailer_size),
.ext_head_offset = offsetof(pool_t, ext_head_offset),
.ext_pkt_buf_size = offsetof(pool_t, ext_param.pkt.buf_size)
};
@@ -472,6 +476,7 @@ static void init_event_hdr(pool_t *pool, _odp_event_hdr_t *event_hdr, uint32_t e
if (type == ODP_POOL_BUFFER || type == ODP_POOL_PACKET) {
event_hdr->base_data = data_ptr;
event_hdr->buf_end = data_ptr + pool->seg_len + pool->tailroom;
+ _odp_event_endmark_set(_odp_event_from_hdr(event_hdr));
}
if (type == ODP_POOL_BUFFER) {
@@ -697,7 +702,7 @@ odp_pool_t _odp_pool_create(const char *name, const odp_pool_param_t *params,
uint32_t uarea_size, headroom, tailroom;
odp_shm_t shm;
uint32_t seg_len, align, num, hdr_size, block_size;
- uint32_t max_len, cache_size;
+ uint32_t max_len, cache_size, trailer_size;
uint32_t ring_size;
odp_pool_type_t type = params->type;
uint32_t shmflags = 0;
@@ -743,6 +748,7 @@ odp_pool_t _odp_pool_create(const char *name, const odp_pool_param_t *params,
tailroom = 0;
seg_len = 0;
max_len = 0;
+ trailer_size = 0;
uarea_size = 0;
cache_size = 0;
@@ -752,6 +758,7 @@ odp_pool_t _odp_pool_create(const char *name, const odp_pool_param_t *params,
seg_len = params->buf.size;
uarea_size = params->buf.uarea_size;
cache_size = params->buf.cache_size;
+ trailer_size = _ODP_EV_ENDMARK_SIZE;
break;
case ODP_POOL_PACKET:
@@ -763,6 +770,7 @@ odp_pool_t _odp_pool_create(const char *name, const odp_pool_param_t *params,
num = params->pkt.num;
seg_len = CONFIG_PACKET_MAX_SEG_LEN;
max_len = _odp_pool_glb->config.pkt_max_len;
+ trailer_size = _ODP_EV_ENDMARK_SIZE;
if (params->pkt.len &&
params->pkt.len < CONFIG_PACKET_MAX_SEG_LEN)
@@ -840,7 +848,7 @@ odp_pool_t _odp_pool_create(const char *name, const odp_pool_param_t *params,
uint32_t adj_size;
hdr_size = _ODP_ROUNDUP_CACHE_LINE(sizeof(odp_packet_hdr_t));
- block_size = hdr_size + align + headroom + seg_len + tailroom;
+ block_size = hdr_size + align + headroom + seg_len + tailroom + trailer_size;
adj_size = block_size;
if (pool->mem_src_ops && pool->mem_src_ops->adjust_size) {
@@ -871,7 +879,7 @@ odp_pool_t _odp_pool_create(const char *name, const odp_pool_param_t *params,
else
hdr_size = _ODP_ROUNDUP_CACHE_LINE(sizeof(odp_event_vector_hdr_t));
- block_size = _ODP_ROUNDUP_CACHE_LINE(hdr_size + align_pad + seg_len);
+ block_size = _ODP_ROUNDUP_CACHE_LINE(hdr_size + align_pad + seg_len + trailer_size);
}
/* Allocate extra memory for skipping packet buffers which cross huge
@@ -894,6 +902,7 @@ odp_pool_t _odp_pool_create(const char *name, const odp_pool_param_t *params,
pool->align = align;
pool->headroom = headroom;
pool->seg_len = seg_len;
+ pool->trailer_size = trailer_size;
pool->max_seg_len = headroom + seg_len + tailroom;
pool->max_len = max_len;
pool->tailroom = tailroom;
@@ -1419,11 +1428,15 @@ int odp_buffer_alloc_multi(odp_pool_t pool_hdl, odp_buffer_t buf[], int num)
void odp_buffer_free(odp_buffer_t buf)
{
+ _odp_buffer_validate(buf, _ODP_EV_BUFFER_FREE);
+
_odp_event_free_multi((_odp_event_hdr_t **)&buf, 1);
}
void odp_buffer_free_multi(const odp_buffer_t buf[], int num)
{
+ _odp_buffer_validate_multi(buf, num, _ODP_EV_BUFFER_FREE_MULTI);
+
_odp_event_free_multi((_odp_event_hdr_t **)(uintptr_t)buf, num);
}
@@ -1530,6 +1543,7 @@ void odp_pool_print(odp_pool_t pool_hdl)
_ODP_PRINT(" burst size %u\n", pool->burst_size);
_ODP_PRINT(" mem src %s\n",
pool->mem_src_ops ? pool->mem_src_ops->name : "(none)");
+ _ODP_PRINT(" event valid. %d\n", _ODP_EVENT_VALIDATION);
_ODP_PRINT("\n");
}
@@ -1727,6 +1741,9 @@ int odp_buffer_is_valid(odp_buffer_t buf)
if (odp_event_type(odp_buffer_to_event(buf)) != ODP_EVENT_BUFFER)
return 0;
+ if (odp_unlikely(_odp_buffer_validate(buf, _ODP_EV_BUFFER_IS_VALID)))
+ return 0;
+
return 1;
}
@@ -1753,7 +1770,7 @@ int odp_pool_ext_capability(odp_pool_type_t type, odp_pool_ext_capability_t *cap
capa->pkt.max_num_buf = _odp_pool_glb->config.pkt_max_num;
capa->pkt.max_buf_size = MAX_SIZE;
capa->pkt.odp_header_size = sizeof(odp_packet_hdr_t);
- capa->pkt.odp_trailer_size = 0;
+ capa->pkt.odp_trailer_size = _ODP_EV_ENDMARK_SIZE;
capa->pkt.min_mem_align = ODP_CACHE_LINE_SIZE;
capa->pkt.min_buf_align = ODP_CACHE_LINE_SIZE;
capa->pkt.min_head_align = MIN_HEAD_ALIGN;
@@ -1878,7 +1895,9 @@ odp_pool_t odp_pool_ext_create(const char *name, const odp_pool_ext_param_t *par
pool->num = num_buf;
pool->headroom = headroom;
pool->tailroom = 0;
- pool->seg_len = buf_size - head_offset - headroom - pool->tailroom;
+ pool->trailer_size = _ODP_EV_ENDMARK_SIZE;
+ pool->seg_len = buf_size - head_offset - headroom - pool->tailroom -
+ pool->trailer_size;
pool->max_seg_len = headroom + pool->seg_len + pool->tailroom;
pool->max_len = PKT_MAX_SEGS * pool->seg_len;
pool->ext_head_offset = head_offset;
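
With the endmark trailer in place, buffer and packet pools reserve _ODP_EV_ENDMARK_SIZE extra bytes behind the usable data area, and external-memory pools now report that space through the capability. A short sketch (printf used only for illustration):

	odp_pool_ext_capability_t capa;

	if (odp_pool_ext_capability(ODP_POOL_PACKET, &capa) == 0)
		printf("implementation trailer per packet buffer: %u bytes\n",
		       capa.pkt.odp_trailer_size);
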
diff --git a/platform/linux-generic/odp_queue_basic.c b/platform/linux-generic/odp_queue_basic.c
index 360f907bf..83694f84f 100644
--- a/platform/linux-generic/odp_queue_basic.c
+++ b/platform/linux-generic/odp_queue_basic.c
@@ -1,43 +1,46 @@
/* Copyright (c) 2013-2018, Linaro Limited
- * Copyright (c) 2021-2022, Nokia
+ * Copyright (c) 2021-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <odp/api/queue.h>
-#include <odp_queue_basic_internal.h>
-#include <odp_queue_if.h>
-#include <odp/api/std_types.h>
#include <odp/api/align.h>
-#include <odp_pool_internal.h>
-#include <odp_init_internal.h>
-#include <odp_timer_internal.h>
-#include <odp/api/shared_memory.h>
-#include <odp/api/schedule.h>
-#include <odp_schedule_if.h>
-#include <odp_config_internal.h>
-#include <odp_packet_io_internal.h>
-#include <odp_debug_internal.h>
#include <odp/api/hints.h>
+#include <odp/api/packet_io.h>
+#include <odp/api/queue.h>
+#include <odp/api/schedule.h>
+#include <odp/api/shared_memory.h>
+#include <odp/api/std_types.h>
#include <odp/api/sync.h>
-#include <odp/api/plat/sync_inlines.h>
+#include <odp/api/ticketlock.h>
#include <odp/api/traffic_mngr.h>
-#include <odp_libconfig_internal.h>
+
#include <odp/api/plat/queue_inline_types.h>
-#include <odp_global_data.h>
-#include <odp_queue_basic_internal.h>
+#include <odp/api/plat/sync_inlines.h>
+#include <odp/api/plat/ticketlock_inlines.h>
+
+#include <odp_config_internal.h>
+#include <odp_debug_internal.h>
#include <odp_event_internal.h>
+#include <odp_global_data.h>
+#include <odp_init_internal.h>
+#include <odp_libconfig_internal.h>
#include <odp_macros_internal.h>
+#include <odp_packet_io_internal.h>
+#include <odp_pool_internal.h>
+#include <odp_queue_basic_internal.h>
+#include <odp_queue_if.h>
+#include <odp_schedule_if.h>
+#include <odp_timer_internal.h>
+
+#include <inttypes.h>
+#include <string.h>
-#include <odp/api/plat/ticketlock_inlines.h>
#define LOCK(queue_ptr) odp_ticketlock_lock(&((queue_ptr)->lock))
#define UNLOCK(queue_ptr) odp_ticketlock_unlock(&((queue_ptr)->lock))
#define LOCK_INIT(queue_ptr) odp_ticketlock_init(&((queue_ptr)->lock))
-#include <string.h>
-#include <inttypes.h>
-
#define MIN_QUEUE_SIZE 32
#define MAX_QUEUE_SIZE (1 * 1024 * 1024)
@@ -405,7 +408,7 @@ static int queue_destroy(odp_queue_t handle)
else if (queue->type == ODP_QUEUE_TYPE_SCHED)
empty = ring_st_is_empty(&queue->ring_st);
else
- empty = ring_mpmc_is_empty(&queue->ring_mpmc);
+ empty = ring_mpmc_u32_is_empty(&queue->ring_mpmc);
if (!empty) {
UNLOCK(queue);
@@ -494,7 +497,7 @@ static inline int _plain_queue_enq_multi(odp_queue_t handle,
{
queue_entry_t *queue;
int ret, num_enq;
- ring_mpmc_t *ring_mpmc;
+ ring_mpmc_u32_t *ring_mpmc;
uint32_t event_idx[num];
queue = qentry_from_handle(handle);
@@ -505,8 +508,8 @@ static inline int _plain_queue_enq_multi(odp_queue_t handle,
event_index_from_hdr(event_idx, event_hdr, num);
- num_enq = ring_mpmc_enq_multi(ring_mpmc, queue->ring_data,
- queue->ring_mask, event_idx, num);
+ num_enq = ring_mpmc_u32_enq_multi(ring_mpmc, queue->ring_data,
+ queue->ring_mask, event_idx, num);
return num_enq;
}
@@ -516,14 +519,14 @@ static inline int _plain_queue_deq_multi(odp_queue_t handle,
{
int num_deq;
queue_entry_t *queue;
- ring_mpmc_t *ring_mpmc;
+ ring_mpmc_u32_t *ring_mpmc;
uint32_t event_idx[num];
queue = qentry_from_handle(handle);
ring_mpmc = &queue->ring_mpmc;
- num_deq = ring_mpmc_deq_multi(ring_mpmc, queue->ring_data,
- queue->ring_mask, event_idx, num);
+ num_deq = ring_mpmc_u32_deq_multi(ring_mpmc, queue->ring_data,
+ queue->ring_mask, event_idx, num);
if (num_deq == 0)
return 0;
@@ -751,7 +754,7 @@ static void queue_print(odp_queue_t handle)
} else {
_ODP_PRINT(" implementation ring_mpmc\n");
_ODP_PRINT(" length %" PRIu32 "/%" PRIu32 "\n",
- ring_mpmc_length(&queue->ring_mpmc), queue->ring_mask + 1);
+ ring_mpmc_u32_len(&queue->ring_mpmc), queue->ring_mask + 1);
}
_ODP_PRINT("\n");
@@ -817,7 +820,7 @@ static void queue_print_all(void)
if (_odp_sched_id == _ODP_SCHED_ID_BASIC)
spr = _odp_sched_basic_get_spread(index);
} else {
- len = ring_mpmc_length(&queue->ring_mpmc);
+ len = ring_mpmc_u32_len(&queue->ring_mpmc);
max_len = queue->ring_mask + 1;
}
@@ -1070,7 +1073,7 @@ static int queue_init(queue_entry_t *queue, const char *name,
queue->ring_data = &_odp_queue_glb->ring_data[offset];
queue->ring_mask = queue_size - 1;
- ring_mpmc_init(&queue->ring_mpmc);
+ ring_mpmc_u32_init(&queue->ring_mpmc);
} else {
queue->enqueue = sched_queue_enq;
diff --git a/platform/linux-generic/odp_schedule_basic.c b/platform/linux-generic/odp_schedule_basic.c
index 594360326..3fc9c1c17 100644
--- a/platform/linux-generic/odp_schedule_basic.c
+++ b/platform/linux-generic/odp_schedule_basic.c
@@ -238,13 +238,13 @@ typedef struct {
struct {
uint8_t burst_default[NUM_SCHED_SYNC][NUM_PRIO];
uint8_t burst_max[NUM_SCHED_SYNC][NUM_PRIO];
+ uint16_t order_stash_size;
uint8_t num_spread;
uint8_t prefer_ratio;
} config;
-
- uint8_t load_balance;
- uint16_t max_spread;
uint32_t ring_mask;
+ uint16_t max_spread;
+ uint8_t load_balance;
odp_atomic_u32_t grp_epoch;
odp_shm_t shm;
odp_ticketlock_t mask_lock[NUM_SCHED_GRPS];
@@ -437,6 +437,20 @@ static int read_config_file(sched_global_t *sched)
if (val == 0 || sched->config.num_spread == 1)
sched->load_balance = 0;
+ str = "sched_basic.order_stash_size";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+
+ if (val > MAX_ORDERED_STASH || val < 0) {
+ _ODP_ERR("Bad value %s = %i [min: 0, max: %u]\n", str, val, MAX_ORDERED_STASH);
+ return -1;
+ }
+
+ sched->config.order_stash_size = val;
+ _ODP_PRINT(" %s: %i\n", str, val);
+
/* Initialize default values for all queue types */
str = "sched_basic.burst_size_default";
if (read_burst_size_conf(sched->config.burst_default[ODP_SCHED_SYNC_ATOMIC], str, 1,
@@ -1204,7 +1218,7 @@ static int schedule_ord_enq_multi(odp_queue_t dst_queue, void *event_hdr[],
/* Pktout may drop packets, so the operation cannot be stashed. */
if (dst_qentry->pktout.pktio != ODP_PKTIO_INVALID ||
- odp_unlikely(stash_num >= MAX_ORDERED_STASH)) {
+ odp_unlikely(stash_num >= sched->config.order_stash_size)) {
/* If the local stash is full, wait until it is our turn and
* then release the stash and do enqueue directly. */
wait_for_order(src_queue);
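
The ordered-enqueue stash depth, previously fixed at MAX_ORDERED_STASH, is now read from the configuration file. A hedged example of overriding it through the usual ODP_CONFIG_FILE mechanism (the value 0 is illustrative and turns stashing off, making ordered enqueues always wait for their turn):

	sched_basic: {
		order_stash_size = 0
	}
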
diff --git a/platform/linux-generic/odp_shared_memory.c b/platform/linux-generic/odp_shared_memory.c
index a0e822a53..ef4df3a33 100644
--- a/platform/linux-generic/odp_shared_memory.c
+++ b/platform/linux-generic/odp_shared_memory.c
@@ -8,7 +8,6 @@
#include <odp_config_internal.h>
#include <odp_debug_internal.h>
#include <odp/api/debug.h>
-#include <odp/api/deprecated.h>
#include <odp/api/std_types.h>
#include <odp/api/shared_memory.h>
#include <odp/api/plat/strong_types.h>
@@ -18,14 +17,8 @@
#include <string.h>
/* Supported ODP_SHM_* flags */
-#if ODP_DEPRECATED_API
- #define DEPRECATED_SHM_FLAGS (ODP_SHM_SW_ONLY)
-#else
- #define DEPRECATED_SHM_FLAGS 0
-#endif
-
#define SUPPORTED_SHM_FLAGS (ODP_SHM_PROC | ODP_SHM_SINGLE_VA | ODP_SHM_EXPORT | \
- ODP_SHM_HP | ODP_SHM_NO_HP | DEPRECATED_SHM_FLAGS)
+ ODP_SHM_HP | ODP_SHM_NO_HP)
static inline uint32_t from_handle(odp_shm_t shm)
{
diff --git a/platform/linux-generic/odp_stash.c b/platform/linux-generic/odp_stash.c
index c7d4136ab..5ff499843 100644
--- a/platform/linux-generic/odp_stash.c
+++ b/platform/linux-generic/odp_stash.c
@@ -1,9 +1,10 @@
-/* Copyright (c) 2020-2022, Nokia
+/* Copyright (c) 2020-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
+#include <odp/api/align.h>
#include <odp/api/shared_memory.h>
#include <odp/api/stash.h>
#include <odp/api/std_types.h>
@@ -15,7 +16,10 @@
#include <odp_debug_internal.h>
#include <odp_global_data.h>
#include <odp_init_internal.h>
+#include <odp_libconfig_internal.h>
#include <odp_macros_internal.h>
+#include <odp_ring_mpmc_u32_internal.h>
+#include <odp_ring_mpmc_u64_internal.h>
#include <odp_ring_u32_internal.h>
#include <odp_ring_u64_internal.h>
@@ -26,18 +30,61 @@
ODP_STATIC_ASSERT(CONFIG_INTERNAL_STASHES < CONFIG_MAX_STASHES, "TOO_MANY_INTERNAL_STASHES");
-#define MAX_RING_SIZE (1024 * 1024)
#define MIN_RING_SIZE 64
+enum {
+ STASH_FREE = 0,
+ STASH_RESERVED,
+ STASH_ACTIVE
+};
+
+typedef struct stash_t stash_t;
+
+typedef void (*ring_u32_init_fn_t)(stash_t *stash);
+typedef int32_t (*ring_u32_enq_multi_fn_t)(stash_t *stash, const uint32_t val[], int32_t num);
+typedef int32_t (*ring_u32_enq_batch_fn_t)(stash_t *stash, const uint32_t val[], int32_t num);
+typedef int32_t (*ring_u32_deq_multi_fn_t)(stash_t *stash, uint32_t val[], int32_t num);
+typedef int32_t (*ring_u32_deq_batch_fn_t)(stash_t *stash, uint32_t val[], int32_t num);
+typedef int32_t (*ring_u32_len_fn_t)(stash_t *stash);
+
+typedef void (*ring_u64_init_fn_t)(stash_t *stash);
+typedef int32_t (*ring_u64_enq_multi_fn_t)(stash_t *stash, const uint64_t val[], int32_t num);
+typedef int32_t (*ring_u64_enq_batch_fn_t)(stash_t *stash, const uint64_t val[], int32_t num);
+typedef int32_t (*ring_u64_deq_multi_fn_t)(stash_t *stash, uint64_t val[], int32_t num);
+typedef int32_t (*ring_u64_deq_batch_fn_t)(stash_t *stash, uint64_t val[], int32_t num);
+typedef int32_t (*ring_u64_len_fn_t)(stash_t *stash);
+
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpedantic"
-typedef struct stash_t {
- char name[ODP_STASH_NAME_LEN];
- odp_shm_t shm;
- int index;
+typedef struct ODP_ALIGNED_CACHE stash_t {
+ /* Ring functions */
+ union {
+ struct {
+ ring_u32_enq_multi_fn_t enq_multi;
+ ring_u32_enq_batch_fn_t enq_batch;
+ ring_u32_deq_multi_fn_t deq_multi;
+ ring_u32_deq_batch_fn_t deq_batch;
+ ring_u32_init_fn_t init;
+ ring_u32_len_fn_t len;
+ } u32;
+
+ struct {
+ ring_u64_enq_multi_fn_t enq_multi;
+ ring_u64_enq_batch_fn_t enq_batch;
+ ring_u64_deq_multi_fn_t deq_multi;
+ ring_u64_deq_batch_fn_t deq_batch;
+ ring_u64_init_fn_t init;
+ ring_u64_len_fn_t len;
+ } u64;
+ } ring_fn;
+
uint32_t ring_mask;
+ uint32_t ring_size;
uint32_t obj_size;
+ char name[ODP_STASH_NAME_LEN];
+ int index;
+
/* Ring header followed by variable sized data (object handles) */
union {
struct ODP_ALIGNED_CACHE {
@@ -49,6 +96,16 @@ typedef struct stash_t {
ring_u64_t hdr;
uint64_t data[];
} ring_u64;
+
+ struct ODP_ALIGNED_CACHE {
+ ring_mpmc_u32_t hdr;
+ uint32_t data[];
+ } ring_mpmc_u32;
+
+ struct ODP_ALIGNED_CACHE {
+ ring_mpmc_u64_t hdr;
+ uint64_t data[];
+ } ring_mpmc_u64;
};
} stash_t;
@@ -57,23 +114,92 @@ typedef struct stash_t {
typedef struct stash_global_t {
odp_ticketlock_t lock;
odp_shm_t shm;
- uint8_t stash_reserved[CONFIG_MAX_STASHES];
+ uint32_t max_num;
+ uint32_t max_num_obj;
+ uint32_t num_internal;
+ uint8_t strict_size;
+ uint8_t stash_state[CONFIG_MAX_STASHES];
stash_t *stash[CONFIG_MAX_STASHES];
+ uint8_t data[] ODP_ALIGNED_CACHE;
} stash_global_t;
static stash_global_t *stash_global;
+static inline stash_t *stash_entry(odp_stash_t st)
+{
+ return (stash_t *)(uintptr_t)st;
+}
+
+static inline odp_stash_t stash_handle(stash_t *stash)
+{
+ return (odp_stash_t)(uintptr_t)stash;
+}
+
int _odp_stash_init_global(void)
{
odp_shm_t shm;
-
- if (odp_global_ro.disable.stash) {
+ uint32_t max_num, max_num_obj;
+ const char *str;
+ uint64_t ring_max_size, stash_max_size, stash_data_size, offset;
+ const uint32_t internal_stashes = odp_global_ro.disable.dma ? 0 : CONFIG_INTERNAL_STASHES;
+ uint8_t *stash_data;
+ uint8_t strict_size;
+ int val = 0;
+
+ if (odp_global_ro.disable.stash && odp_global_ro.disable.dma) {
_ODP_PRINT("Stash is DISABLED\n");
return 0;
}
- shm = odp_shm_reserve("_odp_stash_global", sizeof(stash_global_t),
+ _ODP_PRINT("Stash config:\n");
+
+ str = "stash.max_num";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+ _ODP_PRINT(" %s: %i\n", str, val);
+ max_num = val;
+
+ str = "stash.max_num_obj";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+ _ODP_PRINT(" %s: %i\n", str, val);
+ max_num_obj = val;
+
+ str = "stash.strict_size";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+ _ODP_PRINT(" %s: %i\n", str, val);
+ strict_size = !!val;
+
+ _ODP_PRINT("\n");
+
+ /* Reserve resources for implementation internal stashes */
+ if (max_num > CONFIG_MAX_STASHES - internal_stashes) {
+ _ODP_ERR("Maximum supported number of stashes: %d\n",
+ CONFIG_MAX_STASHES - internal_stashes);
+ return -1;
+ }
+ max_num += internal_stashes;
+
+ /* Must have room for minimum sized ring */
+ if (max_num_obj < MIN_RING_SIZE)
+ max_num_obj = MIN_RING_SIZE - 1;
+
+ /* Ring size must be larger than the number of items stored */
+ ring_max_size = _ODP_ROUNDUP_POWER2_U32(max_num_obj + 1);
+
+ stash_max_size = _ODP_ROUNDUP_CACHE_LINE(sizeof(stash_t) +
+ (ring_max_size * sizeof(uint64_t)));
+ stash_data_size = max_num * stash_max_size;
+
+ shm = odp_shm_reserve("_odp_stash_global", sizeof(stash_global_t) + stash_data_size,
ODP_CACHE_LINE_SIZE, 0);
stash_global = odp_shm_addr(shm);
@@ -85,8 +211,21 @@ int _odp_stash_init_global(void)
memset(stash_global, 0, sizeof(stash_global_t));
stash_global->shm = shm;
+ stash_global->max_num = max_num;
+ stash_global->max_num_obj = max_num_obj;
+ stash_global->strict_size = strict_size;
+ stash_global->num_internal = internal_stashes;
odp_ticketlock_init(&stash_global->lock);
+ /* Initialize stash pointers */
+ stash_data = stash_global->data;
+ offset = 0;
+
+ for (uint32_t i = 0; i < max_num; i++) {
+ stash_global->stash[i] = (stash_t *)(uintptr_t)(stash_data + offset);
+ offset += stash_max_size;
+ }
+
return 0;
}
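
The new options bound a single shared memory reservation that now backs all stash rings. They can be tuned in the same configuration override file; the values below are purely illustrative, not the shipped defaults:

	stash: {
		max_num = 512
		max_num_obj = 4096
		strict_size = 0
	}
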
@@ -108,17 +247,21 @@ int _odp_stash_term_global(void)
int odp_stash_capability(odp_stash_capability_t *capa, odp_stash_type_t type)
{
+ uint32_t max_stashes;
+
if (odp_global_ro.disable.stash) {
_ODP_ERR("Stash is disabled\n");
return -1;
}
(void)type;
+ max_stashes = stash_global->max_num - stash_global->num_internal;
+
memset(capa, 0, sizeof(odp_stash_capability_t));
- capa->max_stashes_any_type = CONFIG_MAX_STASHES - CONFIG_INTERNAL_STASHES;
- capa->max_stashes = CONFIG_MAX_STASHES - CONFIG_INTERNAL_STASHES;
- capa->max_num_obj = MAX_RING_SIZE;
+ capa->max_stashes_any_type = max_stashes;
+ capa->max_stashes = max_stashes;
+ capa->max_num_obj = stash_global->max_num_obj;
capa->max_obj_size = sizeof(uint64_t);
capa->max_get_batch = MIN_RING_SIZE;
capa->max_put_batch = MIN_RING_SIZE;
@@ -137,15 +280,14 @@ void odp_stash_param_init(odp_stash_param_t *param)
static int reserve_index(void)
{
- int i;
int index = -1;
odp_ticketlock_lock(&stash_global->lock);
- for (i = 0; i < CONFIG_MAX_STASHES; i++) {
- if (stash_global->stash_reserved[i] == 0) {
+ for (uint32_t i = 0; i < stash_global->max_num; i++) {
+ if (stash_global->stash_state[i] == STASH_FREE) {
index = i;
- stash_global->stash_reserved[i] = 1;
+ stash_global->stash_state[i] = STASH_RESERVED;
break;
}
}
@@ -159,20 +301,152 @@ static void free_index(int i)
{
odp_ticketlock_lock(&stash_global->lock);
- stash_global->stash[i] = NULL;
- stash_global->stash_reserved[i] = 0;
+ stash_global->stash_state[i] = STASH_FREE;
odp_ticketlock_unlock(&stash_global->lock);
}
+static inline void strict_ring_u32_init(stash_t *stash)
+{
+ ring_u32_init(&stash->ring_u32.hdr);
+
+ for (uint32_t i = 0; i < stash->ring_size; i++)
+ stash->ring_u32.data[i] = 0;
+}
+
+static inline void strict_ring_u64_init(stash_t *stash)
+{
+ ring_u64_init(&stash->ring_u64.hdr);
+
+ for (uint32_t i = 0; i < stash->ring_size; i++)
+ stash->ring_u64.data[i] = 0;
+}
+
+static inline int32_t strict_ring_u32_enq_multi(stash_t *stash, const uint32_t val[], int32_t num)
+{
+ /* Success always */
+ ring_u32_enq_multi(&stash->ring_u32.hdr, stash->ring_mask, (uint32_t *)(uintptr_t)val, num);
+
+ return num;
+}
+
+static inline int32_t strict_ring_u64_enq_multi(stash_t *stash, const uint64_t val[], int32_t num)
+{
+ /* Success always */
+ ring_u64_enq_multi(&stash->ring_u64.hdr, stash->ring_mask, (uint64_t *)(uintptr_t)val, num);
+
+ return num;
+}
+
+static inline int32_t strict_ring_u32_deq_multi(stash_t *stash, uint32_t val[], int32_t num)
+{
+ return ring_u32_deq_multi(&stash->ring_u32.hdr, stash->ring_mask, val, num);
+}
+
+static inline int32_t strict_ring_u64_deq_multi(stash_t *stash, uint64_t val[], int32_t num)
+{
+ return ring_u64_deq_multi(&stash->ring_u64.hdr, stash->ring_mask, val, num);
+}
+
+static inline int32_t strict_ring_u32_deq_batch(stash_t *stash, uint32_t val[], int32_t num)
+{
+ return ring_u32_deq_batch(&stash->ring_u32.hdr, stash->ring_mask, val, num);
+}
+
+static inline int32_t strict_ring_u64_deq_batch(stash_t *stash, uint64_t val[], int32_t num)
+{
+ return ring_u64_deq_batch(&stash->ring_u64.hdr, stash->ring_mask, val, num);
+}
+
+static inline int32_t strict_ring_u32_len(stash_t *stash)
+{
+ return ring_u32_len(&stash->ring_u32.hdr);
+}
+
+static inline int32_t strict_ring_u64_len(stash_t *stash)
+{
+ return ring_u64_len(&stash->ring_u64.hdr);
+}
+
+static inline void mpmc_ring_u32_init(stash_t *stash)
+{
+ ring_mpmc_u32_init(&stash->ring_mpmc_u32.hdr);
+
+ for (uint32_t i = 0; i < stash->ring_size; i++)
+ stash->ring_mpmc_u32.data[i] = 0;
+}
+
+static inline void mpmc_ring_u64_init(stash_t *stash)
+{
+ ring_mpmc_u64_init(&stash->ring_mpmc_u64.hdr);
+
+ for (uint32_t i = 0; i < stash->ring_size; i++)
+ stash->ring_mpmc_u64.data[i] = 0;
+}
+
+static inline int32_t mpmc_ring_u32_enq_multi(stash_t *stash, const uint32_t val[], int32_t num)
+{
+ return ring_mpmc_u32_enq_multi(&stash->ring_mpmc_u32.hdr, stash->ring_mpmc_u32.data,
+ stash->ring_mask, val, num);
+}
+
+static inline int32_t mpmc_ring_u64_enq_multi(stash_t *stash, const uint64_t val[], int32_t num)
+{
+ return ring_mpmc_u64_enq_multi(&stash->ring_mpmc_u64.hdr, stash->ring_mpmc_u64.data,
+ stash->ring_mask, val, num);
+}
+
+static inline int32_t mpmc_ring_u32_enq_batch(stash_t *stash, const uint32_t val[], int32_t num)
+{
+ return ring_mpmc_u32_enq_batch(&stash->ring_mpmc_u32.hdr, stash->ring_mpmc_u32.data,
+ stash->ring_mask, val, num);
+}
+
+static inline int32_t mpmc_ring_u64_enq_batch(stash_t *stash, const uint64_t val[], int32_t num)
+{
+ return ring_mpmc_u64_enq_batch(&stash->ring_mpmc_u64.hdr, stash->ring_mpmc_u64.data,
+ stash->ring_mask, val, num);
+}
+
+static inline int32_t mpmc_ring_u32_deq_multi(stash_t *stash, uint32_t val[], int32_t num)
+{
+ return ring_mpmc_u32_deq_multi(&stash->ring_mpmc_u32.hdr, stash->ring_mpmc_u32.data,
+ stash->ring_mask, val, num);
+}
+
+static inline int32_t mpmc_ring_u64_deq_multi(stash_t *stash, uint64_t val[], int32_t num)
+{
+ return ring_mpmc_u64_deq_multi(&stash->ring_mpmc_u64.hdr, stash->ring_mpmc_u64.data,
+ stash->ring_mask, val, num);
+}
+
+static inline int32_t mpmc_ring_u32_deq_batch(stash_t *stash, uint32_t val[], int32_t num)
+{
+ return ring_mpmc_u32_deq_batch(&stash->ring_mpmc_u32.hdr, stash->ring_mpmc_u32.data,
+ stash->ring_mask, val, num);
+}
+
+static inline int32_t mpmc_ring_u64_deq_batch(stash_t *stash, uint64_t val[], int32_t num)
+{
+ return ring_mpmc_u64_deq_batch(&stash->ring_mpmc_u64.hdr, stash->ring_mpmc_u64.data,
+ stash->ring_mask, val, num);
+}
+
+static inline int32_t mpmc_ring_u32_len(stash_t *stash)
+{
+ return ring_mpmc_u32_len(&stash->ring_mpmc_u32.hdr);
+}
+
+static inline int32_t mpmc_ring_u64_len(stash_t *stash)
+{
+ return ring_mpmc_u64_len(&stash->ring_mpmc_u64.hdr);
+}
+
odp_stash_t odp_stash_create(const char *name, const odp_stash_param_t *param)
{
- odp_shm_t shm;
stash_t *stash;
- uint64_t i, ring_size, shm_size;
+ uint64_t ring_size;
int ring_u64, index;
- char shm_name[ODP_STASH_NAME_LEN + 8];
- uint32_t shm_flags = 0;
if (odp_global_ro.disable.stash) {
_ODP_ERR("Stash is disabled\n");
@@ -184,7 +458,7 @@ odp_stash_t odp_stash_create(const char *name, const odp_stash_param_t *param)
return ODP_STASH_INVALID;
}
- if (param->num_obj > MAX_RING_SIZE) {
+ if (param->num_obj > stash_global->max_num_obj) {
_ODP_ERR("Too many objects.\n");
return ODP_STASH_INVALID;
}
@@ -213,75 +487,71 @@ odp_stash_t odp_stash_create(const char *name, const odp_stash_param_t *param)
else
ring_size = _ODP_ROUNDUP_POWER2_U32(ring_size + 1);
- memset(shm_name, 0, sizeof(shm_name));
- snprintf(shm_name, sizeof(shm_name) - 1, "_stash_%s", name);
-
- if (ring_u64)
- shm_size = sizeof(stash_t) + (ring_size * sizeof(uint64_t));
- else
- shm_size = sizeof(stash_t) + (ring_size * sizeof(uint32_t));
-
- if (odp_global_ro.shm_single_va)
- shm_flags |= ODP_SHM_SINGLE_VA;
-
- shm = odp_shm_reserve(shm_name, shm_size, ODP_CACHE_LINE_SIZE, shm_flags);
-
- if (shm == ODP_SHM_INVALID) {
- _ODP_ERR("SHM reserve failed.\n");
- free_index(index);
- return ODP_STASH_INVALID;
- }
-
- stash = odp_shm_addr(shm);
+ stash = stash_global->stash[index];
memset(stash, 0, sizeof(stash_t));
- if (ring_u64) {
- ring_u64_init(&stash->ring_u64.hdr);
-
- for (i = 0; i < ring_size; i++)
- stash->ring_u64.data[i] = 0;
+ /* Set ring function pointers */
+ if (stash_global->strict_size) {
+ if (ring_u64) {
+ stash->ring_fn.u64.init = strict_ring_u64_init;
+ stash->ring_fn.u64.enq_multi = strict_ring_u64_enq_multi;
+ stash->ring_fn.u64.enq_batch = strict_ring_u64_enq_multi;
+ stash->ring_fn.u64.deq_multi = strict_ring_u64_deq_multi;
+ stash->ring_fn.u64.deq_batch = strict_ring_u64_deq_batch;
+ stash->ring_fn.u64.len = strict_ring_u64_len;
+ } else {
+ stash->ring_fn.u32.init = strict_ring_u32_init;
+ stash->ring_fn.u32.enq_multi = strict_ring_u32_enq_multi;
+ stash->ring_fn.u32.enq_batch = strict_ring_u32_enq_multi;
+ stash->ring_fn.u32.deq_multi = strict_ring_u32_deq_multi;
+ stash->ring_fn.u32.deq_batch = strict_ring_u32_deq_batch;
+ stash->ring_fn.u32.len = strict_ring_u32_len;
+ }
} else {
- ring_u32_init(&stash->ring_u32.hdr);
-
- for (i = 0; i < ring_size; i++)
- stash->ring_u32.data[i] = 0;
+ if (ring_u64) {
+ stash->ring_fn.u64.init = mpmc_ring_u64_init;
+ stash->ring_fn.u64.enq_multi = mpmc_ring_u64_enq_multi;
+ stash->ring_fn.u64.enq_batch = mpmc_ring_u64_enq_batch;
+ stash->ring_fn.u64.deq_multi = mpmc_ring_u64_deq_multi;
+ stash->ring_fn.u64.deq_batch = mpmc_ring_u64_deq_batch;
+ stash->ring_fn.u64.len = mpmc_ring_u64_len;
+ } else {
+ stash->ring_fn.u32.init = mpmc_ring_u32_init;
+ stash->ring_fn.u32.enq_multi = mpmc_ring_u32_enq_multi;
+ stash->ring_fn.u32.enq_batch = mpmc_ring_u32_enq_batch;
+ stash->ring_fn.u32.deq_multi = mpmc_ring_u32_deq_multi;
+ stash->ring_fn.u32.deq_batch = mpmc_ring_u32_deq_batch;
+ stash->ring_fn.u32.len = mpmc_ring_u32_len;
+ }
}
if (name)
strcpy(stash->name, name);
stash->index = index;
- stash->shm = shm;
stash->obj_size = param->obj_size;
stash->ring_mask = ring_size - 1;
+ stash->ring_size = ring_size;
+
+ if (ring_u64)
+ stash->ring_fn.u64.init(stash);
+ else
+ stash->ring_fn.u32.init(stash);
/* This makes stash visible to lookups */
odp_ticketlock_lock(&stash_global->lock);
- stash_global->stash[index] = stash;
+ stash_global->stash_state[index] = STASH_ACTIVE;
odp_ticketlock_unlock(&stash_global->lock);
- return (odp_stash_t)stash;
+ return stash_handle(stash);
}
int odp_stash_destroy(odp_stash_t st)
{
- stash_t *stash;
- int index;
- odp_shm_t shm;
-
if (st == ODP_STASH_INVALID)
return -1;
- stash = (stash_t *)(uintptr_t)st;
- index = stash->index;
- shm = stash->shm;
-
- free_index(index);
-
- if (odp_shm_free(shm)) {
- _ODP_ERR("SHM free failed.\n");
- return -1;
- }
+ free_index(stash_entry(st)->index);
return 0;
}
@@ -293,7 +563,6 @@ uint64_t odp_stash_to_u64(odp_stash_t st)
odp_stash_t odp_stash_lookup(const char *name)
{
- int i;
stash_t *stash;
if (name == NULL)
@@ -301,12 +570,13 @@ odp_stash_t odp_stash_lookup(const char *name)
odp_ticketlock_lock(&stash_global->lock);
- for (i = 0; i < CONFIG_MAX_STASHES; i++) {
+ for (uint32_t i = 0; i < stash_global->max_num; i++) {
stash = stash_global->stash[i];
- if (stash && strcmp(stash->name, name) == 0) {
+ if (stash_global->stash_state[i] == STASH_ACTIVE &&
+ strcmp(stash->name, name) == 0) {
odp_ticketlock_unlock(&stash_global->lock);
- return (odp_stash_t)stash;
+ return stash_handle(stash);
}
}
@@ -315,57 +585,51 @@ odp_stash_t odp_stash_lookup(const char *name)
return ODP_STASH_INVALID;
}
-static inline int32_t stash_put(odp_stash_t st, const void *obj, int32_t num)
+static inline int32_t stash_put(odp_stash_t st, const void *obj, int32_t num, odp_bool_t is_batch)
{
- stash_t *stash;
+ int32_t (*ring_u32_enq)(stash_t *stash, const uint32_t val[], int32_t num);
+ int32_t (*ring_u64_enq)(stash_t *stash, const uint64_t val[], int32_t num);
+ stash_t *stash = stash_entry(st);
uint32_t obj_size;
int32_t i;
- stash = (stash_t *)(uintptr_t)st;
-
if (odp_unlikely(st == ODP_STASH_INVALID))
return -1;
- obj_size = stash->obj_size;
-
- if (obj_size == sizeof(uint64_t)) {
- ring_u64_t *ring_u64 = &stash->ring_u64.hdr;
-
- ring_u64_enq_multi(ring_u64, stash->ring_mask,
- (uint64_t *)(uintptr_t)obj, num);
- return num;
+ if (is_batch) {
+ ring_u32_enq = stash->ring_fn.u32.enq_batch;
+ ring_u64_enq = stash->ring_fn.u64.enq_batch;
+ } else {
+ ring_u32_enq = stash->ring_fn.u32.enq_multi;
+ ring_u64_enq = stash->ring_fn.u64.enq_multi;
}
- if (obj_size == sizeof(uint32_t)) {
- ring_u32_t *ring_u32 = &stash->ring_u32.hdr;
+ obj_size = stash->obj_size;
- ring_u32_enq_multi(ring_u32, stash->ring_mask,
- (uint32_t *)(uintptr_t)obj, num);
- return num;
- }
+ if (obj_size == sizeof(uint64_t))
+ return ring_u64_enq(stash, (uint64_t *)(uintptr_t)obj, num);
+
+ if (obj_size == sizeof(uint32_t))
+ return ring_u32_enq(stash, (uint32_t *)(uintptr_t)obj, num);
if (obj_size == sizeof(uint16_t)) {
const uint16_t *u16_ptr = obj;
- ring_u32_t *ring_u32 = &stash->ring_u32.hdr;
uint32_t u32[num];
for (i = 0; i < num; i++)
u32[i] = u16_ptr[i];
- ring_u32_enq_multi(ring_u32, stash->ring_mask, u32, num);
- return num;
+ return ring_u32_enq(stash, u32, num);
}
if (obj_size == sizeof(uint8_t)) {
const uint8_t *u8_ptr = obj;
- ring_u32_t *ring_u32 = &stash->ring_u32.hdr;
uint32_t u32[num];
for (i = 0; i < num; i++)
u32[i] = u8_ptr[i];
- ring_u32_enq_multi(ring_u32, stash->ring_mask, u32, num);
- return num;
+ return ring_u32_enq(stash, u32, num);
}
return -1;
@@ -373,73 +637,66 @@ static inline int32_t stash_put(odp_stash_t st, const void *obj, int32_t num)
int32_t odp_stash_put(odp_stash_t st, const void *obj, int32_t num)
{
- return stash_put(st, obj, num);
+ return stash_put(st, obj, num, false);
}
int32_t odp_stash_put_batch(odp_stash_t st, const void *obj, int32_t num)
{
- /* Returns always 'num', or -1 on failure. */
- return stash_put(st, obj, num);
+ return stash_put(st, obj, num, true);
}
-static inline int32_t stash_put_u32(odp_stash_t st, const uint32_t val[],
- int32_t num)
+int32_t odp_stash_put_u32(odp_stash_t st, const uint32_t val[], int32_t num)
{
- stash_t *stash = (stash_t *)(uintptr_t)st;
+ stash_t *stash = stash_entry(st);
if (odp_unlikely(st == ODP_STASH_INVALID))
return -1;
_ODP_ASSERT(stash->obj_size == sizeof(uint32_t));
- ring_u32_enq_multi(&stash->ring_u32.hdr, stash->ring_mask,
- (uint32_t *)(uintptr_t)val, num);
- return num;
+ return stash->ring_fn.u32.enq_multi(stash, val, num);
}
-int32_t odp_stash_put_u32(odp_stash_t st, const uint32_t val[], int32_t num)
+int32_t odp_stash_put_u32_batch(odp_stash_t st, const uint32_t val[], int32_t num)
{
- return stash_put_u32(st, val, num);
-}
+ stash_t *stash = stash_entry(st);
-int32_t odp_stash_put_u32_batch(odp_stash_t st, const uint32_t val[],
- int32_t num)
-{
- /* Returns always 'num', or -1 on failure. */
- return stash_put_u32(st, val, num);
+ if (odp_unlikely(st == ODP_STASH_INVALID))
+ return -1;
+
+ _ODP_ASSERT(stash->obj_size == sizeof(uint32_t));
+
+ return stash->ring_fn.u32.enq_batch(stash, val, num);
}
-static inline int32_t stash_put_u64(odp_stash_t st, const uint64_t val[],
- int32_t num)
+int32_t odp_stash_put_u64(odp_stash_t st, const uint64_t val[], int32_t num)
{
- stash_t *stash = (stash_t *)(uintptr_t)st;
+ stash_t *stash = stash_entry(st);
if (odp_unlikely(st == ODP_STASH_INVALID))
return -1;
_ODP_ASSERT(stash->obj_size == sizeof(uint64_t));
- ring_u64_enq_multi(&stash->ring_u64.hdr, stash->ring_mask,
- (uint64_t *)(uintptr_t)val, num);
- return num;
-}
-
-int32_t odp_stash_put_u64(odp_stash_t st, const uint64_t val[], int32_t num)
-{
- return stash_put_u64(st, val, num);
+ return stash->ring_fn.u64.enq_multi(stash, (uint64_t *)(uintptr_t)val, num);
}
int32_t odp_stash_put_u64_batch(odp_stash_t st, const uint64_t val[],
int32_t num)
{
- /* Returns always 'num', or -1 on failure. */
- return stash_put_u64(st, val, num);
+ stash_t *stash = stash_entry(st);
+
+ if (odp_unlikely(st == ODP_STASH_INVALID))
+ return -1;
+
+ _ODP_ASSERT(stash->obj_size == sizeof(uint64_t));
+
+ return stash->ring_fn.u64.enq_batch(stash, (uint64_t *)(uintptr_t)val, num);
}
-static inline int32_t stash_put_ptr(odp_stash_t st, const uintptr_t ptr[],
- int32_t num)
+int32_t odp_stash_put_ptr(odp_stash_t st, const uintptr_t ptr[], int32_t num)
{
- stash_t *stash = (stash_t *)(uintptr_t)st;
+ stash_t *stash = stash_entry(st);
if (odp_unlikely(st == ODP_STASH_INVALID))
return -1;
@@ -447,69 +704,65 @@ static inline int32_t stash_put_ptr(odp_stash_t st, const uintptr_t ptr[],
_ODP_ASSERT(stash->obj_size == sizeof(uintptr_t));
if (sizeof(uintptr_t) == sizeof(uint32_t))
- ring_u32_enq_multi(&stash->ring_u32.hdr, stash->ring_mask,
- (uint32_t *)(uintptr_t)ptr, num);
- else if (sizeof(uintptr_t) == sizeof(uint64_t))
- ring_u64_enq_multi(&stash->ring_u64.hdr, stash->ring_mask,
- (uint64_t *)(uintptr_t)ptr, num);
- else
- return -1;
+ return stash->ring_fn.u32.enq_multi(stash, (uint32_t *)(uintptr_t)ptr, num);
- return num;
-}
+ if (sizeof(uintptr_t) == sizeof(uint64_t))
+ return stash->ring_fn.u64.enq_multi(stash, (uint64_t *)(uintptr_t)ptr, num);
-int32_t odp_stash_put_ptr(odp_stash_t st, const uintptr_t ptr[], int32_t num)
-{
- return stash_put_ptr(st, ptr, num);
+ return -1;
}
int32_t odp_stash_put_ptr_batch(odp_stash_t st, const uintptr_t ptr[],
int32_t num)
{
- /* Returns always 'num', or -1 on failure. */
- return stash_put_ptr(st, ptr, num);
+ stash_t *stash = stash_entry(st);
+
+ if (odp_unlikely(st == ODP_STASH_INVALID))
+ return -1;
+
+ _ODP_ASSERT(stash->obj_size == sizeof(uintptr_t));
+
+ if (sizeof(uintptr_t) == sizeof(uint32_t))
+ return stash->ring_fn.u32.enq_batch(stash, (uint32_t *)(uintptr_t)ptr, num);
+
+ if (sizeof(uintptr_t) == sizeof(uint64_t))
+ return stash->ring_fn.u64.enq_batch(stash, (uint64_t *)(uintptr_t)ptr, num);
+
+ return -1;
}
-static inline int32_t stash_get(odp_stash_t st, void *obj, int32_t num, odp_bool_t batch)
+static inline int32_t stash_get(odp_stash_t st, void *obj, int32_t num, odp_bool_t is_batch)
{
- stash_t *stash;
+ int32_t (*ring_u32_deq)(stash_t *stash, uint32_t val[], int32_t num);
+ int32_t (*ring_u64_deq)(stash_t *stash, uint64_t val[], int32_t num);
+ stash_t *stash = stash_entry(st);
uint32_t obj_size;
uint32_t i, num_deq;
- stash = (stash_t *)(uintptr_t)st;
-
if (odp_unlikely(st == ODP_STASH_INVALID))
return -1;
- obj_size = stash->obj_size;
-
- if (obj_size == sizeof(uint64_t)) {
- ring_u64_t *ring_u64 = &stash->ring_u64.hdr;
-
- if (batch)
- return ring_u64_deq_batch(ring_u64, stash->ring_mask, obj, num);
- else
- return ring_u64_deq_multi(ring_u64, stash->ring_mask, obj, num);
+ if (is_batch) {
+ ring_u32_deq = stash->ring_fn.u32.deq_batch;
+ ring_u64_deq = stash->ring_fn.u64.deq_batch;
+ } else {
+ ring_u32_deq = stash->ring_fn.u32.deq_multi;
+ ring_u64_deq = stash->ring_fn.u64.deq_multi;
}
- if (obj_size == sizeof(uint32_t)) {
- ring_u32_t *ring_u32 = &stash->ring_u32.hdr;
+ obj_size = stash->obj_size;
+
+ if (obj_size == sizeof(uint64_t))
+ return ring_u64_deq(stash, obj, num);
- if (batch)
- return ring_u32_deq_batch(ring_u32, stash->ring_mask, obj, num);
- else
- return ring_u32_deq_multi(ring_u32, stash->ring_mask, obj, num);
- }
+ if (obj_size == sizeof(uint32_t))
+ return ring_u32_deq(stash, obj, num);
if (obj_size == sizeof(uint16_t)) {
uint16_t *u16_ptr = obj;
- ring_u32_t *ring_u32 = &stash->ring_u32.hdr;
uint32_t u32[num];
- if (batch)
- num_deq = ring_u32_deq_batch(ring_u32, stash->ring_mask, u32, num);
- else
- num_deq = ring_u32_deq_multi(ring_u32, stash->ring_mask, u32, num);
+ num_deq = ring_u32_deq(stash, u32, num);
for (i = 0; i < num_deq; i++)
u16_ptr[i] = u32[i];
@@ -519,13 +772,9 @@ static inline int32_t stash_get(odp_stash_t st, void *obj, int32_t num, odp_bool
if (obj_size == sizeof(uint8_t)) {
uint8_t *u8_ptr = obj;
- ring_u32_t *ring_u32 = &stash->ring_u32.hdr;
uint32_t u32[num];
- if (batch)
- num_deq = ring_u32_deq_batch(ring_u32, stash->ring_mask, u32, num);
- else
- num_deq = ring_u32_deq_multi(ring_u32, stash->ring_mask, u32, num);
+ num_deq = ring_u32_deq(stash, u32, num);
for (i = 0; i < num_deq; i++)
u8_ptr[i] = u32[i];
@@ -538,67 +787,65 @@ static inline int32_t stash_get(odp_stash_t st, void *obj, int32_t num, odp_bool
int32_t odp_stash_get(odp_stash_t st, void *obj, int32_t num)
{
- return stash_get(st, obj, num, 0);
+ return stash_get(st, obj, num, false);
}
int32_t odp_stash_get_batch(odp_stash_t st, void *obj, int32_t num)
{
- return stash_get(st, obj, num, 1);
+ return stash_get(st, obj, num, true);
}
int32_t odp_stash_get_u32(odp_stash_t st, uint32_t val[], int32_t num)
{
- stash_t *stash = (stash_t *)(uintptr_t)st;
+ stash_t *stash = stash_entry(st);
if (odp_unlikely(st == ODP_STASH_INVALID))
return -1;
_ODP_ASSERT(stash->obj_size == sizeof(uint32_t));
- return ring_u32_deq_multi(&stash->ring_u32.hdr, stash->ring_mask, val,
- num);
+ return stash->ring_fn.u32.deq_multi(stash, val, num);
}
int32_t odp_stash_get_u32_batch(odp_stash_t st, uint32_t val[], int32_t num)
{
- stash_t *stash = (stash_t *)(uintptr_t)st;
+ stash_t *stash = stash_entry(st);
if (odp_unlikely(st == ODP_STASH_INVALID))
return -1;
_ODP_ASSERT(stash->obj_size == sizeof(uint32_t));
- return ring_u32_deq_batch(&stash->ring_u32.hdr, stash->ring_mask, val, num);
+ return stash->ring_fn.u32.deq_batch(stash, val, num);
}
int32_t odp_stash_get_u64(odp_stash_t st, uint64_t val[], int32_t num)
{
- stash_t *stash = (stash_t *)(uintptr_t)st;
+ stash_t *stash = stash_entry(st);
if (odp_unlikely(st == ODP_STASH_INVALID))
return -1;
_ODP_ASSERT(stash->obj_size == sizeof(uint64_t));
- return ring_u64_deq_multi(&stash->ring_u64.hdr, stash->ring_mask, val,
- num);
+ return stash->ring_fn.u64.deq_multi(stash, val, num);
}
int32_t odp_stash_get_u64_batch(odp_stash_t st, uint64_t val[], int32_t num)
{
- stash_t *stash = (stash_t *)(uintptr_t)st;
+ stash_t *stash = stash_entry(st);
if (odp_unlikely(st == ODP_STASH_INVALID))
return -1;
_ODP_ASSERT(stash->obj_size == sizeof(uint64_t));
- return ring_u64_deq_batch(&stash->ring_u64.hdr, stash->ring_mask, val, num);
+ return stash->ring_fn.u64.deq_batch(stash, val, num);
}
int32_t odp_stash_get_ptr(odp_stash_t st, uintptr_t ptr[], int32_t num)
{
- stash_t *stash = (stash_t *)(uintptr_t)st;
+ stash_t *stash = stash_entry(st);
if (odp_unlikely(st == ODP_STASH_INVALID))
return -1;
@@ -606,19 +853,17 @@ int32_t odp_stash_get_ptr(odp_stash_t st, uintptr_t ptr[], int32_t num)
_ODP_ASSERT(stash->obj_size == sizeof(uintptr_t));
if (sizeof(uintptr_t) == sizeof(uint32_t))
- return ring_u32_deq_multi(&stash->ring_u32.hdr,
- stash->ring_mask,
- (uint32_t *)(uintptr_t)ptr, num);
- else if (sizeof(uintptr_t) == sizeof(uint64_t))
- return ring_u64_deq_multi(&stash->ring_u64.hdr,
- stash->ring_mask,
- (uint64_t *)(uintptr_t)ptr, num);
+ return stash->ring_fn.u32.deq_multi(stash, (uint32_t *)(uintptr_t)ptr, num);
+
+ if (sizeof(uintptr_t) == sizeof(uint64_t))
+ return stash->ring_fn.u64.deq_multi(stash, (uint64_t *)(uintptr_t)ptr, num);
+
return -1;
}
int32_t odp_stash_get_ptr_batch(odp_stash_t st, uintptr_t ptr[], int32_t num)
{
- stash_t *stash = (stash_t *)(uintptr_t)st;
+ stash_t *stash = stash_entry(st);
if (odp_unlikely(st == ODP_STASH_INVALID))
return -1;
@@ -626,11 +871,11 @@ int32_t odp_stash_get_ptr_batch(odp_stash_t st, uintptr_t ptr[], int32_t num)
_ODP_ASSERT(stash->obj_size == sizeof(uintptr_t));
if (sizeof(uintptr_t) == sizeof(uint32_t))
- return ring_u32_deq_batch(&stash->ring_u32.hdr, stash->ring_mask,
- (uint32_t *)(uintptr_t)ptr, num);
- else if (sizeof(uintptr_t) == sizeof(uint64_t))
- return ring_u64_deq_batch(&stash->ring_u64.hdr, stash->ring_mask,
- (uint64_t *)(uintptr_t)ptr, num);
+ return stash->ring_fn.u32.deq_batch(stash, (uint32_t *)(uintptr_t)ptr, num);
+
+ if (sizeof(uintptr_t) == sizeof(uint64_t))
+ return stash->ring_fn.u64.deq_batch(stash, (uint64_t *)(uintptr_t)ptr, num);
+
return -1;
}
@@ -644,23 +889,17 @@ int odp_stash_flush_cache(odp_stash_t st)
static uint32_t stash_obj_count(stash_t *stash)
{
- ring_u32_t *ring_u32;
uint32_t obj_size = stash->obj_size;
- if (obj_size == sizeof(uint64_t)) {
- ring_u64_t *ring_u64 = &stash->ring_u64.hdr;
-
- return ring_u64_len(ring_u64);
- }
-
- ring_u32 = &stash->ring_u32.hdr;
+ if (obj_size == sizeof(uint64_t))
+ return stash->ring_fn.u64.len(stash);
- return ring_u32_len(ring_u32);
+ return stash->ring_fn.u32.len(stash);
}
void odp_stash_print(odp_stash_t st)
{
- stash_t *stash = (stash_t *)(uintptr_t)st;
+ stash_t *stash = stash_entry(st);
if (st == ODP_STASH_INVALID) {
_ODP_ERR("Bad stash handle\n");
@@ -674,13 +913,13 @@ void odp_stash_print(odp_stash_t st)
_ODP_PRINT(" index %i\n", stash->index);
_ODP_PRINT(" obj size %u\n", stash->obj_size);
_ODP_PRINT(" obj count %u\n", stash_obj_count(stash));
- _ODP_PRINT(" ring size %u\n", stash->ring_mask + 1);
+ _ODP_PRINT(" ring size %u\n", stash->ring_size);
_ODP_PRINT("\n");
}
int odp_stash_stats(odp_stash_t st, odp_stash_stats_t *stats)
{
- stash_t *stash = (stash_t *)(uintptr_t)st;
+ stash_t *stash = stash_entry(st);
if (st == ODP_STASH_INVALID) {
_ODP_ERR("Bad stash handle\n");
diff --git a/platform/linux-generic/odp_timer.c b/platform/linux-generic/odp_timer.c
index 05660eb7a..8597c3f4e 100644
--- a/platform/linux-generic/odp_timer.c
+++ b/platform/linux-generic/odp_timer.c
@@ -17,6 +17,7 @@
#include <odp/api/atomic.h>
#include <odp/api/cpu.h>
#include <odp/api/debug.h>
+#include <odp/api/deprecated.h>
#include <odp/api/event.h>
#include <odp/api/hints.h>
#include <odp/api/pool.h>
@@ -1494,9 +1495,7 @@ odp_event_t odp_timer_free(odp_timer_t hdl)
return timer_free(tp, idx);
}
-int odp_timer_set_abs(odp_timer_t hdl,
- uint64_t abs_tck,
- odp_event_t *tmo_ev)
+int ODP_DEPRECATE(odp_timer_set_abs)(odp_timer_t hdl, uint64_t abs_tck, odp_event_t *tmo_ev)
{
timer_pool_t *tp = handle_to_tp(hdl);
uint64_t cur_tick = current_nsec(tp);
@@ -1512,9 +1511,7 @@ int odp_timer_set_abs(odp_timer_t hdl,
return ODP_TIMER_FAIL;
}
-int odp_timer_set_rel(odp_timer_t hdl,
- uint64_t rel_tck,
- odp_event_t *tmo_ev)
+int ODP_DEPRECATE(odp_timer_set_rel)(odp_timer_t hdl, uint64_t rel_tck, odp_event_t *tmo_ev)
{
timer_pool_t *tp = handle_to_tp(hdl);
uint64_t cur_tick = current_nsec(tp);
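
With odp_timer_set_abs() and odp_timer_set_rel() moved behind ODP_DEPRECATE(), new code is expected to use the odp_timer_start() API instead. A rough, non-authoritative equivalent of the absolute-tick case (timer, abs_tck and tmo_ev are placeholders for the caller's timer handle, target tick and timeout event):

	odp_timer_start_t start_param = {
		.tick_type = ODP_TIMER_TICK_ABS,  /* ODP_TIMER_TICK_REL for relative ticks */
		.tick      = abs_tck,
		.tmo_ev    = tmo_ev
	};

	if (odp_timer_start(timer, &start_param) != ODP_TIMER_SUCCESS)
		/* too near, too far, or other failure */;
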
diff --git a/platform/linux-generic/odp_traffic_mngr.c b/platform/linux-generic/odp_traffic_mngr.c
index 284a6f7f5..c8d8bc5c0 100644
--- a/platform/linux-generic/odp_traffic_mngr.c
+++ b/platform/linux-generic/odp_traffic_mngr.c
@@ -2665,12 +2665,6 @@ static int tm_capabilities(odp_tm_capabilities_t capabilities[],
return 1;
}
-int ODP_DEPRECATE(odp_tm_capabilities)(odp_tm_capabilities_t capabilities[],
- uint32_t capabilities_size)
-{
- return tm_capabilities(capabilities, capabilities_size);
-}
-
int odp_tm_egress_capabilities(odp_tm_capabilities_t *capabilities,
const odp_tm_egress_t *egress)
{
diff --git a/platform/linux-generic/pktio/dpdk.c b/platform/linux-generic/pktio/dpdk.c
index 7e9db90ce..fa40d1bde 100644
--- a/platform/linux-generic/pktio/dpdk.c
+++ b/platform/linux-generic/pktio/dpdk.c
@@ -49,6 +49,7 @@
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_version.h>
+#include <rte_vfio.h>
/* NUMA is not supported on all platforms */
#ifdef _ODP_HAVE_NUMA_LIBRARY
@@ -438,6 +439,15 @@ static struct rte_mempool *mbuf_pool_create(const char *name,
goto fail;
}
+ /* Map pages for DMA access to enable VFIO usage */
+ for (uint64_t i = 0; i < pool_entry->shm_size; i += page_size) {
+ addr = pool_entry->base_addr + i;
+
+ rte_vfio_container_dma_map(RTE_VFIO_DEFAULT_CONTAINER_FD,
+ (uint64_t)(uintptr_t)addr,
+ rte_mem_virt2iova(addr), page_size);
+ }
+
rte_mempool_obj_iter(mp, pktmbuf_init, NULL);
return mp;
diff --git a/platform/linux-generic/pktio/loop.c b/platform/linux-generic/pktio/loop.c
index b30535f22..a69daf56b 100644
--- a/platform/linux-generic/pktio/loop.c
+++ b/platform/linux-generic/pktio/loop.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2013-2018, Linaro Limited
- * Copyright (c) 2013-2022, Nokia Solutions and Networks
+ * Copyright (c) 2013-2023, Nokia Solutions and Networks
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -7,11 +7,11 @@
#include <odp/api/debug.h>
#include <odp/api/event.h>
+#include <odp/api/hash.h>
#include <odp/api/hints.h>
#include <odp/api/packet.h>
#include <odp/api/packet_io.h>
#include <odp/api/queue.h>
-#include <odp/api/ticketlock.h>
#include <odp/api/time.h>
#include <odp/api/plat/byteorder_inlines.h>
@@ -39,6 +39,9 @@
#include <stdint.h>
#include <stdlib.h>
+#define MAX_QUEUES (ODP_PKTIN_MAX_QUEUES > ODP_PKTOUT_MAX_QUEUES ? \
+ ODP_PKTIN_MAX_QUEUES : ODP_PKTOUT_MAX_QUEUES)
+
#define MAX_LOOP 16
#define LOOP_MTU_MIN 68
@@ -47,12 +50,40 @@
#define LOOP_MAX_QUEUE_SIZE 1024
typedef struct {
- odp_queue_t loopq; /**< loopback queue for "loop" device */
- uint32_t pktin_queue_size; /**< input queue size */
- uint32_t pktout_queue_size; /**< output queue size */
- uint16_t mtu; /**< link MTU */
- uint8_t idx; /**< index of "loop" device */
- uint8_t queue_create; /**< create or re-create queue during start */
+ odp_atomic_u64_t in_octets;
+ odp_atomic_u64_t in_packets;
+ odp_atomic_u64_t in_discards;
+ odp_atomic_u64_t in_errors;
+ odp_atomic_u64_t out_octets;
+ odp_atomic_u64_t out_packets;
+} stats_t;
+
+typedef struct ODP_ALIGNED_CACHE {
+ /* queue handle as the "wire" */
+ odp_queue_t queue;
+ /* queue specific statistics */
+ stats_t stats;
+ /* config input queue size */
+ uint32_t in_size;
+ /* config output queue size */
+ uint32_t out_size;
+} loop_queue_t;
+
+typedef struct {
+ /* loopback entries for "loop" device */
+ loop_queue_t loopqs[MAX_QUEUES];
+ /* hash config */
+ odp_pktin_hash_proto_t hash;
+ /* config queue count */
+ uint32_t num_conf_qs;
+	/* actual number of queues */
+ uint32_t num_qs;
+ /* link MTU */
+ uint16_t mtu;
+ /* index of "loop" device */
+ uint8_t idx;
+ /* create or re-create queue during start */
+ uint8_t queue_create;
} pkt_loop_t;
ODP_STATIC_ASSERT(PKTIO_PRIVATE_SIZE >= sizeof(pkt_loop_t),
@@ -66,7 +97,6 @@ static inline pkt_loop_t *pkt_priv(pktio_entry_t *pktio_entry)
/* MAC address for the "loop" interface */
static const uint8_t pktio_loop_mac[] = {0x02, 0xe9, 0x34, 0x80, 0x73, 0x01};
-static int loopback_stats_reset(pktio_entry_t *pktio_entry);
static int loopback_init_capability(pktio_entry_t *pktio_entry);
static int loopback_open(odp_pktio_t id ODP_UNUSED, pktio_entry_t *pktio_entry,
@@ -88,14 +118,20 @@ static int loopback_open(odp_pktio_t id ODP_UNUSED, pktio_entry_t *pktio_entry,
}
memset(pkt_loop, 0, sizeof(pkt_loop_t));
- pkt_loop->idx = idx;
pkt_loop->mtu = LOOP_MTU_MAX;
- pkt_loop->loopq = ODP_QUEUE_INVALID;
+ pkt_loop->idx = idx;
pkt_loop->queue_create = 1;
-
- loopback_stats_reset(pktio_entry);
loopback_init_capability(pktio_entry);
+ for (uint32_t i = 0; i < MAX_QUEUES; i++) {
+ odp_atomic_init_u64(&pkt_loop->loopqs[i].stats.in_octets, 0);
+ odp_atomic_init_u64(&pkt_loop->loopqs[i].stats.in_packets, 0);
+ odp_atomic_init_u64(&pkt_loop->loopqs[i].stats.in_discards, 0);
+ odp_atomic_init_u64(&pkt_loop->loopqs[i].stats.in_errors, 0);
+ odp_atomic_init_u64(&pkt_loop->loopqs[i].stats.out_octets, 0);
+ odp_atomic_init_u64(&pkt_loop->loopqs[i].stats.out_packets, 0);
+ }
+
return 0;
}
@@ -117,6 +153,18 @@ static int loopback_queue_destroy(odp_queue_t queue)
return 0;
}
+static int loopback_queues_destroy(loop_queue_t *queues, uint32_t num_queues)
+{
+ int ret = 0;
+
+ for (uint32_t i = 0; i < num_queues; i++) {
+ if (loopback_queue_destroy(queues[i].queue))
+ ret = -1;
+ }
+
+ return ret;
+}
+
static int loopback_start(pktio_entry_t *pktio_entry)
{
pkt_loop_t *pkt_loop = pkt_priv(pktio_entry);
@@ -127,23 +175,28 @@ static int loopback_start(pktio_entry_t *pktio_entry)
if (!pkt_loop->queue_create)
return 0;
- /* Destroy old queue */
- if (pkt_loop->loopq != ODP_QUEUE_INVALID && loopback_queue_destroy(pkt_loop->loopq))
+ /* Destroy old queues */
+ if (loopback_queues_destroy(pkt_loop->loopqs, pkt_loop->num_qs))
return -1;
- odp_queue_param_init(&queue_param);
- queue_param.size = pkt_loop->pktin_queue_size > pkt_loop->pktout_queue_size ?
- pkt_loop->pktin_queue_size : pkt_loop->pktout_queue_size;
+ pkt_loop->num_qs = 0;
- snprintf(queue_name, sizeof(queue_name), "_odp_pktio_loopq-%" PRIu64 "",
- odp_pktio_to_u64(pktio_entry->handle));
+ for (uint32_t i = 0; i < pkt_loop->num_conf_qs; i++) {
+ odp_queue_param_init(&queue_param);
+ queue_param.size = _ODP_MAX(pkt_loop->loopqs[i].in_size,
+ pkt_loop->loopqs[i].out_size);
+ snprintf(queue_name, sizeof(queue_name), "_odp_pktio_loopq-%" PRIu64 "-%u",
+ odp_pktio_to_u64(pktio_entry->handle), i);
+ pkt_loop->loopqs[i].queue = odp_queue_create(queue_name, &queue_param);
- pkt_loop->loopq = odp_queue_create(queue_name, &queue_param);
- if (pkt_loop->loopq == ODP_QUEUE_INVALID) {
- _ODP_ERR("Creating loopback pktio queue failed\n");
- return -1;
+ if (pkt_loop->loopqs[i].queue == ODP_QUEUE_INVALID) {
+ _ODP_ERR("Creating loopback pktio queue %s failed\n", queue_name);
+ (void)loopback_queues_destroy(pkt_loop->loopqs, i);
+ return -1;
+ }
}
- pkt_loop->queue_create = 0;
+
+ pkt_loop->num_qs = pkt_loop->num_conf_qs;
return 0;
}
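_ODP_MAX above is an internal helper macro that is not shown in this diff; it is assumed to be the conventional two-argument maximum, roughly:

/* Assumed definition; the real macro lives in an internal ODP header. */
#define _ODP_MAX(a, b) (((a) > (b)) ? (a) : (b))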
@@ -153,9 +206,17 @@ static int loopback_pktin_queue_config(pktio_entry_t *pktio_entry,
{
pkt_loop_t *pkt_loop = pkt_priv(pktio_entry);
+ pkt_loop->num_conf_qs = param->num_queues;
+ pkt_loop->queue_create = 1;
+ pkt_loop->hash.all_bits = param->hash_enable ? param->hash_proto.all_bits : 0;
+
if (pktio_entry->param.in_mode == ODP_PKTIN_MODE_DIRECT) {
- pkt_loop->pktin_queue_size = param->queue_size[0];
- pkt_loop->queue_create = 1;
+ for (uint32_t i = 0; i < MAX_QUEUES; i++) {
+ if (i < pkt_loop->num_conf_qs)
+ pkt_loop->loopqs[i].in_size = param->queue_size[i];
+ else
+ pkt_loop->loopqs[i].in_size = 0;
+ }
}
return 0;
@@ -166,9 +227,15 @@ static int loopback_pktout_queue_config(pktio_entry_t *pktio_entry,
{
pkt_loop_t *pkt_loop = pkt_priv(pktio_entry);
- pkt_loop->pktout_queue_size = param->queue_size[0];
pkt_loop->queue_create = 1;
+ for (uint32_t i = 0; i < MAX_QUEUES; i++) {
+ if (i < param->num_queues)
+ pkt_loop->loopqs[i].out_size = param->queue_size[i];
+ else
+ pkt_loop->loopqs[i].out_size = 0;
+ }
+
return 0;
}
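For context, a minimal application-side sketch (helper name hypothetical) of the configuration that exercises these two hooks: several pktin queues with hash-based distribution plus a matching set of pktout queues.

#include <odp_api.h>

/* Hypothetical helper: configure 'num' input/output queues on an already
 * opened "loop" pktio. Returns 0 on success, -1 on failure. */
static int config_loop_queues(odp_pktio_t pktio, uint32_t num)
{
	odp_pktin_queue_param_t in_param;
	odp_pktout_queue_param_t out_param;

	odp_pktin_queue_param_init(&in_param);
	in_param.num_queues = num;
	in_param.hash_enable = 1;
	in_param.hash_proto.proto.ipv4_udp = 1;

	odp_pktout_queue_param_init(&out_param);
	out_param.num_queues = num;

	/* These calls end up in loopback_pktin_queue_config() and
	 * loopback_pktout_queue_config() above. */
	if (odp_pktin_queue_config(pktio, &in_param))
		return -1;
	if (odp_pktout_queue_config(pktio, &out_param))
		return -1;

	return 0;
}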
@@ -176,18 +243,16 @@ static int loopback_close(pktio_entry_t *pktio_entry)
{
pkt_loop_t *pkt_loop = pkt_priv(pktio_entry);
- if (pkt_loop->loopq != ODP_QUEUE_INVALID)
- return loopback_queue_destroy(pkt_loop->loopq);
-
- return 0;
+ return loopback_queues_destroy(pkt_loop->loopqs, pkt_loop->num_qs);
}
-static int loopback_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
- odp_packet_t pkts[], int num)
+static int loopback_recv(pktio_entry_t *pktio_entry, int index, odp_packet_t pkts[], int num)
{
int nbr, i;
+ loop_queue_t *entry = &pkt_priv(pktio_entry)->loopqs[index];
+ odp_queue_t queue = entry->queue;
+ stats_t *stats = &entry->stats;
_odp_event_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
- odp_queue_t queue;
odp_packet_hdr_t *pkt_hdr;
odp_packet_t pkt;
odp_time_t ts_val;
@@ -201,9 +266,6 @@ static int loopback_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
if (odp_unlikely(num > QUEUE_MULTI_MAX))
num = QUEUE_MULTI_MAX;
- odp_ticketlock_lock(&pktio_entry->rxl);
-
- queue = pkt_priv(pktio_entry)->loopq;
nbr = odp_queue_deq_multi(queue, (odp_event_t *)hdr_tbl, num);
if (opt.bit.ts_all || opt.bit.ts_ptp) {
@@ -239,7 +301,7 @@ static int loopback_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
ret = _odp_packet_parse_common(pkt_hdr, pkt_addr, pkt_len,
seg_len, layer, opt);
if (ret)
- odp_atomic_inc_u64(&pktio_entry->stats_extra.in_errors);
+ odp_atomic_inc_u64(&stats->in_errors);
if (ret < 0) {
odp_packet_free(pkt);
@@ -252,7 +314,7 @@ static int loopback_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
ret = _odp_cls_classify_packet(pktio_entry, pkt_addr,
&new_pool, pkt_hdr);
if (ret < 0)
- odp_atomic_inc_u64(&pktio_entry->stats_extra.in_discards);
+ odp_atomic_inc_u64(&stats->in_discards);
if (ret) {
odp_packet_free(pkt);
@@ -262,7 +324,7 @@ static int loopback_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
if (odp_unlikely(_odp_pktio_packet_to_pool(
&pkt, &pkt_hdr, new_pool))) {
odp_packet_free(pkt);
- odp_atomic_inc_u64(&pktio_entry->stats_extra.in_discards);
+ odp_atomic_inc_u64(&stats->in_discards);
continue;
}
}
@@ -285,10 +347,8 @@ static int loopback_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
pkts[num_rx++] = pkt;
}
- pktio_entry->stats.in_octets += octets;
- pktio_entry->stats.in_packets += packets;
-
- odp_ticketlock_unlock(&pktio_entry->rxl);
+ odp_atomic_add_u64(&stats->in_octets, octets);
+ odp_atomic_add_u64(&stats->in_packets, packets);
return num_rx;
}
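With a dedicated queue and atomic counters per RX queue, the pktio-wide rxl ticketlock is no longer needed here; concurrency is handled by odp_queue_deq_multi() and the atomics. On the application side the queue index comes from the pktin queue being polled, roughly as in this sketch (direct mode assumed, function name hypothetical):

#include <odp_api.h>

/* Sketch: poll each direct-mode input queue; the queue's index selects
 * the corresponding loopqs[] entry inside loopback_recv(). */
static void poll_loop_queues(odp_pktio_t pktio)
{
	odp_pktin_queue_t pktin[ODP_PKTIN_MAX_QUEUES];
	odp_packet_t pkts[32];
	int num_q = odp_pktin_queue(pktio, pktin, ODP_PKTIN_MAX_QUEUES);

	for (int q = 0; q < num_q; q++) {
		int n = odp_pktin_recv(pktin[q], pkts, 32);

		if (n > 0)
			odp_packet_free_multi(pkts, n); /* or process them */
	}
}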
@@ -381,22 +441,82 @@ static inline void loopback_fix_checksums(odp_packet_t pkt,
_odp_packet_sctp_chksum_insert(pkt);
}
-static int loopback_send(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
- const odp_packet_t pkt_tbl[], int num)
+static inline uint8_t *add_data(uint8_t *data, void *src, uint32_t len)
+{
+ return (uint8_t *)memcpy(data, src, len) + len;
+}
+
+static inline odp_queue_t get_dest_queue(const pkt_loop_t *pkt_loop, odp_packet_t pkt, int index)
+{
+ const odp_pktin_hash_proto_t *hash = &pkt_loop->hash;
+ _odp_udphdr_t udp;
+ _odp_tcphdr_t tcp;
+ _odp_ipv4hdr_t ipv4;
+ _odp_ipv6hdr_t ipv6;
+ uint32_t off;
+ /* Space for UDP/TCP source and destination ports and IPv4/IPv6 source and destination
+ * addresses. */
+ uint8_t data[2 * sizeof(uint16_t) + 2 * 4 * sizeof(uint32_t)];
+ uint8_t *head = data;
+
+ if (hash->all_bits == 0)
+ return pkt_loop->loopqs[index % pkt_loop->num_qs].queue;
+
+ memset(data, 0, sizeof(data));
+ off = odp_packet_l4_offset(pkt);
+
+ if (off != ODP_PACKET_OFFSET_INVALID) {
+ if ((hash->proto.ipv4_udp || hash->proto.ipv6_udp) && odp_packet_has_udp(pkt)) {
+ if (odp_packet_copy_to_mem(pkt, off, _ODP_UDPHDR_LEN, &udp) == 0) {
+ head = add_data(head, &udp.src_port, sizeof(udp.src_port));
+ head = add_data(head, &udp.dst_port, sizeof(udp.dst_port));
+ }
+ } else if ((hash->proto.ipv4_tcp || hash->proto.ipv6_tcp) &&
+ odp_packet_has_tcp(pkt)) {
+ if (odp_packet_copy_to_mem(pkt, off, _ODP_TCPHDR_LEN, &tcp) == 0) {
+ head = add_data(head, &tcp.src_port, sizeof(tcp.src_port));
+ head = add_data(head, &tcp.dst_port, sizeof(tcp.dst_port));
+ }
+ }
+ }
+
+ off = odp_packet_l3_offset(pkt);
+
+ if (off != ODP_PACKET_OFFSET_INVALID) {
+ if (hash->proto.ipv4 && odp_packet_has_ipv4(pkt)) {
+ if (odp_packet_copy_to_mem(pkt, off, _ODP_IPV4HDR_LEN, &ipv4) == 0) {
+ head = add_data(head, &ipv4.src_addr, sizeof(ipv4.src_addr));
+ head = add_data(head, &ipv4.dst_addr, sizeof(ipv4.dst_addr));
+ }
+ } else if (hash->proto.ipv6 && odp_packet_has_ipv6(pkt)) {
+ if (odp_packet_copy_to_mem(pkt, off, _ODP_IPV6HDR_LEN, &ipv6) == 0) {
+ head = add_data(head, &ipv6.src_addr, sizeof(ipv6.src_addr));
+ head = add_data(head, &ipv6.dst_addr, sizeof(ipv6.dst_addr));
+ }
+ }
+ }
+
+ return pkt_loop->loopqs[odp_hash_crc32c(data, head - data, 0) % pkt_loop->num_qs].queue;
+}
+
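get_dest_queue() implements a simple RSS-style spread: the selected header fields are packed into a flat byte buffer, hashed with CRC32-C, and the hash modulo the queue count picks the destination loop queue. A standalone sketch of the same idea for an IPv4/UDP 4-tuple (function name and field values hypothetical):

#include <string.h>
#include <odp_api.h>

/* Sketch: pick a queue index for a 4-tuple the same way get_dest_queue()
 * does, i.e. CRC32-C over the packed fields modulo the queue count. */
static uint32_t pick_queue(uint16_t src_port, uint16_t dst_port,
			   uint32_t src_ip, uint32_t dst_ip, uint32_t num_qs)
{
	uint8_t data[2 * sizeof(uint16_t) + 2 * sizeof(uint32_t)];
	uint8_t *head = data;

	memcpy(head, &src_port, sizeof(src_port));
	head += sizeof(src_port);
	memcpy(head, &dst_port, sizeof(dst_port));
	head += sizeof(dst_port);
	memcpy(head, &src_ip, sizeof(src_ip));
	head += sizeof(src_ip);
	memcpy(head, &dst_ip, sizeof(dst_ip));
	head += sizeof(dst_ip);

	return odp_hash_crc32c(data, head - data, 0) % num_qs;
}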
+static int loopback_send(pktio_entry_t *pktio_entry, int index, const odp_packet_t pkt_tbl[],
+ int num)
{
pkt_loop_t *pkt_loop = pkt_priv(pktio_entry);
- _odp_event_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
odp_queue_t queue;
+ stats_t *stats;
int i;
int ret;
int nb_tx = 0;
int tx_ts_idx = 0;
uint8_t tx_ts_enabled = _odp_pktio_tx_ts_enabled(pktio_entry);
- uint32_t bytes = 0;
- uint32_t out_octets_tbl[num];
odp_pktout_config_opt_t *pktout_cfg = &pktio_entry->config.pktout;
- odp_pktout_config_opt_t *pktout_capa =
- &pktio_entry->capa.config.pktout;
+ odp_pktout_config_opt_t *pktout_capa = &pktio_entry->capa.config.pktout;
+
+ if (pkt_loop->num_qs == 0)
+ return 0;
+
+ stats = &pkt_loop->loopqs[index].stats;
if (odp_unlikely(num > QUEUE_MULTI_MAX))
num = QUEUE_MULTI_MAX;
@@ -411,44 +531,34 @@ static int loopback_send(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
}
break;
}
- hdr_tbl[i] = packet_to_event_hdr(pkt_tbl[i]);
- bytes += pkt_len;
- /* Store cumulative byte counts to update 'stats.out_octets'
- * correctly in case enq_multi() fails to enqueue all packets.
- */
- out_octets_tbl[i] = bytes;
- nb_tx++;
if (tx_ts_enabled && tx_ts_idx == 0) {
if (odp_unlikely(packet_hdr(pkt_tbl[i])->p.flags.ts_set))
tx_ts_idx = i + 1;
}
- }
- for (i = 0; i < nb_tx; ++i) {
packet_subtype_set(pkt_tbl[i], ODP_EVENT_PACKET_BASIC);
loopback_fix_checksums(pkt_tbl[i], pktout_cfg, pktout_capa);
- }
+ queue = get_dest_queue(pkt_loop, pkt_tbl[i], index);
+ ret = odp_queue_enq(queue, odp_packet_to_event(pkt_tbl[i]));
- odp_ticketlock_lock(&pktio_entry->txl);
+ if (ret < 0) {
+ _ODP_DBG("queue enqueue failed %i to queue: %" PRIu64 "\n", ret,
+ odp_queue_to_u64(queue));
+ break;
+ }
- queue = pkt_priv(pktio_entry)->loopq;
- ret = odp_queue_enq_multi(queue, (odp_event_t *)hdr_tbl, nb_tx);
+ nb_tx++;
+ odp_atomic_inc_u64(&stats->out_packets);
+ odp_atomic_add_u64(&stats->out_octets, pkt_len);
+ }
- if (ret > 0) {
- if (odp_unlikely(tx_ts_idx) && ret >= tx_ts_idx)
+ if (nb_tx > 0) {
+ if (odp_unlikely(tx_ts_idx) && nb_tx >= tx_ts_idx)
_odp_pktio_tx_ts_set(pktio_entry);
-
- pktio_entry->stats.out_packets += ret;
- pktio_entry->stats.out_octets += out_octets_tbl[ret - 1];
- } else {
- _ODP_DBG("queue enqueue failed %i\n", ret);
- ret = -1;
}
- odp_ticketlock_unlock(&pktio_entry->txl);
-
- return ret;
+ return nb_tx;
}
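loopback_send() now returns the number of packets actually enqueued rather than the raw odp_queue_enq_multi() result, so a partial send is reported the same way as in other pktios. A minimal transmit sketch from the application side (direct mode, helper name hypothetical):

#include <odp_api.h>

/* Sketch: send a burst on output queue 0 of a started loop interface.
 * A return value < num means only the first 'sent' packets were consumed. */
static int send_burst(odp_pktio_t pktio, odp_packet_t pkts[], int num)
{
	odp_pktout_queue_t pktout;

	if (odp_pktout_queue(pktio, &pktout, 1) < 1)
		return -1;

	int sent = odp_pktout_send(pktout, pkts, num);

	if (sent >= 0 && sent < num) {
		/* Caller still owns the un-sent tail; drop it here. */
		odp_packet_free_multi(&pkts[sent], num - sent);
	}

	return sent;
}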
static uint32_t loopback_mtu_get(pktio_entry_t *pktio_entry)
@@ -508,8 +618,8 @@ static int loopback_init_capability(pktio_entry_t *pktio_entry)
memset(capa, 0, sizeof(odp_pktio_capability_t));
- capa->max_input_queues = 1;
- capa->max_output_queues = 1;
+ capa->max_input_queues = ODP_PKTIN_MAX_QUEUES;
+ capa->max_output_queues = ODP_PKTOUT_MAX_QUEUES;
capa->set_op.op.promisc_mode = 0;
capa->set_op.op.maxlen = 1;
@@ -566,6 +676,7 @@ static int loopback_init_capability(pktio_entry_t *pktio_entry)
capa->stats.pktin_queue.counter.octets = 1;
capa->stats.pktin_queue.counter.packets = 1;
capa->stats.pktin_queue.counter.errors = 1;
+ capa->stats.pktin_queue.counter.discards = 1;
capa->stats.pktout_queue.counter.octets = 1;
capa->stats.pktout_queue.counter.packets = 1;
return 0;
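Since the interface now advertises ODP_PKTIN_MAX_QUEUES / ODP_PKTOUT_MAX_QUEUES, applications should size their queue request from the reported capability instead of assuming a single queue; a short sketch ('pktio' assumed to be an opened loop interface):

odp_pktio_capability_t capa;
uint32_t num_rx = 4; /* hypothetical desired queue count */

if (odp_pktio_capability(pktio, &capa) == 0 && num_rx > capa.max_input_queues)
	num_rx = capa.max_input_queues;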
@@ -582,37 +693,67 @@ static int loopback_promisc_mode_get(pktio_entry_t *pktio_entry ODP_UNUSED)
return 1;
}
-static int loopback_stats(pktio_entry_t *pktio_entry,
- odp_pktio_stats_t *stats)
+static int loopback_stats(pktio_entry_t *pktio_entry, odp_pktio_stats_t *stats)
{
- memcpy(stats, &pktio_entry->stats, sizeof(odp_pktio_stats_t));
+ pkt_loop_t *pkt_loop = pkt_priv(pktio_entry);
+
+ memset(stats, 0, sizeof(odp_pktio_stats_t));
+
+ for (uint32_t i = 0; i < MAX_QUEUES; i++) {
+ stats_t *qs = &pkt_loop->loopqs[i].stats;
+
+ stats->in_octets += odp_atomic_load_u64(&qs->in_octets);
+ stats->in_packets += odp_atomic_load_u64(&qs->in_packets);
+ stats->in_discards += odp_atomic_load_u64(&qs->in_discards);
+ stats->in_errors += odp_atomic_load_u64(&qs->in_errors);
+ stats->out_octets += odp_atomic_load_u64(&qs->out_octets);
+ stats->out_packets += odp_atomic_load_u64(&qs->out_packets);
+ }
+
return 0;
}
static int loopback_stats_reset(pktio_entry_t *pktio_entry)
{
- memset(&pktio_entry->stats, 0, sizeof(odp_pktio_stats_t));
+ pkt_loop_t *pkt_loop = pkt_priv(pktio_entry);
+
+ for (uint32_t i = 0; i < MAX_QUEUES; i++) {
+ stats_t *qs = &pkt_loop->loopqs[i].stats;
+
+ odp_atomic_store_u64(&qs->in_octets, 0);
+ odp_atomic_store_u64(&qs->in_packets, 0);
+ odp_atomic_store_u64(&qs->in_discards, 0);
+ odp_atomic_store_u64(&qs->in_errors, 0);
+ odp_atomic_store_u64(&qs->out_octets, 0);
+ odp_atomic_store_u64(&qs->out_packets, 0);
+ }
+
return 0;
}
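Interface-level counters are now the sum of the per-queue atomics; querying and resetting them from an application is unchanged. A minimal sketch (helper name hypothetical):

#include <inttypes.h>
#include <stdio.h>
#include <odp_api.h>

static void dump_and_reset(odp_pktio_t pktio)
{
	odp_pktio_stats_t stats;

	if (odp_pktio_stats(pktio, &stats) == 0)
		printf("in %" PRIu64 " pkts, out %" PRIu64 " pkts\n",
		       stats.in_packets, stats.out_packets);

	/* Zeroes every loopqs[i].stats counter via loopback_stats_reset(). */
	odp_pktio_stats_reset(pktio);
}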
-static int loopback_pktin_stats(pktio_entry_t *pktio_entry,
- uint32_t index ODP_UNUSED,
+static int loopback_pktin_stats(pktio_entry_t *pktio_entry, uint32_t index,
odp_pktin_queue_stats_t *pktin_stats)
{
+ stats_t *qs = &pkt_priv(pktio_entry)->loopqs[index].stats;
+
memset(pktin_stats, 0, sizeof(odp_pktin_queue_stats_t));
- pktin_stats->octets = pktio_entry->stats.in_octets;
- pktin_stats->packets = pktio_entry->stats.in_packets;
- pktin_stats->errors = pktio_entry->stats.in_errors;
+ pktin_stats->octets = odp_atomic_load_u64(&qs->in_octets);
+ pktin_stats->packets = odp_atomic_load_u64(&qs->in_packets);
+ pktin_stats->discards = odp_atomic_load_u64(&qs->in_discards);
+ pktin_stats->errors = odp_atomic_load_u64(&qs->in_errors);
+
return 0;
}
-static int loopback_pktout_stats(pktio_entry_t *pktio_entry,
- uint32_t index ODP_UNUSED,
+static int loopback_pktout_stats(pktio_entry_t *pktio_entry, uint32_t index,
odp_pktout_queue_stats_t *pktout_stats)
{
+ stats_t *qs = &pkt_priv(pktio_entry)->loopqs[index].stats;
+
memset(pktout_stats, 0, sizeof(odp_pktout_queue_stats_t));
- pktout_stats->octets = pktio_entry->stats.out_octets;
- pktout_stats->packets = pktio_entry->stats.out_packets;
+ pktout_stats->octets = odp_atomic_load_u64(&qs->out_octets);
+ pktout_stats->packets = odp_atomic_load_u64(&qs->out_packets);
+
return 0;
}
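These per-queue hooks feed the queue-level stats calls; a minimal sketch of reading the counters of input queue 0, assuming the odp_pktin_queue_stats() API with this signature (helper name hypothetical):

#include <odp_api.h>

static void read_queue0_stats(odp_pktio_t pktio)
{
	odp_pktin_queue_t pktin;
	odp_pktin_queue_stats_t qstats;

	if (odp_pktin_queue(pktio, &pktin, 1) < 1)
		return;

	if (odp_pktin_queue_stats(pktin, &qstats) == 0) {
		/* qstats.octets / packets / discards / errors mirror
		 * loopqs[0].stats above. */
	}
}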
diff --git a/platform/linux-generic/pktio/stats/packet_io_stats.c b/platform/linux-generic/pktio/stats/packet_io_stats.c
index ac61c0343..280aca250 100644
--- a/platform/linux-generic/pktio/stats/packet_io_stats.c
+++ b/platform/linux-generic/pktio/stats/packet_io_stats.c
@@ -5,7 +5,6 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <odp/api/deprecated.h>
#include <odp_packet_io_stats.h>
#include <odp_ethtool_stats.h>
#include <odp_sysfs_stats.h>
@@ -77,10 +76,6 @@ int _odp_sock_stats_fd(pktio_entry_t *pktio_entry,
pktio_entry->stats.in_discards;
stats->in_errors = cur_stats.in_errors -
pktio_entry->stats.in_errors;
-#if ODP_DEPRECATED_API
- stats->in_unknown_protos = cur_stats.in_unknown_protos -
- pktio_entry->stats.in_unknown_protos;
-#endif
stats->out_octets = cur_stats.out_octets -
pktio_entry->stats.out_octets;
stats->out_packets = cur_stats.out_packets -
diff --git a/platform/linux-generic/test/inline-timer.conf b/platform/linux-generic/test/inline-timer.conf
index 261aa0141..44db4e337 100644
--- a/platform/linux-generic/test/inline-timer.conf
+++ b/platform/linux-generic/test/inline-timer.conf
@@ -1,6 +1,6 @@
# Mandatory fields
odp_implementation = "linux-generic"
-config_file_version = "0.1.22"
+config_file_version = "0.1.25"
timer: {
# Enable inline timer implementation
diff --git a/platform/linux-generic/test/packet_align.conf b/platform/linux-generic/test/packet_align.conf
index 8d2d00e63..26491bd53 100644
--- a/platform/linux-generic/test/packet_align.conf
+++ b/platform/linux-generic/test/packet_align.conf
@@ -1,6 +1,6 @@
# Mandatory fields
odp_implementation = "linux-generic"
-config_file_version = "0.1.22"
+config_file_version = "0.1.25"
pool: {
pkt: {
diff --git a/platform/linux-generic/test/process-mode.conf b/platform/linux-generic/test/process-mode.conf
index 1e0e7cc95..2277aabdf 100644
--- a/platform/linux-generic/test/process-mode.conf
+++ b/platform/linux-generic/test/process-mode.conf
@@ -1,6 +1,6 @@
# Mandatory fields
odp_implementation = "linux-generic"
-config_file_version = "0.1.22"
+config_file_version = "0.1.25"
# Shared memory options
shm: {
diff --git a/platform/linux-generic/test/sched-basic.conf b/platform/linux-generic/test/sched-basic.conf
index e63ffa2f3..c9f7c79fd 100644
--- a/platform/linux-generic/test/sched-basic.conf
+++ b/platform/linux-generic/test/sched-basic.conf
@@ -1,9 +1,10 @@
# Mandatory fields
odp_implementation = "linux-generic"
-config_file_version = "0.1.22"
+config_file_version = "0.1.25"
# Test scheduler with an odd spread value and without dynamic load balance
sched_basic: {
prio_spread = 3
load_balance = 0
+ order_stash_size = 0
}
diff --git a/platform/linux-generic/test/stash-custom.conf b/platform/linux-generic/test/stash-custom.conf
new file mode 100644
index 000000000..95af7a259
--- /dev/null
+++ b/platform/linux-generic/test/stash-custom.conf
@@ -0,0 +1,8 @@
+# Mandatory fields
+odp_implementation = "linux-generic"
+config_file_version = "0.1.25"
+
+# Test overflow safe stash variant
+stash: {
+ strict_size = 0
+}