Diffstat (limited to 'platform/linux-generic')
-rw-r--r--  platform/linux-generic/Makefile.am | 12
-rw-r--r--  platform/linux-generic/include-abi/odp/api/abi/packet.h | 112
-rw-r--r--  platform/linux-generic/include-abi/odp/api/abi/packet_types.h | 141
-rw-r--r--  platform/linux-generic/include-abi/odp/api/abi/proto_stats.h | 27
-rw-r--r--  platform/linux-generic/include-abi/odp/api/abi/proto_stats_types.h | 40
-rw-r--r--  platform/linux-generic/include-abi/odp/api/abi/queue.h | 17
-rw-r--r--  platform/linux-generic/include-abi/odp/api/abi/queue_types.h | 42
-rw-r--r--  platform/linux-generic/include-abi/odp/api/abi/std.h (renamed from platform/linux-generic/include-abi/odp/api/abi/std_clib.h) | 6
-rw-r--r--  platform/linux-generic/include/odp/api/plat/packet_flag_inlines.h | 2
-rw-r--r--  platform/linux-generic/include/odp/api/plat/packet_inlines.h | 1
-rw-r--r--  platform/linux-generic/include/odp/api/plat/queue_inline_types.h | 2
-rw-r--r--  platform/linux-generic/include/odp/api/plat/std_inlines.h (renamed from platform/linux-generic/include/odp/api/plat/std_clib_inlines.h) | 4
-rw-r--r--  platform/linux-generic/include/odp_classification_datamodel.h | 15
-rw-r--r--  platform/linux-generic/include/odp_classification_internal.h | 47
-rw-r--r--  platform/linux-generic/include/odp_packet_internal.h | 4
-rw-r--r--  platform/linux-generic/include/odp_pool_internal.h | 20
-rw-r--r--  platform/linux-generic/include/odp_traffic_mngr_internal.h | 7
-rw-r--r--  platform/linux-generic/m4/odp_libconfig.m4 | 2
-rw-r--r--  platform/linux-generic/odp_classification.c | 72
-rw-r--r--  platform/linux-generic/odp_ipsec.c | 4
-rw-r--r--  platform/linux-generic/odp_ipsec_sad.c | 6
-rw-r--r--  platform/linux-generic/odp_packet.c | 181
-rw-r--r--  platform/linux-generic/odp_packet_io.c | 95
-rw-r--r--  platform/linux-generic/odp_packet_vector.c | 2
-rw-r--r--  platform/linux-generic/odp_pool.c | 517
-rw-r--r--  platform/linux-generic/odp_schedule_basic.c | 297
-rw-r--r--  platform/linux-generic/odp_std.c (renamed from platform/linux-generic/odp_fractional.c) | 2
-rw-r--r--  platform/linux-generic/odp_std_api.c (renamed from platform/linux-generic/odp_std_clib_api.c) | 4
-rw-r--r--  platform/linux-generic/odp_traffic_mngr.c | 153
-rw-r--r--  platform/linux-generic/pktio/dpdk.c | 41
-rw-r--r--  platform/linux-generic/test/inline-timer.conf | 2
-rw-r--r--  platform/linux-generic/test/packet_align.conf | 2
-rw-r--r--  platform/linux-generic/test/process-mode.conf | 2
-rw-r--r--  platform/linux-generic/test/sched-basic.conf | 5
34 files changed, 1501 insertions(+), 385 deletions(-)
diff --git a/platform/linux-generic/Makefile.am b/platform/linux-generic/Makefile.am
index b6721dce4..8c75e5ec0 100644
--- a/platform/linux-generic/Makefile.am
+++ b/platform/linux-generic/Makefile.am
@@ -43,7 +43,7 @@ odpapiplatinclude_HEADERS = \
include/odp/api/plat/pool_inline_types.h \
include/odp/api/plat/queue_inlines.h \
include/odp/api/plat/queue_inline_types.h \
- include/odp/api/plat/std_clib_inlines.h \
+ include/odp/api/plat/std_inlines.h \
include/odp/api/plat/strong_types.h \
include/odp/api/plat/sync_inlines.h \
include/odp/api/plat/thread_inlines.h \
@@ -67,10 +67,14 @@ odpapiabiarchinclude_HEADERS += \
include-abi/odp/api/abi/init.h \
include-abi/odp/api/abi/ipsec.h \
include-abi/odp/api/abi/packet.h \
+ include-abi/odp/api/abi/packet_types.h \
include-abi/odp/api/abi/packet_flags.h \
include-abi/odp/api/abi/packet_io.h \
+ include-abi/odp/api/abi/proto_stats.h \
+ include-abi/odp/api/abi/proto_stats_types.h \
include-abi/odp/api/abi/pool.h \
include-abi/odp/api/abi/queue.h \
+ include-abi/odp/api/abi/queue_types.h \
include-abi/odp/api/abi/rwlock.h \
include-abi/odp/api/abi/rwlock_recursive.h \
include-abi/odp/api/abi/schedule.h \
@@ -79,7 +83,7 @@ odpapiabiarchinclude_HEADERS += \
include-abi/odp/api/abi/spinlock.h \
include-abi/odp/api/abi/spinlock_recursive.h \
include-abi/odp/api/abi/stash.h \
- include-abi/odp/api/abi/std_clib.h \
+ include-abi/odp/api/abi/std.h \
include-abi/odp/api/abi/std_types.h \
include-abi/odp/api/abi/sync.h \
include-abi/odp/api/abi/thread.h \
@@ -174,7 +178,6 @@ __LIB__libodp_linux_la_SOURCES = \
odp_errno.c \
odp_event.c \
odp_fdserver.c \
- odp_fractional.c \
odp_hash_crc_gen.c \
odp_impl.c \
odp_init.c \
@@ -212,6 +215,7 @@ __LIB__libodp_linux_la_SOURCES = \
odp_spinlock.c \
odp_spinlock_recursive.c \
odp_stash.c \
+ odp_std.c \
odp_system_info.c \
odp_pcapng.c \
odp_thread.c \
@@ -258,7 +262,7 @@ __LIB__libodp_linux_la_SOURCES += \
odp_packet_flags_api.c \
odp_pktio_api.c \
odp_queue_api.c \
- odp_std_clib_api.c \
+ odp_std_api.c \
odp_sync_api.c \
odp_thread_api.c \
odp_ticketlock_api.c \
diff --git a/platform/linux-generic/include-abi/odp/api/abi/packet.h b/platform/linux-generic/include-abi/odp/api/abi/packet.h
index 28e97637c..5703141d4 100644
--- a/platform/linux-generic/include-abi/odp/api/abi/packet.h
+++ b/platform/linux-generic/include-abi/odp/api/abi/packet.h
@@ -18,121 +18,9 @@
extern "C" {
#endif
-#include <odp/api/std_types.h>
-#include <odp/api/plat/strong_types.h>
-
-/** @ingroup odp_packet
- * @{
- */
-
-typedef ODP_HANDLE_T(odp_packet_t);
-
-#define ODP_PACKET_INVALID _odp_cast_scalar(odp_packet_t, 0)
-
-#define ODP_PACKET_OFFSET_INVALID 0xffff
-
-typedef ODP_HANDLE_T(odp_packet_seg_t);
-
-#define ODP_PACKET_SEG_INVALID _odp_cast_scalar(odp_packet_seg_t, 0)
-
-typedef ODP_HANDLE_T(odp_packet_vector_t);
-
-#define ODP_PACKET_VECTOR_INVALID _odp_cast_scalar(odp_packet_vector_t, 0)
-
-typedef ODP_HANDLE_T(odp_packet_tx_compl_t);
-
-#define ODP_PACKET_TX_COMPL_INVALID _odp_cast_scalar(odp_packet_tx_compl_t, 0)
-
-#define ODP_PACKET_OFFSET_INVALID 0xffff
-
-typedef uint8_t odp_proto_l2_type_t;
-
-#define ODP_PROTO_L2_TYPE_NONE 0
-#define ODP_PROTO_L2_TYPE_ETH 1
-
-typedef uint8_t odp_proto_l3_type_t;
-
-#define ODP_PROTO_L3_TYPE_NONE 0
-#define ODP_PROTO_L3_TYPE_ARP 1
-#define ODP_PROTO_L3_TYPE_RARP 2
-#define ODP_PROTO_L3_TYPE_MPLS 3
-#define ODP_PROTO_L3_TYPE_IPV4 4
-#define ODP_PROTO_L3_TYPE_IPV6 6
-
-typedef uint8_t odp_proto_l4_type_t;
-
-/* Numbers from IANA Assigned Internet Protocol Numbers list */
-#define ODP_PROTO_L4_TYPE_NONE 0
-#define ODP_PROTO_L4_TYPE_ICMPV4 1
-#define ODP_PROTO_L4_TYPE_IGMP 2
-#define ODP_PROTO_L4_TYPE_IPV4 4
-#define ODP_PROTO_L4_TYPE_TCP 6
-#define ODP_PROTO_L4_TYPE_UDP 17
-#define ODP_PROTO_L4_TYPE_IPV6 41
-#define ODP_PROTO_L4_TYPE_GRE 47
-#define ODP_PROTO_L4_TYPE_ESP 50
-#define ODP_PROTO_L4_TYPE_AH 51
-#define ODP_PROTO_L4_TYPE_ICMPV6 58
-#define ODP_PROTO_L4_TYPE_NO_NEXT 59
-#define ODP_PROTO_L4_TYPE_IPCOMP 108
-#define ODP_PROTO_L4_TYPE_SCTP 132
-#define ODP_PROTO_L4_TYPE_ROHC 142
-
-typedef enum {
- ODP_PACKET_GREEN = 0,
- ODP_PACKET_YELLOW = 1,
- ODP_PACKET_RED = 2,
- ODP_PACKET_ALL_COLORS = 3,
-} odp_packet_color_t;
-
-typedef enum {
- ODP_PACKET_CHKSUM_UNKNOWN = 0,
- ODP_PACKET_CHKSUM_BAD,
- ODP_PACKET_CHKSUM_OK
-} odp_packet_chksum_status_t;
-
-typedef struct odp_packet_parse_result_flag_t {
- union {
- uint64_t all;
-
- struct {
- uint64_t has_error : 1;
- uint64_t has_l2_error : 1;
- uint64_t has_l3_error : 1;
- uint64_t has_l4_error : 1;
- uint64_t has_l2 : 1;
- uint64_t has_l3 : 1;
- uint64_t has_l4 : 1;
- uint64_t has_eth : 1;
- uint64_t has_eth_bcast : 1;
- uint64_t has_eth_mcast : 1;
- uint64_t has_jumbo : 1;
- uint64_t has_vlan : 1;
- uint64_t has_vlan_qinq : 1;
- uint64_t has_arp : 1;
- uint64_t has_ipv4 : 1;
- uint64_t has_ipv6 : 1;
- uint64_t has_ip_bcast : 1;
- uint64_t has_ip_mcast : 1;
- uint64_t has_ipfrag : 1;
- uint64_t has_ipopt : 1;
- uint64_t has_ipsec : 1;
- uint64_t has_udp : 1;
- uint64_t has_tcp : 1;
- uint64_t has_sctp : 1;
- uint64_t has_icmp : 1;
- };
- };
-
-} odp_packet_parse_result_flag_t;
-
#include <odp/api/plat/packet_inlines.h>
#include <odp/api/plat/packet_vector_inlines.h>
-/**
- * @}
- */
-
#ifdef __cplusplus
}
#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/packet_types.h b/platform/linux-generic/include-abi/odp/api/abi/packet_types.h
new file mode 100644
index 000000000..be2cb9df6
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/packet_types.h
@@ -0,0 +1,141 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2019-2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP packet descriptor
+ */
+
+#ifndef ODP_API_ABI_PACKET_TYPES_H_
+#define ODP_API_ABI_PACKET_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/std_types.h>
+#include <odp/api/plat/strong_types.h>
+
+/** @ingroup odp_packet
+ * @{
+ */
+
+typedef ODP_HANDLE_T(odp_packet_t);
+
+#define ODP_PACKET_INVALID _odp_cast_scalar(odp_packet_t, 0)
+
+#define ODP_PACKET_OFFSET_INVALID 0xffff
+
+typedef ODP_HANDLE_T(odp_packet_seg_t);
+
+#define ODP_PACKET_SEG_INVALID _odp_cast_scalar(odp_packet_seg_t, 0)
+
+typedef ODP_HANDLE_T(odp_packet_buf_t);
+
+#define ODP_PACKET_BUF_INVALID _odp_cast_scalar(odp_packet_buf_t, 0)
+
+typedef ODP_HANDLE_T(odp_packet_vector_t);
+
+#define ODP_PACKET_VECTOR_INVALID _odp_cast_scalar(odp_packet_vector_t, 0)
+
+typedef ODP_HANDLE_T(odp_packet_tx_compl_t);
+
+#define ODP_PACKET_TX_COMPL_INVALID _odp_cast_scalar(odp_packet_tx_compl_t, 0)
+
+#define ODP_PACKET_OFFSET_INVALID 0xffff
+
+typedef uint8_t odp_proto_l2_type_t;
+
+#define ODP_PROTO_L2_TYPE_NONE 0
+#define ODP_PROTO_L2_TYPE_ETH 1
+
+typedef uint8_t odp_proto_l3_type_t;
+
+#define ODP_PROTO_L3_TYPE_NONE 0
+#define ODP_PROTO_L3_TYPE_ARP 1
+#define ODP_PROTO_L3_TYPE_RARP 2
+#define ODP_PROTO_L3_TYPE_MPLS 3
+#define ODP_PROTO_L3_TYPE_IPV4 4
+#define ODP_PROTO_L3_TYPE_IPV6 6
+
+typedef uint8_t odp_proto_l4_type_t;
+
+/* Numbers from IANA Assigned Internet Protocol Numbers list */
+#define ODP_PROTO_L4_TYPE_NONE 0
+#define ODP_PROTO_L4_TYPE_ICMPV4 1
+#define ODP_PROTO_L4_TYPE_IGMP 2
+#define ODP_PROTO_L4_TYPE_IPV4 4
+#define ODP_PROTO_L4_TYPE_TCP 6
+#define ODP_PROTO_L4_TYPE_UDP 17
+#define ODP_PROTO_L4_TYPE_IPV6 41
+#define ODP_PROTO_L4_TYPE_GRE 47
+#define ODP_PROTO_L4_TYPE_ESP 50
+#define ODP_PROTO_L4_TYPE_AH 51
+#define ODP_PROTO_L4_TYPE_ICMPV6 58
+#define ODP_PROTO_L4_TYPE_NO_NEXT 59
+#define ODP_PROTO_L4_TYPE_IPCOMP 108
+#define ODP_PROTO_L4_TYPE_SCTP 132
+#define ODP_PROTO_L4_TYPE_ROHC 142
+
+typedef enum {
+ ODP_PACKET_GREEN = 0,
+ ODP_PACKET_YELLOW = 1,
+ ODP_PACKET_RED = 2,
+ ODP_PACKET_ALL_COLORS = 3,
+} odp_packet_color_t;
+
+typedef enum {
+ ODP_PACKET_CHKSUM_UNKNOWN = 0,
+ ODP_PACKET_CHKSUM_BAD,
+ ODP_PACKET_CHKSUM_OK
+} odp_packet_chksum_status_t;
+
+typedef struct odp_packet_parse_result_flag_t {
+ union {
+ uint64_t all;
+
+ struct {
+ uint64_t has_error : 1;
+ uint64_t has_l2_error : 1;
+ uint64_t has_l3_error : 1;
+ uint64_t has_l4_error : 1;
+ uint64_t has_l2 : 1;
+ uint64_t has_l3 : 1;
+ uint64_t has_l4 : 1;
+ uint64_t has_eth : 1;
+ uint64_t has_eth_bcast : 1;
+ uint64_t has_eth_mcast : 1;
+ uint64_t has_jumbo : 1;
+ uint64_t has_vlan : 1;
+ uint64_t has_vlan_qinq : 1;
+ uint64_t has_arp : 1;
+ uint64_t has_ipv4 : 1;
+ uint64_t has_ipv6 : 1;
+ uint64_t has_ip_bcast : 1;
+ uint64_t has_ip_mcast : 1;
+ uint64_t has_ipfrag : 1;
+ uint64_t has_ipopt : 1;
+ uint64_t has_ipsec : 1;
+ uint64_t has_udp : 1;
+ uint64_t has_tcp : 1;
+ uint64_t has_sctp : 1;
+ uint64_t has_icmp : 1;
+ };
+ };
+
+} odp_packet_parse_result_flag_t;
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
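
As an illustrative sketch (not part of the patch): the L2/L3/L4 type constants defined above are the values the packet parse API reports, so assuming the standard odp_packet_l4_type() accessor an application can branch on them like this:

    #include <odp_api.h>

    /* Return 1 when the parsed packet carries UDP (IANA protocol number 17). */
    static int pkt_is_udp(odp_packet_t pkt)
    {
            return odp_packet_l4_type(pkt) == ODP_PROTO_L4_TYPE_UDP;
    }
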
diff --git a/platform/linux-generic/include-abi/odp/api/abi/proto_stats.h b/platform/linux-generic/include-abi/odp/api/abi/proto_stats.h
new file mode 100644
index 000000000..d81035df2
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/proto_stats.h
@@ -0,0 +1,27 @@
+/* Copyright (c) 2021, Marvell
+ * Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP proto stats
+ */
+
+#ifndef ODP_API_ABI_PROTO_STATS_H_
+#define ODP_API_ABI_PROTO_STATS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Placeholder for inlined functions for non-ABI compat mode */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/proto_stats_types.h b/platform/linux-generic/include-abi/odp/api/abi/proto_stats_types.h
new file mode 100644
index 000000000..2ebddce62
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/proto_stats_types.h
@@ -0,0 +1,40 @@
+/* Copyright (c) 2021, Marvell
+ * Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP proto stats types
+ */
+
+#ifndef ODP_API_ABI_PROTO_STATS_TYPES_H_
+#define ODP_API_ABI_PROTO_STATS_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/std_types.h>
+#include <odp/api/plat/strong_types.h>
+
+/** @ingroup odp_proto_stats
+ * @{
+ */
+
+typedef ODP_HANDLE_T(odp_proto_stats_t);
+
+#define ODP_PROTO_STATS_INVALID _odp_cast_scalar(odp_proto_stats_t, 0)
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/queue.h b/platform/linux-generic/include-abi/odp/api/abi/queue.h
index 6ec922600..6c34123df 100644
--- a/platform/linux-generic/include-abi/odp/api/abi/queue.h
+++ b/platform/linux-generic/include-abi/odp/api/abi/queue.h
@@ -17,26 +17,9 @@
extern "C" {
#endif
-#include <odp/api/std_types.h>
-#include <odp/api/plat/strong_types.h>
-
-/** @ingroup odp_queue
- * @{
- */
-
-typedef ODP_HANDLE_T(odp_queue_t);
-
-#define ODP_QUEUE_INVALID _odp_cast_scalar(odp_queue_t, 0)
-
-#define ODP_QUEUE_NAME_LEN 32
-
/* Inlined functions for non-ABI compat mode */
#include <odp/api/plat/queue_inlines.h>
-/**
- * @}
- */
-
#ifdef __cplusplus
}
#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/queue_types.h b/platform/linux-generic/include-abi/odp/api/abi/queue_types.h
new file mode 100644
index 000000000..1a56c7682
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/queue_types.h
@@ -0,0 +1,42 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP queue
+ */
+
+#ifndef ODP_API_ABI_QUEUE_TYPES_H_
+#define ODP_API_ABI_QUEUE_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/std_types.h>
+#include <odp/api/plat/strong_types.h>
+
+/** @ingroup odp_queue
+ * @{
+ */
+
+typedef ODP_HANDLE_T(odp_queue_t);
+
+#define ODP_QUEUE_INVALID _odp_cast_scalar(odp_queue_t, 0)
+
+#define ODP_QUEUE_NAME_LEN 32
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/std_clib.h b/platform/linux-generic/include-abi/odp/api/abi/std.h
index d41dd1403..175b606c5 100644
--- a/platform/linux-generic/include-abi/odp/api/abi/std_clib.h
+++ b/platform/linux-generic/include-abi/odp/api/abi/std.h
@@ -10,15 +10,15 @@
* ODP barrier
*/
-#ifndef ODP_API_ABI_STD_CLIB_H_
-#define ODP_API_ABI_STD_CLIB_H_
+#ifndef ODP_API_ABI_STD_H_
+#define ODP_API_ABI_STD_H_
#ifdef __cplusplus
extern "C" {
#endif
#define _ODP_INLINE static inline
-#include <odp/api/plat/std_clib_inlines.h>
+#include <odp/api/plat/std_inlines.h>
#ifdef __cplusplus
}
diff --git a/platform/linux-generic/include/odp/api/plat/packet_flag_inlines.h b/platform/linux-generic/include/odp/api/plat/packet_flag_inlines.h
index 6f32b46c6..6eb34a39b 100644
--- a/platform/linux-generic/include/odp/api/plat/packet_flag_inlines.h
+++ b/platform/linux-generic/include/odp/api/plat/packet_flag_inlines.h
@@ -13,7 +13,7 @@
#ifndef _ODP_PLAT_PACKET_FLAG_INLINES_H_
#define _ODP_PLAT_PACKET_FLAG_INLINES_H_
-#include <odp/api/abi/packet.h>
+#include <odp/api/abi/packet_types.h>
#include <odp/api/plat/packet_inline_types.h>
#include <odp/api/hints.h>
diff --git a/platform/linux-generic/include/odp/api/plat/packet_inlines.h b/platform/linux-generic/include/odp/api/plat/packet_inlines.h
index 08048a993..8a0f54134 100644
--- a/platform/linux-generic/include/odp/api/plat/packet_inlines.h
+++ b/platform/linux-generic/include/odp/api/plat/packet_inlines.h
@@ -15,6 +15,7 @@
#define _ODP_PLAT_PACKET_INLINES_H_
#include <odp/api/abi/packet.h>
+#include <odp/api/packet_types.h>
#include <odp/api/pool.h>
#include <odp/api/abi/packet_io.h>
#include <odp/api/hints.h>
diff --git a/platform/linux-generic/include/odp/api/plat/queue_inline_types.h b/platform/linux-generic/include/odp/api/plat/queue_inline_types.h
index e59c7f55a..5ce767a16 100644
--- a/platform/linux-generic/include/odp/api/plat/queue_inline_types.h
+++ b/platform/linux-generic/include/odp/api/plat/queue_inline_types.h
@@ -12,7 +12,7 @@ extern "C" {
#endif
#include <stdint.h>
-#include <odp/api/spec/queue_types.h>
+#include <odp/api/queue_types.h>
/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
diff --git a/platform/linux-generic/include/odp/api/plat/std_clib_inlines.h b/platform/linux-generic/include/odp/api/plat/std_inlines.h
index 4265eaf1d..3f6a7e9d4 100644
--- a/platform/linux-generic/include/odp/api/plat/std_clib_inlines.h
+++ b/platform/linux-generic/include/odp/api/plat/std_inlines.h
@@ -4,8 +4,8 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-#ifndef ODP_PLAT_STD_CLIB_INLINE_H_
-#define ODP_PLAT_STD_CLIB_INLINE_H_
+#ifndef ODP_PLAT_STD_INLINE_H_
+#define ODP_PLAT_STD_INLINE_H_
/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
diff --git a/platform/linux-generic/include/odp_classification_datamodel.h b/platform/linux-generic/include/odp_classification_datamodel.h
index ebd0107f9..cc0e7f081 100644
--- a/platform/linux-generic/include/odp_classification_datamodel.h
+++ b/platform/linux-generic/include/odp_classification_datamodel.h
@@ -145,6 +145,10 @@ struct cos_s {
odp_queue_param_t queue_param;
char name[ODP_COS_NAME_LEN]; /* name */
uint8_t index;
+ struct {
+ odp_atomic_u64_t discards;
+ odp_atomic_u64_t packets;
+ } stats[CLS_COS_QUEUE_MAX];
};
typedef union cos_u {
@@ -230,6 +234,17 @@ typedef struct pmr_tbl {
pmr_t pmr[CLS_PMR_MAX_ENTRY];
} pmr_tbl_t;
+/**
+Classifier global data
+**/
+typedef struct cls_global_t {
+ cos_tbl_t cos_tbl;
+ pmr_tbl_t pmr_tbl;
+ _cls_queue_grp_tbl_t queue_grp_tbl;
+ odp_shm_t shm;
+
+} cls_global_t;
+
#ifdef __cplusplus
}
#endif
diff --git a/platform/linux-generic/include/odp_classification_internal.h b/platform/linux-generic/include/odp_classification_internal.h
index 48ee0526e..bc0a12f8f 100644
--- a/platform/linux-generic/include/odp_classification_internal.h
+++ b/platform/linux-generic/include/odp_classification_internal.h
@@ -1,4 +1,5 @@
/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -18,14 +19,58 @@
extern "C" {
#endif
+#include <odp/api/atomic.h>
#include <odp/api/classification.h>
+#include <odp/api/hints.h>
#include <odp/api/queue.h>
#include <odp_packet_internal.h>
#include <odp/api/packet_io.h>
#include <odp_packet_io_internal.h>
#include <odp_classification_datamodel.h>
-cos_t *_odp_cos_entry_from_idx(uint32_t ndx);
+extern cls_global_t *_odp_cls_global;
+
+static inline cos_t *_odp_cos_entry_from_idx(uint32_t ndx)
+{
+ return &_odp_cls_global->cos_tbl.cos_entry[ndx];
+}
+
+static inline int _odp_cos_queue_idx(const cos_t *cos, odp_queue_t queue)
+{
+ uint32_t i, tbl_idx;
+ int queue_idx = -1;
+
+ if (cos->s.num_queue == 1) {
+ if (odp_unlikely(cos->s.queue != queue))
+ return -1;
+ return 0;
+ }
+
+ tbl_idx = cos->s.index * CLS_COS_QUEUE_MAX;
+ for (i = 0; i < cos->s.num_queue; i++) {
+ if (_odp_cls_global->queue_grp_tbl.s.queue[tbl_idx + i] == queue) {
+ queue_idx = i;
+ break;
+ }
+ }
+ return queue_idx;
+}
+
+static inline void _odp_cos_queue_stats_add(cos_t *cos, odp_queue_t queue,
+ uint64_t packets, uint64_t discards)
+{
+ int queue_idx = _odp_cos_queue_idx(cos, queue);
+
+ if (odp_unlikely(queue_idx < 0)) {
+ ODP_ERR("Queue not attached to the CoS\n");
+ return;
+ }
+
+ if (packets)
+ odp_atomic_add_u64(&cos->s.stats[queue_idx].packets, packets);
+ if (discards)
+ odp_atomic_add_u64(&cos->s.stats[queue_idx].discards, discards);
+}
/** Classification Internal function **/
diff --git a/platform/linux-generic/include/odp_packet_internal.h b/platform/linux-generic/include/odp_packet_internal.h
index 4af4bf062..497ea4aee 100644
--- a/platform/linux-generic/include/odp_packet_internal.h
+++ b/platform/linux-generic/include/odp_packet_internal.h
@@ -230,8 +230,8 @@ static inline void packet_init(odp_packet_hdr_t *pkt_hdr, uint32_t len)
* segment occupied by the allocated length.
*/
pkt_hdr->frame_len = len;
- pkt_hdr->headroom = CONFIG_PACKET_HEADROOM;
- pkt_hdr->tailroom = pool->seg_len - seg_len + CONFIG_PACKET_TAILROOM;
+ pkt_hdr->headroom = pool->headroom;
+ pkt_hdr->tailroom = pool->seg_len - seg_len + pool->tailroom;
if (odp_unlikely(pkt_hdr->subtype != ODP_EVENT_PACKET_BASIC))
pkt_hdr->subtype = ODP_EVENT_PACKET_BASIC;
diff --git a/platform/linux-generic/include/odp_pool_internal.h b/platform/linux-generic/include/odp_pool_internal.h
index a0e4c5c65..dc4754710 100644
--- a/platform/linux-generic/include/odp_pool_internal.h
+++ b/platform/linux-generic/include/odp_pool_internal.h
@@ -1,5 +1,5 @@
-/* Copyright (c) 2019, Nokia
- * Copyright (c) 2013-2018, Linaro Limited
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2019-2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -42,6 +42,9 @@ typedef struct ODP_ALIGNED_CACHE {
/* Ring data: buffer handles */
odp_buffer_hdr_t *buf_hdr[CONFIG_POOL_MAX_NUM + 1];
+ /* Index to pointer look-up table for external memory pool */
+ odp_buffer_hdr_t *buf_hdr_by_index[0];
+
} pool_ring_t;
/* Callback function for pool destroy */
@@ -49,11 +52,16 @@ typedef void (*pool_destroy_cb_fn)(void *pool);
typedef struct pool_t {
odp_ticketlock_t lock ODP_ALIGNED_CACHE;
+ odp_pool_t pool_hdl;
+ uint32_t pool_idx;
+ uint8_t reserved;
+ /* Everything under this mark are memset() to zero on pool create */
+ uint8_t memset_mark;
+ uint8_t type;
+ uint8_t pool_ext;
char name[ODP_POOL_NAME_LEN];
odp_pool_param_t params;
- odp_pool_t pool_hdl;
- uint32_t pool_idx;
uint32_t ring_mask;
uint32_t cache_size;
uint32_t burst_size;
@@ -61,7 +69,6 @@ typedef struct pool_t {
odp_shm_t uarea_shm;
uint64_t shm_size;
uint64_t uarea_shm_size;
- int reserved;
uint32_t num;
uint32_t align;
uint32_t headroom;
@@ -69,12 +76,15 @@ typedef struct pool_t {
uint32_t seg_len;
uint32_t max_seg_len;
uint32_t max_len;
+ uint32_t param_uarea_size;
uint32_t uarea_size;
uint32_t block_size;
uint32_t block_offset;
+ uint32_t num_populated;
uint8_t *base_addr;
uint8_t *max_addr;
uint8_t *uarea_base_addr;
+ odp_pool_ext_param_t ext_param;
/* Used by DPDK zero-copy pktio */
uint32_t dpdk_elt_size;
diff --git a/platform/linux-generic/include/odp_traffic_mngr_internal.h b/platform/linux-generic/include/odp_traffic_mngr_internal.h
index 8a65a1685..a54847319 100644
--- a/platform/linux-generic/include/odp_traffic_mngr_internal.h
+++ b/platform/linux-generic/include/odp_traffic_mngr_internal.h
@@ -284,8 +284,15 @@ struct tm_queue_obj_s {
uint8_t tm_idx;
uint8_t delayed_cnt;
uint8_t blocked_cnt;
+ odp_bool_t ordered_enqueue;
tm_status_t status;
odp_queue_t queue;
+ /* Statistics for odp_tm_queue_stats_t */
+ struct {
+ odp_atomic_u64_t discards;
+ odp_atomic_u64_t errors;
+ odp_atomic_u64_t packets;
+ } stats;
};
struct tm_node_obj_s {
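
The per-queue counters added above back the odp_tm_queue_stats_t mentioned in the comment. Assuming the spec-level odp_tm_queue_stats() call and its packets/discards/errors fields (neither is shown in this patch), a read-out could look roughly like:

    odp_tm_queue_stats_t stats;

    if (odp_tm_queue_stats(tm_queue, &stats) == 0)
            printf("packets %" PRIu64 " discards %" PRIu64 " errors %" PRIu64 "\n",
                   stats.packets, stats.discards, stats.errors);
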
diff --git a/platform/linux-generic/m4/odp_libconfig.m4 b/platform/linux-generic/m4/odp_libconfig.m4
index ecfb28b7f..ccbf1d6f5 100644
--- a/platform/linux-generic/m4/odp_libconfig.m4
+++ b/platform/linux-generic/m4/odp_libconfig.m4
@@ -3,7 +3,7 @@
##########################################################################
m4_define([_odp_config_version_generation], [0])
m4_define([_odp_config_version_major], [1])
-m4_define([_odp_config_version_minor], [16])
+m4_define([_odp_config_version_minor], [18])
m4_define([_odp_config_version],
[_odp_config_version_generation._odp_config_version_major._odp_config_version_minor])
diff --git a/platform/linux-generic/odp_classification.c b/platform/linux-generic/odp_classification.c
index bc31d01eb..3b50232d6 100644
--- a/platform/linux-generic/odp_classification.c
+++ b/platform/linux-generic/odp_classification.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2014-2018, Linaro Limited
- * Copyright (c) 2019-2020, Nokia
+ * Copyright (c) 2019-2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -42,15 +42,7 @@ static cos_tbl_t *cos_tbl;
static pmr_tbl_t *pmr_tbl;
static _cls_queue_grp_tbl_t *queue_grp_tbl;
-typedef struct cls_global_t {
- cos_tbl_t cos_tbl;
- pmr_tbl_t pmr_tbl;
- _cls_queue_grp_tbl_t queue_grp_tbl;
- odp_shm_t shm;
-
-} cls_global_t;
-
-static cls_global_t *cls_global;
+cls_global_t *_odp_cls_global;
static const rss_key default_rss = {
.u8 = {
@@ -62,11 +54,6 @@ static const rss_key default_rss = {
}
};
-cos_t *_odp_cos_entry_from_idx(uint32_t ndx)
-{
- return &cos_tbl->cos_entry[ndx];
-}
-
static inline uint32_t _odp_cos_to_ndx(odp_cos_t cos)
{
return _odp_typeval(cos) - 1;
@@ -109,13 +96,13 @@ int _odp_classification_init_global(void)
if (shm == ODP_SHM_INVALID)
return -1;
- cls_global = odp_shm_addr(shm);
- memset(cls_global, 0, sizeof(cls_global_t));
+ _odp_cls_global = odp_shm_addr(shm);
+ memset(_odp_cls_global, 0, sizeof(cls_global_t));
- cls_global->shm = shm;
- cos_tbl = &cls_global->cos_tbl;
- pmr_tbl = &cls_global->pmr_tbl;
- queue_grp_tbl = &cls_global->queue_grp_tbl;
+ _odp_cls_global->shm = shm;
+ cos_tbl = &_odp_cls_global->cos_tbl;
+ pmr_tbl = &_odp_cls_global->pmr_tbl;
+ queue_grp_tbl = &_odp_cls_global->queue_grp_tbl;
for (i = 0; i < CLS_COS_MAX_ENTRY; i++) {
/* init locks */
@@ -136,7 +123,7 @@ int _odp_classification_init_global(void)
int _odp_classification_term_global(void)
{
- if (cls_global && odp_shm_free(cls_global->shm)) {
+ if (_odp_cls_global && odp_shm_free(_odp_cls_global->shm)) {
ODP_ERR("shm free failed\n");
return -1;
}
@@ -163,7 +150,9 @@ void odp_cls_pmr_param_init(odp_pmr_param_t *param)
int odp_cls_capability(odp_cls_capability_t *capability)
{
- unsigned count = 0;
+ unsigned int count = 0;
+
+ memset(capability, 0, sizeof(odp_cls_capability_t));
for (int i = 0; i < CLS_PMR_MAX_ENTRY; i++)
if (!pmr_tbl->pmr[i].s.valid)
@@ -197,6 +186,9 @@ int odp_cls_capability(odp_cls_capability_t *capability)
capability->threshold_bp.all_bits = 0;
capability->max_hash_queues = CLS_COS_QUEUE_MAX;
capability->max_mark = MAX_MARK;
+ capability->stats.queue.counter.discards = 1;
+ capability->stats.queue.counter.packets = 1;
+
return 0;
}
@@ -306,6 +298,11 @@ odp_cos_t odp_cls_cos_create(const char *name, const odp_cls_cos_param_t *param)
} else {
cos->s.queue = param->queue;
}
+ /* Initialize statistics counters */
+ for (j = 0; j < cos->s.num_queue; j++) {
+ odp_atomic_init_u64(&cos->s.stats[j].discards, 0);
+ odp_atomic_init_u64(&cos->s.stats[j].packets, 0);
+ }
cos->s.pool = param->pool;
cos->s.headroom = 0;
@@ -1783,6 +1780,35 @@ uint64_t odp_pmr_to_u64(odp_pmr_t hdl)
return _odp_pri(hdl);
}
+int odp_cls_queue_stats(odp_cos_t hdl, odp_queue_t queue,
+ odp_cls_queue_stats_t *stats)
+{
+ cos_t *cos = get_cos_entry(hdl);
+ int queue_idx;
+
+ if (odp_unlikely(cos == NULL)) {
+ ODP_ERR("Invalid odp_cos_t handle\n");
+ return -1;
+ }
+
+ if (odp_unlikely(stats == NULL)) {
+ ODP_ERR("Output structure NULL\n");
+ return -1;
+ }
+
+ queue_idx = _odp_cos_queue_idx(cos, queue);
+ if (odp_unlikely(queue_idx < 0)) {
+ ODP_ERR("Invalid odp_queue_t handle\n");
+ return -1;
+ }
+
+ memset(stats, 0, sizeof(odp_cls_queue_stats_t));
+ stats->discards = odp_atomic_load_u64(&cos->s.stats[queue_idx].discards);
+ stats->packets = odp_atomic_load_u64(&cos->s.stats[queue_idx].packets);
+
+ return 0;
+}
+
static
void print_cos_ident(struct cos_s *cos)
{
diff --git a/platform/linux-generic/odp_ipsec.c b/platform/linux-generic/odp_ipsec.c
index 15590523c..137e7b435 100644
--- a/platform/linux-generic/odp_ipsec.c
+++ b/platform/linux-generic/odp_ipsec.c
@@ -244,8 +244,10 @@ int odp_ipsec_auth_capability(odp_auth_alg_t auth,
continue;
}
- if (out < num)
+ if (out < num) {
capa[out].key_len = crypto_capa[i].key_len;
+ capa[out].icv_len = crypto_capa[i].digest_len;
+ }
out++;
}
diff --git a/platform/linux-generic/odp_ipsec_sad.c b/platform/linux-generic/odp_ipsec_sad.c
index 407192dcf..0eea57a10 100644
--- a/platform/linux-generic/odp_ipsec_sad.c
+++ b/platform/linux-generic/odp_ipsec_sad.c
@@ -430,6 +430,8 @@ uint32_t _odp_ipsec_auth_digest_len(odp_auth_alg_t auth)
return 16;
case ODP_AUTH_ALG_AES_CCM:
return 16;
+ case ODP_AUTH_ALG_AES_CMAC:
+ return 12;
case ODP_AUTH_ALG_CHACHA20_POLY1305:
return 16;
default:
@@ -629,6 +631,10 @@ odp_ipsec_sa_t odp_ipsec_sa_create(const odp_ipsec_sa_param_t *param)
crypto_param.auth_digest_len =
_odp_ipsec_auth_digest_len(crypto_param.auth_alg);
+ if (param->crypto.icv_len != 0 &&
+ param->crypto.icv_len != crypto_param.auth_digest_len)
+ goto error;
+
if ((uint32_t)-1 == crypto_param.cipher_iv.length ||
(uint32_t)-1 == crypto_param.auth_digest_len)
goto error;
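
The added check ties the SA parameter crypto.icv_len to the algorithm's digest length. A hedged sketch of setting it explicitly (lengths per _odp_ipsec_auth_digest_len() above; 0 keeps the default):

    odp_ipsec_sa_param_t sa_param;

    odp_ipsec_sa_param_init(&sa_param);
    sa_param.crypto.auth_alg = ODP_AUTH_ALG_AES_CMAC;
    /* Must be 0 or match the digest length, 12 bytes for AES-CMAC here. */
    sa_param.crypto.icv_len = 12;
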
diff --git a/platform/linux-generic/odp_packet.c b/platform/linux-generic/odp_packet.c
index fdf711735..0986056e6 100644
--- a/platform/linux-generic/odp_packet.c
+++ b/platform/linux-generic/odp_packet.c
@@ -18,6 +18,7 @@
#include <odp/api/plat/byteorder_inlines.h>
#include <odp/api/packet_io.h>
#include <odp/api/plat/pktio_inlines.h>
+#include <odp/api/proto_stats.h>
/* Inlined API functions */
#include <odp/api/plat/event_inlines.h>
@@ -134,10 +135,11 @@ static inline void *packet_tail(odp_packet_hdr_t *pkt_hdr)
static inline uint32_t seg_headroom(odp_packet_hdr_t *pkt_seg)
{
odp_buffer_hdr_t *hdr = &pkt_seg->buf_hdr;
+ pool_t *pool = hdr->pool_ptr;
uint8_t *base = hdr->base_data;
uint8_t *head = pkt_seg->seg_data;
- return CONFIG_PACKET_HEADROOM + (head - base);
+ return pool->headroom + (head - base);
}
static inline uint32_t seg_tailroom(odp_packet_hdr_t *pkt_seg)
@@ -690,7 +692,7 @@ odp_packet_t odp_packet_alloc(odp_pool_t pool_hdl, uint32_t len)
odp_packet_t pkt;
int num, num_seg;
- if (odp_unlikely(pool->params.type != ODP_POOL_PACKET)) {
+ if (odp_unlikely(pool->type != ODP_POOL_PACKET)) {
_odp_errno = EINVAL;
return ODP_PACKET_INVALID;
}
@@ -713,7 +715,7 @@ int odp_packet_alloc_multi(odp_pool_t pool_hdl, uint32_t len,
pool_t *pool = pool_entry_from_hdl(pool_hdl);
int num, num_seg;
- if (odp_unlikely(pool->params.type != ODP_POOL_PACKET)) {
+ if (odp_unlikely(pool->type != ODP_POOL_PACKET)) {
_odp_errno = EINVAL;
return -1;
}
@@ -1747,8 +1749,8 @@ int _odp_packet_copy_md_to_packet(odp_packet_t srcpkt, odp_packet_t dstpkt)
odp_packet_hdr_t *dsthdr = packet_hdr(dstpkt);
pool_t *src_pool = srchdr->buf_hdr.pool_ptr;
pool_t *dst_pool = dsthdr->buf_hdr.pool_ptr;
- uint32_t src_uarea_size = src_pool->params.pkt.uarea_size;
- uint32_t dst_uarea_size = dst_pool->params.pkt.uarea_size;
+ uint32_t src_uarea_size = src_pool->param_uarea_size;
+ uint32_t dst_uarea_size = dst_pool->param_uarea_size;
dsthdr->input = srchdr->input;
dsthdr->dst_queue = srchdr->dst_queue;
@@ -2949,3 +2951,172 @@ odp_packet_reass_partial_state(odp_packet_t pkt, odp_packet_t frags[],
(void)res;
return -ENOTSUP;
}
+
+static inline odp_packet_hdr_t *packet_buf_to_hdr(odp_packet_buf_t pkt_buf)
+{
+ return (odp_packet_hdr_t *)(uintptr_t)pkt_buf;
+}
+
+void *odp_packet_buf_head(odp_packet_buf_t pkt_buf)
+{
+ odp_packet_hdr_t *pkt_hdr = packet_buf_to_hdr(pkt_buf);
+ pool_t *pool = pkt_hdr->buf_hdr.pool_ptr;
+ uint32_t head_offset = sizeof(odp_packet_hdr_t) + pool->ext_param.pkt.app_header_size;
+
+ if (odp_unlikely(pool->pool_ext == 0)) {
+ ODP_ERR("Not an external memory pool\n");
+ return NULL;
+ }
+
+ return (uint8_t *)pkt_hdr + head_offset;
+}
+
+uint32_t odp_packet_buf_size(odp_packet_buf_t pkt_buf)
+{
+ odp_packet_hdr_t *pkt_hdr = packet_buf_to_hdr(pkt_buf);
+ pool_t *pool = pkt_hdr->buf_hdr.pool_ptr;
+ uint32_t head_offset = sizeof(odp_packet_hdr_t) + pool->ext_param.pkt.app_header_size;
+
+ return pool->ext_param.pkt.buf_size - head_offset;
+}
+
+uint32_t odp_packet_buf_data_offset(odp_packet_buf_t pkt_buf)
+{
+ odp_packet_hdr_t *pkt_hdr = packet_buf_to_hdr(pkt_buf);
+
+ return (uintptr_t)pkt_hdr->seg_data - (uintptr_t)odp_packet_buf_head(pkt_buf);
+}
+
+uint32_t odp_packet_buf_data_len(odp_packet_buf_t pkt_buf)
+{
+ odp_packet_hdr_t *pkt_hdr = packet_buf_to_hdr(pkt_buf);
+
+ return pkt_hdr->seg_len;
+}
+
+void odp_packet_buf_data_set(odp_packet_buf_t pkt_buf, uint32_t data_offset, uint32_t data_len)
+{
+ odp_packet_hdr_t *pkt_hdr = packet_buf_to_hdr(pkt_buf);
+ uint8_t *head = odp_packet_buf_head(pkt_buf);
+
+ pkt_hdr->seg_len = data_len;
+ pkt_hdr->seg_data = head + data_offset;
+}
+
+odp_packet_buf_t odp_packet_buf_from_head(odp_pool_t pool_hdl, void *head)
+{
+ pool_t *pool = pool_entry_from_hdl(pool_hdl);
+ uint32_t head_offset = sizeof(odp_packet_hdr_t) + pool->ext_param.pkt.app_header_size;
+
+ if (odp_unlikely(pool->type != ODP_POOL_PACKET)) {
+ ODP_ERR("Not a packet pool\n");
+ return ODP_PACKET_BUF_INVALID;
+ }
+
+ if (odp_unlikely(pool->pool_ext == 0)) {
+ ODP_ERR("Not an external memory pool\n");
+ return ODP_PACKET_BUF_INVALID;
+ }
+
+ return (odp_packet_buf_t)((uintptr_t)head - head_offset);
+}
+
+uint32_t odp_packet_disassemble(odp_packet_t pkt, odp_packet_buf_t pkt_buf[], uint32_t num)
+{
+ uint32_t i;
+ odp_packet_seg_t seg;
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+ pool_t *pool = pkt_hdr->buf_hdr.pool_ptr;
+ uint32_t num_segs = odp_packet_num_segs(pkt);
+
+ if (odp_unlikely(pool->type != ODP_POOL_PACKET)) {
+ ODP_ERR("Not a packet pool\n");
+ return 0;
+ }
+
+ if (odp_unlikely(pool->pool_ext == 0)) {
+ ODP_ERR("Not an external memory pool\n");
+ return 0;
+ }
+
+ if (odp_unlikely(num < num_segs)) {
+ ODP_ERR("Not enough buffer handles %u. Packet has %u segments.\n", num, num_segs);
+ return 0;
+ }
+
+ seg = odp_packet_first_seg(pkt);
+
+ for (i = 0; i < num_segs; i++) {
+ pkt_buf[i] = (odp_packet_buf_t)(uintptr_t)packet_seg_to_hdr(seg);
+ seg = odp_packet_next_seg(pkt, seg);
+ }
+
+ return num_segs;
+}
+
+odp_packet_t odp_packet_reassemble(odp_pool_t pool_hdl, odp_packet_buf_t pkt_buf[], uint32_t num)
+{
+ uint32_t i, data_len, tailroom;
+ odp_packet_hdr_t *cur_seg, *next_seg;
+ odp_packet_hdr_t *pkt_hdr = (odp_packet_hdr_t *)(uintptr_t)pkt_buf[0];
+ uint32_t headroom = odp_packet_buf_data_offset(pkt_buf[0]);
+
+ pool_t *pool = pool_entry_from_hdl(pool_hdl);
+
+ if (odp_unlikely(pool->type != ODP_POOL_PACKET)) {
+ ODP_ERR("Not a packet pool\n");
+ return ODP_PACKET_INVALID;
+ }
+
+ if (odp_unlikely(pool->pool_ext == 0)) {
+ ODP_ERR("Not an external memory pool\n");
+ return ODP_PACKET_INVALID;
+ }
+
+ if (odp_unlikely(num == 0)) {
+ ODP_ERR("Bad number of buffers: %u\n", num);
+ return ODP_PACKET_INVALID;
+ }
+
+ cur_seg = pkt_hdr;
+ data_len = 0;
+
+ for (i = 0; i < num; i++) {
+ next_seg = NULL;
+ if (i < num - 1)
+ next_seg = (odp_packet_hdr_t *)(uintptr_t)pkt_buf[i + 1];
+
+ data_len += cur_seg->seg_len;
+ cur_seg->seg_next = next_seg;
+ cur_seg = next_seg;
+ }
+
+ tailroom = pool->ext_param.pkt.buf_size - sizeof(odp_packet_hdr_t);
+ tailroom -= pool->ext_param.pkt.app_header_size;
+ tailroom -= odp_packet_buf_data_len(pkt_buf[num - 1]);
+
+ pkt_hdr->seg_count = num;
+ pkt_hdr->frame_len = data_len;
+ pkt_hdr->headroom = headroom;
+ pkt_hdr->tailroom = tailroom;
+
+ /* Reset metadata */
+ pkt_hdr->subtype = ODP_EVENT_PACKET_BASIC;
+ pkt_hdr->input = ODP_PKTIO_INVALID;
+ packet_parse_reset(pkt_hdr, 1);
+
+ return packet_handle(pkt_hdr);
+}
+
+void odp_packet_proto_stats_request(odp_packet_t pkt, odp_packet_proto_stats_opt_t *opt)
+{
+ (void)pkt;
+ (void)opt;
+}
+
+odp_proto_stats_t odp_packet_proto_stats(odp_packet_t pkt)
+{
+ (void)pkt;
+
+ return ODP_PROTO_STATS_INVALID;
+}
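
odp_packet_disassemble() and odp_packet_reassemble() above only accept packets from external memory pools (odp_pool_ext_create()). A rough usage sketch, with MAX_BUFS as a hypothetical per-packet segment bound and ext_pool an external-memory packet pool:

    #define MAX_BUFS 32  /* hypothetical upper bound on segments per packet */

    odp_packet_buf_t pkt_buf[MAX_BUFS];
    uint32_t num = odp_packet_disassemble(pkt, pkt_buf, MAX_BUFS);

    if (num > 0) {
            /* Hand the raw buffers to an external component, then rebuild. */
            odp_packet_t new_pkt = odp_packet_reassemble(ext_pool, pkt_buf, num);

            if (new_pkt != ODP_PACKET_INVALID)
                    odp_packet_free(new_pkt);
    }
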
diff --git a/platform/linux-generic/odp_packet_io.c b/platform/linux-generic/odp_packet_io.c
index 39cbd72f6..bd8bb58e8 100644
--- a/platform/linux-generic/odp_packet_io.c
+++ b/platform/linux-generic/odp_packet_io.c
@@ -30,6 +30,7 @@
#include <odp/api/plat/queue_inlines.h>
#include <odp_libconfig_internal.h>
#include <odp_event_vector_internal.h>
+#include <odp/api/proto_stats.h>
#include <string.h>
#include <inttypes.h>
@@ -727,7 +728,7 @@ odp_pktio_t odp_pktio_lookup(const char *name)
}
static void packet_vector_enq_cos(odp_queue_t queue, odp_event_t events[],
- uint32_t num, const cos_t *cos_hdr)
+ uint32_t num, cos_t *cos_hdr)
{
odp_packet_vector_t pktv;
odp_pool_t pool = cos_hdr->s.vector.pool;
@@ -748,6 +749,7 @@ static void packet_vector_enq_cos(odp_queue_t queue, odp_event_t events[],
}
if (odp_unlikely(i == 0)) {
odp_event_free_multi(events, num);
+ _odp_cos_queue_stats_add(cos_hdr, queue, 0, num);
return;
}
num_pktv = i;
@@ -768,9 +770,15 @@ static void packet_vector_enq_cos(odp_queue_t queue, odp_event_t events[],
}
ret = odp_queue_enq_multi(queue, event_tbl, num_pktv);
- if (odp_unlikely(ret != num_pktv)) {
+ if (odp_likely(ret == num_pktv)) {
+ _odp_cos_queue_stats_add(cos_hdr, queue, num_enq, num - num_enq);
+ } else {
+ uint32_t enqueued;
+
if (ret < 0)
ret = 0;
+ enqueued = max_size * ret;
+ _odp_cos_queue_stats_add(cos_hdr, queue, enqueued, num - enqueued);
odp_event_free_multi(&event_tbl[ret], num_pktv - ret);
}
}
@@ -900,6 +908,7 @@ static inline int pktin_recv_buf(pktio_entry_t *entry, int pktin_index,
}
for (i = 0; i < num_dst; i++) {
+ cos_t *cos_hdr = NULL;
int num_enq, ret;
int idx = dst_idx[i];
@@ -910,7 +919,7 @@ static inline int pktin_recv_buf(pktio_entry_t *entry, int pktin_index,
if (cos[i] != CLS_COS_IDX_NONE) {
/* Packets from classifier */
- cos_t *cos_hdr = _odp_cos_entry_from_idx(cos[i]);
+ cos_hdr = _odp_cos_entry_from_idx(cos[i]);
if (cos_hdr->s.vector.enable) {
packet_vector_enq_cos(dst[i], &ev[idx], num_enq, cos_hdr);
@@ -929,8 +938,11 @@ static inline int pktin_recv_buf(pktio_entry_t *entry, int pktin_index,
if (ret < num_enq)
odp_event_free_multi(&ev[idx + ret], num_enq - ret);
- }
+ /* Update CoS statistics */
+ if (cos[i] != CLS_COS_IDX_NONE)
+ _odp_cos_queue_stats_add(cos_hdr, dst[i], ret, num_enq - ret);
+ }
return num_rx;
}
@@ -1136,6 +1148,7 @@ int _odp_sched_cb_pktin_poll_one(int pktio_index,
odp_queue_t queue;
odp_bool_t vector_enabled = entry->s.in_queue[rx_queue].vector.enable;
uint32_t num = QUEUE_MULTI_MAX;
+ cos_t *cos_hdr = NULL;
if (odp_unlikely(entry->s.state != PKTIO_STATE_STARTED)) {
if (entry->s.state < PKTIO_STATE_ACTIVE ||
@@ -1162,12 +1175,13 @@ int _odp_sched_cb_pktin_poll_one(int pktio_index,
pkt_hdr = packet_hdr(pkt);
if (odp_unlikely(pkt_hdr->p.input_flags.dst_queue)) {
odp_event_t event = odp_packet_to_event(pkt);
+ uint16_t cos_idx = pkt_hdr->cos;
queue = pkt_hdr->dst_queue;
- if (pkt_hdr->cos != CLS_COS_IDX_NONE) {
+ if (cos_idx != CLS_COS_IDX_NONE) {
/* Packets from classifier */
- cos_t *cos_hdr = _odp_cos_entry_from_idx(pkt_hdr->cos);
+ cos_hdr = _odp_cos_entry_from_idx(cos_idx);
if (cos_hdr->s.vector.enable) {
packet_vector_enq_cos(queue, &event, 1, cos_hdr);
@@ -1182,7 +1196,13 @@ int _odp_sched_cb_pktin_poll_one(int pktio_index,
if (odp_unlikely(odp_queue_enq(queue, event))) {
/* Queue full? */
odp_packet_free(pkt);
- odp_atomic_inc_u64(&entry->s.stats_extra.in_discards);
+ if (cos_idx != CLS_COS_IDX_NONE)
+ _odp_cos_queue_stats_add(cos_hdr, queue, 0, 1);
+ else
+ odp_atomic_inc_u64(&entry->s.stats_extra.in_discards);
+ } else {
+ if (cos_idx != CLS_COS_IDX_NONE)
+ _odp_cos_queue_stats_add(cos_hdr, queue, 1, 0);
}
} else {
evt_tbl[num_rx++] = odp_packet_to_event(pkt);
@@ -3328,3 +3348,64 @@ int odp_pktout_send_lso(odp_pktout_queue_t queue, const odp_packet_t packet[], i
return i;
}
+
+void
+odp_proto_stats_param_init(odp_proto_stats_param_t *param)
+{
+ if (param)
+ memset(param, 0, sizeof(*param));
+}
+
+int
+odp_proto_stats_capability(odp_pktio_t pktio, odp_proto_stats_capability_t *capa)
+{
+ (void)pktio;
+
+ if (capa == NULL)
+ return -EINVAL;
+
+ memset(capa, 0, sizeof(*capa));
+
+ return 0;
+}
+
+odp_proto_stats_t
+odp_proto_stats_lookup(const char *name)
+{
+ (void)name;
+
+ return ODP_PROTO_STATS_INVALID;
+}
+
+odp_proto_stats_t
+odp_proto_stats_create(const char *name, const odp_proto_stats_param_t *param)
+{
+ (void)name;
+ (void)param;
+
+ return ODP_PROTO_STATS_INVALID;
+}
+
+int
+odp_proto_stats_destroy(odp_proto_stats_t stat)
+{
+ (void)stat;
+
+ return 0;
+}
+
+int
+odp_proto_stats(odp_proto_stats_t stat, odp_proto_stats_data_t *data)
+{
+ (void)stat;
+
+ memset(data, 0, sizeof(odp_proto_stats_data_t));
+
+ return 0;
+}
+
+void
+odp_proto_stats_print(odp_proto_stats_t stat)
+{
+ (void)stat;
+}
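
The proto stats functions above are stubs in linux-generic: the reported capability is all zeros and creation always fails. A sketch of the expected calling sequence, assuming pktio is an opened interface:

    odp_proto_stats_capability_t capa;
    odp_proto_stats_param_t param;
    odp_proto_stats_t ps;

    odp_proto_stats_param_init(&param);

    if (odp_proto_stats_capability(pktio, &capa) == 0) {
            /* With this stub no counters are supported, so create() is
             * expected to return ODP_PROTO_STATS_INVALID. */
            ps = odp_proto_stats_create("flow0", &param);
    }
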
diff --git a/platform/linux-generic/odp_packet_vector.c b/platform/linux-generic/odp_packet_vector.c
index 98f373814..d97bb96a1 100644
--- a/platform/linux-generic/odp_packet_vector.c
+++ b/platform/linux-generic/odp_packet_vector.c
@@ -39,7 +39,7 @@ odp_packet_vector_t odp_packet_vector_alloc(odp_pool_t pool)
{
odp_buffer_t buf;
- ODP_ASSERT(pool_entry_from_hdl(pool)->params.type == ODP_POOL_VECTOR);
+ ODP_ASSERT(pool_entry_from_hdl(pool)->type == ODP_POOL_VECTOR);
buf = odp_buffer_alloc(pool);
if (odp_unlikely(buf == ODP_BUFFER_INVALID))
diff --git a/platform/linux-generic/odp_pool.c b/platform/linux-generic/odp_pool.c
index 07da3d9cc..d1fc94369 100644
--- a/platform/linux-generic/odp_pool.c
+++ b/platform/linux-generic/odp_pool.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2013-2018, Linaro Limited
- * Copyright (c) 2019-2020, Nokia
+ * Copyright (c) 2019-2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -27,6 +27,7 @@
#include <string.h>
#include <stdio.h>
+#include <stddef.h>
#include <inttypes.h>
#include <odp/api/plat/pool_inline_types.h>
@@ -66,7 +67,7 @@ static __thread pool_local_t local;
/* Fill in pool header field offsets for inline functions */
const _odp_pool_inline_offset_t _odp_pool_inline ODP_ALIGNED_CACHE = {
.pool_hdl = offsetof(pool_t, pool_hdl),
- .uarea_size = offsetof(pool_t, params.pkt.uarea_size)
+ .uarea_size = offsetof(pool_t, param_uarea_size)
};
#include <odp/visibility_end.h>
@@ -369,9 +370,11 @@ int _odp_pool_term_local(void)
return 0;
}
-static pool_t *reserve_pool(uint32_t shmflags)
+static pool_t *reserve_pool(uint32_t shmflags, uint8_t pool_ext, uint32_t num)
{
int i;
+ odp_shm_t shm;
+ uint32_t mem_size;
pool_t *pool;
char ring_name[ODP_POOL_NAME_LEN];
@@ -382,19 +385,30 @@ static pool_t *reserve_pool(uint32_t shmflags)
if (pool->reserved == 0) {
pool->reserved = 1;
UNLOCK(&pool->lock);
+
+ memset(&pool->memset_mark, 0,
+ sizeof(pool_t) - offsetof(pool_t, memset_mark));
sprintf(ring_name, "_odp_pool_ring_%d", i);
- pool->ring_shm =
- odp_shm_reserve(ring_name,
- sizeof(pool_ring_t),
- ODP_CACHE_LINE_SIZE, shmflags);
- if (odp_unlikely(pool->ring_shm == ODP_SHM_INVALID)) {
+
+ /* Reserve memory for the ring, and for lookup table in case of pool ext */
+ mem_size = sizeof(pool_ring_t);
+ if (pool_ext)
+ mem_size += num * sizeof(odp_buffer_hdr_t *);
+
+ shm = odp_shm_reserve(ring_name, mem_size, ODP_CACHE_LINE_SIZE, shmflags);
+
+ if (odp_unlikely(shm == ODP_SHM_INVALID)) {
ODP_ERR("Unable to alloc pool ring %d\n", i);
LOCK(&pool->lock);
pool->reserved = 0;
UNLOCK(&pool->lock);
break;
}
- pool->ring = odp_shm_addr(pool->ring_shm);
+
+ pool->ring_shm = shm;
+ pool->ring = odp_shm_addr(shm);
+ pool->pool_ext = pool_ext;
+
return pool;
}
UNLOCK(&pool->lock);
@@ -403,20 +417,59 @@ static pool_t *reserve_pool(uint32_t shmflags)
return NULL;
}
+static void init_buffer_hdr(pool_t *pool, odp_buffer_hdr_t *buf_hdr, uint32_t buf_index,
+ uint32_t hdr_len, uint8_t *data_ptr, void *uarea)
+{
+ odp_pool_type_t type = pool->type;
+
+ memset(buf_hdr, 0, hdr_len);
+
+ /* Initialize buffer metadata */
+ buf_hdr->index.u32 = 0;
+ buf_hdr->index.pool = pool->pool_idx;
+ buf_hdr->index.buffer = buf_index;
+ buf_hdr->type = type;
+ buf_hdr->event_type = type;
+ buf_hdr->pool_ptr = pool;
+ buf_hdr->uarea_addr = uarea;
+ odp_atomic_init_u32(&buf_hdr->ref_cnt, 0);
+
+ /* Store base values for fast init */
+ buf_hdr->base_data = data_ptr;
+ buf_hdr->buf_end = data_ptr + pool->seg_len + pool->tailroom;
+
+ /* Initialize segmentation metadata */
+ if (type == ODP_POOL_PACKET) {
+ odp_packet_hdr_t *pkt_hdr = (void *)buf_hdr;
+
+ pkt_hdr->seg_data = data_ptr;
+ pkt_hdr->seg_len = pool->seg_len;
+ pkt_hdr->seg_count = 1;
+ pkt_hdr->seg_next = NULL;
+ }
+
+ /* Initialize event vector metadata */
+ if (type == ODP_POOL_VECTOR) {
+ odp_event_vector_hdr_t *vect_hdr = (void *)buf_hdr;
+
+ vect_hdr->size = 0;
+ buf_hdr->event_type = ODP_EVENT_PACKET_VECTOR;
+ }
+}
+
static void init_buffers(pool_t *pool)
{
uint64_t i;
odp_buffer_hdr_t *buf_hdr;
odp_packet_hdr_t *pkt_hdr;
- odp_event_vector_hdr_t *vect_hdr;
odp_shm_info_t shm_info;
void *addr;
void *uarea = NULL;
uint8_t *data;
- uint32_t offset;
+ uint32_t offset, hdr_len;
ring_ptr_t *ring;
uint32_t mask;
- int type;
+ odp_pool_type_t type;
uint64_t page_size;
int skipped_blocks = 0;
@@ -426,7 +479,7 @@ static void init_buffers(pool_t *pool)
page_size = shm_info.page_size;
ring = &pool->ring->hdr;
mask = pool->ring_mask;
- type = pool->params.type;
+ type = pool->type;
for (i = 0; i < pool->num + skipped_blocks ; i++) {
int skip = 0;
@@ -435,11 +488,10 @@ static void init_buffers(pool_t *pool)
pool->block_offset];
buf_hdr = addr;
pkt_hdr = addr;
- vect_hdr = addr;
+
/* Skip packet buffers which cross huge page boundaries. Some
* NICs cannot handle buffers which cross page boundaries. */
- if (pool->params.type == ODP_POOL_PACKET &&
- page_size >= FIRST_HP_SIZE) {
+ if (type == ODP_POOL_PACKET && page_size >= FIRST_HP_SIZE) {
uint64_t first_page;
uint64_t last_page;
@@ -467,37 +519,8 @@ static void init_buffers(pool_t *pool)
while (((uintptr_t)&data[offset]) % pool->align != 0)
offset++;
- memset(buf_hdr, 0, (uintptr_t)data - (uintptr_t)buf_hdr);
-
- /* Initialize buffer metadata */
- buf_hdr->index.u32 = 0;
- buf_hdr->index.pool = pool->pool_idx;
- buf_hdr->index.buffer = i;
- buf_hdr->type = type;
- buf_hdr->event_type = type;
- if (type == ODP_POOL_VECTOR)
- buf_hdr->event_type = ODP_EVENT_PACKET_VECTOR;
- buf_hdr->pool_ptr = pool;
- buf_hdr->uarea_addr = uarea;
-
- /* Initialize segmentation metadata */
- if (type == ODP_POOL_PACKET) {
- pkt_hdr->seg_data = &data[offset];
- pkt_hdr->seg_len = pool->seg_len;
- pkt_hdr->seg_count = 1;
- pkt_hdr->seg_next = NULL;
- }
-
- odp_atomic_init_u32(&buf_hdr->ref_cnt, 0);
-
- /* Initialize event vector metadata */
- if (type == ODP_POOL_VECTOR)
- vect_hdr->size = 0;
-
- /* Store base values for fast init */
- buf_hdr->base_data = &data[offset];
- buf_hdr->buf_end = &data[offset + pool->seg_len +
- pool->tailroom];
+ hdr_len = (uintptr_t)data - (uintptr_t)buf_hdr;
+ init_buffer_hdr(pool, buf_hdr, i, hdr_len, &data[offset], uarea);
/* Store buffer into the global pool */
if (!skip)
@@ -522,6 +545,67 @@ static bool shm_is_from_huge_pages(odp_shm_t shm)
return (info.page_size >= huge_page_size);
}
+static void set_pool_name(pool_t *pool, const char *name)
+{
+ if (name == NULL) {
+ pool->name[0] = 0;
+ } else {
+ strncpy(pool->name, name, ODP_POOL_NAME_LEN - 1);
+ pool->name[ODP_POOL_NAME_LEN - 1] = 0;
+ }
+}
+
+static void set_pool_cache_size(pool_t *pool, uint32_t cache_size)
+{
+ uint32_t burst_size;
+
+ pool->cache_size = 0;
+ pool->burst_size = 1;
+
+ if (cache_size > 1) {
+ cache_size = (cache_size / 2) * 2;
+ burst_size = _odp_pool_glb->config.burst_size;
+
+ if ((cache_size / burst_size) < 2)
+ burst_size = cache_size / 2;
+
+ pool->cache_size = cache_size;
+ pool->burst_size = burst_size;
+ }
+}
+
+static int reserve_uarea(pool_t *pool, uint32_t uarea_size, uint32_t num_pkt, uint32_t shmflags)
+{
+ odp_shm_t shm;
+ const char *max_prefix = "pool_000_uarea_";
+ int max_prefix_len = strlen(max_prefix);
+ char uarea_name[ODP_POOL_NAME_LEN + max_prefix_len];
+
+ pool->uarea_shm = ODP_SHM_INVALID;
+
+ if (uarea_size == 0) {
+ pool->param_uarea_size = 0;
+ pool->uarea_size = 0;
+ pool->uarea_shm_size = 0;
+ return 0;
+ }
+
+ sprintf(uarea_name, "pool_%03i_uarea_%s", pool->pool_idx, pool->name);
+
+ pool->param_uarea_size = uarea_size;
+ pool->uarea_size = ROUNDUP_CACHE_LINE(uarea_size);
+ pool->uarea_shm_size = num_pkt * (uint64_t)pool->uarea_size;
+
+ shm = odp_shm_reserve(uarea_name, pool->uarea_shm_size, ODP_PAGE_SIZE, shmflags);
+
+ if (shm == ODP_SHM_INVALID)
+ return -1;
+
+ pool->uarea_shm = shm;
+ pool->uarea_base_addr = odp_shm_addr(shm);
+ return 0;
+}
+
static odp_pool_t pool_create(const char *name, const odp_pool_param_t *params,
uint32_t shmflags)
{
@@ -529,17 +613,17 @@ static odp_pool_t pool_create(const char *name, const odp_pool_param_t *params,
uint32_t uarea_size, headroom, tailroom;
odp_shm_t shm;
uint32_t seg_len, align, num, hdr_size, block_size;
- uint32_t max_len, cache_size, burst_size;
+ uint32_t max_len, cache_size;
uint32_t ring_size;
+ odp_pool_type_t type = params->type;
uint32_t num_extra = 0;
- const char *max_prefix = "pool_000_uarea_";
+ const char *max_prefix = "pool_000_";
int max_prefix_len = strlen(max_prefix);
char shm_name[ODP_POOL_NAME_LEN + max_prefix_len];
- char uarea_name[ODP_POOL_NAME_LEN + max_prefix_len];
align = 0;
- if (params->type == ODP_POOL_PACKET) {
+ if (type == ODP_POOL_PACKET) {
uint32_t align_req = params->pkt.align;
if (align_req &&
@@ -551,7 +635,7 @@ static odp_pool_t pool_create(const char *name, const odp_pool_param_t *params,
align = _odp_pool_glb->config.pkt_base_align;
} else {
- if (params->type == ODP_POOL_BUFFER)
+ if (type == ODP_POOL_BUFFER)
align = params->buf.align;
if (align < _odp_pool_glb->config.buf_min_align)
@@ -572,7 +656,7 @@ static odp_pool_t pool_create(const char *name, const odp_pool_param_t *params,
uarea_size = 0;
cache_size = 0;
- switch (params->type) {
+ switch (type) {
case ODP_POOL_BUFFER:
num = params->buf.num;
seg_len = params->buf.size;
@@ -630,32 +714,23 @@ static odp_pool_t pool_create(const char *name, const odp_pool_param_t *params,
return ODP_POOL_INVALID;
}
- if (uarea_size)
- uarea_size = ROUNDUP_CACHE_LINE(uarea_size);
-
- pool = reserve_pool(shmflags);
+ pool = reserve_pool(shmflags, 0, num);
if (pool == NULL) {
ODP_ERR("No more free pools\n");
return ODP_POOL_INVALID;
}
- if (name == NULL) {
- pool->name[0] = 0;
- } else {
- strncpy(pool->name, name,
- ODP_POOL_NAME_LEN - 1);
- pool->name[ODP_POOL_NAME_LEN - 1] = 0;
- }
+ set_pool_name(pool, name);
/* Format SHM names from prefix, pool index and pool name. */
sprintf(shm_name, "pool_%03i_%s", pool->pool_idx, pool->name);
- sprintf(uarea_name, "pool_%03i_uarea_%s", pool->pool_idx, pool->name);
+ pool->type = type;
pool->params = *params;
pool->block_offset = 0;
- if (params->type == ODP_POOL_PACKET) {
+ if (type == ODP_POOL_PACKET) {
uint32_t dpdk_obj_size;
hdr_size = ROUNDUP_CACHE_LINE(sizeof(odp_packet_hdr_t));
@@ -681,9 +756,9 @@ static odp_pool_t pool_create(const char *name, const odp_pool_param_t *params,
uint32_t align_pad = (align > ODP_CACHE_LINE_SIZE) ?
align - ODP_CACHE_LINE_SIZE : 0;
- if (params->type == ODP_POOL_BUFFER)
+ if (type == ODP_POOL_BUFFER)
hdr_size = ROUNDUP_CACHE_LINE(sizeof(odp_buffer_hdr_t));
- else if (params->type == ODP_POOL_TIMEOUT)
+ else if (type == ODP_POOL_TIMEOUT)
hdr_size = ROUNDUP_CACHE_LINE(sizeof(odp_timeout_hdr_t));
else
hdr_size = ROUNDUP_CACHE_LINE(sizeof(odp_event_vector_hdr_t));
@@ -693,7 +768,7 @@ static odp_pool_t pool_create(const char *name, const odp_pool_param_t *params,
/* Allocate extra memory for skipping packet buffers which cross huge
* page boundaries. */
- if (params->type == ODP_POOL_PACKET) {
+ if (type == ODP_POOL_PACKET) {
num_extra = ((((uint64_t)num * block_size) +
FIRST_HP_SIZE - 1) / FIRST_HP_SIZE);
num_extra += ((((uint64_t)num_extra * block_size) +
@@ -715,25 +790,11 @@ static odp_pool_t pool_create(const char *name, const odp_pool_param_t *params,
pool->max_len = max_len;
pool->tailroom = tailroom;
pool->block_size = block_size;
- pool->uarea_size = uarea_size;
pool->shm_size = (num + num_extra) * (uint64_t)block_size;
- pool->uarea_shm_size = num * (uint64_t)uarea_size;
pool->ext_desc = NULL;
pool->ext_destroy = NULL;
- pool->cache_size = 0;
- pool->burst_size = 1;
-
- if (cache_size > 1) {
- cache_size = (cache_size / 2) * 2;
- burst_size = _odp_pool_glb->config.burst_size;
-
- if ((cache_size / burst_size) < 2)
- burst_size = cache_size / 2;
-
- pool->cache_size = cache_size;
- pool->burst_size = burst_size;
- }
+ set_pool_cache_size(pool, cache_size);
shm = odp_shm_reserve(shm_name, pool->shm_size, ODP_PAGE_SIZE,
shmflags);
@@ -750,26 +811,16 @@ static odp_pool_t pool_create(const char *name, const odp_pool_param_t *params,
pool->base_addr = odp_shm_addr(pool->shm);
pool->max_addr = pool->base_addr + pool->shm_size - 1;
- pool->uarea_shm = ODP_SHM_INVALID;
- if (uarea_size) {
- shm = odp_shm_reserve(uarea_name, pool->uarea_shm_size,
- ODP_PAGE_SIZE, shmflags);
-
- pool->uarea_shm = shm;
-
- if (shm == ODP_SHM_INVALID) {
- ODP_ERR("SHM reserve failed (uarea)\n");
- goto error;
- }
-
- pool->uarea_base_addr = odp_shm_addr(pool->uarea_shm);
+ if (reserve_uarea(pool, uarea_size, num, shmflags)) {
+ ODP_ERR("User area SHM reserve failed\n");
+ goto error;
}
ring_ptr_init(&pool->ring->hdr);
init_buffers(pool);
/* Create zero-copy DPDK memory pool. NOP if zero-copy is disabled. */
- if (params->type == ODP_POOL_PACKET && _odp_dpdk_pool_create(pool)) {
+ if (type == ODP_POOL_PACKET && _odp_dpdk_pool_create(pool)) {
ODP_ERR("Creating DPDK packet pool failed\n");
goto error;
}
@@ -994,7 +1045,8 @@ int odp_pool_destroy(odp_pool_t pool_hdl)
for (i = 0; i < ODP_THREAD_COUNT_MAX; i++)
cache_flush(&pool->local_cache[i], pool);
- odp_shm_free(pool->shm);
+ if (pool->pool_ext == 0)
+ odp_shm_free(pool->shm);
if (pool->uarea_shm != ODP_SHM_INVALID)
odp_shm_free(pool->uarea_shm);
@@ -1044,10 +1096,18 @@ int odp_pool_info(odp_pool_t pool_hdl, odp_pool_info_t *info)
if (pool == NULL || info == NULL)
return -1;
+ memset(info, 0, sizeof(odp_pool_info_t));
+
info->name = pool->name;
- info->params = pool->params;
- if (pool->params.type == ODP_POOL_PACKET)
+ if (pool->pool_ext) {
+ info->pool_ext = 1;
+ info->pool_ext_param = pool->ext_param;
+ } else {
+ info->params = pool->params;
+ }
+
+ if (pool->type == ODP_POOL_PACKET)
info->pkt.max_num = pool->num;
info->min_data_addr = (uintptr_t)pool->base_addr;
@@ -1314,10 +1374,10 @@ void odp_pool_print(odp_pool_t pool_hdl)
odp_pool_to_u64(pool->pool_hdl));
ODP_PRINT(" name %s\n", pool->name);
ODP_PRINT(" pool type %s\n",
- pool->params.type == ODP_POOL_BUFFER ? "buffer" :
- (pool->params.type == ODP_POOL_PACKET ? "packet" :
- (pool->params.type == ODP_POOL_TIMEOUT ? "timeout" :
- (pool->params.type == ODP_POOL_VECTOR ? "vector" :
+ pool->type == ODP_POOL_BUFFER ? "buffer" :
+ (pool->type == ODP_POOL_PACKET ? "packet" :
+ (pool->type == ODP_POOL_TIMEOUT ? "timeout" :
+ (pool->type == ODP_POOL_VECTOR ? "vector" :
"unknown"))));
ODP_PRINT(" pool shm %" PRIu64 "\n",
odp_shm_to_u64(pool->shm));
@@ -1492,3 +1552,250 @@ int odp_buffer_is_valid(odp_buffer_t buf)
return 1;
}
+
+/* No actual head pointer alignment requirement. Anyway, require even byte address. */
+#define MIN_HEAD_ALIGN 2
+
+int odp_pool_ext_capability(odp_pool_type_t type, odp_pool_ext_capability_t *capa)
+{
+ odp_pool_stats_opt_t supported_stats;
+
+ if (type != ODP_POOL_PACKET)
+ return -1;
+
+ supported_stats.all = 0;
+
+ memset(capa, 0, sizeof(odp_pool_ext_capability_t));
+
+ capa->type = type;
+ capa->max_pools = ODP_CONFIG_POOLS - 1;
+ capa->min_cache_size = 0;
+ capa->max_cache_size = CONFIG_POOL_CACHE_MAX_SIZE;
+ capa->stats.all = supported_stats.all;
+
+ capa->pkt.max_num_buf = _odp_pool_glb->config.pkt_max_num;
+ capa->pkt.max_buf_size = MAX_SIZE;
+ capa->pkt.odp_header_size = sizeof(odp_packet_hdr_t);
+ capa->pkt.odp_trailer_size = 0;
+ capa->pkt.min_mem_align = ODP_CACHE_LINE_SIZE;
+ capa->pkt.min_buf_align = ODP_CACHE_LINE_SIZE;
+ capa->pkt.min_head_align = MIN_HEAD_ALIGN;
+ capa->pkt.buf_size_aligned = 0;
+ capa->pkt.max_headroom = CONFIG_PACKET_HEADROOM;
+ capa->pkt.max_headroom_size = CONFIG_PACKET_HEADROOM;
+ capa->pkt.max_segs_per_pkt = PKT_MAX_SEGS;
+ capa->pkt.max_uarea_size = MAX_SIZE;
+
+ return 0;
+}
+
+void odp_pool_ext_param_init(odp_pool_type_t type, odp_pool_ext_param_t *param)
+{
+ uint32_t default_cache_size = _odp_pool_glb->config.local_cache_size;
+
+ memset(param, 0, sizeof(odp_pool_ext_param_t));
+
+ if (type != ODP_POOL_PACKET)
+ return;
+
+ param->type = ODP_POOL_PACKET;
+ param->cache_size = default_cache_size;
+ param->pkt.headroom = CONFIG_PACKET_HEADROOM;
+}
+
+static int check_pool_ext_param(const odp_pool_ext_param_t *param)
+{
+ odp_pool_ext_capability_t capa;
+ uint32_t head_offset = sizeof(odp_packet_hdr_t) + param->pkt.app_header_size;
+
+ if (param->type != ODP_POOL_PACKET) {
+ ODP_ERR("Pool type not supported\n");
+ return -1;
+ }
+
+ if (odp_pool_ext_capability(param->type, &capa)) {
+ ODP_ERR("Capa failed\n");
+ return -1;
+ }
+
+ if (param->cache_size > capa.max_cache_size) {
+ ODP_ERR("Too large cache size %u\n", param->cache_size);
+ return -1;
+ }
+
+ if (param->stats.all != capa.stats.all) {
+ ODP_ERR("Pool statistics not supported\n");
+ return -1;
+ }
+
+ if (param->pkt.num_buf > capa.pkt.max_num_buf) {
+ ODP_ERR("Too many packet buffers\n");
+ return -1;
+ }
+
+ if (param->pkt.buf_size > capa.pkt.max_buf_size) {
+ ODP_ERR("Too large packet buffer size %u\n", param->pkt.buf_size);
+ return -1;
+ }
+
+ if (param->pkt.uarea_size > capa.pkt.max_uarea_size) {
+ ODP_ERR("Too large user area size %u\n", param->pkt.uarea_size);
+ return -1;
+ }
+
+ if (param->pkt.headroom > capa.pkt.max_headroom) {
+ ODP_ERR("Too large headroom size\n");
+ return -1;
+ }
+
+ if (head_offset % capa.pkt.min_head_align) {
+ ODP_ERR("Head pointer not %u byte aligned\n", capa.pkt.min_head_align);
+ return -1;
+ }
+
+ return 0;
+}
+
+odp_pool_t odp_pool_ext_create(const char *name, const odp_pool_ext_param_t *param)
+{
+ pool_t *pool;
+ uint32_t ring_size;
+ uint32_t num_buf = param->pkt.num_buf;
+ uint32_t buf_size = param->pkt.buf_size;
+ uint32_t head_offset = sizeof(odp_packet_hdr_t) + param->pkt.app_header_size;
+ uint32_t headroom = param->pkt.headroom;
+ uint32_t shm_flags = 0;
+
+ if (check_pool_ext_param(param)) {
+ ODP_ERR("Bad pool ext param\n");
+ return ODP_POOL_INVALID;
+ }
+
+ if (odp_global_ro.shm_single_va)
+ shm_flags |= ODP_SHM_SINGLE_VA;
+
+ pool = reserve_pool(shm_flags, 1, num_buf);
+
+ if (pool == NULL) {
+ ODP_ERR("No more free pools\n");
+ return ODP_POOL_INVALID;
+ }
+
+ pool->ext_param = *param;
+ set_pool_name(pool, name);
+ set_pool_cache_size(pool, param->cache_size);
+
+ if (reserve_uarea(pool, param->pkt.uarea_size, num_buf, shm_flags)) {
+ ODP_ERR("User area SHM reserve failed\n");
+ goto error;
+ }
+
+ /* Ring size must be larger than the number of items stored */
+ if (num_buf + 1 <= RING_SIZE_MIN)
+ ring_size = RING_SIZE_MIN;
+ else
+ ring_size = ROUNDUP_POWER2_U32(num_buf + 1);
+
+ pool->ring_mask = ring_size - 1;
+ pool->type = param->type;
+ pool->num = num_buf;
+ pool->headroom = headroom;
+ pool->tailroom = 0;
+ pool->seg_len = buf_size - head_offset - headroom - pool->tailroom;
+ pool->max_seg_len = headroom + pool->seg_len + pool->tailroom;
+ pool->max_len = PKT_MAX_SEGS * pool->seg_len;
+
+ ring_ptr_init(&pool->ring->hdr);
+
+ return pool->pool_hdl;
+
+error:
+ if (pool->ring_shm != ODP_SHM_INVALID)
+ odp_shm_free(pool->ring_shm);
+
+ LOCK(&pool->lock);
+ pool->reserved = 0;
+ UNLOCK(&pool->lock);
+
+ return ODP_POOL_INVALID;
+}
+
+int odp_pool_ext_populate(odp_pool_t pool_hdl, void *buf[], uint32_t buf_size, uint32_t num,
+ uint32_t flags)
+{
+ pool_t *pool;
+ odp_buffer_hdr_t *buf_hdr;
+ ring_ptr_t *ring;
+ uint32_t i, ring_mask, buf_index, head_offset;
+ uint32_t num_populated;
+ uint8_t *data_ptr;
+ uint32_t hdr_size = sizeof(odp_packet_hdr_t);
+ void *uarea = NULL;
+
+ if (pool_hdl == ODP_POOL_INVALID) {
+ ODP_ERR("Bad pool handle\n");
+ return -1;
+ }
+
+ pool = pool_entry_from_hdl(pool_hdl);
+
+ if (pool->type != ODP_POOL_PACKET || pool->pool_ext == 0) {
+ ODP_ERR("Bad pool type\n");
+ return -1;
+ }
+
+ if (buf_size != pool->ext_param.pkt.buf_size) {
+ ODP_ERR("Bad buffer size\n");
+ return -1;
+ }
+
+ num_populated = pool->num_populated;
+
+ if (num_populated + num > pool->num) {
+ ODP_ERR("Trying to over populate the pool\n");
+ return -1;
+ }
+
+ if ((num_populated + num == pool->num) && !(flags & ODP_POOL_POPULATE_DONE)) {
+ ODP_ERR("Missing ODP_POOL_POPULATE_DONE flag\n");
+ return -1;
+ }
+
+ if ((num_populated + num < pool->num) && flags) {
+ ODP_ERR("Unexpected flags: 0x%x\n", flags);
+ return -1;
+ }
+
+ ring = &pool->ring->hdr;
+ ring_mask = pool->ring_mask;
+ buf_index = pool->num_populated;
+ head_offset = sizeof(odp_packet_hdr_t) + pool->ext_param.pkt.app_header_size;
+
+ for (i = 0; i < num; i++) {
+ buf_hdr = buf[i];
+
+ if ((uintptr_t)buf_hdr & (ODP_CACHE_LINE_SIZE - 1)) {
+ ODP_ERR("Bad packet buffer align: buf[%u]\n", i);
+ return -1;
+ }
+
+ if (((uintptr_t)buf_hdr + head_offset) & (MIN_HEAD_ALIGN - 1)) {
+ ODP_ERR("Bad head pointer align: buf[%u]\n", i);
+ return -1;
+ }
+
+ if (pool->uarea_size)
+ uarea = &pool->uarea_base_addr[buf_index * pool->uarea_size];
+
+ data_ptr = (uint8_t *)buf_hdr + head_offset + pool->headroom;
+ init_buffer_hdr(pool, buf_hdr, buf_index, hdr_size, data_ptr, uarea);
+ pool->ring->buf_hdr_by_index[buf_index] = buf_hdr;
+ buf_index++;
+
+ ring_ptr_enq(ring, ring_mask, buf_hdr);
+ }
+
+ pool->num_populated += num;
+
+ return 0;
+}
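
As an illustration only (not part of the patch), a minimal usage sketch of the new external
memory pool calls added above. The buffer count, buffer size, pool name and the
aligned_alloc() based memory source are assumptions chosen for the example.

#include <stdlib.h>
#include <odp_api.h>

/* Illustrative sketch: create an external memory packet pool and populate it
 * with application owned buffers. Sizes and the memory source are examples. */
static odp_pool_t ext_pool_example(void)
{
	odp_pool_ext_capability_t capa;
	odp_pool_ext_param_t param;
	odp_pool_t pool;
	uint32_t i, num = 1024, buf_size = 2048;
	static void *buf[1024];

	if (odp_pool_ext_capability(ODP_POOL_PACKET, &capa))
		return ODP_POOL_INVALID;

	odp_pool_ext_param_init(ODP_POOL_PACKET, &param);
	param.pkt.num_buf  = num;
	param.pkt.buf_size = buf_size;

	pool = odp_pool_ext_create("ext_pool", &param);
	if (pool == ODP_POOL_INVALID)
		return ODP_POOL_INVALID;

	/* Buffers must respect capa.pkt.min_buf_align (cache line size in
	 * this implementation). Allocation failures are not handled here. */
	for (i = 0; i < num; i++)
		buf[i] = aligned_alloc(capa.pkt.min_buf_align, buf_size);

	/* The last (and here only) populate call carries the DONE flag */
	if (odp_pool_ext_populate(pool, buf, buf_size, num, ODP_POOL_POPULATE_DONE)) {
		odp_pool_destroy(pool);
		return ODP_POOL_INVALID;
	}

	return pool;
}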
diff --git a/platform/linux-generic/odp_schedule_basic.c b/platform/linux-generic/odp_schedule_basic.c
index 5d328b84c..479d6f956 100644
--- a/platform/linux-generic/odp_schedule_basic.c
+++ b/platform/linux-generic/odp_schedule_basic.c
@@ -22,7 +22,7 @@
#include <odp/api/plat/thread_inlines.h>
#include <odp/api/time.h>
#include <odp/api/plat/time_inlines.h>
-#include <odp/api/spinlock.h>
+#include <odp/api/ticketlock.h>
#include <odp/api/hints.h>
#include <odp/api/cpu.h>
#include <odp/api/thrmask.h>
@@ -51,6 +51,18 @@
/* Group weight table size */
#define GRP_WEIGHT_TBL_SIZE NUM_SCHED_GRPS
+/* Spread balancing frequency. Balance every BALANCE_ROUNDS_M1 + 1 scheduling rounds. */
+#define BALANCE_ROUNDS_M1 0xfffff
+
+/* Load of a queue */
+#define QUEUE_LOAD 256
+
+/* Margin for load balance hysteresis */
+#define QUEUE_LOAD_MARGIN 8
+
+/* Ensure that load calculation does not wrap around */
+ODP_STATIC_ASSERT((QUEUE_LOAD * CONFIG_MAX_SCHED_QUEUES) < UINT32_MAX, "Load_value_too_large");
+
/* Maximum priority queue spread */
#define MAX_SPREAD 8
@@ -123,10 +135,12 @@ ODP_STATIC_ASSERT(sizeof(lock_called_t) == sizeof(uint32_t),
/* Scheduler local data */
typedef struct ODP_ALIGNED_CACHE {
+ uint32_t sched_round;
uint16_t thr;
uint8_t pause;
uint8_t sync_ctx;
- uint16_t grp_round;
+ uint8_t balance_on;
+ uint16_t balance_start;
uint16_t spread_round;
struct {
@@ -188,11 +202,12 @@ typedef struct {
uint8_t prefer_ratio;
} config;
+ uint8_t load_balance;
uint16_t max_spread;
uint32_t ring_mask;
- odp_spinlock_t mask_lock;
odp_atomic_u32_t grp_epoch;
odp_shm_t shm;
+ odp_ticketlock_t mask_lock[NUM_SCHED_GRPS];
prio_q_mask_t prio_q_mask[NUM_SCHED_GRPS][NUM_PRIO];
struct {
@@ -213,7 +228,7 @@ typedef struct {
uint32_t prio_q_count[NUM_SCHED_GRPS][NUM_PRIO][MAX_SPREAD];
odp_thrmask_t mask_all;
- odp_spinlock_t grp_lock;
+ odp_ticketlock_t grp_lock;
struct {
char name[ODP_SCHED_GROUP_NAME_LEN];
@@ -225,7 +240,7 @@ typedef struct {
struct {
int num_pktin;
} pktio[NUM_PKTIO];
- odp_spinlock_t pktio_lock;
+ odp_ticketlock_t pktio_lock;
order_context_t order[CONFIG_MAX_SCHED_QUEUES];
@@ -289,6 +304,22 @@ static int read_config_file(sched_global_t *sched)
sched->config.prefer_ratio = val + 1;
ODP_PRINT(" %s: %i\n", str, val);
+ str = "sched_basic.load_balance";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+
+ if (val > 1 || val < 0) {
+ ODP_ERR("Bad value %s = %i\n", str, val);
+ return -1;
+ }
+ ODP_PRINT(" %s: %i\n", str, val);
+
+ sched->load_balance = 1;
+ if (val == 0 || sched->config.num_spread == 1)
+ sched->load_balance = 0;
+
str = "sched_basic.burst_size_default";
if (_odp_libconfig_lookup_array(str, burst_val, NUM_PRIO) !=
NUM_PRIO) {
@@ -357,14 +388,16 @@ static int read_config_file(sched_global_t *sched)
sched->config_if.group_enable.control = val;
ODP_PRINT(" %s: %i\n", str, val);
+ ODP_PRINT(" dynamic load balance: %s\n", sched->load_balance ? "ON" : "OFF");
+
ODP_PRINT("\n");
return 0;
}
-static inline uint8_t spread_index(uint32_t index)
+/* Spread from thread or other index */
+static inline uint8_t spread_from_index(uint32_t index)
{
- /* thread/queue index to spread index */
return index % sched->config.num_spread;
}
@@ -381,15 +414,14 @@ static void sched_local_init(void)
sched_local.sync_ctx = NO_SYNC_CONTEXT;
sched_local.stash.queue = ODP_QUEUE_INVALID;
- spread = spread_index(sched_local.thr);
+ spread = spread_from_index(sched_local.thr);
prefer_ratio = sched->config.prefer_ratio;
for (i = 0; i < SPREAD_TBL_SIZE; i++) {
sched_local.spread_tbl[i] = spread;
if (num_spread > 1 && (i % prefer_ratio) == 0) {
- sched_local.spread_tbl[i] = spread_index(spread +
- offset);
+ sched_local.spread_tbl[i] = spread_from_index(spread + offset);
offset++;
if (offset == num_spread)
offset = 1;
@@ -402,7 +434,7 @@ static int schedule_init_global(void)
odp_shm_t shm;
int i, j, grp;
int prefer_ratio;
- uint32_t ring_size;
+ uint32_t ring_size, num_rings;
ODP_DBG("Schedule init ... ");
@@ -429,20 +461,29 @@ static int schedule_init_global(void)
/* When num_spread == 1, only spread_tbl[0] is used. */
sched->max_spread = (sched->config.num_spread - 1) * prefer_ratio;
- ring_size = MAX_RING_SIZE / sched->config.num_spread;
+ /* Dynamic load balance may move all queues into a single ring.
+ * Ring size can be smaller with fixed spreading. */
+ if (sched->load_balance) {
+ ring_size = MAX_RING_SIZE;
+ num_rings = 1;
+ } else {
+ ring_size = MAX_RING_SIZE / sched->config.num_spread;
+ num_rings = sched->config.num_spread;
+ }
+
ring_size = ROUNDUP_POWER2_U32(ring_size);
ODP_ASSERT(ring_size <= MAX_RING_SIZE);
sched->ring_mask = ring_size - 1;
/* Each ring can hold in maximum ring_size-1 queues. Due to ring size round up,
* total capacity of rings may be larger than CONFIG_MAX_SCHED_QUEUES. */
- sched->max_queues = sched->ring_mask * sched->config.num_spread;
+ sched->max_queues = sched->ring_mask * num_rings;
if (sched->max_queues > CONFIG_MAX_SCHED_QUEUES)
sched->max_queues = CONFIG_MAX_SCHED_QUEUES;
- odp_spinlock_init(&sched->mask_lock);
-
for (grp = 0; grp < NUM_SCHED_GRPS; grp++) {
+ odp_ticketlock_init(&sched->mask_lock[grp]);
+
for (i = 0; i < NUM_PRIO; i++) {
for (j = 0; j < MAX_SPREAD; j++) {
prio_queue_t *prio_q;
@@ -453,11 +494,11 @@ static int schedule_init_global(void)
}
}
- odp_spinlock_init(&sched->pktio_lock);
+ odp_ticketlock_init(&sched->pktio_lock);
for (i = 0; i < NUM_PKTIO; i++)
sched->pktio[i].num_pktin = 0;
- odp_spinlock_init(&sched->grp_lock);
+ odp_ticketlock_init(&sched->grp_lock);
odp_atomic_init_u32(&sched->grp_epoch, 0);
for (i = 0; i < NUM_SCHED_GRPS; i++) {
@@ -538,7 +579,7 @@ static inline int grp_update_tbl(void)
int num = 0;
int thr = sched_local.thr;
- odp_spinlock_lock(&sched->grp_lock);
+ odp_ticketlock_lock(&sched->grp_lock);
for (i = 0; i < NUM_SCHED_GRPS; i++) {
if (sched->sched_grp[i].allocated == 0)
@@ -550,7 +591,7 @@ static inline int grp_update_tbl(void)
}
}
- odp_spinlock_unlock(&sched->grp_lock);
+ odp_ticketlock_unlock(&sched->grp_lock);
if (odp_unlikely(num == 0))
return 0;
@@ -593,13 +634,68 @@ static inline int prio_level_from_api(int api_prio)
return schedule_max_prio() - api_prio;
}
+static inline void dec_queue_count(int grp, int prio, int spr)
+{
+ odp_ticketlock_lock(&sched->mask_lock[grp]);
+
+ sched->prio_q_count[grp][prio][spr]--;
+
+ /* Clear mask bit only when the last queue is removed */
+ if (sched->prio_q_count[grp][prio][spr] == 0)
+ sched->prio_q_mask[grp][prio] &= (uint8_t)(~(1 << spr));
+
+ odp_ticketlock_unlock(&sched->mask_lock[grp]);
+}
+
+static inline void update_queue_count(int grp, int prio, int old_spr, int new_spr)
+{
+ odp_ticketlock_lock(&sched->mask_lock[grp]);
+
+ sched->prio_q_mask[grp][prio] |= 1 << new_spr;
+ sched->prio_q_count[grp][prio][new_spr]++;
+
+ sched->prio_q_count[grp][prio][old_spr]--;
+
+ if (sched->prio_q_count[grp][prio][old_spr] == 0)
+ sched->prio_q_mask[grp][prio] &= (uint8_t)(~(1 << old_spr));
+
+ odp_ticketlock_unlock(&sched->mask_lock[grp]);
+}
+
+/* Select the spread that has the fewest queues */
+static uint8_t allocate_spread(int grp, int prio)
+{
+ uint8_t i;
+ uint32_t num;
+ uint32_t min = UINT32_MAX;
+ uint8_t num_spread = sched->config.num_spread;
+ uint8_t spr = 0;
+
+ odp_ticketlock_lock(&sched->mask_lock[grp]);
+
+ for (i = 0; i < num_spread; i++) {
+ num = sched->prio_q_count[grp][prio][i];
+ if (num < min) {
+ spr = i;
+ min = num;
+ }
+ }
+
+ sched->prio_q_mask[grp][prio] |= 1 << spr;
+ sched->prio_q_count[grp][prio][spr]++;
+
+ odp_ticketlock_unlock(&sched->mask_lock[grp]);
+
+ return spr;
+}
+
static int schedule_create_queue(uint32_t queue_index,
const odp_schedule_param_t *sched_param)
{
int i;
+ uint8_t spread;
int grp = sched_param->group;
int prio = prio_level_from_api(sched_param->prio);
- uint8_t spread = spread_index(queue_index);
if (odp_global_rw->schedule_configured == 0) {
ODP_ERR("Scheduler has not been configured\n");
@@ -623,13 +719,7 @@ static int schedule_create_queue(uint32_t queue_index,
return -1;
}
- odp_spinlock_lock(&sched->mask_lock);
-
- /* update scheduler prio queue usage status */
- sched->prio_q_mask[grp][prio] |= 1 << spread;
- sched->prio_q_count[grp][prio][spread]++;
-
- odp_spinlock_unlock(&sched->mask_lock);
+ spread = allocate_spread(grp, prio);
sched->queue[queue_index].grp = grp;
sched->queue[queue_index].prio = prio;
@@ -658,17 +748,9 @@ static void schedule_destroy_queue(uint32_t queue_index)
{
int grp = sched->queue[queue_index].grp;
int prio = sched->queue[queue_index].prio;
- uint8_t spread = spread_index(queue_index);
-
- odp_spinlock_lock(&sched->mask_lock);
-
- /* Clear mask bit when last queue is removed*/
- sched->prio_q_count[grp][prio][spread]--;
-
- if (sched->prio_q_count[grp][prio][spread] == 0)
- sched->prio_q_mask[grp][prio] &= (uint8_t)(~(1 << spread));
+ int spread = sched->queue[queue_index].spread;
- odp_spinlock_unlock(&sched->mask_lock);
+ dec_queue_count(grp, prio, spread);
sched->queue[queue_index].grp = 0;
sched->queue[queue_index].prio = 0;
@@ -860,7 +942,7 @@ static void schedule_group_clear(odp_schedule_group_t group)
static int schedule_config(const odp_schedule_config_t *config)
{
- odp_spinlock_lock(&sched->grp_lock);
+ odp_ticketlock_lock(&sched->grp_lock);
sched->config_if.group_enable.all = config->sched_group.all;
sched->config_if.group_enable.control = config->sched_group.control;
@@ -876,11 +958,51 @@ static int schedule_config(const odp_schedule_config_t *config)
if (!config->sched_group.control)
schedule_group_clear(ODP_SCHED_GROUP_CONTROL);
- odp_spinlock_unlock(&sched->grp_lock);
+ odp_ticketlock_unlock(&sched->grp_lock);
return 0;
}
+/* Spread load after adding 'num' queues */
+static inline uint32_t spread_load(int grp, int prio, int spr, int num)
+{
+ uint32_t num_q, num_thr;
+
+ num_q = sched->prio_q_count[grp][prio][spr];
+ num_thr = sched->sched_grp[grp].spread_thrs[spr];
+
+ if (num_thr == 0)
+ return UINT32_MAX;
+
+ return ((num_q + num) * QUEUE_LOAD) / num_thr;
+}
+
+static inline int balance_spread(int grp, int prio, int cur_spr)
+{
+ int spr;
+ uint64_t cur_load, min_load, load;
+ int num_spread = sched->config.num_spread;
+ int new_spr = cur_spr;
+
+ cur_load = spread_load(grp, prio, cur_spr, 0);
+ min_load = cur_load;
+
+ for (spr = 0; spr < num_spread; spr++) {
+ if (spr == cur_spr)
+ continue;
+
+ load = spread_load(grp, prio, spr, 1);
+
+ /* Move the queue only if the improvement exceeds the margin */
+ if ((load + QUEUE_LOAD_MARGIN) < min_load) {
+ new_spr = spr;
+ min_load = load;
+ }
+ }
+
+ return new_spr;
+}
+
static inline int copy_from_stash(odp_event_t out_ev[], unsigned int max)
{
int i = 0;
@@ -986,10 +1108,10 @@ static inline int poll_pktin(uint32_t qi, int direct_recv,
/* Pktio stopped or closed. Call stop_finalize when we have stopped
* polling all pktin queues of the pktio. */
if (odp_unlikely(num < 0)) {
- odp_spinlock_lock(&sched->pktio_lock);
+ odp_ticketlock_lock(&sched->pktio_lock);
sched->pktio[pktio_index].num_pktin--;
num_pktin = sched->pktio[pktio_index].num_pktin;
- odp_spinlock_unlock(&sched->pktio_lock);
+ odp_ticketlock_unlock(&sched->pktio_lock);
_odp_sched_queue_set_status(qi, QUEUE_STATUS_NOTSCHED);
@@ -1021,9 +1143,9 @@ static inline int poll_pktin(uint32_t qi, int direct_recv,
}
static inline int do_schedule_grp(odp_queue_t *out_queue, odp_event_t out_ev[],
- unsigned int max_num, int grp, int first_spr)
+ unsigned int max_num, int grp, int first_spr, int balance)
{
- int prio, spr, i, ret;
+ int prio, spr, new_spr, i, ret;
uint32_t qi;
uint16_t burst_def;
int num_spread = sched->config.num_spread;
@@ -1091,6 +1213,19 @@ static inline int do_schedule_grp(odp_queue_t *out_queue, odp_event_t out_ev[],
pktin = queue_is_pktin(qi);
+ /* Update queue spread before dequeue. Dequeue changes status of an empty
+ * queue, which enables a following enqueue operation to insert the queue
+ * back into scheduling (with new spread). */
+ if (odp_unlikely(balance)) {
+ new_spr = balance_spread(grp, prio, spr);
+
+ if (new_spr != spr) {
+ sched->queue[qi].spread = new_spr;
+ ring = &sched->prio_q[grp][prio][new_spr].ring;
+ update_queue_count(grp, prio, spr, new_spr);
+ }
+ }
+
num = _odp_sched_queue_deq(qi, ev_tbl, max_deq, !pktin);
if (odp_unlikely(num < 0)) {
@@ -1186,8 +1321,10 @@ static inline int do_schedule(odp_queue_t *out_queue, odp_event_t out_ev[],
unsigned int max_num)
{
int i, num_grp, ret, spr, grp_id;
+ uint32_t sched_round;
uint16_t spread_round, grp_round;
uint32_t epoch;
+ int balance = 0;
if (sched_local.stash.num_ev) {
ret = copy_from_stash(out_ev, max_num);
@@ -1207,10 +1344,28 @@ static inline int do_schedule(odp_queue_t *out_queue, odp_event_t out_ev[],
if (odp_unlikely(sched_local.pause))
return 0;
+ sched_round = sched_local.sched_round++;
+ grp_round = sched_round & (GRP_WEIGHT_TBL_SIZE - 1);
+
/* Each thread prefers a priority queue. Spread weight table avoids
* starvation of other priority queues on low thread counts. */
spread_round = sched_local.spread_round;
- grp_round = (sched_local.grp_round++) & (GRP_WEIGHT_TBL_SIZE - 1);
+
+ if (odp_likely(sched->load_balance)) {
+ /* Spread balance is checked max_spread times in every BALANCE_ROUNDS_M1 + 1
+ * scheduling rounds. */
+ if (odp_unlikely(sched_local.balance_on)) {
+ balance = 1;
+
+ if (sched_local.balance_start == spread_round)
+ sched_local.balance_on = 0;
+ }
+
+ if (odp_unlikely((sched_round & BALANCE_ROUNDS_M1) == 0)) {
+ sched_local.balance_start = spread_round;
+ sched_local.balance_on = 1;
+ }
+ }
if (odp_unlikely(spread_round + 1 >= sched->max_spread))
sched_local.spread_round = 0;
@@ -1234,7 +1389,7 @@ static inline int do_schedule(odp_queue_t *out_queue, odp_event_t out_ev[],
int grp;
grp = sched_local.grp[grp_id];
- ret = do_schedule_grp(out_queue, out_ev, max_num, grp, spr);
+ ret = do_schedule_grp(out_queue, out_ev, max_num, grp, spr, balance);
if (odp_likely(ret))
return ret;
@@ -1424,7 +1579,7 @@ static odp_schedule_group_t schedule_group_create(const char *name,
odp_schedule_group_t group = ODP_SCHED_GROUP_INVALID;
int i;
- odp_spinlock_lock(&sched->grp_lock);
+ odp_ticketlock_lock(&sched->grp_lock);
for (i = SCHED_GROUP_NAMED; i < NUM_SCHED_GRPS; i++) {
if (!sched->sched_grp[i].allocated) {
@@ -1445,7 +1600,7 @@ static odp_schedule_group_t schedule_group_create(const char *name,
}
}
- odp_spinlock_unlock(&sched->grp_lock);
+ odp_ticketlock_unlock(&sched->grp_lock);
return group;
}
@@ -1456,7 +1611,7 @@ static int schedule_group_destroy(odp_schedule_group_t group)
odp_thrmask_zero(&zero);
- odp_spinlock_lock(&sched->grp_lock);
+ odp_ticketlock_lock(&sched->grp_lock);
if (group < NUM_SCHED_GRPS && group >= SCHED_GROUP_NAMED &&
sched->sched_grp[group].allocated) {
@@ -1469,7 +1624,7 @@ static int schedule_group_destroy(odp_schedule_group_t group)
ret = -1;
}
- odp_spinlock_unlock(&sched->grp_lock);
+ odp_ticketlock_unlock(&sched->grp_lock);
return ret;
}
@@ -1478,7 +1633,7 @@ static odp_schedule_group_t schedule_group_lookup(const char *name)
odp_schedule_group_t group = ODP_SCHED_GROUP_INVALID;
int i;
- odp_spinlock_lock(&sched->grp_lock);
+ odp_ticketlock_lock(&sched->grp_lock);
for (i = SCHED_GROUP_NAMED; i < NUM_SCHED_GRPS; i++) {
if (strcmp(name, sched->sched_grp[i].name) == 0) {
@@ -1487,7 +1642,7 @@ static odp_schedule_group_t schedule_group_lookup(const char *name)
}
}
- odp_spinlock_unlock(&sched->grp_lock);
+ odp_ticketlock_unlock(&sched->grp_lock);
return group;
}
@@ -1521,23 +1676,23 @@ static int schedule_group_join(odp_schedule_group_t group, const odp_thrmask_t *
thr = odp_thrmask_next(mask, thr);
}
- odp_spinlock_lock(&sched->grp_lock);
+ odp_ticketlock_lock(&sched->grp_lock);
if (sched->sched_grp[group].allocated == 0) {
- odp_spinlock_unlock(&sched->grp_lock);
+ odp_ticketlock_unlock(&sched->grp_lock);
ODP_ERR("Bad group status\n");
return -1;
}
for (i = 0; i < count; i++) {
- spread = spread_index(thr_tbl[i]);
+ spread = spread_from_index(thr_tbl[i]);
sched->sched_grp[group].spread_thrs[spread]++;
}
odp_thrmask_or(&new_mask, &sched->sched_grp[group].mask, mask);
grp_update_mask(group, &new_mask);
- odp_spinlock_unlock(&sched->grp_lock);
+ odp_ticketlock_unlock(&sched->grp_lock);
return 0;
}
@@ -1573,23 +1728,23 @@ static int schedule_group_leave(odp_schedule_group_t group, const odp_thrmask_t
odp_thrmask_xor(&new_mask, mask, &sched->mask_all);
- odp_spinlock_lock(&sched->grp_lock);
+ odp_ticketlock_lock(&sched->grp_lock);
if (sched->sched_grp[group].allocated == 0) {
- odp_spinlock_unlock(&sched->grp_lock);
+ odp_ticketlock_unlock(&sched->grp_lock);
ODP_ERR("Bad group status\n");
return -1;
}
for (i = 0; i < count; i++) {
- spread = spread_index(thr_tbl[i]);
+ spread = spread_from_index(thr_tbl[i]);
sched->sched_grp[group].spread_thrs[spread]--;
}
odp_thrmask_and(&new_mask, &sched->sched_grp[group].mask, &new_mask);
grp_update_mask(group, &new_mask);
- odp_spinlock_unlock(&sched->grp_lock);
+ odp_ticketlock_unlock(&sched->grp_lock);
return 0;
}
@@ -1598,7 +1753,7 @@ static int schedule_group_thrmask(odp_schedule_group_t group,
{
int ret;
- odp_spinlock_lock(&sched->grp_lock);
+ odp_ticketlock_lock(&sched->grp_lock);
if (group < NUM_SCHED_GRPS && sched->sched_grp[group].allocated) {
*thrmask = sched->sched_grp[group].mask;
@@ -1607,7 +1762,7 @@ static int schedule_group_thrmask(odp_schedule_group_t group,
ret = -1;
}
- odp_spinlock_unlock(&sched->grp_lock);
+ odp_ticketlock_unlock(&sched->grp_lock);
return ret;
}
@@ -1616,7 +1771,7 @@ static int schedule_group_info(odp_schedule_group_t group,
{
int ret;
- odp_spinlock_lock(&sched->grp_lock);
+ odp_ticketlock_lock(&sched->grp_lock);
if (group < NUM_SCHED_GRPS && sched->sched_grp[group].allocated) {
info->name = sched->sched_grp[group].name;
@@ -1626,7 +1781,7 @@ static int schedule_group_info(odp_schedule_group_t group,
ret = -1;
}
- odp_spinlock_unlock(&sched->grp_lock);
+ odp_ticketlock_unlock(&sched->grp_lock);
return ret;
}
@@ -1634,7 +1789,7 @@ static int schedule_thr_add(odp_schedule_group_t group, int thr)
{
odp_thrmask_t mask;
odp_thrmask_t new_mask;
- uint8_t spread = spread_index(thr);
+ uint8_t spread = spread_from_index(thr);
if (group < 0 || group >= SCHED_GROUP_NAMED)
return -1;
@@ -1642,10 +1797,10 @@ static int schedule_thr_add(odp_schedule_group_t group, int thr)
odp_thrmask_zero(&mask);
odp_thrmask_set(&mask, thr);
- odp_spinlock_lock(&sched->grp_lock);
+ odp_ticketlock_lock(&sched->grp_lock);
if (!sched->sched_grp[group].allocated) {
- odp_spinlock_unlock(&sched->grp_lock);
+ odp_ticketlock_unlock(&sched->grp_lock);
return 0;
}
@@ -1653,7 +1808,7 @@ static int schedule_thr_add(odp_schedule_group_t group, int thr)
sched->sched_grp[group].spread_thrs[spread]++;
grp_update_mask(group, &new_mask);
- odp_spinlock_unlock(&sched->grp_lock);
+ odp_ticketlock_unlock(&sched->grp_lock);
return 0;
}
@@ -1662,7 +1817,7 @@ static int schedule_thr_rem(odp_schedule_group_t group, int thr)
{
odp_thrmask_t mask;
odp_thrmask_t new_mask;
- uint8_t spread = spread_index(thr);
+ uint8_t spread = spread_from_index(thr);
if (group < 0 || group >= SCHED_GROUP_NAMED)
return -1;
@@ -1671,10 +1826,10 @@ static int schedule_thr_rem(odp_schedule_group_t group, int thr)
odp_thrmask_set(&mask, thr);
odp_thrmask_xor(&new_mask, &mask, &sched->mask_all);
- odp_spinlock_lock(&sched->grp_lock);
+ odp_ticketlock_lock(&sched->grp_lock);
if (!sched->sched_grp[group].allocated) {
- odp_spinlock_unlock(&sched->grp_lock);
+ odp_ticketlock_unlock(&sched->grp_lock);
return 0;
}
@@ -1682,7 +1837,7 @@ static int schedule_thr_rem(odp_schedule_group_t group, int thr)
sched->sched_grp[group].spread_thrs[spread]--;
grp_update_mask(group, &new_mask);
- odp_spinlock_unlock(&sched->grp_lock);
+ odp_ticketlock_unlock(&sched->grp_lock);
return 0;
}
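
For reference, the load metric behind the new balancing code can be restated on its own.
The sketch below is not from the patch; it mirrors the QUEUE_LOAD / QUEUE_LOAD_MARGIN
arithmetic with example numbers.

#include <stdint.h>

#define EX_QUEUE_LOAD        256
#define EX_QUEUE_LOAD_MARGIN 8

/* Load of one spread: queues per serving thread, scaled by QUEUE_LOAD */
static uint32_t ex_spread_load(uint32_t num_queues, uint32_t num_threads)
{
	if (num_threads == 0)
		return UINT32_MAX;

	return (num_queues * EX_QUEUE_LOAD) / num_threads;
}

/* Example: the current spread has 6 queues served by 2 threads -> load 768.
 * A candidate spread with 3 queues and 2 threads would have
 * (3 + 1) * 256 / 2 = 512 after the move. Since 512 + 8 < 768, the queue
 * is moved to the candidate spread. */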
diff --git a/platform/linux-generic/odp_fractional.c b/platform/linux-generic/odp_std.c
index c98f3a4b2..9db5a35b3 100644
--- a/platform/linux-generic/odp_fractional.c
+++ b/platform/linux-generic/odp_std.c
@@ -4,7 +4,7 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <odp/api/std_types.h>
+#include <odp/api/std.h>
double odp_fract_u64_to_dbl(const odp_fract_u64_t *fract)
{
diff --git a/platform/linux-generic/odp_std_clib_api.c b/platform/linux-generic/odp_std_api.c
index a0ba25ae8..0bcd68de2 100644
--- a/platform/linux-generic/odp_std_clib_api.c
+++ b/platform/linux-generic/odp_std_api.c
@@ -4,8 +4,8 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <odp/api/std_clib.h>
+#include <odp/api/std.h>
/* Include non-inlined versions of API functions */
#define _ODP_NO_INLINE
-#include <odp/api/plat/std_clib_inlines.h>
+#include <odp/api/plat/std_inlines.h>
diff --git a/platform/linux-generic/odp_traffic_mngr.c b/platform/linux-generic/odp_traffic_mngr.c
index cd7a9ecd9..de01af96c 100644
--- a/platform/linux-generic/odp_traffic_mngr.c
+++ b/platform/linux-generic/odp_traffic_mngr.c
@@ -2024,14 +2024,18 @@ static int tm_enqueue(tm_system_t *tm_system,
drop = random_early_discard(tm_system, tm_queue_obj,
initial_tm_wred_node, pkt_color);
if (drop)
- return -1;
+ return -2;
}
work_item.queue_num = tm_queue_obj->queue_num;
work_item.pkt = pkt;
- _odp_sched_fn->order_lock();
+ if (tm_queue_obj->ordered_enqueue)
+ _odp_sched_fn->order_lock();
+
rc = input_work_queue_append(tm_system, &work_item);
- _odp_sched_fn->order_unlock();
+
+ if (tm_queue_obj->ordered_enqueue)
+ _odp_sched_fn->order_unlock();
if (rc < 0) {
ODP_DBG("%s work queue full\n", __func__);
@@ -2230,6 +2234,7 @@ static void tm_send_pkt(tm_system_t *tm_system, uint32_t max_sends)
odp_packet_t odp_pkt;
pkt_desc_t *pkt_desc;
uint32_t cnt;
+ int ret;
for (cnt = 1; cnt <= max_sends; cnt++) {
pkt_desc = &tm_system->egress_pkt_desc;
@@ -2248,8 +2253,16 @@ static void tm_send_pkt(tm_system_t *tm_system, uint32_t max_sends)
tm_system->egress_pkt_desc = EMPTY_PKT_DESC;
if (tm_system->egress.egress_kind == ODP_TM_EGRESS_PKT_IO) {
- if (odp_pktout_send(tm_system->pktout, &odp_pkt, 1) != 1)
+ ret = odp_pktout_send(tm_system->pktout, &odp_pkt, 1);
+ if (odp_unlikely(ret != 1)) {
odp_packet_free(odp_pkt);
+ if (odp_unlikely(ret < 0))
+ odp_atomic_inc_u64(&tm_queue_obj->stats.errors);
+ else
+ odp_atomic_inc_u64(&tm_queue_obj->stats.discards);
+ } else {
+ odp_atomic_inc_u64(&tm_queue_obj->stats.packets);
+ }
} else if (tm_system->egress.egress_kind == ODP_TM_EGRESS_FN) {
tm_system->egress.egress_fcn(odp_pkt);
} else {
@@ -2535,6 +2548,8 @@ odp_bool_t odp_tm_is_idle(odp_tm_t odp_tm)
void odp_tm_requirements_init(odp_tm_requirements_t *requirements)
{
memset(requirements, 0, sizeof(odp_tm_requirements_t));
+
+ requirements->pkt_prio_mode = ODP_TM_PKT_PRIO_MODE_PRESERVE;
}
void odp_tm_egress_init(odp_tm_egress_t *egress)
@@ -2542,8 +2557,8 @@ void odp_tm_egress_init(odp_tm_egress_t *egress)
memset(egress, 0, sizeof(odp_tm_egress_t));
}
-int odp_tm_capabilities(odp_tm_capabilities_t capabilities[] ODP_UNUSED,
- uint32_t capabilities_size)
+static int tm_capabilities(odp_tm_capabilities_t capabilities[],
+ uint32_t capabilities_size)
{
odp_tm_level_capabilities_t *per_level_cap;
odp_tm_capabilities_t *cap_ptr;
@@ -2565,6 +2580,11 @@ int odp_tm_capabilities(odp_tm_capabilities_t capabilities[] ODP_UNUSED,
cap_ptr->vlan_marking_supported = true;
cap_ptr->ecn_marking_supported = true;
cap_ptr->drop_prec_marking_supported = true;
+ cap_ptr->tm_queue_threshold = true;
+ cap_ptr->tm_queue_query_flags = (ODP_TM_QUERY_PKT_CNT |
+ ODP_TM_QUERY_BYTE_CNT |
+ ODP_TM_QUERY_THRESHOLDS);
+ cap_ptr->max_schedulers_per_node = ODP_TM_MAX_PRIORITIES;
cap_ptr->dynamic_topology_update = true;
cap_ptr->dynamic_shaper_update = true;
@@ -2572,6 +2592,9 @@ int odp_tm_capabilities(odp_tm_capabilities_t capabilities[] ODP_UNUSED,
cap_ptr->dynamic_wred_update = true;
cap_ptr->dynamic_threshold_update = true;
+ /* Only packet priority mode preserve is supported */
+ cap_ptr->pkt_prio_modes[ODP_TM_PKT_PRIO_MODE_PRESERVE] = true;
+
for (color = 0; color < ODP_NUM_PACKET_COLORS; color++)
cap_ptr->marking_colors_supported[color] = true;
@@ -2589,11 +2612,48 @@ int odp_tm_capabilities(odp_tm_capabilities_t capabilities[] ODP_UNUSED,
per_level_cap->tm_node_dual_slope_supported = true;
per_level_cap->fair_queuing_supported = true;
per_level_cap->weights_supported = true;
+ per_level_cap->tm_node_threshold = true;
}
+ cap_ptr->queue_stats.counter.discards = 1;
+ cap_ptr->queue_stats.counter.errors = 1;
+ cap_ptr->queue_stats.counter.packets = 1;
+
return 1;
}
+int ODP_DEPRECATE(odp_tm_capabilities)(odp_tm_capabilities_t capabilities[],
+ uint32_t capabilities_size)
+{
+ return tm_capabilities(capabilities, capabilities_size);
+}
+
+int odp_tm_egress_capabilities(odp_tm_capabilities_t *capabilities,
+ const odp_tm_egress_t *egress)
+{
+ pktio_entry_t *entry;
+ int ret;
+
+ memset(capabilities, 0, sizeof(odp_tm_capabilities_t));
+ if (egress->egress_kind == ODP_TM_EGRESS_PKT_IO) {
+ entry = get_pktio_entry(egress->pktio);
+ if (entry == NULL) {
+ ODP_DBG("pktio entry %" PRIuPTR " does not exist\n",
+ (uintptr_t)egress->pktio);
+ return -1;
+ }
+
+ /* Report not capable if pktout mode is not TM */
+ if (entry->s.param.out_mode != ODP_PKTOUT_MODE_TM)
+ return 0;
+ }
+
+ ret = tm_capabilities(capabilities, 1);
+ if (ret <= 0)
+ return -1;
+ return 0;
+}
+
static void tm_system_capabilities_set(odp_tm_capabilities_t *cap_ptr,
odp_tm_requirements_t *req_ptr)
{
@@ -2601,10 +2661,11 @@ static void tm_system_capabilities_set(odp_tm_capabilities_t *cap_ptr,
odp_tm_level_capabilities_t *per_level_cap;
odp_packet_color_t color;
odp_bool_t shaper_supported, wred_supported;
- odp_bool_t dual_slope;
+ odp_bool_t dual_slope, threshold;
uint32_t num_levels, level_idx, max_nodes;
uint32_t max_queues, max_fanin;
- uint8_t max_priority, min_weight, max_weight;
+ uint32_t min_weight, max_weight;
+ uint8_t max_priority;
num_levels = MAX(MIN(req_ptr->num_levels, ODP_TM_MAX_LEVELS), 1);
memset(cap_ptr, 0, sizeof(odp_tm_capabilities_t));
@@ -2614,6 +2675,7 @@ static void tm_system_capabilities_set(odp_tm_capabilities_t *cap_ptr,
shaper_supported = req_ptr->tm_queue_shaper_needed;
wred_supported = req_ptr->tm_queue_wred_needed;
dual_slope = req_ptr->tm_queue_dual_slope_needed;
+ threshold = req_ptr->tm_queue_threshold_needed;
cap_ptr->max_tm_queues = max_queues;
cap_ptr->max_levels = num_levels;
@@ -2624,6 +2686,11 @@ static void tm_system_capabilities_set(odp_tm_capabilities_t *cap_ptr,
cap_ptr->ecn_marking_supported = req_ptr->ecn_marking_needed;
cap_ptr->drop_prec_marking_supported =
req_ptr->drop_prec_marking_needed;
+ cap_ptr->tm_queue_threshold = threshold;
+ cap_ptr->tm_queue_query_flags = (ODP_TM_QUERY_PKT_CNT |
+ ODP_TM_QUERY_BYTE_CNT |
+ ODP_TM_QUERY_THRESHOLDS);
+ cap_ptr->max_schedulers_per_node = ODP_TM_MAX_PRIORITIES;
cap_ptr->dynamic_topology_update = true;
cap_ptr->dynamic_shaper_update = true;
@@ -2631,6 +2698,8 @@ static void tm_system_capabilities_set(odp_tm_capabilities_t *cap_ptr,
cap_ptr->dynamic_wred_update = true;
cap_ptr->dynamic_threshold_update = true;
+ cap_ptr->pkt_prio_modes[ODP_TM_PKT_PRIO_MODE_PRESERVE] = true;
+
for (color = 0; color < ODP_NUM_PACKET_COLORS; color++)
cap_ptr->marking_colors_supported[color] =
req_ptr->marking_colors_needed[color];
@@ -2652,6 +2721,7 @@ static void tm_system_capabilities_set(odp_tm_capabilities_t *cap_ptr,
shaper_supported = per_level_req->tm_node_shaper_needed;
wred_supported = per_level_req->tm_node_wred_needed;
dual_slope = per_level_req->tm_node_dual_slope_needed;
+ threshold = per_level_req->tm_node_threshold_needed;
per_level_cap->max_num_tm_nodes = max_nodes;
per_level_cap->max_fanin_per_node = max_fanin;
@@ -2664,7 +2734,12 @@ static void tm_system_capabilities_set(odp_tm_capabilities_t *cap_ptr,
per_level_cap->tm_node_dual_slope_supported = dual_slope;
per_level_cap->fair_queuing_supported = true;
per_level_cap->weights_supported = true;
+ per_level_cap->tm_node_threshold = threshold;
}
+
+ cap_ptr->queue_stats.counter.discards = 1;
+ cap_ptr->queue_stats.counter.errors = 1;
+ cap_ptr->queue_stats.counter.packets = 1;
}
static int affinitize_main_thread(void)
@@ -2932,6 +3007,11 @@ odp_tm_t odp_tm_create(const char *name,
return ODP_TM_INVALID;
}
+ /* Only packet priority mode preserve is supported */
+ if (requirements->pkt_prio_mode != ODP_TM_PKT_PRIO_MODE_PRESERVE) {
+ ODP_ERR("Unsupported packet priority mode\n");
+ return ODP_TM_INVALID;
+ }
odp_ticketlock_lock(&tm_glb->create_lock);
/* If we are using pktio output (usual case) get the first associated
@@ -3960,6 +4040,8 @@ int odp_tm_node_context_set(odp_tm_node_t tm_node, void *user_context)
void odp_tm_queue_params_init(odp_tm_queue_params_t *params)
{
memset(params, 0, sizeof(odp_tm_queue_params_t));
+
+ params->ordered_enqueue = true;
}
odp_tm_queue_t odp_tm_queue_create(odp_tm_t odp_tm,
@@ -3996,11 +4078,15 @@ odp_tm_queue_t odp_tm_queue_create(odp_tm_t odp_tm,
memset(queue_obj, 0, sizeof(tm_queue_obj_t));
queue_obj->user_context = params->user_context;
queue_obj->priority = params->priority;
+ queue_obj->ordered_enqueue = params->ordered_enqueue;
queue_obj->tm_idx = tm_system->tm_idx;
queue_obj->queue_num = (uint32_t)_odp_int_pkt_queue;
queue_obj->_odp_int_pkt_queue = _odp_int_pkt_queue;
queue_obj->pkt = ODP_PACKET_INVALID;
odp_ticketlock_init(&queue_obj->tm_wred_node.tm_wred_node_lock);
+ odp_atomic_init_u64(&queue_obj->stats.discards, 0);
+ odp_atomic_init_u64(&queue_obj->stats.errors, 0);
+ odp_atomic_init_u64(&queue_obj->stats.packets, 0);
queue = odp_queue_create(NULL, NULL);
if (queue == ODP_QUEUE_INVALID) {
@@ -4438,6 +4524,40 @@ int odp_tm_enq_with_cnt(odp_tm_queue_t tm_queue, odp_packet_t pkt)
return pkt_cnt;
}
+int odp_tm_enq_multi(odp_tm_queue_t tm_queue, const odp_packet_t packets[],
+ int num)
+{
+ tm_queue_obj_t *tm_queue_obj;
+ tm_system_t *tm_system;
+ int i, rc;
+
+ tm_queue_obj = GET_TM_QUEUE_OBJ(tm_queue);
+ if (!tm_queue_obj)
+ return -1;
+
+ tm_system = &tm_glb->system[tm_queue_obj->tm_idx];
+ if (!tm_system)
+ return -1;
+
+ if (odp_atomic_load_u64(&tm_system->destroying))
+ return -1;
+
+ for (i = 0; i < num; i++) {
+ rc = tm_enqueue(tm_system, tm_queue_obj, packets[i]);
+ if (rc < 0 && rc != -2)
+ break;
+ /* For RED failure, just drop current pkt but
+ * continue with next pkts.
+ */
+ if (rc == -2) {
+ odp_packet_free(packets[i]);
+ odp_atomic_inc_u64(&tm_queue_obj->stats.discards);
+ }
+ }
+
+ return i;
+}
+
int odp_tm_node_info(odp_tm_node_t tm_node, odp_tm_node_info_t *info)
{
tm_queue_thresholds_t *threshold_params;
@@ -4754,6 +4874,23 @@ void odp_tm_stats_print(odp_tm_t odp_tm)
}
}
+int odp_tm_queue_stats(odp_tm_queue_t tm_queue, odp_tm_queue_stats_t *stats)
+{
+ tm_queue_obj_t *tm_queue_obj = GET_TM_QUEUE_OBJ(tm_queue);
+
+ if (!tm_queue_obj) {
+ ODP_ERR("Invalid TM queue handle\n");
+ return -1;
+ }
+
+ memset(stats, 0, sizeof(odp_tm_queue_stats_t));
+ stats->discards = odp_atomic_load_u64(&tm_queue_obj->stats.discards);
+ stats->errors = odp_atomic_load_u64(&tm_queue_obj->stats.errors);
+ stats->packets = odp_atomic_load_u64(&tm_queue_obj->stats.packets);
+
+ return 0;
+}
+
uint64_t odp_tm_to_u64(odp_tm_t hdl)
{
return _odp_pri(hdl);
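
As an illustration only (not part of the patch), a minimal sketch of the new TM burst
enqueue and per-queue statistics calls added above. The tm_queue handle, the packet table
and the handling of packets left unconsumed by the burst call are assumptions for the example.

#include <stdio.h>
#include <inttypes.h>
#include <odp_api.h>

/* Illustrative sketch: enqueue a burst to a TM queue and print the new
 * per-queue counters. Packets past the returned count are assumed to remain
 * owned by the caller and are freed here. */
static void tm_burst_example(odp_tm_queue_t tm_queue, odp_packet_t pkt_tbl[], int num)
{
	odp_tm_queue_stats_t stats;
	int i, sent;

	sent = odp_tm_enq_multi(tm_queue, pkt_tbl, num);
	if (sent < 0)
		sent = 0;

	for (i = sent; i < num; i++)
		odp_packet_free(pkt_tbl[i]);

	if (odp_tm_queue_stats(tm_queue, &stats) == 0)
		printf("packets %" PRIu64 " discards %" PRIu64 " errors %" PRIu64 "\n",
		       stats.packets, stats.discards, stats.errors);
}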
diff --git a/platform/linux-generic/pktio/dpdk.c b/platform/linux-generic/pktio/dpdk.c
index 4841402aa..a98324ee4 100644
--- a/platform/linux-generic/pktio/dpdk.c
+++ b/platform/linux-generic/pktio/dpdk.c
@@ -116,6 +116,7 @@ ODP_STATIC_ASSERT((DPDK_NB_MBUF % DPDK_MEMPOOL_CACHE_SIZE == 0) &&
typedef struct {
int num_rx_desc;
int num_tx_desc;
+ uint8_t multicast_en;
uint8_t rx_drop_en;
uint8_t set_flow_hash;
} dpdk_opt_t;
@@ -227,8 +228,13 @@ static int init_options(pktio_entry_t *pktio_entry,
return -1;
opt->set_flow_hash = !!val;
+ if (!lookup_opt("multicast_en", NULL, &val))
+ return -1;
+ opt->multicast_en = !!val;
+
ODP_DBG("DPDK interface (%s): %" PRIu16 "\n", dev_info->driver_name,
pkt_priv(pktio_entry)->port_id);
+ ODP_DBG(" multicast_en: %d\n", opt->multicast_en);
ODP_DBG(" num_rx_desc: %d\n", opt->num_rx_desc);
ODP_DBG(" num_tx_desc: %d\n", opt->num_tx_desc);
ODP_DBG(" rx_drop_en: %d\n", opt->rx_drop_en);
@@ -1669,8 +1675,9 @@ static int dpdk_open(odp_pktio_t id ODP_UNUSED,
char pool_name[RTE_MEMPOOL_NAMESIZE];
uint16_t data_room;
uint32_t mtu;
- int i;
+ int i, ret;
pool_t *pool_entry;
+ uint16_t port_id;
if (disable_pktio)
return -1;
@@ -1679,8 +1686,15 @@ static int dpdk_open(odp_pktio_t id ODP_UNUSED,
return -1;
pool_entry = pool_entry_from_hdl(pool);
- if (!dpdk_netdev_is_valid(netdev)) {
- ODP_ERR("Invalid dpdk netdev: %s\n", netdev);
+ /* Init pktio entry */
+ memset(pkt_dpdk, 0, sizeof(*pkt_dpdk));
+
+ if (!rte_eth_dev_get_port_by_name(netdev, &port_id))
+ pkt_dpdk->port_id = port_id;
+ else if (dpdk_netdev_is_valid(netdev))
+ pkt_dpdk->port_id = atoi(netdev);
+ else {
+ ODP_ERR("Invalid DPDK interface name: %s\n", netdev);
return -1;
}
@@ -1691,11 +1705,7 @@ static int dpdk_open(odp_pktio_t id ODP_UNUSED,
odp_global_rw->dpdk_initialized = 1;
}
- /* Init pktio entry */
- memset(pkt_dpdk, 0, sizeof(*pkt_dpdk));
-
pkt_dpdk->pool = pool;
- pkt_dpdk->port_id = atoi(netdev);
/* rte_eth_dev_count() was removed in v18.05 */
#if RTE_VERSION < RTE_VERSION_NUM(18, 5, 0, 0)
@@ -1727,6 +1737,23 @@ static int dpdk_open(odp_pktio_t id ODP_UNUSED,
promisc_mode_check(pkt_dpdk);
+#if RTE_VERSION < RTE_VERSION_NUM(19, 11, 0, 0)
+ ret = 0;
+ if (pkt_dpdk->opt.multicast_en)
+ rte_eth_allmulticast_enable(pkt_dpdk->port_id);
+ else
+ rte_eth_allmulticast_disable(pkt_dpdk->port_id);
+#else
+ if (pkt_dpdk->opt.multicast_en)
+ ret = rte_eth_allmulticast_enable(pkt_dpdk->port_id);
+ else
+ ret = rte_eth_allmulticast_disable(pkt_dpdk->port_id);
+#endif
+
+ /* Not supported by all PMDs, so ignore the return value */
+ if (ret)
+ ODP_DBG("Configuring multicast reception not supported by the PMD\n");
+
/* Drivers requiring minimum burst size. Supports also *_vf versions
* of the drivers. */
if (!strncmp(dev_info.driver_name, IXGBE_DRV_NAME,
diff --git a/platform/linux-generic/test/inline-timer.conf b/platform/linux-generic/test/inline-timer.conf
index 93195f5a8..90709d86d 100644
--- a/platform/linux-generic/test/inline-timer.conf
+++ b/platform/linux-generic/test/inline-timer.conf
@@ -1,6 +1,6 @@
# Mandatory fields
odp_implementation = "linux-generic"
-config_file_version = "0.1.16"
+config_file_version = "0.1.18"
timer: {
# Enable inline timer implementation
diff --git a/platform/linux-generic/test/packet_align.conf b/platform/linux-generic/test/packet_align.conf
index 58a73f2df..f9b39abf6 100644
--- a/platform/linux-generic/test/packet_align.conf
+++ b/platform/linux-generic/test/packet_align.conf
@@ -1,6 +1,6 @@
# Mandatory fields
odp_implementation = "linux-generic"
-config_file_version = "0.1.16"
+config_file_version = "0.1.18"
pool: {
pkt: {
diff --git a/platform/linux-generic/test/process-mode.conf b/platform/linux-generic/test/process-mode.conf
index a6e6080d2..a4b5d3f39 100644
--- a/platform/linux-generic/test/process-mode.conf
+++ b/platform/linux-generic/test/process-mode.conf
@@ -1,6 +1,6 @@
# Mandatory fields
odp_implementation = "linux-generic"
-config_file_version = "0.1.16"
+config_file_version = "0.1.18"
# Shared memory options
shm: {
diff --git a/platform/linux-generic/test/sched-basic.conf b/platform/linux-generic/test/sched-basic.conf
index 79537b454..4ef0ab044 100644
--- a/platform/linux-generic/test/sched-basic.conf
+++ b/platform/linux-generic/test/sched-basic.conf
@@ -1,8 +1,9 @@
# Mandatory fields
odp_implementation = "linux-generic"
-config_file_version = "0.1.16"
+config_file_version = "0.1.18"
+# Test scheduler with an odd spread value and without dynamic load balance
sched_basic: {
- # Test scheduler with an odd spread value
prio_spread = 3
+ load_balance = 0
}