about summary refs log tree commit diff
diff options
context:
space:
mode:
authorMatias Elo <matias.elo@nokia.com>2023-01-27 13:56:45 +0200
committerMatias Elo <matias.elo@nokia.com>2023-05-08 14:35:47 +0300
commitd989083e7f964b60d83ca1d97cffdd9dac0b4710 (patch)
treebf932ff0f93bdbb9651ca588d6c71467721b1a40
parenta0da0182cc48ed4859b0a12dc668e1edfe73b9d1 (diff)
linux-gen: pktio: enqueue packets to destination queues from pktio devices
Reduce latency by enqueueing packets directly to classifier and inline IPsec destination queues from pktio devices.

Signed-off-by: Matias Elo <matias.elo@nokia.com>
Reviewed-by: Tuomas Taipale <tuomas.taipale@nokia.com>
-rw-r--r--platform/linux-generic/include/odp_classification_internal.h158
-rw-r--r--platform/linux-generic/odp_packet_io.c60
-rw-r--r--platform/linux-generic/pktio/dpdk.c47
-rw-r--r--platform/linux-generic/pktio/loop.c30
-rw-r--r--platform/linux-generic/pktio/pcap.c25
-rw-r--r--platform/linux-generic/pktio/socket.c18
-rw-r--r--platform/linux-generic/pktio/socket_mmap.c28
-rw-r--r--platform/linux-generic/pktio/socket_xdp.c21
-rw-r--r--platform/linux-generic/pktio/tap.c22
9 files changed, 308 insertions(+), 101 deletions(-)
diff --git a/platform/linux-generic/include/odp_classification_internal.h b/platform/linux-generic/include/odp_classification_internal.h
index c3ecf4079..7841e64fb 100644
--- a/platform/linux-generic/include/odp_classification_internal.h
+++ b/platform/linux-generic/include/odp_classification_internal.h
@@ -1,5 +1,5 @@
/* Copyright (c) 2014-2018, Linaro Limited
- * Copyright (c) 2021, Nokia
+ * Copyright (c) 2021-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -21,13 +21,20 @@ extern "C" {
#include <odp/api/atomic.h>
#include <odp/api/classification.h>
+#include <odp/api/event.h>
#include <odp/api/hints.h>
+#include <odp/api/packet.h>
+#include <odp/api/pool.h>
#include <odp/api/queue.h>
+#include <odp/api/std_types.h>
+
+#include <odp_debug_internal.h>
#include <odp_packet_internal.h>
-#include <odp/api/packet_io.h>
#include <odp_packet_io_internal.h>
#include <odp_classification_datamodel.h>
+#include <stdint.h>
+
extern cls_global_t *_odp_cls_global;
static inline cos_t *_odp_cos_entry_from_idx(uint32_t ndx)
@@ -72,6 +79,153 @@ static inline void _odp_cos_queue_stats_add(cos_t *cos, odp_queue_t queue,
odp_atomic_add_u64(&cos->queue_stats[queue_idx].discards, discards);
}
+static inline void _odp_cos_vector_enq(odp_queue_t queue, odp_event_t events[], uint32_t num,
+ cos_t *cos)
+{
+ odp_packet_vector_t pktv;
+ const odp_pool_t pool = cos->vector.pool;
+ const uint32_t max_size = cos->vector.max_size;
+ uint32_t num_enq;
+ int num_pktv = (num + max_size - 1) / max_size;
+ int ret;
+ int i;
+ odp_packet_vector_t pktv_tbl[num_pktv];
+ odp_event_t event_tbl[num_pktv];
+
+ for (i = 0; i < num_pktv; i++) {
+ pktv = odp_packet_vector_alloc(pool);
+ if (odp_unlikely(pktv == ODP_PACKET_VECTOR_INVALID))
+ break;
+ pktv_tbl[i] = pktv;
+ event_tbl[i] = odp_packet_vector_to_event(pktv);
+ }
+ if (odp_unlikely(i == 0)) {
+ odp_event_free_multi(events, num);
+ _odp_cos_queue_stats_add(cos, queue, 0, num);
+ return;
+ }
+ num_pktv = i;
+ num_enq = 0;
+ for (i = 0; i < num_pktv; i++) {
+ odp_packet_t *pkt_tbl;
+ int pktv_size = max_size;
+
+ pktv = pktv_tbl[i];
+
+ if (num_enq + max_size > num)
+ pktv_size = num - num_enq;
+
+ odp_packet_vector_tbl(pktv, &pkt_tbl);
+ odp_packet_from_event_multi(pkt_tbl, &events[num_enq], pktv_size);
+ odp_packet_vector_size_set(pktv, pktv_size);
+ num_enq += pktv_size;
+ }
+
+ ret = odp_queue_enq_multi(queue, event_tbl, num_pktv);
+ if (odp_likely(ret == num_pktv)) {
+ _odp_cos_queue_stats_add(cos, queue, num_enq, num - num_enq);
+ } else {
+ uint32_t enqueued;
+
+ if (ret < 0)
+ ret = 0;
+ enqueued = max_size * ret;
+ _odp_cos_queue_stats_add(cos, queue, enqueued, num - enqueued);
+ odp_event_free_multi(&event_tbl[ret], num_pktv - ret);
+ }
+}
+
+/**
+ * Enqueue packets into destination CoS
+ */
+static inline void _odp_cos_enq(uint16_t cos_id, odp_queue_t dst, odp_packet_t packets[], int num)
+{
+ _ODP_ASSERT(cos_id != CLS_COS_IDX_NONE);
+ _ODP_ASSERT(dst != ODP_QUEUE_INVALID);
+
+ cos_t *cos = _odp_cos_entry_from_idx(cos_id);
+
+ if (num < 2 || !cos->vector.enable) {
+ int ret = odp_queue_enq_multi(dst, (odp_event_t *)packets, num);
+
+ if (odp_unlikely(ret != num)) {
+ if (ret < 0)
+ ret = 0;
+
+ odp_packet_free_multi(&packets[ret], num - ret);
+ }
+ _odp_cos_queue_stats_add(cos, dst, ret, num - ret);
+ } else {
+ _odp_cos_vector_enq(dst, (odp_event_t *)packets, num, cos);
+ }
+}
+
+/**
+ * Enqueue all remaining packets in 'packets' array
+ */
+static inline void _odp_cls_enq_all(odp_packet_t packets[], int num)
+{
+ odp_packet_hdr_t *prev_hdr;
+ odp_packet_hdr_t *latest_hdr = packet_hdr(packets[num - 1]);
+
+ if (num < 2) {
+ _odp_cos_enq(latest_hdr->cos, latest_hdr->dst_queue, packets, 1);
+ return;
+ }
+
+ prev_hdr = packet_hdr(packets[num - 2]);
+
+ if (prev_hdr->dst_queue == latest_hdr->dst_queue && prev_hdr->cos == latest_hdr->cos) {
+ _odp_cos_enq(prev_hdr->cos, prev_hdr->dst_queue, packets, num);
+ } else {
+ _odp_cos_enq(prev_hdr->cos, prev_hdr->dst_queue, packets, num - 1);
+ _odp_cos_enq(latest_hdr->cos, latest_hdr->dst_queue, &packets[num - 1], 1);
+ }
+}
+
+/**
+ * Enqueue packets into classifier destination queue
+ *
+ * Called by pktio devices for each received packet when classifier has been enabled. Postpones the
+ * actual enqueue operation and stores packets in 'packets' array until destination queue or CoS
+ * change, or 'last' flag is set.
+ *
+ * @param[out] packets Packet array to be enqueued
+ * @param num Number of handles in 'packets' array
+ * @param last Enqueue all packets
+ *
+ * @return Number of packets remaining in 'packets' array
+ */
+static inline int _odp_cls_enq(odp_packet_t packets[], int num, odp_bool_t last)
+{
+ odp_packet_hdr_t *prev_hdr, *latest_hdr;
+
+ _ODP_ASSERT(num > 0);
+
+ if (last) {
+ _odp_cls_enq_all(packets, num);
+ return 0;
+ }
+
+ /* Only one packet, so postpone enqueue */
+ if (num < 2)
+ return num;
+
+ prev_hdr = packet_hdr(packets[num - 2]);
+ latest_hdr = packet_hdr(packets[num - 1]);
+
+ /* Postpone enqueue if destination queue and CoS have not changed */
+ if (prev_hdr->dst_queue == latest_hdr->dst_queue && prev_hdr->cos == latest_hdr->cos)
+ return num;
+
+ /* Perform previously postponed enqueue operation and move the last packet (different
+ * destination) in 'packets' array to be the first entry */
+ _odp_cos_enq(prev_hdr->cos, prev_hdr->dst_queue, packets, num - 1);
+ packets[0] = packets[num - 1];
+
+ return 1;
+}
+
/** Classification Internal function **/
/**
diff --git a/platform/linux-generic/odp_packet_io.c b/platform/linux-generic/odp_packet_io.c
index 48bbe38fc..7fd54ba2b 100644
--- a/platform/linux-generic/odp_packet_io.c
+++ b/platform/linux-generic/odp_packet_io.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2013-2018, Linaro Limited
- * Copyright (c) 2019-2022, Nokia
+ * Copyright (c) 2019-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -774,62 +774,6 @@ odp_pktio_t odp_pktio_lookup(const char *name)
return hdl;
}
-static void packet_vector_enq_cos(odp_queue_t queue, odp_event_t events[],
- uint32_t num, cos_t *cos_hdr)
-{
- odp_packet_vector_t pktv;
- odp_pool_t pool = cos_hdr->vector.pool;
- uint32_t max_size = cos_hdr->vector.max_size;
- uint32_t num_enq;
- int num_pktv = (num + max_size - 1) / max_size;
- int ret;
- int i;
- odp_packet_vector_t pktv_tbl[num_pktv];
- odp_event_t event_tbl[num_pktv];
-
- for (i = 0; i < num_pktv; i++) {
- pktv = odp_packet_vector_alloc(pool);
- if (odp_unlikely(pktv == ODP_PACKET_VECTOR_INVALID))
- break;
- pktv_tbl[i] = pktv;
- event_tbl[i] = odp_packet_vector_to_event(pktv);
- }
- if (odp_unlikely(i == 0)) {
- odp_event_free_multi(events, num);
- _odp_cos_queue_stats_add(cos_hdr, queue, 0, num);
- return;
- }
- num_pktv = i;
- num_enq = 0;
- for (i = 0; i < num_pktv; i++) {
- odp_packet_t *pkt_tbl;
- int pktv_size = max_size;
-
- pktv = pktv_tbl[i];
-
- if (num_enq + max_size > num)
- pktv_size = num - num_enq;
-
- odp_packet_vector_tbl(pktv, &pkt_tbl);
- odp_packet_from_event_multi(pkt_tbl, &events[num_enq], pktv_size);
- odp_packet_vector_size_set(pktv, pktv_size);
- num_enq += pktv_size;
- }
-
- ret = odp_queue_enq_multi(queue, event_tbl, num_pktv);
- if (odp_likely(ret == num_pktv)) {
- _odp_cos_queue_stats_add(cos_hdr, queue, num_enq, num - num_enq);
- } else {
- uint32_t enqueued;
-
- if (ret < 0)
- ret = 0;
- enqueued = max_size * ret;
- _odp_cos_queue_stats_add(cos_hdr, queue, enqueued, num - enqueued);
- odp_event_free_multi(&event_tbl[ret], num_pktv - ret);
- }
-}
-
static void packet_vector_enq(odp_queue_t queue, odp_event_t events[],
uint32_t num, odp_pool_t pool)
{
@@ -969,7 +913,7 @@ static inline int pktin_recv_buf(pktio_entry_t *entry, int pktin_index,
cos_hdr = _odp_cos_entry_from_idx(cos[i]);
if (cos_hdr->vector.enable) {
- packet_vector_enq_cos(dst[i], &ev[idx], num_enq, cos_hdr);
+ _odp_cos_vector_enq(dst[i], &ev[idx], num_enq, cos_hdr);
continue;
}
} else if (vector_enabled) {
diff --git a/platform/linux-generic/pktio/dpdk.c b/platform/linux-generic/pktio/dpdk.c
index 67779e073..fed1e81c9 100644
--- a/platform/linux-generic/pktio/dpdk.c
+++ b/platform/linux-generic/pktio/dpdk.c
@@ -667,6 +667,8 @@ static inline int mbuf_to_pkt(pktio_entry_t *pktio_entry,
int i, j, num;
uint32_t max_len;
int nb_pkts = 0;
+ int nb_cls = 0;
+ const int cls_enabled = pktio_cls_enabled(pktio_entry);
pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry);
odp_pool_t pool = pkt_dpdk->pool;
odp_pktin_config_opt_t pktin_cfg = pktio_entry->config.pktin;
@@ -715,7 +717,7 @@ static inline int mbuf_to_pkt(pktio_entry_t *pktio_entry,
continue;
}
- if (pktio_cls_enabled(pktio_entry)) {
+ if (cls_enabled) {
odp_pool_t new_pool;
ret = _odp_cls_classify_packet(pktio_entry, (const uint8_t *)data,
@@ -753,11 +755,21 @@ static inline int mbuf_to_pkt(pktio_entry_t *pktio_entry,
packet_set_ts(pkt_hdr, ts);
- pkt_table[nb_pkts++] = pkt;
-
rte_pktmbuf_free(mbuf);
+
+ if (cls_enabled) {
+ /* Enqueue packets directly to classifier destination queue */
+ pkt_table[nb_cls++] = pkt;
+ nb_cls = _odp_cls_enq(pkt_table, nb_cls, (i + 1 == num));
+ } else {
+ pkt_table[nb_pkts++] = pkt;
+ }
}
+ /* Enqueue remaining classified packets */
+ if (odp_unlikely(nb_cls))
+ _odp_cls_enq(pkt_table, nb_cls, true);
+
return nb_pkts;
fail:
@@ -948,7 +960,7 @@ static inline int mbuf_to_pkt_zero(pktio_entry_t *pktio_entry,
uint8_t set_flow_hash;
struct rte_mbuf *mbuf;
void *data;
- int i, nb_pkts;
+ int i, nb_pkts, nb_cls;
odp_pktin_config_opt_t pktin_cfg;
odp_pktio_t input;
pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry);
@@ -956,6 +968,7 @@ static inline int mbuf_to_pkt_zero(pktio_entry_t *pktio_entry,
prefetch_pkt(mbuf_table[0]);
+ nb_cls = 0;
nb_pkts = 0;
set_flow_hash = pkt_dpdk->flags.set_flow_hash;
pktin_cfg = pktio_entry->config.pktin;
@@ -986,6 +999,12 @@ static inline int mbuf_to_pkt_zero(pktio_entry_t *pktio_entry,
/* Init buffer segments. Currently, only single segment packets
* are supported. */
pkt_hdr->seg_data = data;
+ pkt_hdr->input = input;
+
+ if (set_flow_hash && (mbuf->ol_flags & RTE_MBUF_F_RX_RSS_HASH))
+ packet_set_flow_hash(pkt_hdr, mbuf->hash.rss);
+
+ packet_set_ts(pkt_hdr, ts);
if (layer) {
int ret;
@@ -1013,25 +1032,27 @@ static inline int mbuf_to_pkt_zero(pktio_entry_t *pktio_entry,
continue;
}
- if (odp_unlikely(_odp_pktio_packet_to_pool(
- &pkt, &pkt_hdr, new_pool))) {
+ if (odp_unlikely(_odp_pktio_packet_to_pool(&pkt, &pkt_hdr,
+ new_pool))) {
rte_pktmbuf_free(mbuf);
odp_atomic_inc_u64(&pktio_entry->stats_extra.in_discards);
continue;
}
+
+ /* Enqueue packet directly to CoS destination queue */
+ pkt_table[nb_cls++] = pkt;
+ nb_cls = _odp_cls_enq(pkt_table, nb_cls, (i + 1 == mbuf_num));
+ continue;
}
}
- pkt_hdr->input = input;
-
- if (set_flow_hash && (mbuf->ol_flags & RTE_MBUF_F_RX_RSS_HASH))
- packet_set_flow_hash(pkt_hdr, mbuf->hash.rss);
-
- packet_set_ts(pkt_hdr, ts);
-
pkt_table[nb_pkts++] = pkt;
}
+ /* Enqueue remaining classified packets */
+ if (odp_unlikely(nb_cls))
+ _odp_cls_enq(pkt_table, nb_cls, true);
+
return nb_pkts;
}
diff --git a/platform/linux-generic/pktio/loop.c b/platform/linux-generic/pktio/loop.c
index f9c72db25..08459776b 100644
--- a/platform/linux-generic/pktio/loop.c
+++ b/platform/linux-generic/pktio/loop.c
@@ -251,12 +251,15 @@ static int loopback_recv(pktio_entry_t *pktio_entry, int index, odp_packet_t pkt
odp_queue_t queue = entry->queue;
stats_t *stats = &entry->stats;
_odp_event_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
+ odp_packet_t cls_tbl[QUEUE_MULTI_MAX];
odp_packet_hdr_t *pkt_hdr;
odp_packet_t pkt;
odp_time_t ts_val;
odp_time_t *ts = NULL;
int num_rx = 0;
int packets = 0;
+ int num_cls = 0;
+ const int cls_enabled = pktio_cls_enabled(pktio_entry);
uint32_t octets = 0;
const odp_proto_layer_t layer = pktio_entry->parse_layer;
const odp_pktin_config_opt_t opt = pktio_entry->config.pktin;
@@ -273,6 +276,7 @@ static int loopback_recv(pktio_entry_t *pktio_entry, int index, odp_packet_t pkt
for (i = 0; i < nbr; i++) {
uint32_t pkt_len;
+ int do_ipsec_enq = 0;
pkt = packet_from_event_hdr(hdr_tbl[i]);
pkt_len = odp_packet_len(pkt);
@@ -306,7 +310,7 @@ static int loopback_recv(pktio_entry_t *pktio_entry, int index, odp_packet_t pkt
continue;
}
- if (pktio_cls_enabled(pktio_entry)) {
+ if (cls_enabled) {
odp_pool_t new_pool;
ret = _odp_cls_classify_packet(pktio_entry, pkt_addr,
@@ -335,7 +339,7 @@ static int loopback_recv(pktio_entry_t *pktio_entry, int index, odp_packet_t pkt
if (pktio_entry->config.inbound_ipsec &&
!pkt_hdr->p.flags.ip_err &&
odp_packet_has_ipsec(pkt)) {
- _odp_ipsec_try_inline(&pkt);
+ do_ipsec_enq = !_odp_ipsec_try_inline(&pkt);
pkt_hdr = packet_hdr(pkt);
}
@@ -344,9 +348,29 @@ static int loopback_recv(pktio_entry_t *pktio_entry, int index, odp_packet_t pkt
packets++;
}
- pkts[num_rx++] = pkt;
+ if (do_ipsec_enq) {
+ if (odp_unlikely(odp_queue_enq(pkt_hdr->dst_queue,
+ odp_packet_to_event(pkt)))) {
+ odp_atomic_inc_u64(&stats->in_discards);
+ if (!pkt_hdr->p.flags.all.error) {
+ octets -= pkt_len;
+ packets--;
+ }
+ odp_packet_free(pkt);
+ }
+ } else if (cls_enabled) {
+ /* Enqueue packets directly to classifier destination queue */
+ cls_tbl[num_cls++] = pkt;
+ num_cls = _odp_cls_enq(cls_tbl, num_cls, (i + 1 == nbr));
+ } else {
+ pkts[num_rx++] = pkt;
+ }
}
+ /* Enqueue remaining classified packets */
+ if (odp_unlikely(num_cls))
+ _odp_cls_enq(cls_tbl, num_cls, true);
+
odp_atomic_add_u64(&stats->in_octets, octets);
odp_atomic_add_u64(&stats->in_packets, packets);
diff --git a/platform/linux-generic/pktio/pcap.c b/platform/linux-generic/pktio/pcap.c
index 20b6ec179..6f68e95a1 100644
--- a/platform/linux-generic/pktio/pcap.c
+++ b/platform/linux-generic/pktio/pcap.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2015-2018, Linaro Limited
- * Copyright (c) 2021-2022, Nokia
+ * Copyright (c) 2021-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -291,6 +291,9 @@ static int pcapif_recv_pkt(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
odp_time_t *ts = NULL;
int packets = 0;
uint32_t octets = 0;
+ int num_pkts = 0;
+ int num_cls = 0;
+ const int cls_enabled = pktio_cls_enabled(pktio_entry);
uint16_t frame_offset = pktio_entry->pktin_frame_offset;
const odp_proto_layer_t layer = pktio_entry->parse_layer;
const odp_pktin_config_opt_t opt = pktio_entry->config.pktin;
@@ -304,7 +307,7 @@ static int pcapif_recv_pkt(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
if (opt.bit.ts_all || opt.bit.ts_ptp)
ts = &ts_val;
- for (i = 0; i < num; ) {
+ for (i = 0; i < num; i++) {
int ret;
ret = pcap_next_ex(pcap->rx, &hdr, &data);
@@ -346,7 +349,7 @@ static int pcapif_recv_pkt(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
continue;
}
- if (pktio_cls_enabled(pktio_entry)) {
+ if (cls_enabled) {
odp_pool_t new_pool;
ret = _odp_cls_classify_packet(pktio_entry, data,
@@ -376,17 +379,25 @@ static int pcapif_recv_pkt(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
packets++;
}
- pkts[i] = pkt;
-
- i++;
+ /* Enqueue packets directly to classifier destination queue */
+ if (cls_enabled) {
+ pkts[num_cls++] = pkt;
+ num_cls = _odp_cls_enq(pkts, num_cls, (i + 1 == num));
+ } else {
+ pkts[num_pkts++] = pkt;
+ }
}
+ /* Enqueue remaining classified packets */
+ if (odp_unlikely(num_cls))
+ _odp_cls_enq(pkts, num_cls, true);
+
pktio_entry->stats.in_octets += octets;
pktio_entry->stats.in_packets += packets;
odp_ticketlock_unlock(&pktio_entry->rxl);
- return i;
+ return num_pkts;
}
static int _pcapif_dump_pkt(pkt_pcap_t *pcap, odp_packet_t pkt)
diff --git a/platform/linux-generic/pktio/socket.c b/platform/linux-generic/pktio/socket.c
index 371831961..609a9351f 100644
--- a/platform/linux-generic/pktio/socket.c
+++ b/platform/linux-generic/pktio/socket.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2013-2018, Linaro Limited
- * Copyright (c) 2013-2022, Nokia Solutions and Networks
+ * Copyright (c) 2013-2023, Nokia Solutions and Networks
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -224,9 +224,11 @@ static int sock_mmsg_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
struct mmsghdr msgvec[num];
struct iovec iovecs[num][PKT_MAX_SEGS];
int nb_rx = 0;
+ int nb_cls = 0;
int nb_pkts;
int recv_msgs;
int i;
+ const int cls_enabled = pktio_cls_enabled(pktio_entry);
uint16_t frame_offset = pktio_entry->pktin_frame_offset;
uint32_t alloc_len = pkt_sock->mtu + frame_offset;
const odp_proto_layer_t layer = pktio_entry->parse_layer;
@@ -296,7 +298,7 @@ static int sock_mmsg_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
continue;
}
- if (pktio_cls_enabled(pktio_entry)) {
+ if (cls_enabled) {
odp_pool_t new_pool;
ret = _odp_cls_classify_packet(pktio_entry, base,
@@ -328,9 +330,19 @@ static int sock_mmsg_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
pkt_hdr->input = pktio_entry->handle;
packet_set_ts(pkt_hdr, ts);
- pkt_table[nb_rx++] = pkt;
+ if (cls_enabled) {
+ /* Enqueue packets directly to classifier destination queue */
+ pkt_table[nb_cls++] = pkt;
+ nb_cls = _odp_cls_enq(pkt_table, nb_cls, (i + 1 == recv_msgs));
+ } else {
+ pkt_table[nb_rx++] = pkt;
+ }
}
+ /* Enqueue remaining classified packets */
+ if (odp_unlikely(nb_cls))
+ _odp_cls_enq(pkt_table, nb_cls, true);
+
/* Free unused pkt buffers */
if (i < nb_pkts)
odp_packet_free_multi(&pkt_table[i], nb_pkts - i);
diff --git a/platform/linux-generic/pktio/socket_mmap.c b/platform/linux-generic/pktio/socket_mmap.c
index 0c04acd03..2ca150751 100644
--- a/platform/linux-generic/pktio/socket_mmap.c
+++ b/platform/linux-generic/pktio/socket_mmap.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2013-2018, Linaro Limited
- * Copyright (c) 2013-2022, Nokia Solutions and Networks
+ * Copyright (c) 2013-2023, Nokia Solutions and Networks
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -138,13 +138,15 @@ static inline unsigned pkt_mmap_v2_rx(pktio_entry_t *pktio_entry,
{
odp_time_t ts_val;
odp_time_t *ts = NULL;
- unsigned frame_num, next_frame_num;
+ unsigned int frame_num, next_frame_num;
uint8_t *pkt_buf, *next_ptr;
int pkt_len;
uint32_t alloc_len;
struct ethhdr *eth_hdr;
- unsigned i;
- unsigned nb_rx;
+ unsigned int i;
+ unsigned int nb_rx = 0;
+ unsigned int nb_cls = 0;
+ const int cls_enabled = pktio_cls_enabled(pktio_entry);
struct ring *ring;
odp_pool_t pool = pkt_sock->pool;
uint16_t frame_offset = pktio_entry->pktin_frame_offset;
@@ -159,7 +161,7 @@ static inline unsigned pkt_mmap_v2_rx(pktio_entry_t *pktio_entry,
frame_num = ring->frame_num;
next_ptr = ring->rd[frame_num].iov_base;
- for (i = 0, nb_rx = 0; i < num; i++) {
+ for (i = 0; i < num; i++) {
struct tpacket2_hdr *tp_hdr;
odp_packet_t pkt;
odp_packet_hdr_t *hdr;
@@ -276,7 +278,7 @@ static inline unsigned pkt_mmap_v2_rx(pktio_entry_t *pktio_entry,
continue;
}
- if (pktio_cls_enabled(pktio_entry)) {
+ if (cls_enabled) {
odp_pool_t new_pool;
ret = _odp_cls_classify_packet(pktio_entry, pkt_buf,
@@ -308,11 +310,21 @@ static inline unsigned pkt_mmap_v2_rx(pktio_entry_t *pktio_entry,
tp_hdr->tp_status = TP_STATUS_KERNEL;
frame_num = next_frame_num;
- pkt_table[nb_rx] = pkt;
- nb_rx++;
+ if (cls_enabled) {
+ /* Enqueue packets directly to classifier destination queue */
+ pkt_table[nb_cls++] = pkt;
+ nb_cls = _odp_cls_enq(pkt_table, nb_cls, (i + 1 == num));
+ } else {
+ pkt_table[nb_rx++] = pkt;
+ }
}
+ /* Enqueue remaining classified packets */
+ if (odp_unlikely(nb_cls))
+ _odp_cls_enq(pkt_table, nb_cls, true);
+
ring->frame_num = frame_num;
+
return nb_rx;
}
diff --git a/platform/linux-generic/pktio/socket_xdp.c b/platform/linux-generic/pktio/socket_xdp.c
index 93b6c03d3..867483f76 100644
--- a/platform/linux-generic/pktio/socket_xdp.c
+++ b/platform/linux-generic/pktio/socket_xdp.c
@@ -777,6 +777,9 @@ static uint32_t process_received(pktio_entry_t *pktio_entry, xdp_sock_t *sock, p
uint64_t errors = 0U, octets = 0U;
odp_pktio_t pktio_hdl = pktio_entry->handle;
uint32_t num_rx = 0U;
+ uint32_t num_cls = 0U;
+ uint32_t num_pkts = 0U;
+ const int cls_enabled = pktio_cls_enabled(pktio_entry);
for (int i = 0; i < num; ++i) {
extract_data(xsk_ring_cons__rx_desc(rx, start_idx++), base_addr, &pkt_data);
@@ -798,7 +801,7 @@ static uint32_t process_received(pktio_entry_t *pktio_entry, xdp_sock_t *sock, p
continue;
}
- if (pktio_cls_enabled(pktio_entry)) {
+ if (cls_enabled) {
odp_pool_t new_pool;
ret = _odp_cls_classify_packet(pktio_entry, pkt_data.data,
@@ -817,12 +820,24 @@ static uint32_t process_received(pktio_entry_t *pktio_entry, xdp_sock_t *sock, p
}
pkt_data.pkt_hdr->input = pktio_hdl;
- packets[num_rx++] = pkt_data.pkt;
+ num_pkts++;
octets += pkt_data.len;
+
+ if (cls_enabled) {
+ /* Enqueue packets directly to classifier destination queue */
+ packets[num_cls++] = pkt_data.pkt;
+ num_cls = _odp_cls_enq(packets, num_cls, (i + 1 == num));
+ } else {
+ packets[num_rx++] = pkt_data.pkt;
+ }
}
+ /* Enqueue remaining classified packets */
+ if (odp_unlikely(num_cls))
+ _odp_cls_enq(packets, num_cls, true);
+
sock->qi_stats.octets += octets;
- sock->qi_stats.packets += num_rx;
+ sock->qi_stats.packets += num_pkts;
sock->qi_stats.errors += errors;
return num_rx;
diff --git a/platform/linux-generic/pktio/tap.c b/platform/linux-generic/pktio/tap.c
index 5f5b081c5..50d17e888 100644
--- a/platform/linux-generic/pktio/tap.c
+++ b/platform/linux-generic/pktio/tap.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2015, Ilya Maximets <i.maximets@samsung.com>
- * Copyright (c) 2021-2022, Nokia
+ * Copyright (c) 2021-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -331,6 +331,9 @@ static int tap_pktio_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
odp_time_t ts_val;
odp_time_t *ts = NULL;
int num_rx = 0;
+ int num_cls = 0;
+ const int cls_enabled = pktio_cls_enabled(pktio_entry);
+ odp_packet_t pkt;
odp_ticketlock_lock(&pktio_entry->rxl);
@@ -350,12 +353,23 @@ static int tap_pktio_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
break;
}
- pkts[num_rx] = pack_odp_pkt(pktio_entry, buf, retval, ts);
- if (pkts[num_rx] == ODP_PACKET_INVALID)
+ pkt = pack_odp_pkt(pktio_entry, buf, retval, ts);
+ if (pkt == ODP_PACKET_INVALID)
break;
- num_rx++;
+
+ if (cls_enabled) {
+ /* Enqueue packets directly to classifier destination queue */
+ pkts[num_cls++] = pkt;
+ num_cls = _odp_cls_enq(pkts, num_cls, (i + 1 == num));
+ } else {
+ pkts[num_rx++] = pkt;
+ }
}
+ /* Enqueue remaining classified packets */
+ if (odp_unlikely(num_cls))
+ _odp_cls_enq(pkts, num_cls, true);
+
odp_ticketlock_unlock(&pktio_entry->rxl);
return num_rx;