From: Feifei Wang <feifei.wang2@arm.com>
To: Qiming Yang <qiming.yang@intel.com>, Wenjun Wu <wenjun1.wu@intel.com>
Cc: dev@dpdk.org, nd@arm.com, Feifei Wang <feifei.wang2@arm.com>,
	Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>,
	Ruifeng Wang <ruifeng.wang@arm.com>
Subject: [PATCH v8 3/4] net/ixgbe: implement mbufs recycle mode
Date: Wed,  2 Aug 2023 15:38:39 +0800
Message-ID: <20230802073840.552460-4-feifei.wang2@arm.com>
In-Reply-To: <20230802073840.552460-1-feifei.wang2@arm.com>

Define the specific function implementations for the ixgbe driver.
Currently, the mbufs recycle mode supports the 128-bit vector path and
can be enabled with or without the MBUF_FAST_FREE Tx offload.
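
For reference, below is a minimal usage sketch of how an application is
expected to pair an Rx queue with a Tx queue through the
rte_eth_recycle_rx_queue_info_get() and rte_eth_recycle_mbufs() calls
proposed in patch 1/4. The forward_loop() helper and its parameters are
illustrative only and are not part of this patch:

    #include <stdbool.h>
    #include <stdint.h>

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    #define BURST_SIZE 32

    /* Forward packets from (rx_port, rx_queue) to (tx_port, tx_queue),
     * recycling Tx-completed mbufs directly into the Rx descriptor ring.
     */
    static void
    forward_loop(uint16_t rx_port, uint16_t rx_queue,
            uint16_t tx_port, uint16_t tx_queue, volatile bool *quit)
    {
        struct rte_eth_recycle_rxq_info recycle_rxq_info;
        struct rte_mbuf *pkts[BURST_SIZE];
        uint16_t nb_rx, nb_tx, i;

        /* Query the Rx queue once: the driver fills in the mbuf ring,
         * its size, the mempool and the refill requirement.
         */
        if (rte_eth_recycle_rx_queue_info_get(rx_port, rx_queue,
                &recycle_rxq_info) != 0)
            return;

        while (!*quit) {
            nb_rx = rte_eth_rx_burst(rx_port, rx_queue, pkts, BURST_SIZE);
            if (nb_rx == 0)
                continue;

            /* Move mbufs freed on the Tx queue straight into the Rx
             * ring, bypassing the mempool put/get round trip.
             */
            rte_eth_recycle_mbufs(rx_port, rx_queue, tx_port, tx_queue,
                    &recycle_rxq_info);

            nb_tx = rte_eth_tx_burst(tx_port, tx_queue, pkts, nb_rx);

            /* Drop whatever the Tx queue could not accept. */
            for (i = nb_tx; i < nb_rx; i++)
                rte_pktmbuf_free(pkts[i]);
        }
    }

As ixgbe_recycle_tx_mbufs_reuse_vec() below checks, the Tx buffers must
come from the Rx queue's mempool; otherwise nothing is recycled and the
regular Tx free path is used.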

Suggested-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Signed-off-by: Feifei Wang <feifei.wang2@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c              |   1 +
 drivers/net/ixgbe/ixgbe_ethdev.h              |   3 +
 .../ixgbe/ixgbe_recycle_mbufs_vec_common.c    | 143 ++++++++++++++++++
 drivers/net/ixgbe/ixgbe_rxtx.c                |  37 ++++-
 drivers/net/ixgbe/ixgbe_rxtx.h                |   4 +
 drivers/net/ixgbe/meson.build                 |   3 +
 6 files changed, 189 insertions(+), 2 deletions(-)
 create mode 100644 drivers/net/ixgbe/ixgbe_recycle_mbufs_vec_common.c
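
A note on the ring arithmetic used by ixgbe_recycle_tx_mbufs_reuse_vec()
below: because the Rx mbuf ring size is a power of two, the free space
between the refill head and the receive tail reduces to a mask operation,
and recycling must be skipped whenever the refill would wrap around the
end of the ring. The following standalone sketch uses hypothetical values
and is not driver code:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        const uint16_t mbuf_ring_size = 512;          /* power of two */
        const uint16_t mask = mbuf_ring_size - 1;
        uint16_t refill_head = 500;   /* next Rx slot to refill       */
        uint16_t receive_tail = 100;  /* last Rx slot consumed by app */
        uint16_t n = 32;              /* Tx buffers freed per call    */

        /* Room between refill head and receive tail, modulo ring size. */
        uint16_t avail = (mbuf_ring_size -
                (refill_head - receive_tail)) & mask;
        printf("avail = %u\n", avail);                /* prints 112 */

        /* Case 2 of the wrap check: refilling 32 slots starting at 500
         * would run past index 511, so the driver returns 0 for this
         * burst and leaves the Tx buffers to the normal free path.
         */
        if (refill_head + n > mbuf_ring_size)
            printf("refill would wrap, recycling skipped\n");

        return 0;
    }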

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 14a7d571e0..ea4c9dd561 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -543,6 +543,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
 	.set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
 	.rxq_info_get         = ixgbe_rxq_info_get,
 	.txq_info_get         = ixgbe_txq_info_get,
+	.recycle_rxq_info_get = ixgbe_recycle_rxq_info_get,
 	.timesync_enable      = ixgbe_timesync_enable,
 	.timesync_disable     = ixgbe_timesync_disable,
 	.timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 1291e9099c..22fc3be3d8 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -626,6 +626,9 @@ void ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 void ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	struct rte_eth_txq_info *qinfo);
 
+void ixgbe_recycle_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+		struct rte_eth_recycle_rxq_info *recycle_rxq_info);
+
 int ixgbevf_dev_rx_init(struct rte_eth_dev *dev);
 
 void ixgbevf_dev_tx_init(struct rte_eth_dev *dev);
diff --git a/drivers/net/ixgbe/ixgbe_recycle_mbufs_vec_common.c b/drivers/net/ixgbe/ixgbe_recycle_mbufs_vec_common.c
new file mode 100644
index 0000000000..9a8cc86954
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_recycle_mbufs_vec_common.c
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Arm Limited.
+ */
+
+#include <stdint.h>
+#include <ethdev_driver.h>
+
+#include "ixgbe_ethdev.h"
+#include "ixgbe_rxtx.h"
+
+#pragma GCC diagnostic ignored "-Wcast-qual"
+
+void
+ixgbe_recycle_rx_descriptors_refill_vec(void *rx_queue, uint16_t nb_mbufs)
+{
+	struct ixgbe_rx_queue *rxq = rx_queue;
+	struct ixgbe_rx_entry *rxep;
+	volatile union ixgbe_adv_rx_desc *rxdp;
+	uint16_t rx_id;
+	uint64_t paddr;
+	uint64_t dma_addr;
+	uint16_t i;
+
+	rxdp = rxq->rx_ring + rxq->rxrearm_start;
+	rxep = &rxq->sw_ring[rxq->rxrearm_start];
+
+	for (i = 0; i < nb_mbufs; i++) {
+		/* Initialize rxdp descs. */
+		paddr = (rxep[i].mbuf)->buf_iova + RTE_PKTMBUF_HEADROOM;
+		dma_addr = rte_cpu_to_le_64(paddr);
+		/* Flush descriptors with pa dma_addr */
+		rxdp[i].read.hdr_addr = 0;
+		rxdp[i].read.pkt_addr = dma_addr;
+	}
+
+	/* Update the descriptor initializer index */
+	rxq->rxrearm_start += nb_mbufs;
+	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+		rxq->rxrearm_start = 0;
+
+	rxq->rxrearm_nb -= nb_mbufs;
+
+	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
+			(rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
+
+	/* Update the tail pointer on the NIC */
+	IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
+}
+
+uint16_t
+ixgbe_recycle_tx_mbufs_reuse_vec(void *tx_queue,
+		struct rte_eth_recycle_rxq_info *recycle_rxq_info)
+{
+	struct ixgbe_tx_queue *txq = tx_queue;
+	struct ixgbe_tx_entry *txep;
+	struct rte_mbuf **rxep;
+	int i, n;
+	uint32_t status;
+	uint16_t nb_recycle_mbufs;
+	uint16_t avail = 0;
+	uint16_t mbuf_ring_size = recycle_rxq_info->mbuf_ring_size;
+	uint16_t mask = recycle_rxq_info->mbuf_ring_size - 1;
+	uint16_t refill_requirement = recycle_rxq_info->refill_requirement;
+	uint16_t refill_head = *recycle_rxq_info->refill_head;
+	uint16_t receive_tail = *recycle_rxq_info->receive_tail;
+
+	/* Get available recycling Rx buffers. */
+	avail = (mbuf_ring_size - (refill_head - receive_tail)) & mask;
+
+	/* Check Tx free thresh and Rx available space. */
+	if (txq->nb_tx_free > txq->tx_free_thresh || avail <= txq->tx_rs_thresh)
+		return 0;
+
+	/* check DD bits on threshold descriptor */
+	status = txq->tx_ring[txq->tx_next_dd].wb.status;
+	if (!(status & IXGBE_ADVTXD_STAT_DD))
+		return 0;
+
+	n = txq->tx_rs_thresh;
+	nb_recycle_mbufs = n;
+
+	/* Mbuf recycling can only be used when the mbuf ring does not
+	 * wrap around. There are two cases:
+	 *
+	 * case 1: The refill head of the Rx buffer ring needs to stay
+	 * aligned with the buffer ring size. Then the number of Tx
+	 * buffers being freed must be equal to refill_requirement.
+	 *
+	 * case 2: The refill head of the Rx buffer ring does not need
+	 * to be aligned with the buffer ring size. Then the update of
+	 * the refill head must not exceed the Rx buffer ring size.
+	 */
+	if ((refill_requirement && refill_requirement != n) ||
+		(!refill_requirement && (refill_head + n > mbuf_ring_size)))
+		return 0;
+
+	/* First buffer to free from S/W ring is at index
+	 * tx_next_dd - (tx_rs_thresh-1).
+	 */
+	txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
+	rxep = recycle_rxq_info->mbuf_ring;
+	rxep += refill_head;
+
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
+		/* Bail out if txq contains buffers from an unexpected mempool. */
+		if (unlikely(recycle_rxq_info->mp
+					!= txep[0].mbuf->pool))
+			return 0;
+
+		/* Directly put mbufs from Tx to Rx. */
+		for (i = 0; i < n; i++)
+			rxep[i] = txep[i].mbuf;
+	} else {
+		for (i = 0; i < n; i++) {
+			rxep[i] = rte_pktmbuf_prefree_seg(txep[i].mbuf);
+
+			/* If a Tx buffer is not the last reference or comes
+			 * from an unexpected mempool, the buffers copied so
+			 * far are considered invalid.
+			 */
+			if (unlikely((rxep[i] == NULL && refill_requirement) ||
+					recycle_rxq_info->mp != txep[i].mbuf->pool))
+				nb_recycle_mbufs = 0;
+		}
+		/* If any Tx buffer was not the last reference or came
+		 * from an unexpected mempool, return all collected
+		 * buffers to their mempool instead.
+		 */
+		if (nb_recycle_mbufs == 0)
+			for (i = 0; i < n; i++) {
+				if (rxep[i] != NULL)
+					rte_mempool_put(rxep[i]->pool, rxep[i]);
+			}
+	}
+
+	/* Update counters for Tx. */
+	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
+	if (txq->tx_next_dd >= txq->nb_tx_desc)
+		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+
+	return nb_recycle_mbufs;
+}
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 954ef241a0..90b0a7004f 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2552,6 +2552,9 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
 				(rte_eal_process_type() != RTE_PROC_PRIMARY ||
 					ixgbe_txq_vec_setup(txq) == 0)) {
 			PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM)
+			dev->recycle_tx_mbufs_reuse = ixgbe_recycle_tx_mbufs_reuse_vec;
+#endif
 			dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
 		} else
 		dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
@@ -4890,7 +4893,10 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
 			PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
 					    "callback (port=%d).",
 				     dev->data->port_id);
-
+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM)
+			dev->recycle_rx_descriptors_refill =
+				ixgbe_recycle_rx_descriptors_refill_vec;
+#endif
 			dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
 		} else if (adapter->rx_bulk_alloc_allowed) {
 			PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
@@ -4919,7 +4925,9 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
 				    "burst size no less than %d (port=%d).",
 			     RTE_IXGBE_DESCS_PER_LOOP,
 			     dev->data->port_id);
-
+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM)
+		dev->recycle_rx_descriptors_refill = ixgbe_recycle_rx_descriptors_refill_vec;
+#endif
 		dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
 	} else if (adapter->rx_bulk_alloc_allowed) {
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
@@ -5691,6 +5699,31 @@ ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
 }
 
+void
+ixgbe_recycle_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_recycle_rxq_info *recycle_rxq_info)
+{
+	struct ixgbe_rx_queue *rxq;
+	struct ixgbe_adapter *adapter = dev->data->dev_private;
+
+	rxq = dev->data->rx_queues[queue_id];
+
+	recycle_rxq_info->mbuf_ring = (void *)rxq->sw_ring;
+	recycle_rxq_info->mp = rxq->mb_pool;
+	recycle_rxq_info->mbuf_ring_size = rxq->nb_rx_desc;
+	recycle_rxq_info->receive_tail = &rxq->rx_tail;
+
+	if (adapter->rx_vec_allowed) {
+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM)
+		recycle_rxq_info->refill_requirement = RTE_IXGBE_RXQ_REARM_THRESH;
+		recycle_rxq_info->refill_head = &rxq->rxrearm_start;
+#endif
+	} else {
+		recycle_rxq_info->refill_requirement = rxq->rx_free_thresh;
+		recycle_rxq_info->refill_head = &rxq->rx_free_trigger;
+	}
+}
+
 /*
  * [VF] Initializes Receive Unit.
  */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 668a5b9814..ee89c89929 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -295,6 +295,10 @@ int ixgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt);
 extern const uint32_t ptype_table[IXGBE_PACKET_TYPE_MAX];
 extern const uint32_t ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX];
 
+uint16_t ixgbe_recycle_tx_mbufs_reuse_vec(void *tx_queue,
+		struct rte_eth_recycle_rxq_info *recycle_rxq_info);
+void ixgbe_recycle_rx_descriptors_refill_vec(void *rx_queue, uint16_t nb_mbufs);
+
 uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 				    uint16_t nb_pkts);
 int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
diff --git a/drivers/net/ixgbe/meson.build b/drivers/net/ixgbe/meson.build
index a18908ef7c..52e77190f7 100644
--- a/drivers/net/ixgbe/meson.build
+++ b/drivers/net/ixgbe/meson.build
@@ -17,6 +17,7 @@ sources = files(
         'ixgbe_rxtx.c',
         'ixgbe_tm.c',
         'ixgbe_vf_representor.c',
+        'ixgbe_recycle_mbufs_vec_common.c',
         'rte_pmd_ixgbe.c',
 )
 
@@ -26,11 +27,13 @@ deps += ['hash', 'security']
 
 if arch_subdir == 'x86'
     sources += files('ixgbe_rxtx_vec_sse.c')
+    sources += files('ixgbe_recycle_mbufs_vec_common.c')
     if is_windows and cc.get_id() != 'clang'
         cflags += ['-fno-asynchronous-unwind-tables']
     endif
 elif arch_subdir == 'arm'
     sources += files('ixgbe_rxtx_vec_neon.c')
+    sources += files('ixgbe_recycle_mbufs_vec_common.c')
 endif
 
 includes += include_directories('base')
-- 
2.25.1


Thread overview: 145+ messages
2022-04-20  8:16 [PATCH v1 0/5] Direct re-arming of buffers on receive side Feifei Wang
2022-04-20  8:16 ` [PATCH v1 1/5] net/i40e: remove redundant Dtype initialization Feifei Wang
2022-04-20  8:16 ` [PATCH v1 2/5] net/i40e: enable direct rearm mode Feifei Wang
2022-05-11 22:28   ` Konstantin Ananyev
2022-04-20  8:16 ` [PATCH v1 3/5] ethdev: add API for " Feifei Wang
2022-04-20  9:59   ` Morten Brørup
2022-04-29  2:42     ` Re: " Feifei Wang
2022-04-20 10:41   ` Andrew Rybchenko
2022-04-29  6:28     ` Re: " Feifei Wang
2022-05-10 22:49       ` Honnappa Nagarahalli
2022-06-03 10:19         ` Andrew Rybchenko
2022-04-20 10:50   ` Jerin Jacob
2022-05-02  3:09     ` Re: " Feifei Wang
2022-04-21 14:57   ` Stephen Hemminger
2022-04-29  6:35     ` Re: " Feifei Wang
2022-04-20  8:16 ` [PATCH v1 4/5] net/i40e: add direct rearm mode internal API Feifei Wang
2022-05-11 22:31   ` Konstantin Ananyev
2022-04-20  8:16 ` [PATCH v1 5/5] examples/l3fwd: enable direct rearm mode Feifei Wang
2022-04-20 10:10   ` Morten Brørup
2022-04-21  2:35     ` Honnappa Nagarahalli
2022-04-21  6:40       ` Morten Brørup
2022-05-10 22:01         ` Honnappa Nagarahalli
2022-05-11  7:17           ` Morten Brørup
2022-05-11 22:33   ` Konstantin Ananyev
2022-05-27 11:28     ` Konstantin Ananyev
2022-05-31 17:14       ` Honnappa Nagarahalli
2022-06-03 10:32         ` Andrew Rybchenko
2022-06-06 11:27         ` Konstantin Ananyev
2022-06-29 21:25           ` Honnappa Nagarahalli
2022-05-11 23:00 ` [PATCH v1 0/5] Direct re-arming of buffers on receive side Konstantin Ananyev
     [not found] ` <20220516061012.618787-1-feifei.wang2@arm.com>
2022-05-24  1:25   ` Konstantin Ananyev
2022-05-24 12:40     ` Morten Brørup
2022-05-24 20:14     ` Honnappa Nagarahalli
2022-05-28 12:22       ` Konstantin Ananyev
2022-06-01  1:00         ` Honnappa Nagarahalli
2022-06-03 23:32           ` Konstantin Ananyev
2022-06-04  8:07             ` Morten Brørup
2022-06-29 21:58               ` Honnappa Nagarahalli
2022-06-30 15:21                 ` Morten Brørup
2022-07-01 19:30                   ` Honnappa Nagarahalli
2022-07-01 20:28                     ` Morten Brørup
2022-06-13  5:55     ` Re: " Feifei Wang
2023-01-04  7:30 ` [PATCH v3 0/3] " Feifei Wang
2023-01-04  7:30   ` [PATCH v3 1/3] ethdev: enable direct rearm with separate API Feifei Wang
2023-01-04  8:21     ` Morten Brørup
2023-01-04  8:51       ` Re: " Feifei Wang
2023-01-04 10:11         ` Morten Brørup
2023-02-24  8:55           ` Re: " Feifei Wang
2023-03-06 12:49       ` Ferruh Yigit
2023-03-06 13:26         ` Morten Brørup
2023-03-06 14:53           ` Re: " Feifei Wang
2023-03-06 15:02           ` Ferruh Yigit
2023-03-07  6:12             ` Honnappa Nagarahalli
2023-03-07 10:52               ` Konstantin Ananyev
2023-03-07 20:41               ` Ferruh Yigit
2023-03-22 14:43                 ` Honnappa Nagarahalli
2023-02-02 14:33     ` Konstantin Ananyev
2023-02-24  9:45       ` Re: " Feifei Wang
2023-02-27 19:31         ` Konstantin Ananyev
2023-02-28  2:16           ` Re: " Feifei Wang
2023-02-28  8:09           ` Morten Brørup
2023-03-01  7:34             ` Re: " Feifei Wang
2023-01-04  7:30   ` [PATCH v3 2/3] net/i40e: " Feifei Wang
2023-02-02 14:37     ` Konstantin Ananyev
2023-02-24  9:50       ` Re: " Feifei Wang
2023-02-27 19:35         ` Konstantin Ananyev
2023-02-28  2:15           ` Re: " Feifei Wang
2023-03-07 11:01             ` Konstantin Ananyev
2023-03-14  6:07               ` Re: " Feifei Wang
2023-03-19 16:11                 ` Konstantin Ananyev
2023-03-23 10:49                   ` Feifei Wang
2023-01-04  7:30   ` [PATCH v3 3/3] net/ixgbe: " Feifei Wang
2023-01-31  6:13   ` Re: [PATCH v3 0/3] Direct re-arming of buffers on receive side Feifei Wang
2023-02-01  1:10     ` Konstantin Ananyev
2023-02-01  2:24       ` Re: " Feifei Wang
2023-03-22 12:56   ` Morten Brørup
2023-03-22 13:41     ` Honnappa Nagarahalli
2023-03-22 14:04       ` Morten Brørup
2023-08-02  7:38 ` [PATCH v8 0/4] Recycle mbufs from Tx queue into Rx queue Feifei Wang
2023-08-02  7:38   ` [PATCH v8 1/4] ethdev: add API for mbufs recycle mode Feifei Wang
2023-08-02  7:38   ` [PATCH v8 2/4] net/i40e: implement " Feifei Wang
2023-08-02  7:38   ` Feifei Wang [this message]
2023-08-02  7:38   ` [PATCH v8 4/4] app/testpmd: add recycle mbufs engine Feifei Wang
2023-08-02  8:08 ` [PATCH v9 0/4] Recycle mbufs from Tx queue into Rx queue Feifei Wang
2023-08-02  8:08   ` [PATCH v9 1/4] ethdev: add API for mbufs recycle mode Feifei Wang
2023-08-02  8:08   ` [PATCH v9 2/4] net/i40e: implement " Feifei Wang
2023-08-02  8:08   ` [PATCH v9 3/4] net/ixgbe: " Feifei Wang
2023-08-02  8:08   ` [PATCH v9 4/4] app/testpmd: add recycle mbufs engine Feifei Wang
2023-08-04  9:24 ` [PATCH v10 0/4] Recycle mbufs from Tx queue into Rx queue Feifei Wang
2023-08-04  9:24   ` [PATCH v10 1/4] ethdev: add API for mbufs recycle mode Feifei Wang
2023-08-04  9:24   ` [PATCH v10 2/4] net/i40e: implement " Feifei Wang
2023-08-04  9:24   ` [PATCH v10 3/4] net/ixgbe: " Feifei Wang
2023-08-04  9:24   ` [PATCH v10 4/4] app/testpmd: add recycle mbufs engine Feifei Wang
2023-08-22  7:27 ` [PATCH v11 0/4] Recycle mbufs from Tx queue into Rx queue Feifei Wang
2023-08-22  7:27   ` [PATCH v11 1/4] ethdev: add API for mbufs recycle mode Feifei Wang
2023-08-22 14:02     ` Stephen Hemminger
2023-08-24  3:16       ` Feifei Wang
2023-08-22 23:33     ` Konstantin Ananyev
2023-08-24  3:38     ` Feifei Wang
2023-08-22  7:27   ` [PATCH v11 2/4] net/i40e: implement " Feifei Wang
2023-08-22 23:43     ` Konstantin Ananyev
2023-08-24  6:10     ` Feifei Wang
2023-08-31 17:24       ` Konstantin Ananyev
2023-08-31 23:49         ` Konstantin Ananyev
2023-09-01 12:22         ` Feifei Wang
2023-09-01 14:22           ` Konstantin Ananyev
2023-09-04  6:59             ` Feifei Wang
2023-09-04  7:49               ` Konstantin Ananyev
2023-09-04  9:24                 ` Feifei Wang
2023-09-04 10:21                   ` Konstantin Ananyev
2023-09-05  3:11                     ` Feifei Wang
2023-09-22 14:58                       ` Feifei Wang
2023-09-22 15:46                         ` Feifei Wang
2023-09-22 16:40                           ` Konstantin Ananyev
2023-09-23  5:52                             ` Feifei Wang
2023-09-23 20:40                               ` Konstantin Ananyev
2023-09-25  3:26                               ` Feifei Wang
2023-08-22  7:27   ` [PATCH v11 3/4] net/ixgbe: " Feifei Wang
2023-08-22  7:27   ` [PATCH v11 4/4] app/testpmd: add recycle mbufs engine Feifei Wang
2023-08-22  7:33   ` [PATCH v11 0/4] Recycle mbufs from Tx queue into Rx queue Feifei Wang
2023-08-22 13:59   ` Stephen Hemminger
2023-08-24  3:11     ` Feifei Wang
2023-08-24  7:36 ` [PATCH v12 " Feifei Wang
2023-08-24  7:36   ` [PATCH v12 1/4] ethdev: add API for mbufs recycle mode Feifei Wang
2023-08-31  9:16     ` Feifei Wang
2023-09-20 13:10     ` Ferruh Yigit
2023-08-24  7:36   ` [PATCH v12 2/4] net/i40e: implement " Feifei Wang
2023-08-24  7:36   ` [PATCH v12 3/4] net/ixgbe: " Feifei Wang
2023-08-24  7:36   ` [PATCH v12 4/4] app/testpmd: add recycle mbufs engine Feifei Wang
2023-09-20 13:11     ` Ferruh Yigit
2023-09-20 13:12   ` [PATCH v12 0/4] Recycle mbufs from Tx queue into Rx queue Ferruh Yigit
2023-09-22 15:30     ` Ferruh Yigit
2023-09-25  3:19 ` [PATCH v13 " Feifei Wang
2023-09-25  3:19   ` [PATCH v13 1/4] ethdev: add API for mbufs recycle mode Feifei Wang
2023-09-25  4:40     ` Ajit Khaparde
2023-09-25  3:19   ` [PATCH v13 2/4] net/i40e: implement " Feifei Wang
2023-09-26  8:26     ` Ferruh Yigit
2023-09-26  8:56       ` Konstantin Ananyev
2023-09-26 13:34     ` Konstantin Ananyev
2023-09-25  3:19   ` [PATCH v13 3/4] net/ixgbe: " Feifei Wang
2023-09-26 13:30     ` Konstantin Ananyev
2023-09-25  3:19   ` [PATCH v13 4/4] app/testpmd: add recycle mbufs engine Feifei Wang
2023-09-26 13:30     ` Konstantin Ananyev
2023-09-26 16:38       ` Ajit Khaparde
2023-09-27 17:24   ` [PATCH v13 0/4] Recycle mbufs from Tx queue into Rx queue Ferruh Yigit
