From: Aman Kumar <aman.kumar@vvdntech.in>
To: dev@dpdk.org
Cc: maxime.coquelin@redhat.com, david.marchand@redhat.com,
	aman.kumar@vvdntech.in
Subject: [RFC PATCH 14/29] net/qdma: add routine for Tx queue initialization
Date: Wed,  6 Jul 2022 13:22:04 +0530
Message-ID: <20220706075219.517046-15-aman.kumar@vvdntech.in>
In-Reply-To: <20220706075219.517046-1-aman.kumar@vvdntech.in>

Define routines to handle Tx queue related operations.
This patch adds support for the rte_eth_dev_tx_queue*
APIs in this PMD.

Signed-off-by: Aman Kumar <aman.kumar@vvdntech.in>
---
 drivers/net/qdma/qdma.h        |   8 +
 drivers/net/qdma/qdma_common.c |  74 +++++++++
 drivers/net/qdma/qdma_devops.c | 270 +++++++++++++++++++++++++++++++--
 3 files changed, 343 insertions(+), 9 deletions(-)
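
For reviewers, a minimal sketch of how an application reaches these new
ops through the generic ethdev API. setup_and_start_txq() and all values
(port, queue id, 1024 descriptors) are illustrative, and
rte_eth_dev_configure() is assumed to have been called already:

#include <rte_ethdev.h>

static int setup_and_start_txq(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	txconf = dev_info.default_txconf;
	txconf.tx_deferred_start = 1;	/* start the queue explicitly */

	/* dispatches to qdma_dev_tx_queue_setup() from this patch */
	ret = rte_eth_tx_queue_setup(port_id, 0, 1024,
				     rte_eth_dev_socket_id(port_id),
				     &txconf);
	if (ret != 0)
		return ret;

	ret = rte_eth_dev_start(port_id);
	if (ret != 0)
		return ret;

	/* dispatches to qdma_dev_tx_queue_start() from this patch */
	return rte_eth_dev_tx_queue_start(port_id, 0);
}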

diff --git a/drivers/net/qdma/qdma.h b/drivers/net/qdma/qdma.h
index 5992473b33..8515ebe60e 100644
--- a/drivers/net/qdma/qdma.h
+++ b/drivers/net/qdma/qdma.h
@@ -42,6 +42,7 @@
 #define MIN_RX_PIDX_UPDATE_THRESHOLD (1)
 #define MIN_TX_PIDX_UPDATE_THRESHOLD (1)
 #define DEFAULT_MM_CMPT_CNT_THRESHOLD	(2)
+#define QDMA_TXQ_PIDX_UPDATE_INTERVAL	(1000) /* 1000 usec */
 
 #define WB_TIMEOUT		(100000)
 #define RESET_TIMEOUT		(60000)
@@ -198,6 +199,7 @@ struct qdma_tx_queue {
 	uint16_t			tx_desc_pend;
 	uint16_t			nb_tx_desc; /* No of TX descriptors. */
 	rte_spinlock_t			pidx_update_lock;
+	struct qdma_q_pidx_reg_info	q_pidx_info;
 	uint64_t			offloads; /* Tx offloads */
 
 	uint8_t				st_mode:1;/* dma-mode: MM or ST */
@@ -297,17 +299,23 @@ struct qdma_pci_dev {
 };
 
 void qdma_dev_ops_init(struct rte_eth_dev *dev);
+void qdma_txq_pidx_update(void *arg);
 int qdma_pf_csr_read(struct rte_eth_dev *dev);
 
 uint8_t qmda_get_desc_sz_idx(enum rte_pmd_qdma_bypass_desc_len);
 
 int qdma_init_rx_queue(struct qdma_rx_queue *rxq);
+void qdma_reset_tx_queue(struct qdma_tx_queue *txq);
 void qdma_reset_rx_queue(struct qdma_rx_queue *rxq);
 
 void qdma_clr_rx_queue_ctxts(struct rte_eth_dev *dev, uint32_t qid,
 				uint32_t mode);
 void qdma_inv_rx_queue_ctxts(struct rte_eth_dev *dev, uint32_t qid,
 				uint32_t mode);
+void qdma_clr_tx_queue_ctxts(struct rte_eth_dev *dev, uint32_t qid,
+				uint32_t mode);
+void qdma_inv_tx_queue_ctxts(struct rte_eth_dev *dev, uint32_t qid,
+				uint32_t mode);
 int qdma_identify_bars(struct rte_eth_dev *dev);
 int qdma_get_hw_version(struct rte_eth_dev *dev);
 
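The qdma_txq_pidx_update() declaration added above, together with
QDMA_TXQ_PIDX_UPDATE_INTERVAL, suggests a deferred PIDX write driven by
an EAL alarm. The callback body is not part of this patch, so the
following is only a guess at the intended re-arming pattern; the
argument type is likewise an assumption:

#include <rte_alarm.h>

void qdma_txq_pidx_update(void *arg)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)arg;

	(void)dev;	/* placeholder: flush pending Tx PIDX writes here */

	/* rte_eal_alarm_set() callbacks are one-shot, so re-arm */
	rte_eal_alarm_set(QDMA_TXQ_PIDX_UPDATE_INTERVAL,
			  qdma_txq_pidx_update, arg);
}
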
diff --git a/drivers/net/qdma/qdma_common.c b/drivers/net/qdma/qdma_common.c
index d39e642008..2650438e47 100644
--- a/drivers/net/qdma/qdma_common.c
+++ b/drivers/net/qdma/qdma_common.c
@@ -160,6 +160,80 @@ int qdma_init_rx_queue(struct qdma_rx_queue *rxq)
 	return -ENOMEM;
 }
 
+/*
+ * Tx queue reset
+ */
+void qdma_reset_tx_queue(struct qdma_tx_queue *txq)
+{
+	uint32_t i;
+	uint32_t sz;
+
+	txq->tx_fl_tail = 0;
+	if (txq->st_mode) {  /* ST-mode */
+		sz = sizeof(struct qdma_ul_st_h2c_desc);
+		/* Zero out HW ring memory */
+		for (i = 0; i < (sz * (txq->nb_tx_desc)); i++)
+			((volatile char *)txq->tx_ring)[i] = 0;
+	} else {
+		sz = sizeof(struct qdma_ul_mm_desc);
+		/* Zero out HW ring memory */
+		for (i = 0; i < (sz * (txq->nb_tx_desc)); i++)
+			((volatile char *)txq->tx_ring)[i] = 0;
+	}
+
+	/* Initialize SW ring entries */
+	for (i = 0; i < txq->nb_tx_desc; i++)
+		txq->sw_ring[i] = NULL;
+}
+
+void qdma_inv_tx_queue_ctxts(struct rte_eth_dev *dev,
+			     uint32_t qid, uint32_t mode)
+{
+	struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+	struct qdma_descq_sw_ctxt q_sw_ctxt;
+	struct qdma_descq_hw_ctxt q_hw_ctxt;
+	struct qdma_descq_credit_ctxt q_credit_ctxt;
+	struct qdma_hw_access *hw_access = qdma_dev->hw_access;
+
+	hw_access->qdma_sw_ctx_conf(dev, 0, qid, &q_sw_ctxt,
+			QDMA_HW_ACCESS_INVALIDATE);
+	hw_access->qdma_hw_ctx_conf(dev, 0, qid, &q_hw_ctxt,
+			QDMA_HW_ACCESS_INVALIDATE);
+
+	if (mode) {  /* ST-mode */
+		hw_access->qdma_credit_ctx_conf(dev, 0, qid,
+			&q_credit_ctxt, QDMA_HW_ACCESS_INVALIDATE);
+	}
+}
+
+/**
+ * Clear Tx queue contexts
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ *
+ * @return
+ *   Nothing.
+ */
+void qdma_clr_tx_queue_ctxts(struct rte_eth_dev *dev,
+			     uint32_t qid, uint32_t mode)
+{
+	struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+	struct qdma_descq_sw_ctxt q_sw_ctxt;
+	struct qdma_descq_credit_ctxt q_credit_ctxt;
+	struct qdma_descq_hw_ctxt q_hw_ctxt;
+	struct qdma_hw_access *hw_access = qdma_dev->hw_access;
+
+	hw_access->qdma_sw_ctx_conf(dev, 0, qid, &q_sw_ctxt,
+			QDMA_HW_ACCESS_CLEAR);
+	hw_access->qdma_hw_ctx_conf(dev, 0, qid, &q_hw_ctxt,
+			QDMA_HW_ACCESS_CLEAR);
+	if (mode) {  /* ST-mode */
+		hw_access->qdma_credit_ctx_conf(dev, 0, qid,
+			&q_credit_ctxt, QDMA_HW_ACCESS_CLEAR);
+	}
+}
+
 /* Utility function to find index of an element in an array */
 int index_of_array(uint32_t *arr, uint32_t n, uint32_t element)
 {
diff --git a/drivers/net/qdma/qdma_devops.c b/drivers/net/qdma/qdma_devops.c
index fefbbda012..e411c0f1be 100644
--- a/drivers/net/qdma/qdma_devops.c
+++ b/drivers/net/qdma/qdma_devops.c
@@ -573,13 +573,196 @@ int qdma_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 			    uint16_t nb_tx_desc, unsigned int socket_id,
 			    const struct rte_eth_txconf *tx_conf)
 {
-	(void)dev;
-	(void)tx_queue_id;
-	(void)nb_tx_desc;
-	(void)socket_id;
-	(void)tx_conf;
+	struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+	struct qdma_tx_queue *txq = NULL;
+	struct qdma_ul_mm_desc *tx_ring_mm;
+	struct qdma_ul_st_h2c_desc *tx_ring_st;
+	uint32_t sz;
+	uint8_t  *tx_ring_bypass;
+	int err = 0;
+
+	PMD_DRV_LOG(INFO, "Configuring Tx queue id:%d with %d desc\n",
+		    tx_queue_id, nb_tx_desc);
+
+	if (!qdma_dev->is_vf) {
+		err = qdma_dev_increment_active_queue
+				(qdma_dev->dma_device_index,
+				qdma_dev->func_id,
+				QDMA_DEV_Q_TYPE_H2C);
+		if (err != QDMA_SUCCESS)
+			return -EINVAL;
+	}
+	if (!qdma_dev->init_q_range) {
+		if (!qdma_dev->is_vf) {
+			err = qdma_pf_csr_read(dev);
+			if (err < 0) {
+				PMD_DRV_LOG(ERR, "CSR read failed\n");
+				goto tx_setup_err;
+			}
+		}
+		qdma_dev->init_q_range = 1;
+	}
+	/* allocate Tx queue data structure */
+	txq = rte_zmalloc_socket("QDMA_TxQ", sizeof(struct qdma_tx_queue),
+						RTE_CACHE_LINE_SIZE, socket_id);
+	if (txq == NULL) {
+		PMD_DRV_LOG(ERR, "Memory allocation failed for "
+				"Tx queue SW structure\n");
+		err = -ENOMEM;
+		goto tx_setup_err;
+	}
+
+	txq->st_mode = qdma_dev->q_info[tx_queue_id].queue_mode;
+	txq->en_bypass = (qdma_dev->q_info[tx_queue_id].tx_bypass_mode) ? 1 : 0;
+	txq->bypass_desc_sz = qdma_dev->q_info[tx_queue_id].tx_bypass_desc_sz;
+
+	txq->nb_tx_desc = (nb_tx_desc + 1);
+	txq->queue_id = tx_queue_id;
+	txq->dev = dev;
+	txq->port_id = dev->data->port_id;
+	txq->func_id = qdma_dev->func_id;
+	txq->num_queues = dev->data->nb_tx_queues;
+	txq->tx_deferred_start = tx_conf->tx_deferred_start;
+
+	txq->ringszidx = index_of_array(qdma_dev->g_ring_sz,
+					QDMA_NUM_RING_SIZES, txq->nb_tx_desc);
+	if (txq->ringszidx < 0) {
+		PMD_DRV_LOG(ERR, "Expected Ring size %d not found\n",
+				txq->nb_tx_desc);
+		err = -EINVAL;
+		goto tx_setup_err;
+	}
+
+	if (qdma_dev->ip_type == EQDMA_SOFT_IP &&
+			qdma_dev->vivado_rel >= QDMA_VIVADO_2020_2) {
+		if (qdma_dev->dev_cap.desc_eng_mode ==
+				QDMA_DESC_ENG_BYPASS_ONLY) {
+			PMD_DRV_LOG(ERR,
+				"Bypass only mode design "
+				"is not supported\n");
+			err = -ENOTSUP;
+			goto tx_setup_err;
+		}
+
+		if (txq->en_bypass &&
+				qdma_dev->dev_cap.desc_eng_mode ==
+				QDMA_DESC_ENG_INTERNAL_ONLY) {
+			PMD_DRV_LOG(ERR,
+				"Tx qid %d config in bypass "
+				"mode not supported on "
+				"internal only mode design\n",
+				tx_queue_id);
+			err = -ENOTSUP;
+			goto tx_setup_err;
+		}
+	}
+
+	/* Allocate memory for TX descriptor ring */
+	if (txq->st_mode) {
+		if (!qdma_dev->dev_cap.st_en) {
+			PMD_DRV_LOG(ERR, "Streaming mode not enabled "
+					"in the hardware\n");
+			err = -EINVAL;
+			goto tx_setup_err;
+		}
+
+		if (txq->en_bypass &&
+			txq->bypass_desc_sz != 0)
+			sz = (txq->nb_tx_desc) * (txq->bypass_desc_sz);
+		else
+			sz = (txq->nb_tx_desc) *
+					sizeof(struct qdma_ul_st_h2c_desc);
+		txq->tx_mz = qdma_zone_reserve(dev, "TxHwRn", tx_queue_id, sz,
+						socket_id);
+		if (!txq->tx_mz) {
+			PMD_DRV_LOG(ERR, "Couldn't reserve memory for "
+					"ST H2C ring of size %d\n", sz);
+			err = -ENOMEM;
+			goto tx_setup_err;
+		}
+
+		txq->tx_ring = txq->tx_mz->addr;
+		tx_ring_st = (struct qdma_ul_st_h2c_desc *)txq->tx_ring;
+
+		tx_ring_bypass = (uint8_t *)txq->tx_ring;
+		/* Write-back status structure */
+		if (txq->en_bypass &&
+			txq->bypass_desc_sz != 0)
+			txq->wb_status = (struct wb_status *)&
+					tx_ring_bypass[(txq->nb_tx_desc - 1) *
+					(txq->bypass_desc_sz)];
+		else
+			txq->wb_status = (struct wb_status *)&
+					tx_ring_st[txq->nb_tx_desc - 1];
+	} else {
+		if (!qdma_dev->dev_cap.mm_en) {
+			PMD_DRV_LOG(ERR, "Memory mapped mode not "
+					"enabled in the hardware\n");
+			err = -EINVAL;
+			goto tx_setup_err;
+		}
+
+		if (txq->en_bypass &&
+			txq->bypass_desc_sz != 0)
+			sz = (txq->nb_tx_desc) * (txq->bypass_desc_sz);
+		else
+			sz = (txq->nb_tx_desc) * sizeof(struct qdma_ul_mm_desc);
+		txq->tx_mz = qdma_zone_reserve(dev, "TxHwRn", tx_queue_id,
+						sz, socket_id);
+		if (!txq->tx_mz) {
+			PMD_DRV_LOG(ERR, "Couldn't reserve memory for "
+					"MM H2C ring of size %d\n", sz);
+			err = -ENOMEM;
+			goto tx_setup_err;
+		}
+
+		txq->tx_ring = txq->tx_mz->addr;
+		tx_ring_mm = (struct qdma_ul_mm_desc *)txq->tx_ring;
+
+		tx_ring_bypass = (uint8_t *)txq->tx_ring;
+
+		/* Write-back status structure */
+		if (txq->en_bypass &&
+			txq->bypass_desc_sz != 0)
+			txq->wb_status = (struct wb_status *)&
+				tx_ring_bypass[(txq->nb_tx_desc - 1) *
+				(txq->bypass_desc_sz)];
+		else
+			txq->wb_status = (struct wb_status *)&
+				tx_ring_mm[txq->nb_tx_desc - 1];
+	}
+
+	PMD_DRV_LOG(INFO, "Tx ring phys addr: 0x%lX, Tx Ring virt addr: 0x%lX",
+	    (uint64_t)txq->tx_mz->iova, (uint64_t)txq->tx_ring);
+
+	/* Allocate memory for TX software ring */
+	sz = txq->nb_tx_desc * sizeof(struct rte_mbuf *);
+	txq->sw_ring = rte_zmalloc_socket("TxSwRn", sz,
+				RTE_CACHE_LINE_SIZE, socket_id);
+	if (txq->sw_ring == NULL) {
+		PMD_DRV_LOG(ERR, "Memory allocation failed for "
+				 "Tx queue SW ring\n");
+		err = -ENOMEM;
+		goto tx_setup_err;
+	}
+
+	rte_spinlock_init(&txq->pidx_update_lock);
+	dev->data->tx_queues[tx_queue_id] = txq;
 
 	return 0;
+
+tx_setup_err:
+	PMD_DRV_LOG(ERR, " Tx queue setup failed");
+	if (!qdma_dev->is_vf)
+		qdma_dev_decrement_active_queue(qdma_dev->dma_device_index,
+						qdma_dev->func_id,
+						QDMA_DEV_Q_TYPE_H2C);
+	if (txq) {
+		if (txq->tx_mz)
+			rte_memzone_free(txq->tx_mz);
+		if (txq->sw_ring)
+			rte_free(txq->sw_ring);
+		rte_free(txq);
+	}
+	return err;
 }
 
 void qdma_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t q_id)
@@ -983,9 +1166,54 @@ int qdma_dev_configure(struct rte_eth_dev *dev)
 
 int qdma_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qid)
 {
-	(void)dev;
-	(void)qid;
+	struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+	struct qdma_tx_queue *txq;
+	uint32_t queue_base = qdma_dev->queue_base;
+	int err, bypass_desc_sz_idx;
+	struct qdma_descq_sw_ctxt q_sw_ctxt;
+	struct qdma_hw_access *hw_access = qdma_dev->hw_access;
+
+	txq = (struct qdma_tx_queue *)dev->data->tx_queues[qid];
 
+	memset(&q_sw_ctxt, 0, sizeof(struct qdma_descq_sw_ctxt));
+
+	bypass_desc_sz_idx = qmda_get_desc_sz_idx(txq->bypass_desc_sz);
+
+	qdma_reset_tx_queue(txq);
+	qdma_clr_tx_queue_ctxts(dev, (qid + queue_base), txq->st_mode);
+
+	if (txq->st_mode) {
+		q_sw_ctxt.desc_sz = SW_DESC_CNTXT_H2C_STREAM_DMA;
+	} else {
+		q_sw_ctxt.desc_sz = SW_DESC_CNTXT_MEMORY_MAP_DMA;
+		q_sw_ctxt.is_mm = 1;
+	}
+	q_sw_ctxt.wbi_chk = 1;
+	q_sw_ctxt.wbi_intvl_en = 1;
+	q_sw_ctxt.fnc_id = txq->func_id;
+	q_sw_ctxt.qen = 1;
+	q_sw_ctxt.rngsz_idx = txq->ringszidx;
+	q_sw_ctxt.bypass = txq->en_bypass;
+	q_sw_ctxt.wbk_en = 1;
+	q_sw_ctxt.ring_bs_addr = (uint64_t)txq->tx_mz->iova;
+
+	if (txq->en_bypass &&
+		txq->bypass_desc_sz != 0)
+		q_sw_ctxt.desc_sz = bypass_desc_sz_idx;
+
+	/* Set SW Context */
+	err = hw_access->qdma_sw_ctx_conf(dev, 0,
+			(qid + queue_base), &q_sw_ctxt,
+			QDMA_HW_ACCESS_WRITE);
+	if (err < 0)
+		return qdma_dev->hw_access->qdma_get_error_code(err);
+
+	txq->q_pidx_info.pidx = 0;
+	hw_access->qdma_queue_pidx_update(dev, qdma_dev->is_vf,
+		qid, 0, &txq->q_pidx_info);
+
+	dev->data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
+	txq->status = RTE_ETH_QUEUE_STATE_STARTED;
 	return 0;
 }
 
@@ -1185,8 +1413,32 @@ int qdma_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qid)
 
 int qdma_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qid)
 {
-	(void)dev;
-	(void)qid;
+	struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+	uint32_t queue_base = qdma_dev->queue_base;
+	struct qdma_tx_queue *txq;
+	int cnt = 0;
+	uint16_t count;
+
+	txq = (struct qdma_tx_queue *)dev->data->tx_queues[qid];
+
+	txq->status = RTE_ETH_QUEUE_STATE_STOPPED;
+	/* Wait for TXQ to send out all packets. */
+	while (txq->wb_status->cidx != txq->q_pidx_info.pidx) {
+		usleep(10);
+		if (cnt++ > 10000)
+			break;
+	}
+
+	qdma_inv_tx_queue_ctxts(dev, (qid + queue_base), txq->st_mode);
+
+	/* Relinquish pending mbufs */
+	for (count = 0; count < txq->nb_tx_desc - 1; count++) {
+		rte_pktmbuf_free(txq->sw_ring[count]);
+		txq->sw_ring[count] = NULL;
+	}
+	qdma_reset_tx_queue(txq);
+
+	dev->data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
 
 	return 0;
 }
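
Two notes on these start/stop paths. qdma_dev_tx_queue_stop() polls
wb_status->cidx every 10 us for up to 10000 iterations, i.e. roughly a
100 ms drain timeout, and currently gives up silently; a warning log on
timeout might help debugging. Also, the ring carries one extra entry
whose last slot doubles as the hardware write-back status area, which
the setup path computes as below (a simplified restatement of the patch
logic for ST mode without bypass):

	/* the requested descriptor count is bumped by one at setup */
	txq->nb_tx_desc = nb_tx_desc + 1;
	sz = txq->nb_tx_desc * sizeof(struct qdma_ul_st_h2c_desc);

	/* the final ring entry holds the write-back status, leaving
	 * indexes 0 .. nb_tx_desc - 2 usable for packet descriptors
	 */
	txq->wb_status = (struct wb_status *)&tx_ring_st[txq->nb_tx_desc - 1];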
-- 
2.36.1


Thread overview: 43+ messages
2022-07-06  7:51 [RFC PATCH 00/29] cover letter for net/qdma PMD Aman Kumar
2022-07-06  7:51 ` [RFC PATCH 01/29] net/qdma: add net PMD template Aman Kumar
2022-07-06  7:51 ` [RFC PATCH 02/29] maintainers: add maintainer for net/qdma PMD Aman Kumar
2022-07-06  7:51 ` [RFC PATCH 03/29] net/meson.build: add support to compile net qdma Aman Kumar
2022-07-06  7:51 ` [RFC PATCH 04/29] net/qdma: add logging support Aman Kumar
2022-07-06 15:27   ` Stephen Hemminger
2022-07-07  2:32     ` Aman Kumar
2022-07-06  7:51 ` [RFC PATCH 05/29] net/qdma: add device init and uninit functions Aman Kumar
2022-07-06 15:35   ` Stephen Hemminger
2022-07-07  2:41     ` Aman Kumar
2022-07-06  7:51 ` [RFC PATCH 06/29] net/qdma: add qdma access library Aman Kumar
2022-07-06  7:51 ` [RFC PATCH 07/29] net/qdma: add supported qdma version Aman Kumar
2022-07-06  7:51 ` [RFC PATCH 08/29] net/qdma: qdma hardware initialization Aman Kumar
2022-07-06  7:51 ` [RFC PATCH 09/29] net/qdma: define device modes and data structure Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 10/29] net/qdma: add net PMD ops template Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 11/29] net/qdma: add configure close and reset ethdev ops Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 12/29] net/qdma: add routine for Rx queue initialization Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 13/29] net/qdma: add callback support for Rx queue count Aman Kumar
2022-07-06  7:52 ` Aman Kumar [this message]
2022-07-06  7:52 ` [RFC PATCH 15/29] net/qdma: add queue cleanup PMD ops Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 16/29] net/qdma: add start and stop apis Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 17/29] net/qdma: add Tx burst API Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 18/29] net/qdma: add Tx queue reclaim routine Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 19/29] net/qdma: add callback function for Tx desc status Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 20/29] net/qdma: add Rx burst API Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 21/29] net/qdma: add mailbox communication library Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 22/29] net/qdma: mbox API adaptation in Rx/Tx init Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 23/29] net/qdma: add support for VF interfaces Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 24/29] net/qdma: add Rx/Tx queue setup routine for VF devices Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 25/29] net/qdma: add basic PMD ops for VF Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 26/29] net/qdma: add datapath burst API " Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 27/29] net/qdma: add device specific APIs for export Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 28/29] net/qdma: add additional debug APIs Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 29/29] net/qdma: add stats PMD ops for PF and VF Aman Kumar
2022-07-07  6:57 ` [RFC PATCH 00/29] cover letter for net/qdma PMD Thomas Monjalon
2022-07-07 13:55   ` Aman Kumar
2022-07-07 14:15     ` Thomas Monjalon
2022-07-07 14:19       ` Hemant Agrawal
2022-07-18 18:15         ` aman.kumar
2022-07-19 12:12           ` Thomas Monjalon
2022-07-19 17:22             ` aman.kumar
2023-07-02 23:36               ` Stephen Hemminger
2023-07-03  9:15                 ` Ferruh Yigit
