From: Jiawen Wu <jiawenwu@trustnetic.com>
To: dev@dpdk.org
Cc: Jiawen Wu <jiawenwu@trustnetic.com>
Subject: [dpdk-dev] [PATCH v5 16/24] net/ngbe: add Tx queue setup and release
Date: Wed,  2 Jun 2021 17:41:00 +0800
Message-ID: <20210602094108.1575640-17-jiawenwu@trustnetic.com>
In-Reply-To: <20210602094108.1575640-1-jiawenwu@trustnetic.com>

Set up the device Tx queue and release the Tx queue.

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 drivers/net/ngbe/ngbe_ethdev.c |   2 +
 drivers/net/ngbe/ngbe_ethdev.h |   6 +
 drivers/net/ngbe/ngbe_rxtx.c   | 212 +++++++++++++++++++++++++++++++++
 drivers/net/ngbe/ngbe_rxtx.h   |  91 ++++++++++++++
 4 files changed, 311 insertions(+)
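
For reviewers who want to exercise the new path, here is a minimal usage sketch of how an application would drive these callbacks through the generic ethdev API. The helper name and the descriptor/threshold values are illustrative only and not part of this patch:

#include <rte_ethdev.h>

/* Illustrative helper: configure one Tx queue on an ngbe port.
 * The PMD requires nb_desc to be a multiple of the descriptor
 * alignment and within [NGBE_RING_DESC_MIN, NGBE_RING_DESC_MAX];
 * tx_free_thresh must divide nb_desc and stay below nb_desc - 3
 * (see ngbe_dev_tx_queue_setup() below).
 */
static int
setup_one_txq(uint16_t port_id, uint16_t queue_id, unsigned int socket_id)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Start from the driver's default Tx configuration. */
	txconf = dev_info.default_txconf;
	txconf.tx_free_thresh = 32;	/* same as the PMD default */

	/* 512 descriptors: aligned and a multiple of tx_free_thresh. */
	return rte_eth_tx_queue_setup(port_id, queue_id, 512,
				      socket_id, &txconf);
}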

diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
index 8eb41a7a2b..2f8ac48f33 100644
--- a/drivers/net/ngbe/ngbe_ethdev.c
+++ b/drivers/net/ngbe/ngbe_ethdev.c
@@ -663,6 +663,8 @@ static const struct eth_dev_ops ngbe_eth_dev_ops = {
 	.link_update                = ngbe_dev_link_update,
 	.rx_queue_setup             = ngbe_dev_rx_queue_setup,
 	.rx_queue_release           = ngbe_dev_rx_queue_release,
+	.tx_queue_setup             = ngbe_dev_tx_queue_setup,
+	.tx_queue_release           = ngbe_dev_tx_queue_release,
 };
 
 RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
diff --git a/drivers/net/ngbe/ngbe_ethdev.h b/drivers/net/ngbe/ngbe_ethdev.h
index c324ca7e0f..f52d813a47 100644
--- a/drivers/net/ngbe/ngbe_ethdev.h
+++ b/drivers/net/ngbe/ngbe_ethdev.h
@@ -57,11 +57,17 @@ struct ngbe_adapter {
 
 void ngbe_dev_rx_queue_release(void *rxq);
 
+void ngbe_dev_tx_queue_release(void *txq);
+
 int  ngbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 		uint16_t nb_rx_desc, unsigned int socket_id,
 		const struct rte_eth_rxconf *rx_conf,
 		struct rte_mempool *mb_pool);
 
+int  ngbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+		uint16_t nb_tx_desc, unsigned int socket_id,
+		const struct rte_eth_txconf *tx_conf);
+
 int
 ngbe_dev_link_update_share(struct rte_eth_dev *dev,
 		int wait_to_complete);
diff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c
index 9992983bef..2d8db3245f 100644
--- a/drivers/net/ngbe/ngbe_rxtx.c
+++ b/drivers/net/ngbe/ngbe_rxtx.c
@@ -15,6 +15,99 @@
 #include "ngbe_ethdev.h"
 #include "ngbe_rxtx.h"
 
+#ifndef DEFAULT_TX_FREE_THRESH
+#define DEFAULT_TX_FREE_THRESH 32
+#endif
+
+/*********************************************************************
+ *
+ *  Queue management functions
+ *
+ **********************************************************************/
+
+static void __rte_cold
+ngbe_tx_queue_release_mbufs(struct ngbe_tx_queue *txq)
+{
+	unsigned int i;
+
+	if (txq->sw_ring != NULL) {
+		for (i = 0; i < txq->nb_tx_desc; i++) {
+			if (txq->sw_ring[i].mbuf != NULL) {
+				rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+				txq->sw_ring[i].mbuf = NULL;
+			}
+		}
+	}
+}
+
+static void __rte_cold
+ngbe_tx_free_swring(struct ngbe_tx_queue *txq)
+{
+	if (txq != NULL &&
+	    txq->sw_ring != NULL)
+		rte_free(txq->sw_ring);
+}
+
+static void __rte_cold
+ngbe_tx_queue_release(struct ngbe_tx_queue *txq)
+{
+	if (txq != NULL && txq->ops != NULL) {
+		txq->ops->release_mbufs(txq);
+		txq->ops->free_swring(txq);
+		rte_free(txq);
+	}
+}
+
+void __rte_cold
+ngbe_dev_tx_queue_release(void *txq)
+{
+	ngbe_tx_queue_release(txq);
+}
+
+/* (Re)set dynamic ngbe_tx_queue fields to defaults */
+static void __rte_cold
+ngbe_reset_tx_queue(struct ngbe_tx_queue *txq)
+{
+	static const struct ngbe_tx_desc zeroed_desc = {0};
+	struct ngbe_tx_entry *txe = txq->sw_ring;
+	uint16_t prev, i;
+
+	/* Zero out HW ring memory */
+	for (i = 0; i < txq->nb_tx_desc; i++)
+		txq->tx_ring[i] = zeroed_desc;
+
+	/* Initialize SW ring entries */
+	prev = (uint16_t)(txq->nb_tx_desc - 1);
+	for (i = 0; i < txq->nb_tx_desc; i++) {
+		volatile struct ngbe_tx_desc *txd = &txq->tx_ring[i];
+
+		txd->dw3 = rte_cpu_to_le_32(NGBE_TXD_DD);
+		txe[i].mbuf = NULL;
+		txe[i].last_id = i;
+		txe[prev].next_id = i;
+		prev = i;
+	}
+
+	txq->tx_next_dd = (uint16_t)(txq->tx_free_thresh - 1);
+	txq->tx_tail = 0;
+
+	/*
+	 * Always allow 1 descriptor to be un-allocated to avoid
+	 * a H/W race condition
+	 */
+	txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
+	txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
+	txq->ctx_curr = 0;
+	memset((void *)&txq->ctx_cache, 0,
+		NGBE_CTX_NUM * sizeof(struct ngbe_ctx_info));
+}
+
+static const struct ngbe_txq_ops def_txq_ops = {
+	.release_mbufs = ngbe_tx_queue_release_mbufs,
+	.free_swring = ngbe_tx_free_swring,
+	.reset = ngbe_reset_tx_queue,
+};
+
 uint64_t
 ngbe_get_tx_port_offloads(struct rte_eth_dev *dev)
 {
@@ -42,6 +135,125 @@ ngbe_get_tx_port_offloads(struct rte_eth_dev *dev)
 	return tx_offload_capa;
 }
 
+int __rte_cold
+ngbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
+			 uint16_t queue_idx,
+			 uint16_t nb_desc,
+			 unsigned int socket_id,
+			 const struct rte_eth_txconf *tx_conf)
+{
+	const struct rte_memzone *tz;
+	struct ngbe_tx_queue *txq;
+	struct ngbe_hw     *hw;
+	uint16_t tx_free_thresh;
+	uint64_t offloads;
+
+	PMD_INIT_FUNC_TRACE();
+	hw = NGBE_DEV_HW(dev);
+
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
+	/*
+	 * Validate number of transmit descriptors.
+	 * It must not exceed hardware maximum, and must be a multiple
+	 * of NGBE_TXD_ALIGN.
+	 */
+	if (nb_desc % NGBE_TXD_ALIGN != 0 ||
+	    nb_desc > NGBE_RING_DESC_MAX ||
+	    nb_desc < NGBE_RING_DESC_MIN) {
+		return -EINVAL;
+	}
+
+	/*
+	 * The TX descriptor ring will be cleaned after txq->tx_free_thresh
+	 * descriptors are used or if the number of descriptors required
+	 * to transmit a packet is greater than the number of free TX
+	 * descriptors.
+	 * One descriptor in the TX ring is used as a sentinel to avoid a
+	 * H/W race condition, hence the maximum threshold constraints.
+	 * When set to zero use default values.
+	 */
+	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
+			tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
+	if (tx_free_thresh >= (nb_desc - 3)) {
+		PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the number of "
+			     "TX descriptors minus 3. (tx_free_thresh=%u "
+			     "port=%d queue=%d)",
+			     (unsigned int)tx_free_thresh,
+			     (int)dev->data->port_id, (int)queue_idx);
+		return -(EINVAL);
+	}
+
+	if ((nb_desc % tx_free_thresh) != 0) {
+		PMD_INIT_LOG(ERR, "tx_free_thresh must be a divisor of the "
+			     "number of TX descriptors. (tx_free_thresh=%u "
+			     "port=%d queue=%d)", (unsigned int)tx_free_thresh,
+			     (int)dev->data->port_id, (int)queue_idx);
+		return -(EINVAL);
+	}
+
+	/* Free memory prior to re-allocation if needed... */
+	if (dev->data->tx_queues[queue_idx] != NULL) {
+		ngbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
+		dev->data->tx_queues[queue_idx] = NULL;
+	}
+
+	/* First allocate the tx queue data structure */
+	txq = rte_zmalloc_socket("ethdev TX queue",
+				 sizeof(struct ngbe_tx_queue),
+				 RTE_CACHE_LINE_SIZE, socket_id);
+	if (txq == NULL)
+		return -ENOMEM;
+
+	/*
+	 * Allocate TX ring hardware descriptors. A memzone large enough to
+	 * handle the maximum ring size is allocated in order to allow for
+	 * resizing in later calls to the queue setup function.
+	 */
+	tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+			sizeof(struct ngbe_tx_desc) * NGBE_RING_DESC_MAX,
+			NGBE_ALIGN, socket_id);
+	if (tz == NULL) {
+		ngbe_tx_queue_release(txq);
+		return -ENOMEM;
+	}
+
+	txq->nb_tx_desc = nb_desc;
+	txq->tx_free_thresh = tx_free_thresh;
+	txq->pthresh = tx_conf->tx_thresh.pthresh;
+	txq->hthresh = tx_conf->tx_thresh.hthresh;
+	txq->wthresh = tx_conf->tx_thresh.wthresh;
+	txq->queue_id = queue_idx;
+	txq->reg_idx = queue_idx;
+	txq->port_id = dev->data->port_id;
+	txq->offloads = offloads;
+	txq->ops = &def_txq_ops;
+	txq->tx_deferred_start = tx_conf->tx_deferred_start;
+
+	txq->tdt_reg_addr = NGBE_REG_ADDR(hw, NGBE_TXWP(txq->reg_idx));
+	txq->tdc_reg_addr = NGBE_REG_ADDR(hw, NGBE_TXCFG(txq->reg_idx));
+
+	txq->tx_ring_phys_addr = TMZ_PADDR(tz);
+	txq->tx_ring = (struct ngbe_tx_desc *)TMZ_VADDR(tz);
+
+	/* Allocate software ring */
+	txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
+				sizeof(struct ngbe_tx_entry) * nb_desc,
+				RTE_CACHE_LINE_SIZE, socket_id);
+	if (txq->sw_ring == NULL) {
+		ngbe_tx_queue_release(txq);
+		return -ENOMEM;
+	}
+	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%" PRIx64,
+		     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
+
+	txq->ops->reset(txq);
+
+	dev->data->tx_queues[queue_idx] = txq;
+
+	return 0;
+}
+
 /**
  * ngbe_free_sc_cluster - free the not-yet-completed scattered cluster
  *
diff --git a/drivers/net/ngbe/ngbe_rxtx.h b/drivers/net/ngbe/ngbe_rxtx.h
index e1676a53b4..2db5cc3f2a 100644
--- a/drivers/net/ngbe/ngbe_rxtx.h
+++ b/drivers/net/ngbe/ngbe_rxtx.h
@@ -43,6 +43,31 @@ struct ngbe_rx_desc {
 	} qw1; /* also as r.hdr_addr */
 };
 
+/*****************************************************************************
+ * Transmit Descriptor
+ *****************************************************************************/
+/**
+ * Transmit Context Descriptor (NGBE_TXD_TYP=CTXT)
+ **/
+struct ngbe_tx_ctx_desc {
+	__le32 dw0; /* w.vlan_macip_lens  */
+	__le32 dw1; /* w.seqnum_seed      */
+	__le32 dw2; /* w.type_tucmd_mlhl  */
+	__le32 dw3; /* w.mss_l4len_idx    */
+};
+
+/* @ngbe_tx_ctx_desc.dw3 */
+#define NGBE_TXD_DD               MS(0, 0x1) /* descriptor done */
+
+/**
+ * Transmit Data Descriptor (NGBE_TXD_TYP=DATA)
+ **/
+struct ngbe_tx_desc {
+	__le64 qw0; /* r.buffer_addr ,  w.reserved    */
+	__le32 dw2; /* r.cmd_type_len,  w.nxtseq_seed */
+	__le32 dw3; /* r.olinfo_status, w.status      */
+};
+
 #define RTE_PMD_NGBE_RX_MAX_BURST 32
 
 #define RX_RING_SZ ((NGBE_RING_DESC_MAX + RTE_PMD_NGBE_RX_MAX_BURST) * \
@@ -62,6 +87,15 @@ struct ngbe_scattered_rx_entry {
 	struct rte_mbuf *fbuf; /**< First segment of the fragmented packet. */
 };
 
+/**
+ * Structure associated with each descriptor of the TX ring of a TX queue.
+ */
+struct ngbe_tx_entry {
+	struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
+	uint16_t next_id; /**< Index of next descriptor in ring. */
+	uint16_t last_id; /**< Index of last scattered descriptor. */
+};
+
 /**
  * Structure associated with each RX queue.
  */
@@ -98,6 +132,63 @@ struct ngbe_rx_queue {
 	struct rte_mbuf *rx_stage[RTE_PMD_NGBE_RX_MAX_BURST * 2];
 };
 
+/**
+ * NGBE CTX Constants
+ */
+enum ngbe_ctx_num {
+	NGBE_CTX_0    = 0, /**< CTX0 */
+	NGBE_CTX_1    = 1, /**< CTX1  */
+	NGBE_CTX_NUM  = 2, /**< CTX NUMBER  */
+};
+
+/**
+ * Structure to check if new context need be built
+ */
+struct ngbe_ctx_info {
+	uint64_t flags;           /**< ol_flags for context build. */
+};
+
+/**
+ * Structure associated with each TX queue.
+ */
+struct ngbe_tx_queue {
+	/** TX ring virtual address. */
+	volatile struct ngbe_tx_desc *tx_ring;
+	uint64_t            tx_ring_phys_addr; /**< TX ring DMA address. */
+	struct ngbe_tx_entry *sw_ring; /**< address of SW ring for scalar PMD.*/
+	volatile uint32_t   *tdt_reg_addr; /**< Address of TDT register. */
+	volatile uint32_t   *tdc_reg_addr; /**< Address of TDC register. */
+	uint16_t            nb_tx_desc;    /**< number of TX descriptors. */
+	uint16_t            tx_tail;       /**< current value of TDT reg. */
+	/**< Start freeing TX buffers if there are less free descriptors than
+	 *   this value.
+	 */
+	uint16_t            tx_free_thresh;
+	/** Index to last TX descriptor to have been cleaned. */
+	uint16_t            last_desc_cleaned;
+	/** Total number of TX descriptors ready to be allocated. */
+	uint16_t            nb_tx_free;
+	uint16_t            tx_next_dd;    /**< next desc to scan for DD bit */
+	uint16_t            queue_id;      /**< TX queue index. */
+	uint16_t            reg_idx;       /**< TX queue register index. */
+	uint16_t            port_id;       /**< Device port identifier. */
+	uint8_t             pthresh;       /**< Prefetch threshold register. */
+	uint8_t             hthresh;       /**< Host threshold register. */
+	uint8_t             wthresh;       /**< Write-back threshold reg. */
+	uint64_t            offloads; /* Tx offload flags of DEV_TX_OFFLOAD_* */
+	uint32_t            ctx_curr;      /**< Hardware context states. */
+	/** Hardware context0 history. */
+	struct ngbe_ctx_info ctx_cache[NGBE_CTX_NUM];
+	const struct ngbe_txq_ops *ops;       /**< txq ops */
+	uint8_t             tx_deferred_start; /**< not in global dev start. */
+};
+
+struct ngbe_txq_ops {
+	void (*release_mbufs)(struct ngbe_tx_queue *txq);
+	void (*free_swring)(struct ngbe_tx_queue *txq);
+	void (*reset)(struct ngbe_tx_queue *txq);
+};
+
 uint64_t ngbe_get_tx_port_offloads(struct rte_eth_dev *dev);
 uint64_t ngbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
 uint64_t ngbe_get_rx_port_offloads(struct rte_eth_dev *dev);
-- 
2.27.0
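
A quick worked check of the thresholds above, using illustrative numbers rather than anything mandated by the patch: with nb_desc = 512 and the default tx_free_thresh of 32, both checks pass (32 < 512 - 3 and 512 % 32 == 0), so the queue is accepted; tx_free_thresh = 100 would be rejected because 512 % 100 != 0, and any value of 509 or more would trip the sentinel-descriptor check (tx_free_thresh >= nb_desc - 3).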




