From: Junfeng Guo <junfeng.guo@intel.com>
To: qi.z.zhang@intel.com, jingjing.wu@intel.com,
	ferruh.yigit@amd.com, beilei.xing@intel.com
Cc: dev@dpdk.org, xiaoyun.li@intel.com, helin.zhang@intel.com,
	Junfeng Guo <junfeng.guo@intel.com>,
	Rushil Gupta <rushilg@google.com>,
	Jordan Kimbrough <jrkim@google.com>,
	Jeroen de Borst <jeroendb@google.com>
Subject: [RFC v2 2/9] net/gve: add Rx queue setup for DQO
Date: Mon, 30 Jan 2023 14:26:35 +0800
Message-ID: <20230130062642.3337239-3-junfeng.guo@intel.com>
In-Reply-To: <20230130062642.3337239-1-junfeng.guo@intel.com>

Add support for the rx_queue_setup op for the DQO queue format.
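
Once this op is wired up, applications reach it through the normal
ethdev API. A minimal, illustrative setup call (not part of this
patch; port_id and the pool parameters are placeholders, and gve
overrides nb_desc with the hardware value, as the code below shows):

	struct rte_mempool *mb_pool;
	uint16_t port_id = 0;	/* hypothetical port */
	int ret;

	mb_pool = rte_pktmbuf_pool_create("rx_pool", 4096, 256, 0,
					  RTE_MBUF_DEFAULT_BUF_SIZE,
					  rte_socket_id());
	/* dispatched to gve_rx_queue_setup_dqo on DQO-format devices */
	ret = rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(),
				     NULL, mb_pool);
	if (ret != 0)
		rte_exit(EXIT_FAILURE, "rx_queue_setup failed: %d\n", ret);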

Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
Signed-off-by: Rushil Gupta <rushilg@google.com>
Signed-off-by: Jordan Kimbrough <jrkim@google.com>
Signed-off-by: Jeroen de Borst <jeroendb@google.com>
---
 drivers/net/gve/gve_ethdev.c |   1 +
 drivers/net/gve/gve_ethdev.h |  14 ++++
 drivers/net/gve/gve_rx_dqo.c | 149 +++++++++++++++++++++++++++++++++++
 drivers/net/gve/meson.build  |   1 +
 4 files changed, 165 insertions(+)
 create mode 100644 drivers/net/gve/gve_rx_dqo.c

diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index d03f2fba92..26182b0422 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -366,6 +366,7 @@ gve_eth_dev_ops_override(struct eth_dev_ops *local_eth_dev_ops)
 {
 	/* override eth_dev ops for DQO */
 	local_eth_dev_ops->tx_queue_setup = gve_tx_queue_setup_dqo;
+	local_eth_dev_ops->rx_queue_setup = gve_rx_queue_setup_dqo;
 }
 
 static void
diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index 2dfcef6893..0adfc90554 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -145,6 +145,7 @@ struct gve_rx_queue {
 	uint16_t nb_rx_desc;
 	uint16_t expected_seqno; /* the next expected seqno */
 	uint16_t free_thresh;
+	uint16_t nb_rx_hold;
 	uint32_t next_avail;
 	uint32_t nb_avail;
 
@@ -163,6 +164,14 @@ struct gve_rx_queue {
 	uint16_t ntfy_id;
 	uint16_t rx_buf_len;
 
+	/* newly added for DQO */
+	volatile struct gve_rx_desc_dqo *rx_ring;
+	struct gve_rx_compl_desc_dqo *compl_ring;
+	const struct rte_memzone *compl_ring_mz;
+	uint64_t compl_ring_phys_addr;
+	uint8_t cur_gen_bit;
+	uint16_t bufq_tail;
+
 	/* Only valid for DQO_RDA queue format */
 	struct gve_rx_queue *bufq;
 
@@ -334,6 +343,11 @@ gve_tx_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
 
 /* Below functions are used for DQO */
 
+int
+gve_rx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
+		       uint16_t nb_desc, unsigned int socket_id,
+		       const struct rte_eth_rxconf *conf,
+		       struct rte_mempool *pool);
 int
 gve_tx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
 		       uint16_t nb_desc, unsigned int socket_id,
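
For context, the fields added above implement the DQO split-queue
model: software posts receive buffers on the descriptor ring
(rx_ring, tracked by bufq_tail), and hardware reports completed
packets on the separate completion ring (compl_ring). A rough sketch
of the posting side, not code from this patch; the buf_id/buf_addr
field names are assumptions about the base-code gve_rx_desc_dqo
layout:

	/* Sketch: post one mbuf to the DQO buffer queue. */
	static void
	post_rx_buf_sketch(struct gve_rx_queue *rxq, struct rte_mbuf *m)
	{
		volatile struct gve_rx_desc_dqo *desc =
			&rxq->rx_ring[rxq->bufq_tail];

		rxq->sw_ring[rxq->bufq_tail] = m;
		desc->buf_id = rte_cpu_to_le_16(rxq->bufq_tail);	/* assumed field */
		desc->buf_addr =					/* assumed field */
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(m));

		if (++rxq->bufq_tail == rxq->nb_rx_desc)	/* wrap around */
			rxq->bufq_tail = 0;
	}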
diff --git a/drivers/net/gve/gve_rx_dqo.c b/drivers/net/gve/gve_rx_dqo.c
new file mode 100644
index 0000000000..e8a6d575fc
--- /dev/null
+++ b/drivers/net/gve/gve_rx_dqo.c
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2022 Intel Corporation
+ */
+
+#include "gve_ethdev.h"
+#include "base/gve_adminq.h"
+
+static void
+gve_reset_rxq_dqo(struct gve_rx_queue *rxq)
+{
+	struct rte_mbuf **sw_ring;
+	uint32_t size, i;
+
+	if (rxq == NULL) {
+		PMD_DRV_LOG(ERR, "pointer to rxq is NULL");
+		return;
+	}
+
+	size = rxq->nb_rx_desc * sizeof(struct gve_rx_desc_dqo);
+	for (i = 0; i < size; i++)
+		((volatile char *)rxq->rx_ring)[i] = 0;
+
+	size = rxq->nb_rx_desc * sizeof(struct gve_rx_compl_desc_dqo);
+	for (i = 0; i < size; i++)
+		((volatile char *)rxq->compl_ring)[i] = 0;
+
+	sw_ring = rxq->sw_ring;
+	for (i = 0; i < rxq->nb_rx_desc; i++)
+		sw_ring[i] = NULL;
+
+	rxq->bufq_tail = 0;
+	rxq->next_avail = 0;
+	rxq->nb_rx_hold = rxq->nb_rx_desc - 1;
+
+	rxq->rx_tail = 0;
+	rxq->cur_gen_bit = 1;
+}
+
+int
+gve_rx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
+		       uint16_t nb_desc, unsigned int socket_id,
+		       const struct rte_eth_rxconf *conf,
+		       struct rte_mempool *pool)
+{
+	struct gve_priv *hw = dev->data->dev_private;
+	const struct rte_memzone *mz;
+	struct gve_rx_queue *rxq;
+	uint16_t free_thresh;
+	int err = 0;
+
+	if (nb_desc != hw->rx_desc_cnt) {
+		PMD_DRV_LOG(WARNING, "gve doesn't support nb_desc config, using hw nb_desc %u.",
+			    hw->rx_desc_cnt);
+	}
+	nb_desc = hw->rx_desc_cnt;
+
+	/* Allocate the RX queue data structure. */
+	rxq = rte_zmalloc_socket("gve rxq",
+				 sizeof(struct gve_rx_queue),
+				 RTE_CACHE_LINE_SIZE,
+				 socket_id);
+	if (rxq == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory for rx queue structure");
+		return -ENOMEM;
+	}
+
+	/* check free_thresh here */
+	free_thresh = conf->rx_free_thresh ?
+			conf->rx_free_thresh : GVE_DEFAULT_RX_FREE_THRESH;
+	if (free_thresh >= nb_desc) {
+		PMD_DRV_LOG(ERR, "rx_free_thresh (%u) must be less than nb_desc (%u).",
+			    free_thresh, nb_desc);
+		err = -EINVAL;
+		goto err_rxq;
+	}
+
+	rxq->nb_rx_desc = nb_desc;
+	rxq->free_thresh = free_thresh;
+	rxq->queue_id = queue_id;
+	rxq->port_id = dev->data->port_id;
+	rxq->ntfy_id = hw->num_ntfy_blks / 2 + queue_id;
+
+	rxq->mpool = pool;
+	rxq->hw = hw;
+	rxq->ntfy_addr = &hw->db_bar2[rte_be_to_cpu_32(hw->irq_dbs[rxq->ntfy_id].id)];
+
+	rxq->rx_buf_len =
+		rte_pktmbuf_data_room_size(rxq->mpool) - RTE_PKTMBUF_HEADROOM;
+
+	/* Allocate software ring */
+	rxq->sw_ring = rte_zmalloc_socket("gve rx sw ring",
+					  nb_desc * sizeof(struct rte_mbuf *),
+					  RTE_CACHE_LINE_SIZE, socket_id);
+	if (rxq->sw_ring == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory for SW RX ring");
+		err = -ENOMEM;
+		goto err_rxq;
+	}
+
+	/* Allocate RX buffer queue */
+	mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
+				      nb_desc * sizeof(struct gve_rx_desc_dqo),
+				      PAGE_SIZE, socket_id);
+	if (mz == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX buffer queue");
+		err = -ENOMEM;
+		goto err_rxq;
+	}
+	rxq->rx_ring = (struct gve_rx_desc_dqo *)mz->addr;
+	rxq->rx_ring_phys_addr = mz->iova;
+	rxq->mz = mz;
+
+	/* Allocate RX completion queue */
+	mz = rte_eth_dma_zone_reserve(dev, "compl_ring", queue_id,
+				      nb_desc * sizeof(struct gve_rx_compl_desc_dqo),
+				      PAGE_SIZE, socket_id);
+	if (mz == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX completion queue");
+		err = -ENOMEM;
+		goto err_rxq;
+	}
+	/* Zero all the descriptors in the ring */
+	memset(mz->addr, 0, nb_desc * sizeof(struct gve_rx_compl_desc_dqo));
+	rxq->compl_ring = (struct gve_rx_compl_desc_dqo *)mz->addr;
+	rxq->compl_ring_phys_addr = mz->iova;
+	rxq->compl_ring_mz = mz;
+
+	mz = rte_eth_dma_zone_reserve(dev, "rxq_res", queue_id,
+				      sizeof(struct gve_queue_resources),
+				      PAGE_SIZE, socket_id);
+	if (mz == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX resource");
+		err = -ENOMEM;
+		goto err_rxq;
+	}
+	rxq->qres = (struct gve_queue_resources *)mz->addr;
+	rxq->qres_mz = mz;
+
+	gve_reset_rxq_dqo(rxq);
+
+	dev->data->rx_queues[queue_id] = rxq;
+
+	return 0;
+
+err_rxq:
+	rte_free(rxq->sw_ring);
+	rte_free(rxq);
+	return err;
+}
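
Note that gve_reset_rxq_dqo() zeroes the completion ring and starts
cur_gen_bit at 1: the device toggles a generation bit in each
completion descriptor on every lap around the ring, so a zero-filled
entry can never look fresh on the first pass. A hedged sketch of the
readiness check that the later RX data-path patch in this series
would build on (the 'generation' field name is an assumption about
the base-code layout):

	/* Sketch: is the next completion descriptor fresh? */
	static inline bool
	rx_compl_ready_sketch(struct gve_rx_queue *rxq)
	{
		volatile struct gve_rx_compl_desc_dqo *desc =
			(volatile struct gve_rx_compl_desc_dqo *)
			&rxq->compl_ring[rxq->rx_tail];

		return desc->generation == rxq->cur_gen_bit;	/* assumed field */
	}

	/* On wrap (rx_tail reaching nb_rx_desc), the driver would flip
	 * the expected bit: rxq->cur_gen_bit ^= 1;
	 */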
diff --git a/drivers/net/gve/meson.build b/drivers/net/gve/meson.build
index 2ddb0cbf9e..c9d87903f9 100644
--- a/drivers/net/gve/meson.build
+++ b/drivers/net/gve/meson.build
@@ -11,6 +11,7 @@ sources = files(
         'base/gve_adminq.c',
         'gve_rx.c',
         'gve_tx.c',
+        'gve_rx_dqo.c',
         'gve_tx_dqo.c',
         'gve_ethdev.c',
 )
-- 
2.34.1

