* [PATCH 00/10] gve PMD enhancement
@ 2023-04-13  6:16 Junfeng Guo
  2023-04-13  6:16 ` [PATCH 01/10] net/gve: add Tx queue setup for DQO Junfeng Guo
                   ` (10 more replies)
  0 siblings, 11 replies; 14+ messages in thread
From: Junfeng Guo @ 2023-04-13  6:16 UTC (permalink / raw)
  To: qi.z.zhang, jingjing.wu, ferruh.yigit, beilei.xing; +Cc: dev, Junfeng Guo

This patch set includes two main enhancements for the gve PMD:
 - support basic data path with DQO queue format
 - support jumbo frame with GQI queue format

This patch set is based on this:
patchwork.dpdk.org/project/dpdk/list/?series=27653&state=*

Junfeng Guo (10):
  net/gve: add Tx queue setup for DQO
  net/gve: add Rx queue setup for DQO
  net/gve: support device start and close for DQO
  net/gve: support queue release and stop for DQO
  net/gve: support basic Tx data path for DQO
  net/gve: support basic Rx data path for DQO
  net/gve: support basic stats for DQO
  net/gve: enable Tx checksum offload for DQO
  net/gve: add maintainers for GVE
  net/gve: support jumbo frame for GQI

 MAINTAINERS                  |   3 +
 drivers/net/gve/gve_ethdev.c |  88 +++++++-
 drivers/net/gve/gve_ethdev.h |  69 +++++-
 drivers/net/gve/gve_rx.c     | 140 +++++++++----
 drivers/net/gve/gve_rx_dqo.c | 353 +++++++++++++++++++++++++++++++
 drivers/net/gve/gve_tx.c     |   3 +
 drivers/net/gve/gve_tx_dqo.c | 393 +++++++++++++++++++++++++++++++++++
 drivers/net/gve/meson.build  |   2 +
 8 files changed, 1005 insertions(+), 46 deletions(-)
 create mode 100644 drivers/net/gve/gve_rx_dqo.c
 create mode 100644 drivers/net/gve/gve_tx_dqo.c

-- 
2.34.1



* [PATCH 01/10] net/gve: add Tx queue setup for DQO
  2023-04-13  6:16 [PATCH 00/10] gve PMD enhancement Junfeng Guo
@ 2023-04-13  6:16 ` Junfeng Guo
  2023-04-13  6:16 ` [PATCH 02/10] net/gve: add Rx " Junfeng Guo
                   ` (9 subsequent siblings)
  10 siblings, 0 replies; 14+ messages in thread
From: Junfeng Guo @ 2023-04-13  6:16 UTC (permalink / raw)
  To: qi.z.zhang, jingjing.wu, ferruh.yigit, beilei.xing
  Cc: dev, Junfeng Guo, Rushil Gupta, Joshua Washington, Jeroen de Borst

Add support for the tx_queue_setup_dqo ops.

The DQO format pairs each Tx/Rx queue with a dedicated completion
queue in addition to its submission queue. Note that with the DQO
format all descriptors, doorbells and counters are written in
little-endian.

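For illustration, this is how a descriptor write looks once the
conversion is applied (a minimal sketch using the union added below;
txq, tx_id, sw_id and the mbuf are placeholders):

    /* DQO descriptor fields are little-endian, so host values must
     * be converted before they are written to the ring */
    volatile union gve_tx_desc_dqo *txd = &txq->tx_ring[tx_id];

    txd->pkt.buf_addr = rte_cpu_to_le_64(rte_mbuf_data_iova(mbuf));
    txd->pkt.compl_tag = rte_cpu_to_le_16(sw_id);
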
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
Signed-off-by: Rushil Gupta <rushilg@google.com>
Signed-off-by: Joshua Washington <joshwash@google.com>
Signed-off-by: Jeroen de Borst <jeroendb@google.com>
---
 drivers/net/gve/gve_ethdev.c |  21 +++-
 drivers/net/gve/gve_ethdev.h |  27 ++++-
 drivers/net/gve/gve_tx_dqo.c | 185 +++++++++++++++++++++++++++++++++++
 drivers/net/gve/meson.build  |   1 +
 4 files changed, 230 insertions(+), 4 deletions(-)
 create mode 100644 drivers/net/gve/gve_tx_dqo.c

diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index cf28a4a3b7..90345b193d 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -298,6 +298,7 @@ gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
 		.tx_free_thresh = GVE_DEFAULT_TX_FREE_THRESH,
+		.tx_rs_thresh = GVE_DEFAULT_TX_RS_THRESH,
 		.offloads = 0,
 	};
 
@@ -528,6 +529,21 @@ static const struct eth_dev_ops gve_eth_dev_ops = {
 	.xstats_get_names     = gve_xstats_get_names,
 };
 
+static const struct eth_dev_ops gve_eth_dev_ops_dqo = {
+	.dev_configure        = gve_dev_configure,
+	.dev_start            = gve_dev_start,
+	.dev_stop             = gve_dev_stop,
+	.dev_close            = gve_dev_close,
+	.dev_infos_get        = gve_dev_info_get,
+	.tx_queue_setup       = gve_tx_queue_setup_dqo,
+	.link_update          = gve_link_update,
+	.stats_get            = gve_dev_stats_get,
+	.stats_reset          = gve_dev_stats_reset,
+	.mtu_set              = gve_dev_mtu_set,
+	.xstats_get           = gve_xstats_get,
+	.xstats_get_names     = gve_xstats_get_names,
+};
+
 static void
 gve_free_counter_array(struct gve_priv *priv)
 {
@@ -770,8 +786,6 @@ gve_dev_init(struct rte_eth_dev *eth_dev)
 	rte_be32_t *db_bar;
 	int err;
 
-	eth_dev->dev_ops = &gve_eth_dev_ops;
-
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return 0;
 
@@ -807,10 +821,11 @@ gve_dev_init(struct rte_eth_dev *eth_dev)
 		return err;
 
 	if (gve_is_gqi(priv)) {
+		eth_dev->dev_ops = &gve_eth_dev_ops;
 		eth_dev->rx_pkt_burst = gve_rx_burst;
 		eth_dev->tx_pkt_burst = gve_tx_burst;
 	} else {
-		PMD_DRV_LOG(ERR, "DQO_RDA is not implemented and will be added in the future");
+		eth_dev->dev_ops = &gve_eth_dev_ops_dqo;
 	}
 
 	eth_dev->data->mac_addrs = &priv->dev_addr;
diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index 0b825113f6..6c6defa045 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -28,7 +28,8 @@
 #define PCI_MSIX_FLAGS_QSIZE	0x07FF	/* Table size */
 
 #define GVE_DEFAULT_RX_FREE_THRESH  512
-#define GVE_DEFAULT_TX_FREE_THRESH  256
+#define GVE_DEFAULT_TX_FREE_THRESH   32
+#define GVE_DEFAULT_TX_RS_THRESH     32
 #define GVE_TX_MAX_FREE_SZ          512
 
 #define GVE_MIN_BUF_SIZE	    1024
@@ -53,6 +54,13 @@ union gve_tx_desc {
 	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
 };
 
+/* Tx desc for DQO format */
+union gve_tx_desc_dqo {
+	struct gve_tx_pkt_desc_dqo pkt;
+	struct gve_tx_tso_context_desc_dqo tso_ctx;
+	struct gve_tx_general_context_desc_dqo general_ctx;
+};
+
 /* Offload features */
 union gve_tx_offload {
 	uint64_t data;
@@ -100,8 +108,10 @@ struct gve_tx_queue {
 	uint32_t tx_tail;
 	uint16_t nb_tx_desc;
 	uint16_t nb_free;
+	uint16_t nb_used;
 	uint32_t next_to_clean;
 	uint16_t free_thresh;
+	uint16_t rs_thresh;
 
 	/* Only valid for DQO_QPL queue format */
 	uint16_t sw_tail;
@@ -128,7 +138,15 @@ struct gve_tx_queue {
 	struct gve_queue_resources *qres;
 
 	/* newly added for DQO */
+	volatile union gve_tx_desc_dqo *tx_ring;
+	struct gve_tx_compl_desc *compl_ring;
+	const struct rte_memzone *compl_ring_mz;
 	uint64_t compl_ring_phys_addr;
+	uint32_t complq_tail;
+	uint16_t sw_size;
+	uint8_t cur_gen_bit;
+	uint32_t last_desc_cleaned;
+	void **txqs;
 
 	/* Only valid for DQO_RDA queue format */
 	struct gve_tx_queue *complq;
@@ -342,4 +360,11 @@ gve_rx_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
 uint16_t
 gve_tx_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
 
+/* Below functions are used for DQO */
+
+int
+gve_tx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
+		       uint16_t nb_desc, unsigned int socket_id,
+		       const struct rte_eth_txconf *conf);
+
 #endif /* _GVE_ETHDEV_H_ */
diff --git a/drivers/net/gve/gve_tx_dqo.c b/drivers/net/gve/gve_tx_dqo.c
new file mode 100644
index 0000000000..22d20ff16f
--- /dev/null
+++ b/drivers/net/gve/gve_tx_dqo.c
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022-2023 Google LLC
+ * Copyright (c) 2022-2023 Intel Corporation
+ */
+
+#include "gve_ethdev.h"
+#include "base/gve_adminq.h"
+
+static int
+check_tx_thresh_dqo(uint16_t nb_desc, uint16_t tx_rs_thresh,
+		    uint16_t tx_free_thresh)
+{
+	if (tx_rs_thresh >= (nb_desc - 2)) {
+		PMD_DRV_LOG(ERR, "tx_rs_thresh (%u) must be less than the "
+			    "number of TX descriptors (%u) minus 2",
+			    tx_rs_thresh, nb_desc);
+		return -EINVAL;
+	}
+	if (tx_free_thresh >= (nb_desc - 3)) {
+		PMD_DRV_LOG(ERR, "tx_free_thresh (%u) must be less than the "
+			    "number of TX descriptors (%u) minus 3.",
+			    tx_free_thresh, nb_desc);
+		return -EINVAL;
+	}
+	if (tx_rs_thresh > tx_free_thresh) {
+		PMD_DRV_LOG(ERR, "tx_rs_thresh (%u) must be less than or "
+			    "equal to tx_free_thresh (%u).",
+			    tx_rs_thresh, tx_free_thresh);
+		return -EINVAL;
+	}
+	if ((nb_desc % tx_rs_thresh) != 0) {
+		PMD_DRV_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the "
+			    "number of TX descriptors (%u).",
+			    tx_rs_thresh, nb_desc);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void
+gve_reset_txq_dqo(struct gve_tx_queue *txq)
+{
+	struct rte_mbuf **sw_ring;
+	uint32_t size, i;
+
+	if (txq == NULL) {
+		PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
+		return;
+	}
+
+	size = txq->nb_tx_desc * sizeof(union gve_tx_desc_dqo);
+	for (i = 0; i < size; i++)
+		((volatile char *)txq->tx_ring)[i] = 0;
+
+	size = txq->sw_size * sizeof(struct gve_tx_compl_desc);
+	for (i = 0; i < size; i++)
+		((volatile char *)txq->compl_ring)[i] = 0;
+
+	sw_ring = txq->sw_ring;
+	for (i = 0; i < txq->sw_size; i++)
+		sw_ring[i] = NULL;
+
+	txq->tx_tail = 0;
+	txq->nb_used = 0;
+
+	txq->last_desc_cleaned = 0;
+	txq->sw_tail = 0;
+	txq->nb_free = txq->nb_tx_desc - 1;
+
+	txq->complq_tail = 0;
+	txq->cur_gen_bit = 1;
+}
+
+int
+gve_tx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
+		       uint16_t nb_desc, unsigned int socket_id,
+		       const struct rte_eth_txconf *conf)
+{
+	struct gve_priv *hw = dev->data->dev_private;
+	const struct rte_memzone *mz;
+	struct gve_tx_queue *txq;
+	uint16_t free_thresh;
+	uint16_t rs_thresh;
+	uint16_t sw_size;
+	int err = 0;
+
+	if (nb_desc != hw->tx_desc_cnt) {
+		PMD_DRV_LOG(WARNING, "gve doesn't support nb_desc config, using hw nb_desc %u.",
+			    hw->tx_desc_cnt);
+	}
+	nb_desc = hw->tx_desc_cnt;
+
+	/* Allocate the TX queue data structure. */
+	txq = rte_zmalloc_socket("gve txq",
+				 sizeof(struct gve_tx_queue),
+				 RTE_CACHE_LINE_SIZE, socket_id);
+	if (txq == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory for tx queue structure");
+		return -ENOMEM;
+	}
+
+	/* need to check free_thresh here */
+	free_thresh = conf->tx_free_thresh ?
+			conf->tx_free_thresh : GVE_DEFAULT_TX_FREE_THRESH;
+	rs_thresh = conf->tx_rs_thresh ?
+			conf->tx_rs_thresh : GVE_DEFAULT_TX_RS_THRESH;
+	if (check_tx_thresh_dqo(nb_desc, rs_thresh, free_thresh)) {
+		err = -EINVAL;
+		goto free_txq;
+	}
+
+	txq->nb_tx_desc = nb_desc;
+	txq->free_thresh = free_thresh;
+	txq->rs_thresh = rs_thresh;
+	txq->queue_id = queue_id;
+	txq->port_id = dev->data->port_id;
+	txq->ntfy_id = queue_id;
+	txq->hw = hw;
+	txq->ntfy_addr = &hw->db_bar2[rte_be_to_cpu_32(hw->irq_dbs[txq->ntfy_id].id)];
+
+	/* Allocate software ring */
+	sw_size = nb_desc * DQO_TX_MULTIPLIER;
+	txq->sw_ring = rte_zmalloc_socket("gve tx sw ring",
+					  sw_size * sizeof(struct rte_mbuf *),
+					  RTE_CACHE_LINE_SIZE, socket_id);
+	if (txq->sw_ring == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory for SW TX ring");
+		err = -ENOMEM;
+		goto free_txq;
+	}
+	txq->sw_size = sw_size;
+
+	/* Allocate TX hardware ring descriptors. */
+	mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id,
+				      nb_desc * sizeof(union gve_tx_desc_dqo),
+				      PAGE_SIZE, socket_id);
+	if (mz == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX");
+		err = -ENOMEM;
+		goto free_txq_sw_ring;
+	}
+	txq->tx_ring = (union gve_tx_desc_dqo *)mz->addr;
+	txq->tx_ring_phys_addr = mz->iova;
+	txq->mz = mz;
+
+	/* Allocate TX completion ring descriptors. */
+	mz = rte_eth_dma_zone_reserve(dev, "tx_compl_ring", queue_id,
+				      sw_size * sizeof(struct gve_tx_compl_desc),
+				      PAGE_SIZE, socket_id);
+	if (mz == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX completion queue");
+		err = -ENOMEM;
+		goto free_txq_mz;
+	}
+	txq->compl_ring = (struct gve_tx_compl_desc *)mz->addr;
+	txq->compl_ring_phys_addr = mz->iova;
+	txq->compl_ring_mz = mz;
+	txq->txqs = dev->data->tx_queues;
+
+	mz = rte_eth_dma_zone_reserve(dev, "txq_res", queue_id,
+				      sizeof(struct gve_queue_resources),
+				      PAGE_SIZE, socket_id);
+	if (mz == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX resource");
+		err = -ENOMEM;
+		goto free_txq_cq_mz;
+	}
+	txq->qres = (struct gve_queue_resources *)mz->addr;
+	txq->qres_mz = mz;
+
+	gve_reset_txq_dqo(txq);
+
+	dev->data->tx_queues[queue_id] = txq;
+
+	return 0;
+
+free_txq_cq_mz:
+	rte_memzone_free(txq->compl_ring_mz);
+free_txq_mz:
+	rte_memzone_free(txq->mz);
+free_txq_sw_ring:
+	rte_free(txq->sw_ring);
+free_txq:
+	rte_free(txq);
+	return err;
+}
diff --git a/drivers/net/gve/meson.build b/drivers/net/gve/meson.build
index af0010c01c..2ddb0cbf9e 100644
--- a/drivers/net/gve/meson.build
+++ b/drivers/net/gve/meson.build
@@ -11,6 +11,7 @@ sources = files(
         'base/gve_adminq.c',
         'gve_rx.c',
         'gve_tx.c',
+        'gve_tx_dqo.c',
         'gve_ethdev.c',
 )
 includes += include_directories('base')
-- 
2.34.1



* [PATCH 02/10] net/gve: add Rx queue setup for DQO
  2023-04-13  6:16 [PATCH 00/10] gve PMD enhancement Junfeng Guo
  2023-04-13  6:16 ` [PATCH 01/10] net/gve: add Tx queue setup for DQO Junfeng Guo
@ 2023-04-13  6:16 ` Junfeng Guo
  2023-04-13  6:16 ` [PATCH 03/10] net/gve: support device start and close " Junfeng Guo
                   ` (8 subsequent siblings)
  10 siblings, 0 replies; 14+ messages in thread
From: Junfeng Guo @ 2023-04-13  6:16 UTC (permalink / raw)
  To: qi.z.zhang, jingjing.wu, ferruh.yigit, beilei.xing
  Cc: dev, Junfeng Guo, Rushil Gupta, Joshua Washington, Jeroen de Borst

Add support for the rx_queue_setup_dqo ops.

With the DQO format, each Rx queue consists of a buffer queue that
software refills and a completion queue that hardware writes.

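Nothing DQO-specific is exposed to applications; the queue is created
through the regular ethdev call, and the PMD overrides nb_desc with the
hardware descriptor count. A usage sketch (the port id, descriptor
count and mempool are placeholders):

    struct rte_eth_rxconf rxconf = {
        .rx_free_thresh = 512,  /* 0 selects the GVE default */
    };

    ret = rte_eth_rx_queue_setup(port_id, 0 /* queue id */, 1024,
                                 rte_socket_id(), &rxconf, mbuf_pool);
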
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
Signed-off-by: Rushil Gupta <rushilg@google.com>
Signed-off-by: Joshua Washington <joshwash@google.com>
Signed-off-by: Jeroen de Borst <jeroendb@google.com>
---
 drivers/net/gve/gve_ethdev.c |   1 +
 drivers/net/gve/gve_ethdev.h |  11 +++
 drivers/net/gve/gve_rx_dqo.c | 156 +++++++++++++++++++++++++++++++++++
 drivers/net/gve/meson.build  |   1 +
 4 files changed, 169 insertions(+)
 create mode 100644 drivers/net/gve/gve_rx_dqo.c

diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index 90345b193d..d387d7154b 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -535,6 +535,7 @@ static const struct eth_dev_ops gve_eth_dev_ops_dqo = {
 	.dev_stop             = gve_dev_stop,
 	.dev_close            = gve_dev_close,
 	.dev_infos_get        = gve_dev_info_get,
+	.rx_queue_setup       = gve_rx_queue_setup_dqo,
 	.tx_queue_setup       = gve_tx_queue_setup_dqo,
 	.link_update          = gve_link_update,
 	.stats_get            = gve_dev_stats_get,
diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index 6c6defa045..cb8cd62886 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -167,6 +167,7 @@ struct gve_rx_queue {
 	uint16_t nb_rx_desc;
 	uint16_t expected_seqno; /* the next expected seqno */
 	uint16_t free_thresh;
+	uint16_t nb_rx_hold;
 	uint32_t next_avail;
 	uint32_t nb_avail;
 
@@ -189,7 +190,12 @@ struct gve_rx_queue {
 	uint16_t rx_buf_len;
 
 	/* newly added for DQO */
+	volatile struct gve_rx_desc_dqo *rx_ring;
+	struct gve_rx_compl_desc_dqo *compl_ring;
+	const struct rte_memzone *compl_ring_mz;
 	uint64_t compl_ring_phys_addr;
+	uint8_t cur_gen_bit;
+	uint16_t bufq_tail;
 
 	/* Only valid for DQO_RDA queue format */
 	struct gve_rx_queue *bufq;
@@ -362,6 +368,11 @@ gve_tx_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
 
 /* Below functions are used for DQO */
 
+int
+gve_rx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
+		       uint16_t nb_desc, unsigned int socket_id,
+		       const struct rte_eth_rxconf *conf,
+		       struct rte_mempool *pool);
 int
 gve_tx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
 		       uint16_t nb_desc, unsigned int socket_id,
diff --git a/drivers/net/gve/gve_rx_dqo.c b/drivers/net/gve/gve_rx_dqo.c
new file mode 100644
index 0000000000..c419c4dd2f
--- /dev/null
+++ b/drivers/net/gve/gve_rx_dqo.c
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022-2023 Google LLC
+ * Copyright (c) 2022-2023 Intel Corporation
+ */
+
+
+#include "gve_ethdev.h"
+#include "base/gve_adminq.h"
+
+static void
+gve_reset_rxq_dqo(struct gve_rx_queue *rxq)
+{
+	struct rte_mbuf **sw_ring;
+	uint32_t size, i;
+
+	if (rxq == NULL) {
+		PMD_DRV_LOG(ERR, "pointer to rxq is NULL");
+		return;
+	}
+
+	size = rxq->nb_rx_desc * sizeof(struct gve_rx_desc_dqo);
+	for (i = 0; i < size; i++)
+		((volatile char *)rxq->rx_ring)[i] = 0;
+
+	size = rxq->nb_rx_desc * sizeof(struct gve_rx_compl_desc_dqo);
+	for (i = 0; i < size; i++)
+		((volatile char *)rxq->compl_ring)[i] = 0;
+
+	sw_ring = rxq->sw_ring;
+	for (i = 0; i < rxq->nb_rx_desc; i++)
+		sw_ring[i] = NULL;
+
+	rxq->bufq_tail = 0;
+	rxq->next_avail = 0;
+	rxq->nb_rx_hold = rxq->nb_rx_desc - 1;
+
+	rxq->rx_tail = 0;
+	rxq->cur_gen_bit = 1;
+}
+
+int
+gve_rx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
+		       uint16_t nb_desc, unsigned int socket_id,
+		       const struct rte_eth_rxconf *conf,
+		       struct rte_mempool *pool)
+{
+	struct gve_priv *hw = dev->data->dev_private;
+	const struct rte_memzone *mz;
+	struct gve_rx_queue *rxq;
+	uint16_t free_thresh;
+	int err = 0;
+
+	if (nb_desc != hw->rx_desc_cnt) {
+		PMD_DRV_LOG(WARNING, "gve doesn't support nb_desc config, using hw nb_desc %u.",
+			    hw->rx_desc_cnt);
+	}
+	nb_desc = hw->rx_desc_cnt;
+
+	/* Allocate the RX queue data structure. */
+	rxq = rte_zmalloc_socket("gve rxq",
+				 sizeof(struct gve_rx_queue),
+				 RTE_CACHE_LINE_SIZE,
+				 socket_id);
+	if (rxq == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory for rx queue structure");
+		return -ENOMEM;
+	}
+
+	/* check free_thresh here */
+	free_thresh = conf->rx_free_thresh ?
+			conf->rx_free_thresh : GVE_DEFAULT_RX_FREE_THRESH;
+	if (free_thresh >= nb_desc) {
+		PMD_DRV_LOG(ERR, "rx_free_thresh (%u) must be less than nb_desc (%u).",
+			    free_thresh, nb_desc);
+		err = -EINVAL;
+		goto free_rxq;
+	}
+
+	rxq->nb_rx_desc = nb_desc;
+	rxq->free_thresh = free_thresh;
+	rxq->queue_id = queue_id;
+	rxq->port_id = dev->data->port_id;
+	rxq->ntfy_id = hw->num_ntfy_blks / 2 + queue_id;
+
+	rxq->mpool = pool;
+	rxq->hw = hw;
+	rxq->ntfy_addr = &hw->db_bar2[rte_be_to_cpu_32(hw->irq_dbs[rxq->ntfy_id].id)];
+
+	rxq->rx_buf_len =
+		rte_pktmbuf_data_room_size(rxq->mpool) - RTE_PKTMBUF_HEADROOM;
+
+	/* Allocate software ring */
+	rxq->sw_ring = rte_zmalloc_socket("gve rx sw ring",
+					  nb_desc * sizeof(struct rte_mbuf *),
+					  RTE_CACHE_LINE_SIZE, socket_id);
+	if (rxq->sw_ring == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory for SW RX ring");
+		err = -ENOMEM;
+		goto free_rxq;
+	}
+
+	/* Allocate RX buffer queue */
+	mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
+				      nb_desc * sizeof(struct gve_rx_desc_dqo),
+				      PAGE_SIZE, socket_id);
+	if (mz == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX buffer queue");
+		err = -ENOMEM;
+		goto free_rxq_sw_ring;
+	}
+	rxq->rx_ring = (struct gve_rx_desc_dqo *)mz->addr;
+	rxq->rx_ring_phys_addr = mz->iova;
+	rxq->mz = mz;
+
+	/* Allocate RX completion queue */
+	mz = rte_eth_dma_zone_reserve(dev, "compl_ring", queue_id,
+				      nb_desc * sizeof(struct gve_rx_compl_desc_dqo),
+				      PAGE_SIZE, socket_id);
+	if (mz == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX completion queue");
+		err = -ENOMEM;
+		goto free_rxq_mz;
+	}
+	/* Zero all the descriptors in the ring */
+	memset(mz->addr, 0, nb_desc * sizeof(struct gve_rx_compl_desc_dqo));
+	rxq->compl_ring = (struct gve_rx_compl_desc_dqo *)mz->addr;
+	rxq->compl_ring_phys_addr = mz->iova;
+	rxq->compl_ring_mz = mz;
+
+	mz = rte_eth_dma_zone_reserve(dev, "rxq_res", queue_id,
+				      sizeof(struct gve_queue_resources),
+				      PAGE_SIZE, socket_id);
+	if (mz == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX resource");
+		err = -ENOMEM;
+		goto free_rxq_cq_mz;
+	}
+	rxq->qres = (struct gve_queue_resources *)mz->addr;
+	rxq->qres_mz = mz;
+
+	gve_reset_rxq_dqo(rxq);
+
+	dev->data->rx_queues[queue_id] = rxq;
+
+	return 0;
+
+free_rxq_cq_mz:
+	rte_memzone_free(rxq->compl_ring_mz);
+free_rxq_mz:
+	rte_memzone_free(rxq->mz);
+free_rxq_sw_ring:
+	rte_free(rxq->sw_ring);
+free_rxq:
+	rte_free(rxq);
+	return err;
+}
diff --git a/drivers/net/gve/meson.build b/drivers/net/gve/meson.build
index 2ddb0cbf9e..c9d87903f9 100644
--- a/drivers/net/gve/meson.build
+++ b/drivers/net/gve/meson.build
@@ -11,6 +11,7 @@ sources = files(
         'base/gve_adminq.c',
         'gve_rx.c',
         'gve_tx.c',
+        'gve_rx_dqo.c',
         'gve_tx_dqo.c',
         'gve_ethdev.c',
 )
-- 
2.34.1



* [PATCH 03/10] net/gve: support device start and close for DQO
  2023-04-13  6:16 [PATCH 00/10] gve PMD enhancement Junfeng Guo
  2023-04-13  6:16 ` [PATCH 01/10] net/gve: add Tx queue setup for DQO Junfeng Guo
  2023-04-13  6:16 ` [PATCH 02/10] net/gve: add Rx " Junfeng Guo
@ 2023-04-13  6:16 ` Junfeng Guo
  2023-04-13  6:16 ` [PATCH 04/10] net/gve: support queue release and stop " Junfeng Guo
                   ` (7 subsequent siblings)
  10 siblings, 0 replies; 14+ messages in thread
From: Junfeng Guo @ 2023-04-13  6:16 UTC (permalink / raw)
  To: qi.z.zhang, jingjing.wu, ferruh.yigit, beilei.xing
  Cc: dev, Junfeng Guo, Rushil Gupta, Joshua Washington, Jeroen de Borst

Add device start and close support for DQO.

On start, each DQO Rx buffer queue is primed with mbufs by the new
gve_refill_dqo() helper before the queues are brought up.

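This slots into the usual ethdev bring-up and teardown sequence; an
application-side sketch (port_conf and the queue counts are
placeholders, error handling elided):

    ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
    /* per-queue rx/tx setup goes here */
    ret = rte_eth_dev_start(port_id);  /* primes the DQO Rx rings */
    /* ... */
    ret = rte_eth_dev_close(port_id);  /* stops and releases queues */
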
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
Signed-off-by: Rushil Gupta <rushilg@google.com>
Signed-off-by: Joshua Washington <joshwash@google.com>
Signed-off-by: Jeroen de Borst <jeroendb@google.com>
---
 drivers/net/gve/gve_ethdev.c | 43 +++++++++++++++++++++++++++++++++++-
 1 file changed, 42 insertions(+), 1 deletion(-)

diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index d387d7154b..fc60db63c5 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -78,6 +78,9 @@ gve_free_qpls(struct gve_priv *priv)
 	uint16_t nb_rxqs = priv->max_nb_rxq;
 	uint32_t i;
 
+	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
+		return;
+
 	for (i = 0; i < nb_txqs + nb_rxqs; i++) {
 		if (priv->qpl[i].mz != NULL)
 			rte_memzone_free(priv->qpl[i].mz);
@@ -138,6 +141,41 @@ gve_refill_pages(struct gve_rx_queue *rxq)
 	return 0;
 }
 
+static int
+gve_refill_dqo(struct gve_rx_queue *rxq)
+{
+	struct rte_mbuf *nmb;
+	uint16_t i;
+	int diag;
+
+	diag = rte_pktmbuf_alloc_bulk(rxq->mpool, &rxq->sw_ring[0], rxq->nb_rx_desc);
+	if (diag < 0) {
+		for (i = 0; i < rxq->nb_rx_desc - 1; i++) {
+			nmb = rte_pktmbuf_alloc(rxq->mpool);
+			if (!nmb)
+				break;
+			rxq->sw_ring[i] = nmb;
+		}
+		if (i < rxq->nb_rx_desc - 1)
+			return -ENOMEM;
+	}
+
+	for (i = 0; i < rxq->nb_rx_desc; i++) {
+		if (i == rxq->nb_rx_desc - 1)
+			break;
+		nmb = rxq->sw_ring[i];
+		rxq->rx_ring[i].buf_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
+		rxq->rx_ring[i].buf_id = rte_cpu_to_le_16(i);
+	}
+
+	rxq->nb_rx_hold = 0;
+	rxq->bufq_tail = rxq->nb_rx_desc - 1;
+
+	rte_write32(rxq->bufq_tail, rxq->qrx_tail);
+
+	return 0;
+}
+
 static int
 gve_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
 {
@@ -206,7 +244,10 @@ gve_dev_start(struct rte_eth_dev *dev)
 
 		rte_write32(rte_cpu_to_be_32(GVE_IRQ_MASK), rxq->ntfy_addr);
 
-		err = gve_refill_pages(rxq);
+		if (gve_is_gqi(priv))
+			err = gve_refill_pages(rxq);
+		else
+			err = gve_refill_dqo(rxq);
 		if (err) {
 			PMD_DRV_LOG(ERR, "Failed to refill for RX");
 			goto err_rx;
-- 
2.34.1



* [PATCH 04/10] net/gve: support queue release and stop for DQO
  2023-04-13  6:16 [PATCH 00/10] gve PMD enhancement Junfeng Guo
                   ` (2 preceding siblings ...)
  2023-04-13  6:16 ` [PATCH 03/10] net/gve: support device start and close " Junfeng Guo
@ 2023-04-13  6:16 ` Junfeng Guo
  2023-04-13  6:16 ` [PATCH 05/10] net/gve: support basic Tx data path " Junfeng Guo
                   ` (6 subsequent siblings)
  10 siblings, 0 replies; 14+ messages in thread
From: Junfeng Guo @ 2023-04-13  6:16 UTC (permalink / raw)
  To: qi.z.zhang, jingjing.wu, ferruh.yigit, beilei.xing
  Cc: dev, Junfeng Guo, Rushil Gupta, Joshua Washington, Jeroen de Borst

Add support for queue operations (stop vs. release sketch below):
 - gve_tx_queue_release_dqo
 - gve_rx_queue_release_dqo
 - gve_stop_tx_queues_dqo
 - gve_stop_rx_queues_dqo

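Stop and release differ in lifetime: stop destroys the hardware queues,
frees in-flight mbufs and resets the ring state so the queue can be
started again, while release additionally frees the software ring, the
memzones and the queue structure itself. Roughly (a sketch of the
teardown order implemented below):

    gve_stop_tx_queues_dqo(dev);             /* restartable state */
    gve_stop_rx_queues_dqo(dev);

    gve_tx_queue_release_dqo(dev, queue_id); /* full teardown */
    gve_rx_queue_release_dqo(dev, queue_id);
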
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
Signed-off-by: Rushil Gupta <rushilg@google.com>
Signed-off-by: Joshua Washington <joshwash@google.com>
Signed-off-by: Jeroen de Borst <jeroendb@google.com>
---
 drivers/net/gve/gve_ethdev.c | 18 +++++++++---
 drivers/net/gve/gve_ethdev.h | 12 ++++++++
 drivers/net/gve/gve_rx.c     |  3 ++
 drivers/net/gve/gve_rx_dqo.c | 57 ++++++++++++++++++++++++++++++++++++
 drivers/net/gve/gve_tx.c     |  3 ++
 drivers/net/gve/gve_tx_dqo.c | 55 ++++++++++++++++++++++++++++++++++
 6 files changed, 144 insertions(+), 4 deletions(-)

diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index fc60db63c5..340315a1a3 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -292,11 +292,19 @@ gve_dev_close(struct rte_eth_dev *dev)
 			PMD_DRV_LOG(ERR, "Failed to stop dev.");
 	}
 
-	for (i = 0; i < dev->data->nb_tx_queues; i++)
-		gve_tx_queue_release(dev, i);
+	if (gve_is_gqi(priv)) {
+		for (i = 0; i < dev->data->nb_tx_queues; i++)
+			gve_tx_queue_release(dev, i);
+
+		for (i = 0; i < dev->data->nb_rx_queues; i++)
+			gve_rx_queue_release(dev, i);
+	} else {
+		for (i = 0; i < dev->data->nb_tx_queues; i++)
+			gve_tx_queue_release_dqo(dev, i);
 
-	for (i = 0; i < dev->data->nb_rx_queues; i++)
-		gve_rx_queue_release(dev, i);
+		for (i = 0; i < dev->data->nb_rx_queues; i++)
+			gve_rx_queue_release_dqo(dev, i);
+	}
 
 	gve_free_qpls(priv);
 	rte_free(priv->adminq);
@@ -578,6 +586,8 @@ static const struct eth_dev_ops gve_eth_dev_ops_dqo = {
 	.dev_infos_get        = gve_dev_info_get,
 	.rx_queue_setup       = gve_rx_queue_setup_dqo,
 	.tx_queue_setup       = gve_tx_queue_setup_dqo,
+	.rx_queue_release     = gve_rx_queue_release_dqo,
+	.tx_queue_release     = gve_tx_queue_release_dqo,
 	.link_update          = gve_link_update,
 	.stats_get            = gve_dev_stats_get,
 	.stats_reset          = gve_dev_stats_reset,
diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index cb8cd62886..c8e1dd1435 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -378,4 +378,16 @@ gve_tx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
 		       uint16_t nb_desc, unsigned int socket_id,
 		       const struct rte_eth_txconf *conf);
 
+void
+gve_tx_queue_release_dqo(struct rte_eth_dev *dev, uint16_t qid);
+
+void
+gve_rx_queue_release_dqo(struct rte_eth_dev *dev, uint16_t qid);
+
+void
+gve_stop_tx_queues_dqo(struct rte_eth_dev *dev);
+
+void
+gve_stop_rx_queues_dqo(struct rte_eth_dev *dev);
+
 #endif /* _GVE_ETHDEV_H_ */
diff --git a/drivers/net/gve/gve_rx.c b/drivers/net/gve/gve_rx.c
index 8d8f94efff..3dd3f578f9 100644
--- a/drivers/net/gve/gve_rx.c
+++ b/drivers/net/gve/gve_rx.c
@@ -359,6 +359,9 @@ gve_stop_rx_queues(struct rte_eth_dev *dev)
 	uint16_t i;
 	int err;
 
+	if (!gve_is_gqi(hw))
+		return gve_stop_rx_queues_dqo(dev);
+
 	err = gve_adminq_destroy_rx_queues(hw, dev->data->nb_rx_queues);
 	if (err != 0)
 		PMD_DRV_LOG(WARNING, "failed to destroy rxqs");
diff --git a/drivers/net/gve/gve_rx_dqo.c b/drivers/net/gve/gve_rx_dqo.c
index c419c4dd2f..7f58844839 100644
--- a/drivers/net/gve/gve_rx_dqo.c
+++ b/drivers/net/gve/gve_rx_dqo.c
@@ -7,6 +7,38 @@
 #include "gve_ethdev.h"
 #include "base/gve_adminq.h"
 
+static inline void
+gve_release_rxq_mbufs_dqo(struct gve_rx_queue *rxq)
+{
+	uint16_t i;
+
+	for (i = 0; i < rxq->nb_rx_desc; i++) {
+		if (rxq->sw_ring[i]) {
+			rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+			rxq->sw_ring[i] = NULL;
+		}
+	}
+
+	rxq->nb_avail = rxq->nb_rx_desc;
+}
+
+void
+gve_rx_queue_release_dqo(struct rte_eth_dev *dev, uint16_t qid)
+{
+	struct gve_rx_queue *q = dev->data->rx_queues[qid];
+
+	if (q == NULL)
+		return;
+
+	gve_release_rxq_mbufs_dqo(q);
+	rte_free(q->sw_ring);
+	rte_memzone_free(q->compl_ring_mz);
+	rte_memzone_free(q->mz);
+	rte_memzone_free(q->qres_mz);
+	q->qres = NULL;
+	rte_free(q);
+}
+
 static void
 gve_reset_rxq_dqo(struct gve_rx_queue *rxq)
 {
@@ -56,6 +88,12 @@ gve_rx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
 	}
 	nb_desc = hw->rx_desc_cnt;
 
+	/* Free memory if needed */
+	if (dev->data->rx_queues[queue_id]) {
+		gve_rx_queue_release_dqo(dev, queue_id);
+		dev->data->rx_queues[queue_id] = NULL;
+	}
+
 	/* Allocate the RX queue data structure. */
 	rxq = rte_zmalloc_socket("gve rxq",
 				 sizeof(struct gve_rx_queue),
@@ -154,3 +192,22 @@ gve_rx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
 	rte_free(rxq);
 	return err;
 }
+
+void
+gve_stop_rx_queues_dqo(struct rte_eth_dev *dev)
+{
+	struct gve_priv *hw = dev->data->dev_private;
+	struct gve_rx_queue *rxq;
+	uint16_t i;
+	int err;
+
+	err = gve_adminq_destroy_rx_queues(hw, dev->data->nb_rx_queues);
+	if (err != 0)
+		PMD_DRV_LOG(WARNING, "failed to destroy rxqs");
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = dev->data->rx_queues[i];
+		gve_release_rxq_mbufs_dqo(rxq);
+		gve_reset_rxq_dqo(rxq);
+	}
+}
diff --git a/drivers/net/gve/gve_tx.c b/drivers/net/gve/gve_tx.c
index fee3b939c7..13dc807623 100644
--- a/drivers/net/gve/gve_tx.c
+++ b/drivers/net/gve/gve_tx.c
@@ -672,6 +672,9 @@ gve_stop_tx_queues(struct rte_eth_dev *dev)
 	uint16_t i;
 	int err;
 
+	if (!gve_is_gqi(hw))
+		return gve_stop_tx_queues_dqo(dev);
+
 	err = gve_adminq_destroy_tx_queues(hw, dev->data->nb_tx_queues);
 	if (err != 0)
 		PMD_DRV_LOG(WARNING, "failed to destroy txqs");
diff --git a/drivers/net/gve/gve_tx_dqo.c b/drivers/net/gve/gve_tx_dqo.c
index 22d20ff16f..ea6d5ff85e 100644
--- a/drivers/net/gve/gve_tx_dqo.c
+++ b/drivers/net/gve/gve_tx_dqo.c
@@ -6,6 +6,36 @@
 #include "gve_ethdev.h"
 #include "base/gve_adminq.h"
 
+static inline void
+gve_release_txq_mbufs_dqo(struct gve_tx_queue *txq)
+{
+	uint16_t i;
+
+	for (i = 0; i < txq->sw_size; i++) {
+		if (txq->sw_ring[i]) {
+			rte_pktmbuf_free_seg(txq->sw_ring[i]);
+			txq->sw_ring[i] = NULL;
+		}
+	}
+}
+
+void
+gve_tx_queue_release_dqo(struct rte_eth_dev *dev, uint16_t qid)
+{
+	struct gve_tx_queue *q = dev->data->tx_queues[qid];
+
+	if (q == NULL)
+		return;
+
+	gve_release_txq_mbufs_dqo(q);
+	rte_free(q->sw_ring);
+	rte_memzone_free(q->mz);
+	rte_memzone_free(q->compl_ring_mz);
+	rte_memzone_free(q->qres_mz);
+	q->qres = NULL;
+	rte_free(q);
+}
+
 static int
 check_tx_thresh_dqo(uint16_t nb_desc, uint16_t tx_rs_thresh,
 		    uint16_t tx_free_thresh)
@@ -91,6 +121,12 @@ gve_tx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
 	}
 	nb_desc = hw->tx_desc_cnt;
 
+	/* Free memory if needed. */
+	if (dev->data->tx_queues[queue_id]) {
+		gve_tx_queue_release_dqo(dev, queue_id);
+		dev->data->tx_queues[queue_id] = NULL;
+	}
+
 	/* Allocate the TX queue data structure. */
 	txq = rte_zmalloc_socket("gve txq",
 				 sizeof(struct gve_tx_queue),
@@ -183,3 +219,22 @@ gve_tx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
 	rte_free(txq);
 	return err;
 }
+
+void
+gve_stop_tx_queues_dqo(struct rte_eth_dev *dev)
+{
+	struct gve_priv *hw = dev->data->dev_private;
+	struct gve_tx_queue *txq;
+	uint16_t i;
+	int err;
+
+	err = gve_adminq_destroy_tx_queues(hw, dev->data->nb_tx_queues);
+	if (err != 0)
+		PMD_DRV_LOG(WARNING, "failed to destroy txqs");
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		gve_release_txq_mbufs_dqo(txq);
+		gve_reset_txq_dqo(txq);
+	}
+}
-- 
2.34.1



* [PATCH 05/10] net/gve: support basic Tx data path for DQO
  2023-04-13  6:16 [PATCH 00/10] gve PMD enhancement Junfeng Guo
                   ` (3 preceding siblings ...)
  2023-04-13  6:16 ` [PATCH 04/10] net/gve: support queue release and stop " Junfeng Guo
@ 2023-04-13  6:16 ` Junfeng Guo
  2023-04-13  6:16 ` [PATCH 06/10] net/gve: support basic Rx " Junfeng Guo
                   ` (5 subsequent siblings)
  10 siblings, 0 replies; 14+ messages in thread
From: Junfeng Guo @ 2023-04-13  6:16 UTC (permalink / raw)
  To: qi.z.zhang, jingjing.wu, ferruh.yigit, beilei.xing
  Cc: dev, Junfeng Guo, Rushil Gupta, Joshua Washington, Jeroen de Borst

Add basic Tx data path support for DQO.

Descriptors are posted to the submission ring and reclaimed out of
order through the completion ring, keyed by the completion tag
carried in each event.

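The condensed shape of the clean step added below (a sketch, not the
full type switch):

    struct gve_tx_compl_desc *cd = &txq->compl_ring[txq->complq_tail];

    if (cd->generation != txq->cur_gen_bit)
        return;  /* hw has not written this entry yet */

    /* GVE_COMPL_TYPE_DQO_DESC frees descriptor space up to the tag;
     * GVE_COMPL_TYPE_DQO_PKT frees the mbuf stored under the tag */

    if (++txq->complq_tail == txq->nb_tx_desc * DQO_TX_MULTIPLIER) {
        txq->complq_tail = 0;
        txq->cur_gen_bit ^= 1;  /* validity bit flips on each wrap */
    }
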
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
Signed-off-by: Rushil Gupta <rushilg@google.com>
Signed-off-by: Joshua Washington <joshwash@google.com>
Signed-off-by: Jeroen de Borst <jeroendb@google.com>
---
 drivers/net/gve/gve_ethdev.c |   1 +
 drivers/net/gve/gve_ethdev.h |   4 +
 drivers/net/gve/gve_tx_dqo.c | 141 +++++++++++++++++++++++++++++++++++
 3 files changed, 146 insertions(+)

diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index 340315a1a3..37bd8da12d 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -878,6 +878,7 @@ gve_dev_init(struct rte_eth_dev *eth_dev)
 		eth_dev->tx_pkt_burst = gve_tx_burst;
 	} else {
 		eth_dev->dev_ops = &gve_eth_dev_ops_dqo;
+		eth_dev->tx_pkt_burst = gve_tx_burst_dqo;
 	}
 
 	eth_dev->data->mac_addrs = &priv->dev_addr;
diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index c8e1dd1435..1b8f511668 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -147,6 +147,7 @@ struct gve_tx_queue {
 	uint8_t cur_gen_bit;
 	uint32_t last_desc_cleaned;
 	void **txqs;
+	uint16_t re_cnt;
 
 	/* Only valid for DQO_RDA queue format */
 	struct gve_tx_queue *complq;
@@ -390,4 +391,7 @@ gve_stop_tx_queues_dqo(struct rte_eth_dev *dev);
 void
 gve_stop_rx_queues_dqo(struct rte_eth_dev *dev);
 
+uint16_t
+gve_tx_burst_dqo(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+
 #endif /* _GVE_ETHDEV_H_ */
diff --git a/drivers/net/gve/gve_tx_dqo.c b/drivers/net/gve/gve_tx_dqo.c
index ea6d5ff85e..2ea38a8f8e 100644
--- a/drivers/net/gve/gve_tx_dqo.c
+++ b/drivers/net/gve/gve_tx_dqo.c
@@ -6,6 +6,147 @@
 #include "gve_ethdev.h"
 #include "base/gve_adminq.h"
 
+static inline void
+gve_tx_clean_dqo(struct gve_tx_queue *txq)
+{
+	struct gve_tx_compl_desc *compl_ring;
+	struct gve_tx_compl_desc *compl_desc;
+	struct gve_tx_queue *aim_txq;
+	uint16_t nb_desc_clean;
+	struct rte_mbuf *txe;
+	uint16_t compl_tag;
+	uint16_t next;
+
+	next = txq->complq_tail;
+	compl_ring = txq->compl_ring;
+	compl_desc = &compl_ring[next];
+
+	if (compl_desc->generation != txq->cur_gen_bit)
+		return;
+
+	compl_tag = rte_le_to_cpu_16(compl_desc->completion_tag);
+
+	aim_txq = txq->txqs[compl_desc->id];
+
+	switch (compl_desc->type) {
+	case GVE_COMPL_TYPE_DQO_DESC:
+		/* need to clean Descs from last_cleaned to compl_tag */
+		if (aim_txq->last_desc_cleaned > compl_tag)
+			nb_desc_clean = aim_txq->nb_tx_desc - aim_txq->last_desc_cleaned +
+					compl_tag;
+		else
+			nb_desc_clean = compl_tag - aim_txq->last_desc_cleaned;
+		aim_txq->nb_free += nb_desc_clean;
+		aim_txq->last_desc_cleaned = compl_tag;
+		break;
+	case GVE_COMPL_TYPE_DQO_REINJECTION:
+		PMD_DRV_LOG(DEBUG, "GVE_COMPL_TYPE_DQO_REINJECTION !!!");
+		/* FALLTHROUGH */
+	case GVE_COMPL_TYPE_DQO_PKT:
+		txe = aim_txq->sw_ring[compl_tag];
+		if (txe != NULL) {
+			rte_pktmbuf_free_seg(txe);
+			txe = NULL;
+		}
+		break;
+	case GVE_COMPL_TYPE_DQO_MISS:
+		rte_delay_us_sleep(1);
+		PMD_DRV_LOG(DEBUG, "GVE_COMPL_TYPE_DQO_MISS ignored !!!");
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "unknown completion type.");
+		return;
+	}
+
+	next++;
+	if (next == txq->nb_tx_desc * DQO_TX_MULTIPLIER) {
+		next = 0;
+		txq->cur_gen_bit ^= 1;
+	}
+
+	txq->complq_tail = next;
+}
+
+uint16_t
+gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	struct gve_tx_queue *txq = tx_queue;
+	volatile union gve_tx_desc_dqo *txr;
+	volatile union gve_tx_desc_dqo *txd;
+	struct rte_mbuf **sw_ring;
+	struct rte_mbuf *tx_pkt;
+	uint16_t mask, sw_mask;
+	uint16_t nb_to_clean;
+	uint16_t nb_tx = 0;
+	uint16_t nb_used;
+	uint16_t tx_id;
+	uint16_t sw_id;
+
+	sw_ring = txq->sw_ring;
+	txr = txq->tx_ring;
+
+	mask = txq->nb_tx_desc - 1;
+	sw_mask = txq->sw_size - 1;
+	tx_id = txq->tx_tail;
+	sw_id = txq->sw_tail;
+
+	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+		tx_pkt = tx_pkts[nb_tx];
+
+		if (txq->nb_free <= txq->free_thresh) {
+			nb_to_clean = DQO_TX_MULTIPLIER * txq->rs_thresh;
+			while (nb_to_clean--)
+				gve_tx_clean_dqo(txq);
+		}
+
+		if (txq->nb_free < tx_pkt->nb_segs)
+			break;
+
+		nb_used = tx_pkt->nb_segs;
+
+		do {
+			txd = &txr[tx_id];
+
+			sw_ring[sw_id] = tx_pkt;
+
+			/* fill Tx descriptor */
+			txd->pkt.buf_addr = rte_cpu_to_le_64(rte_mbuf_data_iova(tx_pkt));
+			txd->pkt.dtype = GVE_TX_PKT_DESC_DTYPE_DQO;
+			txd->pkt.compl_tag = rte_cpu_to_le_16(sw_id);
+			txd->pkt.buf_size = RTE_MIN(tx_pkt->data_len, GVE_TX_MAX_BUF_SIZE_DQO);
+
+			/* size of desc_ring and sw_ring could be different */
+			tx_id = (tx_id + 1) & mask;
+			sw_id = (sw_id + 1) & sw_mask;
+
+			tx_pkt = tx_pkt->next;
+		} while (tx_pkt);
+
+		/* fill the last descriptor with End of Packet (EOP) bit */
+		txd->pkt.end_of_packet = 1;
+
+		txq->nb_free -= nb_used;
+		txq->nb_used += nb_used;
+	}
+
+	/* update the tail pointer if any packets were processed */
+	if (nb_tx > 0) {
+		/* Request a descriptor completion on the last descriptor */
+		txq->re_cnt += nb_tx;
+		if (txq->re_cnt >= GVE_TX_MIN_RE_INTERVAL) {
+			txd = &txr[(tx_id - 1) & mask];
+			txd->pkt.report_event = true;
+			txq->re_cnt = 0;
+		}
+
+		rte_write32(tx_id, txq->qtx_tail);
+		txq->tx_tail = tx_id;
+		txq->sw_tail = sw_id;
+	}
+
+	return nb_tx;
+}
+
 static inline void
 gve_release_txq_mbufs_dqo(struct gve_tx_queue *txq)
 {
-- 
2.34.1



* [PATCH 06/10] net/gve: support basic Rx data path for DQO
  2023-04-13  6:16 [PATCH 00/10] gve PMD enhancement Junfeng Guo
                   ` (4 preceding siblings ...)
  2023-04-13  6:16 ` [PATCH 05/10] net/gve: support basic Tx data path " Junfeng Guo
@ 2023-04-13  6:16 ` Junfeng Guo
  2023-04-13  6:16 ` [PATCH 07/10] net/gve: support basic stats " Junfeng Guo
                   ` (4 subsequent siblings)
  10 siblings, 0 replies; 14+ messages in thread
From: Junfeng Guo @ 2023-04-13  6:16 UTC (permalink / raw)
  To: qi.z.zhang, jingjing.wu, ferruh.yigit, beilei.xing
  Cc: dev, Junfeng Guo, Rushil Gupta, Joshua Washington, Jeroen de Borst

Add basic Rx data path support for DQO.

Packets are received from the completion ring, while the buffer
queue is refilled with fresh mbufs as descriptors are consumed.

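The Rx completion ring is polled the same way as on the Tx side: an
entry belongs to software once its generation bit matches the queue's
current bit, and the bit flips each time the ring wraps. Condensed
from gve_rx_burst_dqo() below:

    rx_desc = &rxq->compl_ring[rx_id];
    if (rx_desc->generation != rxq->cur_gen_bit)
        break;  /* no more completions in this burst */

    if (++rx_id == rxq->nb_rx_desc) {
        rx_id = 0;
        rxq->cur_gen_bit ^= 1;
    }
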
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
Signed-off-by: Rushil Gupta <rushilg@google.com>
Signed-off-by: Joshua Washington <joshwash@google.com>
Signed-off-by: Jeroen de Borst <jeroendb@google.com>
---
 drivers/net/gve/gve_ethdev.c |   1 +
 drivers/net/gve/gve_ethdev.h |   3 +
 drivers/net/gve/gve_rx_dqo.c | 128 +++++++++++++++++++++++++++++++++++
 3 files changed, 132 insertions(+)

diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index 37bd8da12d..a532b8a93a 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -878,6 +878,7 @@ gve_dev_init(struct rte_eth_dev *eth_dev)
 		eth_dev->tx_pkt_burst = gve_tx_burst;
 	} else {
 		eth_dev->dev_ops = &gve_eth_dev_ops_dqo;
+		eth_dev->rx_pkt_burst = gve_rx_burst_dqo;
 		eth_dev->tx_pkt_burst = gve_tx_burst_dqo;
 	}
 
diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index 1b8f511668..617bb55a85 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -391,6 +391,9 @@ gve_stop_tx_queues_dqo(struct rte_eth_dev *dev);
 void
 gve_stop_rx_queues_dqo(struct rte_eth_dev *dev);
 
+uint16_t
+gve_rx_burst_dqo(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+
 uint16_t
 gve_tx_burst_dqo(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
 
diff --git a/drivers/net/gve/gve_rx_dqo.c b/drivers/net/gve/gve_rx_dqo.c
index 7f58844839..d0eaea9c24 100644
--- a/drivers/net/gve/gve_rx_dqo.c
+++ b/drivers/net/gve/gve_rx_dqo.c
@@ -7,6 +7,134 @@
 #include "gve_ethdev.h"
 #include "base/gve_adminq.h"
 
+static inline void
+gve_rx_refill_dqo(struct gve_rx_queue *rxq)
+{
+	volatile struct gve_rx_desc_dqo *rx_buf_ring;
+	volatile struct gve_rx_desc_dqo *rx_buf_desc;
+	struct rte_mbuf *nmb[rxq->free_thresh];
+	uint16_t nb_refill = rxq->free_thresh;
+	uint16_t nb_desc = rxq->nb_rx_desc;
+	uint16_t next_avail = rxq->bufq_tail;
+	struct rte_eth_dev *dev;
+	uint64_t dma_addr;
+	uint16_t delta;
+	int i;
+
+	if (rxq->nb_rx_hold < rxq->free_thresh)
+		return;
+
+	rx_buf_ring = rxq->rx_ring;
+	delta = nb_desc - next_avail;
+	if (unlikely(delta < nb_refill)) {
+		if (likely(rte_pktmbuf_alloc_bulk(rxq->mpool, nmb, delta) == 0)) {
+			for (i = 0; i < delta; i++) {
+				rx_buf_desc = &rx_buf_ring[next_avail + i];
+				rxq->sw_ring[next_avail + i] = nmb[i];
+				dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i]));
+				rx_buf_desc->header_buf_addr = 0;
+				rx_buf_desc->buf_addr = dma_addr;
+			}
+			nb_refill -= delta;
+			next_avail = 0;
+			rxq->nb_rx_hold -= delta;
+		} else {
+			dev = &rte_eth_devices[rxq->port_id];
+			dev->data->rx_mbuf_alloc_failed += nb_desc - next_avail;
+			PMD_DRV_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
+				    rxq->port_id, rxq->queue_id);
+			return;
+		}
+	}
+
+	if (nb_desc - next_avail >= nb_refill) {
+		if (likely(rte_pktmbuf_alloc_bulk(rxq->mpool, nmb, nb_refill) == 0)) {
+			for (i = 0; i < nb_refill; i++) {
+				rx_buf_desc = &rx_buf_ring[next_avail + i];
+				rxq->sw_ring[next_avail + i] = nmb[i];
+				dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i]));
+				rx_buf_desc->header_buf_addr = 0;
+				rx_buf_desc->buf_addr = dma_addr;
+			}
+			next_avail += nb_refill;
+			rxq->nb_rx_hold -= nb_refill;
+		} else {
+			dev = &rte_eth_devices[rxq->port_id];
+			dev->data->rx_mbuf_alloc_failed += nb_desc - next_avail;
+			PMD_DRV_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
+				    rxq->port_id, rxq->queue_id);
+		}
+	}
+
+	rte_write32(next_avail, rxq->qrx_tail);
+
+	rxq->bufq_tail = next_avail;
+}
+
+uint16_t
+gve_rx_burst_dqo(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+	volatile struct gve_rx_compl_desc_dqo *rx_compl_ring;
+	volatile struct gve_rx_compl_desc_dqo *rx_desc;
+	struct gve_rx_queue *rxq;
+	struct rte_mbuf *rxm;
+	uint16_t rx_id_bufq;
+	uint16_t pkt_len;
+	uint16_t rx_id;
+	uint16_t nb_rx;
+
+	nb_rx = 0;
+	rxq = rx_queue;
+	rx_id = rxq->rx_tail;
+	rx_id_bufq = rxq->next_avail;
+	rx_compl_ring = rxq->compl_ring;
+
+	while (nb_rx < nb_pkts) {
+		rx_desc = &rx_compl_ring[rx_id];
+
+		/* check status */
+		if (rx_desc->generation != rxq->cur_gen_bit)
+			break;
+
+		if (unlikely(rx_desc->rx_error))
+			continue;
+
+		pkt_len = rx_desc->packet_len;
+
+		rx_id++;
+		if (rx_id == rxq->nb_rx_desc) {
+			rx_id = 0;
+			rxq->cur_gen_bit ^= 1;
+		}
+
+		rxm = rxq->sw_ring[rx_id_bufq];
+		rx_id_bufq++;
+		if (rx_id_bufq == rxq->nb_rx_desc)
+			rx_id_bufq = 0;
+		rxq->nb_rx_hold++;
+
+		rxm->pkt_len = pkt_len;
+		rxm->data_len = pkt_len;
+		rxm->port = rxq->port_id;
+		rxm->ol_flags = 0;
+
+		rxm->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
+		rxm->hash.rss = rte_be_to_cpu_32(rx_desc->hash);
+
+		rx_pkts[nb_rx++] = rxm;
+	}
+
+	if (nb_rx > 0) {
+		rxq->rx_tail = rx_id;
+		if (rx_id_bufq != rxq->next_avail)
+			rxq->next_avail = rx_id_bufq;
+
+		gve_rx_refill_dqo(rxq);
+	}
+
+	return nb_rx;
+}
+
 static inline void
 gve_release_rxq_mbufs_dqo(struct gve_rx_queue *rxq)
 {
-- 
2.34.1



* [PATCH 07/10] net/gve: support basic stats for DQO
  2023-04-13  6:16 [PATCH 00/10] gve PMD enhancement Junfeng Guo
                   ` (5 preceding siblings ...)
  2023-04-13  6:16 ` [PATCH 06/10] net/gve: support basic Rx " Junfeng Guo
@ 2023-04-13  6:16 ` Junfeng Guo
  2023-04-13  6:16 ` [PATCH 08/10] net/gve: enable Tx checksum offload " Junfeng Guo
                   ` (3 subsequent siblings)
  10 siblings, 0 replies; 14+ messages in thread
From: Junfeng Guo @ 2023-04-13  6:16 UTC (permalink / raw)
  To: qi.z.zhang, jingjing.wu, ferruh.yigit, beilei.xing
  Cc: dev, Junfeng Guo, Rushil Gupta, Joshua Washington, Jeroen de Borst

Add basic stats support for DQO.

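The counters accumulated in the burst functions surface through the
standard ethdev stats calls, e.g. (plain DPDK API; the port id is a
placeholder):

    struct rte_eth_stats stats;

    if (rte_eth_stats_get(port_id, &stats) == 0)
        printf("rx: %" PRIu64 " pkts, %" PRIu64 " bytes\n",
               stats.ipackets, stats.ibytes);
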
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
Signed-off-by: Rushil Gupta <rushilg@google.com>
Signed-off-by: Joshua Washington <joshwash@google.com>
Signed-off-by: Jeroen de Borst <jeroendb@google.com>
---
 drivers/net/gve/gve_ethdev.c |  5 ++++-
 drivers/net/gve/gve_rx_dqo.c | 14 +++++++++++++-
 drivers/net/gve/gve_tx_dqo.c |  7 +++++++
 3 files changed, 24 insertions(+), 2 deletions(-)

diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index a532b8a93a..8b6861a24f 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -150,14 +150,17 @@ gve_refill_dqo(struct gve_rx_queue *rxq)
 
 	diag = rte_pktmbuf_alloc_bulk(rxq->mpool, &rxq->sw_ring[0], rxq->nb_rx_desc);
 	if (diag < 0) {
+		rxq->stats.no_mbufs_bulk++;
 		for (i = 0; i < rxq->nb_rx_desc - 1; i++) {
 			nmb = rte_pktmbuf_alloc(rxq->mpool);
 			if (!nmb)
 				break;
 			rxq->sw_ring[i] = nmb;
 		}
-		if (i < rxq->nb_rx_desc - 1)
+		if (i < rxq->nb_rx_desc - 1) {
+			rxq->stats.no_mbufs += rxq->nb_rx_desc - 1 - i;
 			return -ENOMEM;
+		}
 	}
 
 	for (i = 0; i < rxq->nb_rx_desc; i++) {
diff --git a/drivers/net/gve/gve_rx_dqo.c b/drivers/net/gve/gve_rx_dqo.c
index d0eaea9c24..1d6b21359c 100644
--- a/drivers/net/gve/gve_rx_dqo.c
+++ b/drivers/net/gve/gve_rx_dqo.c
@@ -39,6 +39,8 @@ gve_rx_refill_dqo(struct gve_rx_queue *rxq)
 			next_avail = 0;
 			rxq->nb_rx_hold -= delta;
 		} else {
+			rxq->stats.no_mbufs_bulk++;
+			rxq->stats.no_mbufs += nb_desc - next_avail;
 			dev = &rte_eth_devices[rxq->port_id];
 			dev->data->rx_mbuf_alloc_failed += nb_desc - next_avail;
 			PMD_DRV_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
@@ -59,6 +61,8 @@ gve_rx_refill_dqo(struct gve_rx_queue *rxq)
 			next_avail += nb_refill;
 			rxq->nb_rx_hold -= nb_refill;
 		} else {
+			rxq->stats.no_mbufs_bulk++;
+			rxq->stats.no_mbufs += nb_desc - next_avail;
 			dev = &rte_eth_devices[rxq->port_id];
 			dev->data->rx_mbuf_alloc_failed += nb_desc - next_avail;
 			PMD_DRV_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
@@ -82,7 +86,9 @@ gve_rx_burst_dqo(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	uint16_t pkt_len;
 	uint16_t rx_id;
 	uint16_t nb_rx;
+	uint64_t bytes;
 
+	bytes = 0;
 	nb_rx = 0;
 	rxq = rx_queue;
 	rx_id = rxq->rx_tail;
@@ -96,8 +102,10 @@ gve_rx_burst_dqo(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		if (rx_desc->generation != rxq->cur_gen_bit)
 			break;
 
-		if (unlikely(rx_desc->rx_error))
+		if (unlikely(rx_desc->rx_error)) {
+			rxq->stats.errors++;
 			continue;
+		}
 
 		pkt_len = rx_desc->packet_len;
 
@@ -122,6 +130,7 @@ gve_rx_burst_dqo(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		rxm->hash.rss = rte_be_to_cpu_32(rx_desc->hash);
 
 		rx_pkts[nb_rx++] = rxm;
+		bytes += pkt_len;
 	}
 
 	if (nb_rx > 0) {
@@ -130,6 +139,9 @@ gve_rx_burst_dqo(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 			rxq->next_avail = rx_id_bufq;
 
 		gve_rx_refill_dqo(rxq);
+
+		rxq->stats.packets += nb_rx;
+		rxq->stats.bytes += bytes;
 	}
 
 	return nb_rx;
diff --git a/drivers/net/gve/gve_tx_dqo.c b/drivers/net/gve/gve_tx_dqo.c
index 2ea38a8f8e..578a409616 100644
--- a/drivers/net/gve/gve_tx_dqo.c
+++ b/drivers/net/gve/gve_tx_dqo.c
@@ -81,10 +81,12 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	uint16_t nb_used;
 	uint16_t tx_id;
 	uint16_t sw_id;
+	uint64_t bytes;
 
 	sw_ring = txq->sw_ring;
 	txr = txq->tx_ring;
 
+	bytes = 0;
 	mask = txq->nb_tx_desc - 1;
 	sw_mask = txq->sw_size - 1;
 	tx_id = txq->tx_tail;
@@ -119,6 +121,7 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			tx_id = (tx_id + 1) & mask;
 			sw_id = (sw_id + 1) & sw_mask;
 
+			bytes += tx_pkt->pkt_len;
 			tx_pkt = tx_pkt->next;
 		} while (tx_pkt);
 
@@ -142,6 +145,10 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		rte_write32(tx_id, txq->qtx_tail);
 		txq->tx_tail = tx_id;
 		txq->sw_tail = sw_id;
+
+		txq->stats.packets += nb_tx;
+		txq->stats.bytes += bytes;
+		txq->stats.errors += nb_pkts - nb_tx;
 	}
 
 	return nb_tx;
-- 
2.34.1



* [PATCH 08/10] net/gve: enable Tx checksum offload for DQO
  2023-04-13  6:16 [PATCH 00/10] gve PMD enhancement Junfeng Guo
                   ` (6 preceding siblings ...)
  2023-04-13  6:16 ` [PATCH 07/10] net/gve: support basic stats " Junfeng Guo
@ 2023-04-13  6:16 ` Junfeng Guo
  2023-04-13  6:16 ` [PATCH 09/10] net/gve: add maintainers for GVE Junfeng Guo
                   ` (2 subsequent siblings)
  10 siblings, 0 replies; 14+ messages in thread
From: Junfeng Guo @ 2023-04-13  6:16 UTC (permalink / raw)
  To: qi.z.zhang, jingjing.wu, ferruh.yigit, beilei.xing
  Cc: dev, Junfeng Guo, Rushil Gupta, Joshua Washington, Jeroen de Borst

Enable Tx checksum offload whenever any L4 checksum (or TSO) flag is
set in the mbuf offload flags.

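On the application side the offload is requested per mbuf with the
usual flags; any flag covered by GVE_TX_CKSUM_OFFLOAD_MASK sets
checksum_offload_enable in the packet descriptor. A sketch (the header
lengths are illustrative):

    mbuf->l2_len = sizeof(struct rte_ether_hdr);
    mbuf->l3_len = sizeof(struct rte_ipv4_hdr);
    mbuf->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_TCP_CKSUM;
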
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
Signed-off-by: Rushil Gupta <rushilg@google.com>
Signed-off-by: Joshua Washington <joshwash@google.com>
Signed-off-by: Jeroen de Borst <jeroendb@google.com>
---
 drivers/net/gve/gve_ethdev.h | 4 ++++
 drivers/net/gve/gve_tx_dqo.c | 5 +++++
 2 files changed, 9 insertions(+)

diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index 617bb55a85..4a0e860afa 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -38,6 +38,10 @@
 #define GVE_MAX_MTU	RTE_ETHER_MTU
 #define GVE_MIN_MTU	RTE_ETHER_MIN_MTU
 
+#define GVE_TX_CKSUM_OFFLOAD_MASK (		\
+		RTE_MBUF_F_TX_L4_MASK  |	\
+		RTE_MBUF_F_TX_TCP_SEG)
+
 /* A list of pages registered with the device during setup and used by a queue
  * as buffers
  */
diff --git a/drivers/net/gve/gve_tx_dqo.c b/drivers/net/gve/gve_tx_dqo.c
index 578a409616..b38eeaea4b 100644
--- a/drivers/net/gve/gve_tx_dqo.c
+++ b/drivers/net/gve/gve_tx_dqo.c
@@ -78,6 +78,7 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	uint16_t mask, sw_mask;
 	uint16_t nb_to_clean;
 	uint16_t nb_tx = 0;
+	uint64_t ol_flags;
 	uint16_t nb_used;
 	uint16_t tx_id;
 	uint16_t sw_id;
@@ -104,6 +105,7 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		if (txq->nb_free < tx_pkt->nb_segs)
 			break;
 
+		ol_flags = tx_pkt->ol_flags;
 		nb_used = tx_pkt->nb_segs;
 
 		do {
@@ -128,6 +130,9 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		/* fill the last descriptor with End of Packet (EOP) bit */
 		txd->pkt.end_of_packet = 1;
 
+		if (ol_flags & GVE_TX_CKSUM_OFFLOAD_MASK)
+			txd->pkt.checksum_offload_enable = 1;
+
 		txq->nb_free -= nb_used;
 		txq->nb_used += nb_used;
 	}
-- 
2.34.1



* [PATCH 09/10] net/gve: add maintainers for GVE
  2023-04-13  6:16 [PATCH 00/10] gve PMD enhancement Junfeng Guo
                   ` (7 preceding siblings ...)
  2023-04-13  6:16 ` [PATCH 08/10] net/gve: enable Tx checksum offload " Junfeng Guo
@ 2023-04-13  6:16 ` Junfeng Guo
  2023-05-04 11:01   ` Ferruh Yigit
  2023-04-13  6:16 ` [PATCH 10/10] net/gve: support jumbo frame for GQI Junfeng Guo
  2023-05-04 10:52 ` [PATCH 00/10] gve PMD enhancement Ferruh Yigit
  10 siblings, 1 reply; 14+ messages in thread
From: Junfeng Guo @ 2023-04-13  6:16 UTC (permalink / raw)
  To: qi.z.zhang, jingjing.wu, ferruh.yigit, beilei.xing
  Cc: dev, Junfeng Guo, Rushil Gupta

Add maintainers from Google for GVE.

Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
Signed-off-by: Rushil Gupta <rushilg@google.com>
---
 MAINTAINERS | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/MAINTAINERS b/MAINTAINERS
index 8df23e5099..08001751b0 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -713,6 +713,9 @@ F: doc/guides/nics/features/enic.ini
 
 Google Virtual Ethernet
 M: Junfeng Guo <junfeng.guo@intel.com>
+M: Jeroen de Borst <jeroendb@google.com>
+M: Rushil Gupta <rushilg@google.com>
+M: Joshua Washington <joshwash@google.com>
 F: drivers/net/gve/
 F: doc/guides/nics/gve.rst
 F: doc/guides/nics/features/gve.ini
-- 
2.34.1



* [PATCH 10/10] net/gve: support jumbo frame for GQI
  2023-04-13  6:16 [PATCH 00/10] gve PMD enhancement Junfeng Guo
                   ` (8 preceding siblings ...)
  2023-04-13  6:16 ` [PATCH 09/10] net/gve: add maintainers for GVE Junfeng Guo
@ 2023-04-13  6:16 ` Junfeng Guo
  2023-05-04 10:52 ` [PATCH 00/10] gve PMD enhancement Ferruh Yigit
  10 siblings, 0 replies; 14+ messages in thread
From: Junfeng Guo @ 2023-04-13  6:16 UTC (permalink / raw)
  To: qi.z.zhang, jingjing.wu, ferruh.yigit, beilei.xing
  Cc: dev, Junfeng Guo, Rushil Gupta, Joshua Washington, Jeroen de Borst

Add multi-segment mbuf support to enable jumbo frames on the GQI Rx
path.

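No GVE-specific knob is needed: a larger MTU is requested through the
normal ethdev configuration, and frames spanning several buffers come
back as chained mbufs. A sketch (the MTU value is arbitrary):

    struct rte_eth_conf port_conf = {
        .rxmode = { .mtu = 9000 },
    };

    ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
    /* a received jumbo frame has nb_segs > 1; pkt_len covers the
     * whole chain, each segment's data_len covers that mbuf */
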
Signed-off-by: Rushil Gupta <rushilg@google.com>
Signed-off-by: Joshua Washington <joshwash@google.com>
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
Signed-off-by: Jeroen de Borst <jeroendb@google.com>
---
 drivers/net/gve/gve_ethdev.h |   8 ++
 drivers/net/gve/gve_rx.c     | 137 +++++++++++++++++++++++++----------
 2 files changed, 108 insertions(+), 37 deletions(-)

diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index 4a0e860afa..53a75044c5 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -159,6 +159,13 @@ struct gve_tx_queue {
 	uint8_t is_gqi_qpl;
 };
 
+struct gve_rx_ctx {
+	struct rte_mbuf *mbuf_head;
+	struct rte_mbuf *mbuf_tail;
+	uint16_t total_frags;
+	bool drop_pkt;
+};
+
 struct gve_rx_queue {
 	volatile struct gve_rx_desc *rx_desc_ring;
 	volatile union gve_rx_data_slot *rx_data_ring;
@@ -167,6 +174,7 @@ struct gve_rx_queue {
 	uint64_t rx_ring_phys_addr;
 	struct rte_mbuf **sw_ring;
 	struct rte_mempool *mpool;
+	struct gve_rx_ctx ctx;
 
 	uint16_t rx_tail;
 	uint16_t nb_rx_desc;
diff --git a/drivers/net/gve/gve_rx.c b/drivers/net/gve/gve_rx.c
index 3dd3f578f9..f2f6202404 100644
--- a/drivers/net/gve/gve_rx.c
+++ b/drivers/net/gve/gve_rx.c
@@ -5,6 +5,8 @@
 #include "gve_ethdev.h"
 #include "base/gve_adminq.h"
 
+#define GVE_PKT_CONT_BIT_IS_SET(x) (GVE_RXF_PKT_CONT & (x))
+
 static inline void
 gve_rx_refill(struct gve_rx_queue *rxq)
 {
@@ -87,43 +89,72 @@ gve_rx_refill(struct gve_rx_queue *rxq)
 	}
 }
 
-uint16_t
-gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+/*
+ * This method processes a single rte_mbuf and handles packet segmentation.
+ * In QPL mode it copies the packet data from the queue page list into the mbuf.
+ */
+static void
+gve_rx_mbuf(struct gve_rx_queue *rxq, struct rte_mbuf *rxe, uint16_t len,
+	    uint16_t rx_id)
 {
-	volatile struct gve_rx_desc *rxr, *rxd;
-	struct gve_rx_queue *rxq = rx_queue;
-	uint16_t rx_id = rxq->rx_tail;
-	struct rte_mbuf *rxe;
-	uint16_t nb_rx, len;
-	uint64_t bytes = 0;
+	uint16_t padding = 0;
 	uint64_t addr;
-	uint16_t i;
-
-	rxr = rxq->rx_desc_ring;
-	nb_rx = 0;
 
-	for (i = 0; i < nb_pkts; i++) {
-		rxd = &rxr[rx_id];
-		if (GVE_SEQNO(rxd->flags_seq) != rxq->expected_seqno)
-			break;
-
-		if (rxd->flags_seq & GVE_RXF_ERR) {
-			rxq->stats.errors++;
-			continue;
-		}
-
-		len = rte_be_to_cpu_16(rxd->len) - GVE_RX_PAD;
-		rxe = rxq->sw_ring[rx_id];
-		if (rxq->is_gqi_qpl) {
-			addr = (uint64_t)(rxq->qpl->mz->addr) + rx_id * PAGE_SIZE + GVE_RX_PAD;
-			rte_memcpy((void *)((size_t)rxe->buf_addr + rxe->data_off),
-				   (void *)(size_t)addr, len);
-		}
+	rxe->data_len = len;
+	if (!rxq->ctx.mbuf_head) {
+		rxq->ctx.mbuf_head = rxe;
+		rxq->ctx.mbuf_tail = rxe;
+		rxe->nb_segs = 1;
 		rxe->pkt_len = len;
 		rxe->data_len = len;
 		rxe->port = rxq->port_id;
 		rxe->ol_flags = 0;
+		padding = GVE_RX_PAD;
+	} else {
+		rxq->ctx.mbuf_head->pkt_len += len;
+		rxq->ctx.mbuf_head->nb_segs += 1;
+		rxq->ctx.mbuf_tail->next = rxe;
+		rxq->ctx.mbuf_tail = rxe;
+	}
+	if (rxq->is_gqi_qpl) {
+		addr = (uint64_t)(rxq->qpl->mz->addr) + rx_id * PAGE_SIZE + padding;
+		rte_memcpy((void *)((size_t)rxe->buf_addr + rxe->data_off),
+				    (void *)(size_t)addr, len);
+	}
+}
+
+/*
+ * This method processes a single packet fragment associated with the
+ * passed packet descriptor.
+ * This method returns whether the fragment is the last fragment
+ * of a packet.
+ */
+static bool
+gve_rx(struct gve_rx_queue *rxq, volatile struct gve_rx_desc *rxd, uint16_t rx_id)
+{
+	bool is_last_frag = !GVE_PKT_CONT_BIT_IS_SET(rxd->flags_seq);
+	uint16_t frag_size = rte_be_to_cpu_16(rxd->len);
+	struct gve_rx_ctx *ctx = &rxq->ctx;
+	bool is_first_frag = ctx->total_frags == 0;
+	struct rte_mbuf *rxe;
+
+	if (ctx->drop_pkt)
+		goto finish_frag;
 
+	if (rxd->flags_seq & GVE_RXF_ERR) {
+		ctx->drop_pkt = true;
+		rxq->stats.errors++;
+		goto finish_frag;
+	}
+
+	if (is_first_frag)
+		frag_size -= GVE_RX_PAD;
+
+	rxe = rxq->sw_ring[rx_id];
+	gve_rx_mbuf(rxq, rxe, frag_size, rx_id);
+	rxq->stats.bytes += frag_size;
+
+	if (is_first_frag) {
 		if (rxd->flags_seq & GVE_RXF_TCP)
 			rxe->packet_type |= RTE_PTYPE_L4_TCP;
 		if (rxd->flags_seq & GVE_RXF_UDP)
@@ -137,28 +168,60 @@ gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 			rxe->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
 			rxe->hash.rss = rte_be_to_cpu_32(rxd->rss_hash);
 		}
+	}
 
-		rxq->expected_seqno = gve_next_seqno(rxq->expected_seqno);
+finish_frag:
+	ctx->total_frags++;
+	return is_last_frag;
+}
+
+static void
+gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
+{
+	ctx->mbuf_head = NULL;
+	ctx->mbuf_tail = NULL;
+	ctx->drop_pkt = false;
+	ctx->total_frags = 0;
+}
+
+uint16_t
+gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+	volatile struct gve_rx_desc *rxr, *rxd;
+	struct gve_rx_queue *rxq = rx_queue;
+	struct gve_rx_ctx *ctx = &rxq->ctx;
+	uint16_t rx_id = rxq->rx_tail;
+	uint16_t nb_rx;
+
+	rxr = rxq->rx_desc_ring;
+	nb_rx = 0;
+
+	while (nb_rx < nb_pkts) {
+		rxd = &rxr[rx_id];
+		if (GVE_SEQNO(rxd->flags_seq) != rxq->expected_seqno)
+			break;
+
+		if (gve_rx(rxq, rxd, rx_id)) {
+			if (!ctx->drop_pkt)
+				rx_pkts[nb_rx++] = ctx->mbuf_head;
+			rxq->nb_avail += ctx->total_frags;
+			gve_rx_ctx_clear(ctx);
+		}
 
 		rx_id++;
 		if (rx_id == rxq->nb_rx_desc)
 			rx_id = 0;
 
-		rx_pkts[nb_rx] = rxe;
-		bytes += len;
-		nb_rx++;
+		rxq->expected_seqno = gve_next_seqno(rxq->expected_seqno);
 	}
 
-	rxq->nb_avail += nb_rx;
 	rxq->rx_tail = rx_id;
 
 	if (rxq->nb_avail > rxq->free_thresh)
 		gve_rx_refill(rxq);
 
-	if (nb_rx) {
+	if (nb_rx)
 		rxq->stats.packets += nb_rx;
-		rxq->stats.bytes += bytes;
-	}
 
 	return nb_rx;
 }
-- 
2.34.1
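
For reference, below is a minimal sketch of how an application might
consume the chained (multi-segment) mbufs that gve_rx_burst() can now
return. It is not part of the patch; the port id, queue id, burst size
and the process() helper are illustrative placeholders:

	/* Receive a burst and walk each packet's segment chain. */
	struct rte_mbuf *pkts[32];
	struct rte_mbuf *seg;
	uint16_t nb, i;

	nb = rte_eth_rx_burst(port_id, 0, pkts, 32);
	for (i = 0; i < nb; i++) {
		/* pkt_len of the head mbuf is the total packet length;
		 * each segment holds data_len bytes of payload.
		 */
		for (seg = pkts[i]; seg != NULL; seg = seg->next)
			process(rte_pktmbuf_mtod(seg, void *), seg->data_len);
		/* Freeing the head mbuf frees the whole chain. */
		rte_pktmbuf_free(pkts[i]);
	}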



* Re: [PATCH 00/10] gve PMD enhancement
  2023-04-13  6:16 [PATCH 00/10] gve PMD enhancement Junfeng Guo
                   ` (9 preceding siblings ...)
  2023-04-13  6:16 ` [PATCH 10/10] net/gve: support jumbo frame for GQI Junfeng Guo
@ 2023-05-04 10:52 ` Ferruh Yigit
  10 siblings, 0 replies; 14+ messages in thread
From: Ferruh Yigit @ 2023-05-04 10:52 UTC (permalink / raw)
  To: Junfeng Guo, qi.z.zhang, jingjing.wu, beilei.xing; +Cc: dev

On 4/13/2023 7:16 AM, Junfeng Guo wrote:
> This patch set includes two main enhancements for the gve PMD:
>  - support basic data path with DQO queue format
>  - support jumbo frame with GQI queue format
> 
> This patch set is based on this:
> patchwork.dpdk.org/project/dpdk/list/?series=27653&state=*
> 
> Junfeng Guo (10):
>   net/gve: add Tx queue setup for DQO
>   net/gve: add Rx queue setup for DQO
>   net/gve: support device start and close for DQO
>   net/gve: support queue release and stop for DQO
>   net/gve: support basic Tx data path for DQO
>   net/gve: support basic Rx data path for DQO
>   net/gve: support basic stats for DQO
>   net/gve: enable Tx checksum offload for DQO
>   net/gve: add maintainers for GVE
>   net/gve: support jumbo frame for GQI

Except for 9/10 (the maintainers file update); please see the note on
that patch.
Series applied to dpdk-next-net/main, thanks.


* Re: [PATCH 09/10] net/gve: add maintainers for GVE
  2023-04-13  6:16 ` [PATCH 09/10] net/gve: add maintainers for GVE Junfeng Guo
@ 2023-05-04 11:01   ` Ferruh Yigit
  2023-05-05  2:16     ` Guo, Junfeng
  0 siblings, 1 reply; 14+ messages in thread
From: Ferruh Yigit @ 2023-05-04 11:01 UTC (permalink / raw)
  To: Junfeng Guo, qi.z.zhang, jingjing.wu, beilei.xing
  Cc: dev, Rushil Gupta, Jeroen de Borst, Joshua Washington

On 4/13/2023 7:16 AM, Junfeng Guo wrote:
> Add maintainers from Google for GVE.
> 
> Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
> Signed-off-by: Rushil Gupta <rushilg@google.com>
> ---
>  MAINTAINERS | 3 +++
>  1 file changed, 3 insertions(+)
> 
> diff --git a/MAINTAINERS b/MAINTAINERS
> index 8df23e5099..08001751b0 100644
> --- a/MAINTAINERS
> +++ b/MAINTAINERS
> @@ -713,6 +713,9 @@ F: doc/guides/nics/features/enic.ini
>  
>  Google Virtual Ethernet
>  M: Junfeng Guo <junfeng.guo@intel.com>
> +M: Jeroen de Borst <jeroendb@google.com>
> +M: Rushil Gupta <rushilg@google.com>
> +M: Joshua Washington <joshwash@google.com>
>  F: drivers/net/gve/
>  F: doc/guides/nics/gve.rst
>  F: doc/guides/nics/features/gve.ini

The requested acks from the newly added maintainers were not received
(on the other version of this patch [2]). This patch is not a
dependency for the rest of the set, so I am dropping it from the set.

Also, there is a standalone version of this patch [1]; can you please
send a new version of that standalone one, instead of including the
same patch in various sets?



[1]
https://patches.dpdk.org/project/dpdk/patch/20221109072352.1387300-1-junfeng.guo@intel.com/

[2]
https://patches.dpdk.org/project/dpdk/patch/20230328094512.1796648-4-junfeng.guo@intel.com/


* RE: [PATCH 09/10] net/gve: add maintainers for GVE
  2023-05-04 11:01   ` Ferruh Yigit
@ 2023-05-05  2:16     ` Guo, Junfeng
  0 siblings, 0 replies; 14+ messages in thread
From: Guo, Junfeng @ 2023-05-05  2:16 UTC (permalink / raw)
  To: Ferruh Yigit, Zhang, Qi Z, Wu, Jingjing, Xing, Beilei
  Cc: dev, Rushil Gupta, Jeroen de Borst, Joshua Washington



> -----Original Message-----
> From: Ferruh Yigit <ferruh.yigit@amd.com>
> Sent: Thursday, May 4, 2023 19:01
> To: Guo, Junfeng <junfeng.guo@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing,
> Beilei <beilei.xing@intel.com>
> Cc: dev@dpdk.org; Rushil Gupta <rushilg@google.com>; Jeroen de Borst
> <jeroendb@google.com>; Joshua Washington <joshwash@google.com>
> Subject: Re: [PATCH 09/10] net/gve: add maintainers for GVE
> 
> On 4/13/2023 7:16 AM, Junfeng Guo wrote:
> > Add maintainers from Google for GVE.
> >
> > Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
> > Signed-off-by: Rushil Gupta <rushilg@google.com>
> > ---
> >  MAINTAINERS | 3 +++
> >  1 file changed, 3 insertions(+)
> >
> > diff --git a/MAINTAINERS b/MAINTAINERS
> > index 8df23e5099..08001751b0 100644
> > --- a/MAINTAINERS
> > +++ b/MAINTAINERS
> > @@ -713,6 +713,9 @@ F: doc/guides/nics/features/enic.ini
> >
> >  Google Virtual Ethernet
> >  M: Junfeng Guo <junfeng.guo@intel.com>
> > +M: Jeroen de Borst <jeroendb@google.com>
> > +M: Rushil Gupta <rushilg@google.com>
> > +M: Joshua Washington <joshwash@google.com>
> >  F: drivers/net/gve/
> >  F: doc/guides/nics/gve.rst
> >  F: doc/guides/nics/features/gve.ini
> 
> The requested acks from the newly added maintainers were not received
> (on the other version of this patch [2]). This patch is not a
> dependency for the rest of the set, so I am dropping it from the set.
> 
> Also, there is a standalone version of this patch [1]; can you please
> send a new version of that standalone one, instead of including the
> same patch in various sets?
> 

Sure! I will send out a standalone version with In-Reply-To set to [1].

Thanks for the careful review!

> 
> 
> [1]
> https://patches.dpdk.org/project/dpdk/patch/20221109072352.1387300-1-junfeng.guo@intel.com/
> 
> [2]
> https://patches.dpdk.org/project/dpdk/patch/20230328094512.1796648-4-junfeng.guo@intel.com/

