All of lore.kernel.org
 help / color / mirror / Atom feed
From: Junfeng Guo <junfeng.guo@intel.com>
To: qi.z.zhang@intel.com, jingjing.wu@intel.com,
	ferruh.yigit@amd.com, beilei.xing@intel.com
Cc: dev@dpdk.org, xiaoyun.li@intel.com, helin.zhang@intel.com,
	Junfeng Guo <junfeng.guo@intel.com>,
	Rushil Gupta <rushilg@google.com>,
	Jeroen de Borst <jeroendb@google.com>
Subject: [RFC v3 09/10] net/gve: support jumbo frame for GQI
Date: Fri, 17 Feb 2023 15:32:27 +0800	[thread overview]
Message-ID: <20230217073228.340815-10-junfeng.guo@intel.com> (raw)
In-Reply-To: <20230217073228.340815-1-junfeng.guo@intel.com>

Add multi-segment support to enable GQI Rx Jumbo Frame.

Signed-off-by: Rushil Gupta <rushilg@google.com>
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
Signed-off-by: Jeroen de Borst <jeroendb@google.com>
---
 drivers/net/gve/gve_ethdev.h |   8 ++
 drivers/net/gve/gve_rx.c     | 137 +++++++++++++++++++++++++----------
 2 files changed, 108 insertions(+), 37 deletions(-)

diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index bca6e86ef0..02b997312c 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -142,6 +142,13 @@ struct gve_tx_queue {
 	uint8_t is_gqi_qpl;
 };
 
+struct gve_rx_ctx {
+	struct rte_mbuf *mbuf_head;
+	struct rte_mbuf *mbuf_tail;
+	uint16_t total_frags;
+	bool drop_pkt;
+};
+
 struct gve_rx_queue {
 	volatile struct gve_rx_desc *rx_desc_ring;
 	volatile union gve_rx_data_slot *rx_data_ring;
@@ -150,6 +157,7 @@ struct gve_rx_queue {
 	uint64_t rx_ring_phys_addr;
 	struct rte_mbuf **sw_ring;
 	struct rte_mempool *mpool;
+	struct gve_rx_ctx ctx;
 
 	uint16_t rx_tail;
 	uint16_t nb_rx_desc;
diff --git a/drivers/net/gve/gve_rx.c b/drivers/net/gve/gve_rx.c
index e264bcadad..ecef0c4a86 100644
--- a/drivers/net/gve/gve_rx.c
+++ b/drivers/net/gve/gve_rx.c
@@ -5,6 +5,8 @@
 #include "gve_ethdev.h"
 #include "base/gve_adminq.h"
 
+#define GVE_PKT_CONT_BIT_IS_SET(x) (GVE_RXF_PKT_CONT & (x))
+
 static inline void
 gve_rx_refill(struct gve_rx_queue *rxq)
 {
@@ -82,43 +84,72 @@ gve_rx_refill(struct gve_rx_queue *rxq)
 	}
 }
 
-uint16_t
-gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+/*
+ * This method processes a single rte_mbuf and handles packet segmentation.
+ * In QPL mode it copies data from the queue's QPL pages into the mbuf.
+ */
+static void
+gve_rx_mbuf(struct gve_rx_queue *rxq, struct rte_mbuf *rxe, uint16_t len,
+	    uint16_t rx_id)
 {
-	volatile struct gve_rx_desc *rxr, *rxd;
-	struct gve_rx_queue *rxq = rx_queue;
-	uint16_t rx_id = rxq->rx_tail;
-	struct rte_mbuf *rxe;
-	uint16_t nb_rx, len;
-	uint64_t bytes = 0;
+	uint16_t padding = 0;
 	uint64_t addr;
-	uint16_t i;
-
-	rxr = rxq->rx_desc_ring;
-	nb_rx = 0;
 
-	for (i = 0; i < nb_pkts; i++) {
-		rxd = &rxr[rx_id];
-		if (GVE_SEQNO(rxd->flags_seq) != rxq->expected_seqno)
-			break;
-
-		if (rxd->flags_seq & GVE_RXF_ERR) {
-			rxq->errors++;
-			continue;
-		}
-
-		len = rte_be_to_cpu_16(rxd->len) - GVE_RX_PAD;
-		rxe = rxq->sw_ring[rx_id];
-		if (rxq->is_gqi_qpl) {
-			addr = (uint64_t)(rxq->qpl->mz->addr) + rx_id * PAGE_SIZE + GVE_RX_PAD;
-			rte_memcpy((void *)((size_t)rxe->buf_addr + rxe->data_off),
-				   (void *)(size_t)addr, len);
-		}
+	rxe->data_len = len;
+	if (!rxq->ctx.mbuf_head) {
+		rxq->ctx.mbuf_head = rxe;
+		rxq->ctx.mbuf_tail = rxe;
+		rxe->nb_segs = 1;
 		rxe->pkt_len = len;
 		rxe->data_len = len;
 		rxe->port = rxq->port_id;
 		rxe->ol_flags = 0;
+		padding = GVE_RX_PAD;
+	} else {
+		rxq->ctx.mbuf_head->pkt_len += len;
+		rxq->ctx.mbuf_head->nb_segs += 1;
+		rxq->ctx.mbuf_tail->next = rxe;
+		rxq->ctx.mbuf_tail = rxe;
+	}
+	if (rxq->is_gqi_qpl) {
+		addr = (uint64_t)(rxq->qpl->mz->addr) + rx_id * PAGE_SIZE + padding;
+		rte_memcpy((void *)((size_t)rxe->buf_addr + rxe->data_off),
+				    (void *)(size_t)addr, len);
+	}
+}
+
+/*
+ * This method processes a single packet fragment associated with the
+ * passed packet descriptor.
+ * It returns whether the fragment is the last fragment
+ * of a packet.
+ */
+static bool
+gve_rx(struct gve_rx_queue *rxq, volatile struct gve_rx_desc *rxd, uint16_t rx_id)
+{
+	bool is_last_frag = !GVE_PKT_CONT_BIT_IS_SET(rxd->flags_seq);
+	uint16_t frag_size = rte_be_to_cpu_16(rxd->len);
+	struct gve_rx_ctx *ctx = &rxq->ctx;
+	bool is_first_frag = ctx->total_frags == 0;
+	struct rte_mbuf *rxe;
+
+	if (ctx->drop_pkt)
+		goto finish_frag;
 
+	if (rxd->flags_seq & GVE_RXF_ERR) {
+		ctx->drop_pkt = true;
+		rxq->errors++;
+		goto finish_frag;
+	}
+
+	if (is_first_frag)
+		frag_size -= GVE_RX_PAD;
+
+	rxe = rxq->sw_ring[rx_id];
+	gve_rx_mbuf(rxq, rxe, frag_size, rx_id);
+	rxq->bytes += frag_size;
+
+	if (is_first_frag) {
 		if (rxd->flags_seq & GVE_RXF_TCP)
 			rxe->packet_type |= RTE_PTYPE_L4_TCP;
 		if (rxd->flags_seq & GVE_RXF_UDP)
@@ -132,28 +163,60 @@ gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 			rxe->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
 			rxe->hash.rss = rte_be_to_cpu_32(rxd->rss_hash);
 		}
+	}
 
-		rxq->expected_seqno = gve_next_seqno(rxq->expected_seqno);
+finish_frag:
+	ctx->total_frags++;
+	return is_last_frag;
+}
+
+static void
+gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
+{
+	ctx->mbuf_head = NULL;
+	ctx->mbuf_tail = NULL;
+	ctx->drop_pkt = false;
+	ctx->total_frags = 0;
+}
+
+uint16_t
+gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+	volatile struct gve_rx_desc *rxr, *rxd;
+	struct gve_rx_queue *rxq = rx_queue;
+	struct gve_rx_ctx *ctx = &rxq->ctx;
+	uint16_t rx_id = rxq->rx_tail;
+	uint16_t nb_rx;
+
+	rxr = rxq->rx_desc_ring;
+	nb_rx = 0;
+
+	while (nb_rx < nb_pkts) {
+		rxd = &rxr[rx_id];
+		if (GVE_SEQNO(rxd->flags_seq) != rxq->expected_seqno)
+			break;
+
+		if (gve_rx(rxq, rxd, rx_id)) {
+			if (!ctx->drop_pkt)
+				rx_pkts[nb_rx++] = ctx->mbuf_head;
+			rxq->nb_avail += ctx->total_frags;
+			gve_rx_ctx_clear(ctx);
+		}
 
 		rx_id++;
 		if (rx_id == rxq->nb_rx_desc)
 			rx_id = 0;
 
-		rx_pkts[nb_rx] = rxe;
-		bytes += len;
-		nb_rx++;
+		rxq->expected_seqno = gve_next_seqno(rxq->expected_seqno);
 	}
 
-	rxq->nb_avail += nb_rx;
 	rxq->rx_tail = rx_id;
 
 	if (rxq->nb_avail > rxq->free_thresh)
 		gve_rx_refill(rxq);
 
-	if (nb_rx) {
+	if (nb_rx)
 		rxq->packets += nb_rx;
-		rxq->bytes += bytes;
-	}
 
 	return nb_rx;
 }
-- 
2.34.1


  parent reply	other threads:[~2023-02-17  7:39 UTC|newest]

Thread overview: 36+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-01-18  2:53 [RFC 0/8] gve PMD enhancement Junfeng Guo
2023-01-18  2:53 ` [RFC 1/8] net/gve: add Rx queue setup for DQO Junfeng Guo
2023-01-18  2:53 ` [RFC 2/8] net/gve: support device start and close " Junfeng Guo
2023-01-18  2:53 ` [RFC 3/8] net/gve: support queue release and stop " Junfeng Guo
2023-01-18  2:53 ` [RFC 4/8] net/gve: support basic Tx data path " Junfeng Guo
2023-01-18  2:53 ` [RFC 5/8] net/gve: support basic Rx " Junfeng Guo
2023-01-18  2:53 ` [RFC 6/8] net/gve: support basic stats " Junfeng Guo
2023-01-18  2:53 ` [RFC 7/8] net/gve: support jumbo frame for GQI Junfeng Guo
2023-01-18  2:53 ` [RFC 8/8] net/gve: add AdminQ command to verify driver compatibility Junfeng Guo
2023-01-25 13:37 ` [RFC 0/8] gve PMD enhancement Li, Xiaoyun
2023-01-30  6:26 ` [RFC v2 0/9] " Junfeng Guo
2023-01-30  6:26   ` [RFC v2 1/9] net/gve: add Tx queue setup for DQO Junfeng Guo
2023-01-30  6:26   ` [RFC v2 2/9] net/gve: add Rx " Junfeng Guo
2023-01-30  6:26   ` [RFC v2 3/9] net/gve: support device start and close " Junfeng Guo
2023-01-30  6:26   ` [RFC v2 4/9] net/gve: support queue release and stop " Junfeng Guo
2023-01-30  6:26   ` [RFC v2 5/9] net/gve: support basic Tx data path " Junfeng Guo
2023-01-30  6:26   ` [RFC v2 6/9] net/gve: support basic Rx " Junfeng Guo
2023-01-30 18:32     ` Honnappa Nagarahalli
2023-01-30  6:26   ` [RFC v2 7/9] net/gve: support basic stats " Junfeng Guo
2023-01-30 18:27     ` Honnappa Nagarahalli
2023-01-30  6:26   ` [RFC v2 8/9] net/gve: support jumbo frame for GQI Junfeng Guo
2023-01-30  6:26   ` [RFC v2 9/9] net/gve: add AdminQ command to verify driver compatibility Junfeng Guo
2023-02-17  7:32   ` [RFC v3 00/10] gve PMD enhancement Junfeng Guo
2023-02-17  7:32     ` [RFC v3 01/10] net/gve: add Tx queue setup for DQO Junfeng Guo
2023-02-17  7:32     ` [RFC v3 02/10] net/gve: add Rx " Junfeng Guo
2023-02-17  7:32     ` [RFC v3 03/10] net/gve: support device start and close " Junfeng Guo
2023-02-17  7:32     ` [RFC v3 04/10] net/gve: support queue release and stop " Junfeng Guo
2023-02-17  7:32     ` [RFC v3 05/10] net/gve: support basic Tx data path " Junfeng Guo
2023-02-17  7:32     ` [RFC v3 06/10] net/gve: support basic Rx " Junfeng Guo
2023-02-17 15:17       ` Honnappa Nagarahalli
2023-02-23  5:32         ` Guo, Junfeng
2023-02-17  7:32     ` [RFC v3 07/10] net/gve: support basic stats " Junfeng Guo
2023-02-17 15:28       ` Honnappa Nagarahalli
2023-02-17  7:32     ` [RFC v3 08/10] net/gve: enable Tx checksum offload " Junfeng Guo
2023-02-17  7:32     ` Junfeng Guo [this message]
2023-02-17  7:32     ` [RFC v3 10/10] net/gve: add AdminQ command to verify driver compatibility Junfeng Guo

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20230217073228.340815-10-junfeng.guo@intel.com \
    --to=junfeng.guo@intel.com \
    --cc=beilei.xing@intel.com \
    --cc=dev@dpdk.org \
    --cc=ferruh.yigit@amd.com \
    --cc=helin.zhang@intel.com \
    --cc=jeroendb@google.com \
    --cc=jingjing.wu@intel.com \
    --cc=qi.z.zhang@intel.com \
    --cc=rushilg@google.com \
    --cc=xiaoyun.li@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.