From: Junfeng Guo <junfeng.guo@intel.com>
To: qi.z.zhang@intel.com, jingjing.wu@intel.com
Cc: ferruh.yigit@xilinx.com, dev@dpdk.org, xiaoyun.li@intel.com,
	awogbemila@google.com, bruce.richardson@intel.com,
	junfeng.guo@intel.com
Subject: [PATCH v2 07/10] net/gve: add Rx/Tx support
Date: Mon, 29 Aug 2022 16:41:24 +0800
Message-ID: <20220829084127.934183-8-junfeng.guo@intel.com>
In-Reply-To: <20220829084127.934183-1-junfeng.guo@intel.com>

Add Rx/Tx support for the GQI_QPL and GQI_RDA queue formats. With
GQI_QPL, packet data is staged through the pre-registered queue pages
(a copy on both the Rx and Tx paths); with GQI_RDA, descriptors
reference mbuf data buffers directly.

Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
---
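Note: the burst functions below are reached through the generic ethdev
API; applications never call them directly. As an illustrative sketch
only (not part of this patch, and assuming port 0 is a gve port that
has already been configured and started), a forwarding loop would
exercise these paths as follows:

  #include <rte_ethdev.h>
  #include <rte_mbuf.h>

  #define BURST_SZ 32

  static void
  poll_port0(void)
  {
  	struct rte_mbuf *pkts[BURST_SZ];
  	uint16_t nb_rx, nb_tx;

  	for (;;) {
  		/* Ends up in gve_rx_burst() for the GQI queue formats. */
  		nb_rx = rte_eth_rx_burst(0, 0, pkts, BURST_SZ);
  		if (nb_rx == 0)
  			continue;
  		/* Ends up in gve_tx_burst(); free anything not accepted. */
  		nb_tx = rte_eth_tx_burst(0, 0, pkts, nb_rx);
  		while (nb_tx < nb_rx)
  			rte_pktmbuf_free(pkts[nb_tx++]);
  	}
  }
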
 drivers/net/gve/gve.h        |  17 ++
 drivers/net/gve/gve_ethdev.c |   5 +
 drivers/net/gve/gve_rx.c     | 143 +++++++++++
 drivers/net/gve/gve_tx.c     | 452 +++++++++++++++++++++++++++++++++++
 4 files changed, 617 insertions(+)

diff --git a/drivers/net/gve/gve.h b/drivers/net/gve/gve.h
index a53a852a5f..7f4d0e37f3 100644
--- a/drivers/net/gve/gve.h
+++ b/drivers/net/gve/gve.h
@@ -25,6 +25,7 @@
 
 #define GVE_DEFAULT_RX_FREE_THRESH  512
 #define GVE_DEFAULT_TX_FREE_THRESH  256
+#define GVE_TX_MAX_FREE_SZ          512
 
 /* PTYPEs are always 10 bits. */
 #define GVE_NUM_PTYPES	1024
@@ -45,6 +46,18 @@ union gve_tx_desc {
 	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
 };
 
+/* Offload features */
+union gve_tx_offload {
+	uint64_t data;
+	struct {
+		uint64_t l2_len:7; /* L2 (MAC) Header Length. */
+		uint64_t l3_len:9; /* L3 (IP) Header Length. */
+		uint64_t l4_len:8; /* L4 Header Length. */
+		uint64_t tso_segsz:16; /* TCP TSO segment size */
+		/* uint64_t unused : 24; */
+	};
+};
+
 struct gve_tx_iovec {
 	uint32_t iov_base; /* offset in fifo */
 	uint32_t iov_len;
@@ -298,4 +311,8 @@ void gve_stop_tx_queues(struct rte_eth_dev *dev);
 
 void gve_stop_rx_queues(struct rte_eth_dev *dev);
 
+uint16_t gve_rx_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+
+uint16_t gve_tx_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+
 #endif /* _GVE_H_ */
diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index 5201398664..5ebe2c30ea 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -583,6 +583,11 @@ gve_dev_init(struct rte_eth_dev *eth_dev)
 	if (err)
 		return err;
 
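+	/* Rx/Tx burst callbacks are only provided for the GQI queue formats. */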
+	if (gve_is_gqi(priv)) {
+		eth_dev->rx_pkt_burst = gve_rx_burst;
+		eth_dev->tx_pkt_burst = gve_tx_burst;
+	}
+
 	eth_dev->data->mac_addrs = rte_zmalloc("gve_mac", sizeof(struct rte_ether_addr), 0);
 	if (!eth_dev->data->mac_addrs) {
 		PMD_INIT_LOG(ERR, "Failed to allocate memory to store mac address");
diff --git a/drivers/net/gve/gve_rx.c b/drivers/net/gve/gve_rx.c
index 7298b4cc86..8f560ae592 100644
--- a/drivers/net/gve/gve_rx.c
+++ b/drivers/net/gve/gve_rx.c
@@ -5,6 +5,149 @@
 #include "gve.h"
 #include "gve_adminq.h"
 
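+/*
+ * Replenish the Rx ring with freshly allocated mbufs, in at most two
+ * contiguous chunks when the free region wraps around the ring end. In
+ * QPL mode the device reads from pre-registered pages, so only the
+ * sw_ring is refilled; otherwise the new buffer addresses are also
+ * written to the data ring. The doorbell (qrx_tail) is rung if anything
+ * was posted.
+ */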
+static inline void
+gve_rx_refill(struct gve_rx_queue *rxq)
+{
+	uint16_t mask = rxq->nb_rx_desc - 1;
+	uint16_t idx = rxq->next_avail & mask;
+	uint32_t next_avail = rxq->next_avail;
+	uint16_t nb_alloc, i;
+	struct rte_mbuf *nmb;
+	int diag;
+
+	/* wrap around */
+	nb_alloc = rxq->nb_rx_desc - idx;
+	if (nb_alloc <= rxq->nb_avail) {
+		diag = rte_pktmbuf_alloc_bulk(rxq->mpool, &rxq->sw_ring[idx], nb_alloc);
+		if (diag < 0) {
+			for (i = 0; i < nb_alloc; i++) {
+				nmb = rte_pktmbuf_alloc(rxq->mpool);
+				if (!nmb)
+					break;
+				rxq->sw_ring[idx + i] = nmb;
+			}
+			if (i != nb_alloc)
+				nb_alloc = i;
+		}
+		rxq->nb_avail -= nb_alloc;
+		next_avail += nb_alloc;
+
+		/* QPL mode owns fixed pre-registered pages, so no addresses need re-posting. */
+		if (rxq->is_gqi_qpl) {
+			idx += nb_alloc;
+		} else {
+			for (i = 0; i < nb_alloc; i++) {
+				nmb = rxq->sw_ring[idx];
+				rxq->rx_data_ring[idx].addr =
+					rte_cpu_to_be_64(rte_mbuf_data_iova(nmb));
+				idx++;
+			}
+		}
+		if (idx == rxq->nb_rx_desc)
+			idx = 0;
+	}
+
+	if (rxq->nb_avail > 0) {
+		nb_alloc = rxq->nb_avail;
+		if (rxq->nb_rx_desc < idx + rxq->nb_avail)
+			nb_alloc = rxq->nb_rx_desc - idx;
+		diag = rte_pktmbuf_alloc_bulk(rxq->mpool, &rxq->sw_ring[idx], nb_alloc);
+		if (diag < 0) {
+			for (i = 0; i < nb_alloc; i++) {
+				nmb = rte_pktmbuf_alloc(rxq->mpool);
+				if (!nmb)
+					break;
+				rxq->sw_ring[idx + i] = nmb;
+			}
+			nb_alloc = i;
+		}
+		rxq->nb_avail -= nb_alloc;
+		next_avail += nb_alloc;
+
+		if (!rxq->is_gqi_qpl) {
+			for (i = 0; i < nb_alloc; i++) {
+				nmb = rxq->sw_ring[idx];
+				rxq->rx_data_ring[idx].addr =
+					rte_cpu_to_be_64(rte_mbuf_data_iova(nmb));
+				idx++;
+			}
+		}
+	}
+
+	if (next_avail != rxq->next_avail) {
+		rte_write32(rte_cpu_to_be_32(next_avail), rxq->qrx_tail);
+		rxq->next_avail = next_avail;
+	}
+}
+
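+/*
+ * Receive burst: walk completed descriptors in sequence-number order.
+ * QPL mode copies each packet out of the queue page into its mbuf;
+ * otherwise the packet was received directly into the posted mbuf.
+ * Consumed slots are refilled once free_thresh is exceeded.
+ */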
+uint16_t
+gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+	volatile struct gve_rx_desc *rxr, *rxd;
+	struct gve_rx_queue *rxq = rx_queue;
+	uint16_t rx_id = rxq->rx_tail;
+	struct rte_mbuf *rxe;
+	uint16_t nb_rx, len;
+	uint64_t addr;
+
+	rxr = rxq->rx_desc_ring;
+
+	for (nb_rx = 0; nb_rx < nb_pkts; nb_rx++) {
+		rxd = &rxr[rx_id];
+		if (GVE_SEQNO(rxd->flags_seq) != rxq->expected_seqno)
+			break;
+
+		/* Stop on an error descriptor so rx_pkts[] never gets an unfilled slot. */
+		if (rxd->flags_seq & GVE_RXF_ERR)
+			break;
+
+		len = rte_be_to_cpu_16(rxd->len) - GVE_RX_PAD;
+		rxe = rxq->sw_ring[rx_id];
+		rxe->data_off = RTE_PKTMBUF_HEADROOM;
+		if (rxq->is_gqi_qpl) {
+			addr = (uint64_t)(rxq->qpl->mz->addr) + rx_id * PAGE_SIZE + GVE_RX_PAD;
+			rte_memcpy(rte_pktmbuf_mtod(rxe, void *), (void *)addr, len);
+		}
+		rxe->nb_segs = 1;
+		rxe->next = NULL;
+		rxe->pkt_len = len;
+		rxe->data_len = len;
+		rxe->port = rxq->port_id;
+		rxe->packet_type = 0;
+		rxe->ol_flags = 0;
+
+		if (rxd->flags_seq & GVE_RXF_TCP)
+			rxe->packet_type |= RTE_PTYPE_L4_TCP;
+		if (rxd->flags_seq & GVE_RXF_UDP)
+			rxe->packet_type |= RTE_PTYPE_L4_UDP;
+		if (rxd->flags_seq & GVE_RXF_IPV4)
+			rxe->packet_type |= RTE_PTYPE_L3_IPV4;
+		if (rxd->flags_seq & GVE_RXF_IPV6)
+			rxe->packet_type |= RTE_PTYPE_L3_IPV6;
+
+		if (gve_needs_rss(rxd->flags_seq)) {
+			rxe->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
+			rxe->hash.rss = rte_be_to_cpu_32(rxd->rss_hash);
+		}
+
+		rxq->expected_seqno = gve_next_seqno(rxq->expected_seqno);
+
+		rx_id++;
+		if (rx_id == rxq->nb_rx_desc)
+			rx_id = 0;
+
+		rx_pkts[nb_rx] = rxe;
+	}
+
+	rxq->nb_avail += nb_rx;
+	rxq->rx_tail = rx_id;
+
+	if (rxq->nb_avail > rxq->free_thresh)
+		gve_rx_refill(rxq);
+
+	return nb_rx;
+}
+
 static inline void
 gve_reset_rxq(struct gve_rx_queue *rxq)
 {
diff --git a/drivers/net/gve/gve_tx.c b/drivers/net/gve/gve_tx.c
index 947c9d1627..2dc3411672 100644
--- a/drivers/net/gve/gve_tx.c
+++ b/drivers/net/gve/gve_tx.c
@@ -5,6 +5,458 @@
 #include "gve.h"
 #include "gve_adminq.h"
 
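+/*
+ * Bulk-free transmitted mbufs, batching the rte_mempool_put_bulk() calls
+ * so that consecutive mbufs from the same mempool are returned together.
+ */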
+static inline void
+gve_free_bulk_mbuf(struct rte_mbuf **txep, int num)
+{
+	struct rte_mbuf *m, *free[GVE_TX_MAX_FREE_SZ];
+	int nb_free = 0;
+	int i, s;
+
+	if (unlikely(num == 0))
+		return;
+
+	/* Find the first mbuf that needs to be freed. */
+	for (s = 0; s < num; s++) {
+		if (txep[s] != NULL) {
+			m = rte_pktmbuf_prefree_seg(txep[s]);
+			if (m != NULL)
+				break;
+		}
+	}
+
+	if (s == num)
+		return;
+
+	free[0] = m;
+	nb_free = 1;
+	for (i = s + 1; i < num; i++) {
+		if (likely(txep[i] != NULL)) {
+			m = rte_pktmbuf_prefree_seg(txep[i]);
+			if (likely(m != NULL)) {
+				if (likely(m->pool == free[0]->pool)) {
+					free[nb_free++] = m;
+				} else {
+					rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+					free[0] = m;
+					nb_free = 1;
+				}
+			}
+			txep[i] = NULL;
+		}
+	}
+	rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+}
+
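+/*
+ * Reclaim descriptors completed by the device: read the completion head
+ * (qtx_head) and clean everything between next_to_clean and that head.
+ * QPL mode returns the copied bytes to the FIFO via the iovec ring;
+ * otherwise the attached mbufs are freed directly.
+ */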
+static inline void
+gve_tx_clean(struct gve_tx_queue *txq)
+{
+	uint16_t mask = txq->nb_tx_desc - 1;
+	uint32_t start = txq->next_to_clean & mask;
+	uint32_t ntc, nb_clean, i;
+	struct gve_tx_iovec *iov;
+
+	ntc = rte_be_to_cpu_32(rte_read32(txq->qtx_head));
+	ntc = ntc & mask;
+
+	if (ntc == start)
+		return;
+
+	/* If the range wraps past the ring end, clean in two chunks. */
+	if (ntc < start) {
+		nb_clean = txq->nb_tx_desc - start;
+		if (nb_clean > GVE_TX_MAX_FREE_SZ)
+			nb_clean = GVE_TX_MAX_FREE_SZ;
+		if (txq->is_gqi_qpl) {
+			for (i = start; i < start + nb_clean; i++) {
+				iov = &txq->iov_ring[i];
+				txq->fifo_avail += iov->iov_len;
+				iov->iov_base = 0;
+				iov->iov_len = 0;
+			}
+		} else {
+			gve_free_bulk_mbuf(&txq->sw_ring[start], nb_clean);
+		}
+		txq->nb_free += nb_clean;
+		start += nb_clean;
+		if (start == txq->nb_tx_desc)
+			start = 0;
+		txq->next_to_clean += nb_clean;
+	}
+
+	if (ntc > start) {
+		nb_clean = ntc - start;
+		if (nb_clean > GVE_TX_MAX_FREE_SZ)
+			nb_clean = GVE_TX_MAX_FREE_SZ;
+		if (txq->is_gqi_qpl) {
+			for (i = start; i < start + nb_clean; i++) {
+				iov = &txq->iov_ring[i];
+				txq->fifo_avail += iov->iov_len;
+				iov->iov_base = 0;
+				iov->iov_len = 0;
+			}
+		} else {
+			gve_free_bulk_mbuf(&txq->sw_ring[start], nb_clean);
+		}
+		txq->nb_free += nb_clean;
+		txq->next_to_clean += nb_clean;
+	}
+}
+
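+/*
+ * QPL mode tracks mbufs in a separate software ring (their data has
+ * already been copied into the FIFO); free them in up to two chunks.
+ */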
+static inline void
+gve_tx_clean_swr_qpl(struct gve_tx_queue *txq)
+{
+	uint32_t start = txq->sw_ntc;
+	uint32_t ntc, nb_clean;
+
+	ntc = txq->sw_tail;
+
+	if (ntc == start)
+		return;
+
+	/* If the range wraps past the ring end, clean in two chunks. */
+	if (ntc < start) {
+		nb_clean = txq->nb_tx_desc - start;
+		if (nb_clean > GVE_TX_MAX_FREE_SZ)
+			nb_clean = GVE_TX_MAX_FREE_SZ;
+		gve_free_bulk_mbuf(&txq->sw_ring[start], nb_clean);
+
+		txq->sw_nb_free += nb_clean;
+		start += nb_clean;
+		if (start == txq->nb_tx_desc)
+			start = 0;
+		txq->sw_ntc = start;
+	}
+
+	if (ntc > start) {
+		nb_clean = ntc - start;
+		if (nb_clean > GVE_TX_MAX_FREE_SZ)
+			nb_clean = GVE_TX_MAX_FREE_SZ;
+		gve_free_bulk_mbuf(&txq->sw_ring[start], nb_clean);
+		txq->sw_nb_free += nb_clean;
+		start += nb_clean;
+		txq->sw_ntc = start;
+	}
+}
+
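+/*
+ * Fill the first (packet) descriptor of a packet. Checksum and header
+ * offsets are expressed in 16-bit words, hence the ">> 1" below; TSO
+ * always implies an L4 checksum request.
+ */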
+static inline void
+gve_tx_fill_pkt_desc(volatile union gve_tx_desc *desc, struct rte_mbuf *mbuf,
+		     uint8_t desc_cnt, uint16_t len, uint64_t addr)
+{
+	uint64_t csum_l4 = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
+	uint8_t l4_csum_offset = 0;
+	uint8_t l4_hdr_offset = 0;
+
+	if (mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
+		csum_l4 |= RTE_MBUF_F_TX_TCP_CKSUM;
+
+	switch (csum_l4) {
+	case RTE_MBUF_F_TX_TCP_CKSUM:
+		l4_csum_offset = offsetof(struct rte_tcp_hdr, cksum);
+		l4_hdr_offset = mbuf->l2_len + mbuf->l3_len;
+		break;
+	case RTE_MBUF_F_TX_UDP_CKSUM:
+		l4_csum_offset = offsetof(struct rte_udp_hdr, dgram_cksum);
+		l4_hdr_offset = mbuf->l2_len + mbuf->l3_len;
+		break;
+	case RTE_MBUF_F_TX_SCTP_CKSUM:
+		l4_csum_offset = offsetof(struct rte_sctp_hdr, cksum);
+		l4_hdr_offset = mbuf->l2_len + mbuf->l3_len;
+		break;
+	}
+
+	if (mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
+		desc->pkt.type_flags = GVE_TXD_TSO | GVE_TXF_L4CSUM;
+		desc->pkt.l4_csum_offset = l4_csum_offset >> 1;
+		desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1;
+	} else if (mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+		desc->pkt.type_flags = GVE_TXD_STD | GVE_TXF_L4CSUM;
+		desc->pkt.l4_csum_offset = l4_csum_offset >> 1;
+		desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1;
+	} else {
+		desc->pkt.type_flags = GVE_TXD_STD;
+		desc->pkt.l4_csum_offset = 0;
+		desc->pkt.l4_hdr_offset = 0;
+	}
+	desc->pkt.desc_cnt = desc_cnt;
+	desc->pkt.len = rte_cpu_to_be_16(mbuf->pkt_len);
+	desc->pkt.seg_len = rte_cpu_to_be_16(len);
+	desc->pkt.seg_addr = rte_cpu_to_be_64(addr);
+}
+
+static inline void
+gve_tx_fill_seg_desc(volatile union gve_tx_desc *desc, uint64_t ol_flags,
+		      union gve_tx_offload tx_offload,
+		      uint16_t len, uint64_t addr)
+{
+	desc->seg.type_flags = GVE_TXD_SEG;
+	if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
+		if (ol_flags & RTE_MBUF_F_TX_IPV6)
+			desc->seg.type_flags |= GVE_TXSF_IPV6;
+		desc->seg.l3_offset = tx_offload.l2_len >> 1;
+		desc->seg.mss = rte_cpu_to_be_16(tx_offload.tso_segsz);
+	}
+	desc->seg.seg_len = rte_cpu_to_be_16(len);
+	desc->seg.seg_addr = rte_cpu_to_be_64(addr);
+}
+
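+/*
+ * QPL Tx copies packet data into a bounce-buffer FIFO carved out of the
+ * queue page list. A segment is never split across the FIFO wrap, so a
+ * request near the end of the FIFO may also consume the unused tail.
+ */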
+static inline bool
+is_fifo_avail(struct gve_tx_queue *txq, uint16_t len)
+{
+	if (txq->fifo_avail < len)
+		return false;
+	/* Don't split segment. */
+	if (txq->fifo_head + len > txq->fifo_size &&
+	    txq->fifo_size - txq->fifo_head + len > txq->fifo_avail)
+		return false;
+	return true;
+}
+
+static inline uint64_t
+gve_tx_alloc_from_fifo(struct gve_tx_queue *txq, uint16_t tx_id, uint16_t len)
+{
+	uint32_t head = txq->fifo_head;
+	uint32_t size = txq->fifo_size;
+	struct gve_tx_iovec *iov;
+	uint32_t aligned_head;
+	uint32_t iov_len = 0;
+	uint64_t fifo_addr;
+
+	iov = &txq->iov_ring[tx_id];
+
+	/* Don't split segment */
+	if (head + len > size) {
+		iov_len += (size - head);
+		head = 0;
+	}
+
+	fifo_addr = head;
+	iov_len += len;
+	iov->iov_base = head;
+
+	/* Re-align to a cacheline for next head */
+	head += len;
+	aligned_head = RTE_ALIGN(head, RTE_CACHE_LINE_SIZE);
+	iov_len += (aligned_head - head);
+	iov->iov_len = iov_len;
+
+	if (aligned_head == txq->fifo_size)
+		aligned_head = 0;
+	txq->fifo_head = aligned_head;
+	txq->fifo_avail -= iov_len;
+
+	return fifo_addr;
+}
+
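+/*
+ * Tx burst for the GQI_QPL format: packet data is copied into the FIFO
+ * (header and payload separately for TSO) and the descriptors carry
+ * FIFO offsets instead of mbuf addresses.
+ */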
+static inline uint16_t
+gve_tx_burst_qpl(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	union gve_tx_offload tx_offload = {0};
+	volatile union gve_tx_desc *txr, *txd;
+	struct gve_tx_queue *txq = tx_queue;
+	struct rte_mbuf **sw_ring = txq->sw_ring;
+	uint16_t mask = txq->nb_tx_desc - 1;
+	uint16_t tx_id = txq->tx_tail & mask;
+	uint64_t ol_flags, addr, fifo_addr;
+	uint32_t tx_tail = txq->tx_tail;
+	struct rte_mbuf *tx_pkt, *first;
+	uint16_t sw_id = txq->sw_tail;
+	uint16_t nb_used, i;
+	uint16_t nb_tx = 0;
+	uint32_t hlen;
+
+	txr = txq->tx_desc_ring;
+
+	if (txq->nb_free < txq->free_thresh || txq->fifo_avail == 0)
+		gve_tx_clean(txq);
+
+	if (txq->sw_nb_free < txq->free_thresh)
+		gve_tx_clean_swr_qpl(txq);
+
+	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+		tx_pkt = *tx_pkts++;
+		ol_flags = tx_pkt->ol_flags;
+
+		if (txq->sw_nb_free < tx_pkt->nb_segs) {
+			gve_tx_clean_swr_qpl(txq);
+			if (txq->sw_nb_free < tx_pkt->nb_segs)
+				goto end_of_tx;
+		}
+
+		/* Even a multi-segment packet uses a single QPL data buffer. */
+		nb_used = 1;
+		if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
+			nb_used++;
+
+		if (txq->nb_free < nb_used)
+			goto end_of_tx;
+
+		tx_offload.l2_len = tx_pkt->l2_len;
+		tx_offload.l3_len = tx_pkt->l3_len;
+		tx_offload.l4_len = tx_pkt->l4_len;
+		tx_offload.tso_segsz = tx_pkt->tso_segsz;
+
+		first = tx_pkt;
+		txd = &txr[tx_id];
+		hlen = ol_flags & RTE_MBUF_F_TX_TCP_SEG ?
+			(uint32_t)(tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len) :
+			tx_pkt->pkt_len;
+
+		sw_ring[sw_id] = tx_pkt;
+		if (!is_fifo_avail(txq, hlen)) {
+			gve_tx_clean(txq);
+			if (!is_fifo_avail(txq, hlen))
+				goto end_of_tx;
+		}
+		addr = (uint64_t)(tx_pkt->buf_addr) + tx_pkt->data_off;
+		fifo_addr = gve_tx_alloc_from_fifo(txq, tx_id, hlen);
+
+		/* For TSO, ensure the payload also fits in the FIFO before copying. */
+		if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
+			if (!is_fifo_avail(txq, tx_pkt->pkt_len - hlen)) {
+				gve_tx_clean(txq);
+				if (!is_fifo_avail(txq, tx_pkt->pkt_len - hlen))
+					goto end_of_tx;
+			}
+		}
+		if (tx_pkt->nb_segs == 1 || ol_flags & RTE_MBUF_F_TX_TCP_SEG)
+			rte_memcpy((void *)(fifo_addr + txq->fifo_base), (void *)addr, hlen);
+		else
+			rte_pktmbuf_read(tx_pkt, 0, hlen, (void *)(fifo_addr + txq->fifo_base));
+		gve_tx_fill_pkt_desc(txd, tx_pkt, nb_used, hlen, fifo_addr);
+
+		if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
+			tx_id = (tx_id + 1) & mask;
+			txd = &txr[tx_id];
+			addr = (uint64_t)(tx_pkt->buf_addr) + tx_pkt->data_off + hlen;
+			fifo_addr = gve_tx_alloc_from_fifo(txq, tx_id, tx_pkt->pkt_len - hlen);
+			if (tx_pkt->nb_segs == 1)
+				rte_memcpy((void *)(fifo_addr + txq->fifo_base), (void *)addr,
+					   tx_pkt->pkt_len - hlen);
+			else
+				rte_pktmbuf_read(tx_pkt, hlen, tx_pkt->pkt_len - hlen,
+						 (void *)(fifo_addr + txq->fifo_base));
+
+			gve_tx_fill_seg_desc(txd, ol_flags, tx_offload,
+					     tx_pkt->pkt_len - hlen, fifo_addr);
+		}
+
+		/* Record the remaining segments in sw_ring so they can be freed later. */
+		for (i = 1; i < first->nb_segs; i++) {
+			sw_id = (sw_id + 1) & mask;
+			tx_pkt = tx_pkt->next;
+			sw_ring[sw_id] = tx_pkt;
+		}
+
+		sw_id = (sw_id + 1) & mask;
+		tx_id = (tx_id + 1) & mask;
+
+		txq->nb_free -= nb_used;
+		txq->sw_nb_free -= first->nb_segs;
+		tx_tail += nb_used;
+	}
+
+end_of_tx:
+	if (nb_tx) {
+		rte_write32(rte_cpu_to_be_32(tx_tail), txq->qtx_tail);
+		txq->tx_tail = tx_tail;
+		txq->sw_tail = sw_id;
+	}
+
+	return nb_tx;
+}
+
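+/*
+ * Tx burst for the GQI_RDA format: no copy, each descriptor carries the
+ * IOVA of an mbuf segment directly; TSO uses one extra descriptor.
+ */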
+static inline uint16_t
+gve_tx_burst_ra(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	union gve_tx_offload tx_offload = {0};
+	volatile union gve_tx_desc *txr, *txd;
+	struct gve_tx_queue *txq = tx_queue;
+	struct rte_mbuf **sw_ring = txq->sw_ring;
+	uint16_t mask = txq->nb_tx_desc - 1;
+	uint16_t tx_id = txq->tx_tail & mask;
+	uint32_t tx_tail = txq->tx_tail;
+	struct rte_mbuf *tx_pkt, *first;
+	uint16_t nb_used, hlen, i;
+	uint64_t ol_flags, addr;
+	uint16_t nb_tx = 0;
+
+	txr = txq->tx_desc_ring;
+
+	if (txq->nb_free < txq->free_thresh)
+		gve_tx_clean(txq);
+
+	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+		tx_pkt = *tx_pkts++;
+		ol_flags = tx_pkt->ol_flags;
+
+		nb_used = tx_pkt->nb_segs;
+		if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
+			nb_used++;
+
+		if (txq->nb_free < nb_used)
+			goto end_of_tx;
+
+		tx_offload.l2_len = tx_pkt->l2_len;
+		tx_offload.l3_len = tx_pkt->l3_len;
+		tx_offload.l4_len = tx_pkt->l4_len;
+		tx_offload.tso_segsz = tx_pkt->tso_segsz;
+
+		first = tx_pkt;
+		txd = &txr[tx_id];
+
+		hlen = ol_flags & RTE_MBUF_F_TX_TCP_SEG ?
+			(uint32_t)(tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len) :
+			tx_pkt->pkt_len;
+		/*
+		 * For TSO the driver fills two descriptors for the first mbuf,
+		 * so the mbuf is stored only in the first corresponding
+		 * sw_ring entry.
+		 */
+		sw_ring[tx_id] = tx_pkt;
+		addr = rte_mbuf_data_iova(tx_pkt);
+		gve_tx_fill_pkt_desc(txd, tx_pkt, nb_used, hlen, addr);
+
+		if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
+			tx_id = (tx_id + 1) & mask;
+			txd = &txr[tx_id];
+			addr = rte_mbuf_data_iova(tx_pkt) + hlen;
+			gve_tx_fill_seg_desc(txd, ol_flags, tx_offload,
+					     tx_pkt->data_len - hlen, addr);
+		}
+
+		for (i = 1; i < first->nb_segs; i++) {
+			tx_id = (tx_id + 1) & mask;
+			txd = &txr[tx_id];
+			tx_pkt = tx_pkt->next;
+			sw_ring[tx_id] = tx_pkt;
+			addr = rte_mbuf_data_iova(tx_pkt);
+			gve_tx_fill_seg_desc(txd, ol_flags, tx_offload,
+					     tx_pkt->data_len, addr);
+		}
+		tx_id = (tx_id + 1) & mask;
+
+		txq->nb_free -= nb_used;
+		tx_tail += nb_used;
+	}
+
+end_of_tx:
+	if (nb_tx) {
+		rte_write32(rte_cpu_to_be_32(tx_tail), txq->qtx_tail);
+		txq->tx_tail = tx_tail;
+	}
+
+	return nb_tx;
+}
+
+uint16_t
+gve_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	struct gve_tx_queue *txq = tx_queue;
+
+	if (txq->is_gqi_qpl)
+		return gve_tx_burst_qpl(tx_queue, tx_pkts, nb_pkts);
+
+	return gve_tx_burst_ra(tx_queue, tx_pkts, nb_pkts);
+}
+
 static inline void
 gve_reset_txq(struct gve_tx_queue *txq)
 {
-- 
2.34.1

