netdev.vger.kernel.org archive mirror
* [PATCH net-next 00/11] bnxt: Support XDP multi buffer
@ 2022-03-20  6:57 Michael Chan
  2022-03-20  6:57 ` [PATCH net-next 01/11] bnxt: refactor bnxt_rx_xdp to separate xdp_init_buff/xdp_prepare_buff Michael Chan
                   ` (10 more replies)
  0 siblings, 11 replies; 14+ messages in thread
From: Michael Chan @ 2022-03-20  6:57 UTC (permalink / raw)
  To: davem; +Cc: netdev, kuba, gospo


This series adds XDP multi-buffer support, allowing the MTU to go
beyond the page size limit.

Dave, please don't apply these patches too quickly so that others can
review them first.  Thanks.
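
For context, a multi-buffer aware XDP program is expected to look
roughly like the sketch below.  The "xdp.frags" section name and the
bpf_xdp_get_buff_len() helper come from the generic XDP multi-buffer
work and are assumptions about the loader environment, not something
added by this series:

  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  /* minimal sketch of a frags-aware XDP program */
  SEC("xdp.frags")	/* ask libbpf to load with BPF_F_XDP_HAS_FRAGS */
  int xdp_mb_prog(struct xdp_md *ctx)
  {
	/* length of the whole packet, including frags in the agg ring */
	__u64 len = bpf_xdp_get_buff_len(ctx);

	return len ? XDP_PASS : XDP_DROP;
  }

  char _license[] SEC("license") = "GPL";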

Andy Gospodarek (11):
  bnxt: refactor bnxt_rx_xdp to separate xdp_init_buff/xdp_prepare_buff
  bnxt: add flag to denote that an xdp program is currently attached
  bnxt: refactor bnxt_rx_pages operate on skb_shared_info
  bnxt: rename bnxt_rx_pages to bnxt_rx_agg_pages_skb
  bnxt: adding bnxt_rx_agg_pages_xdp for aggregated xdp
  bnxt: set xdp_buff pfmemalloc flag if needed
  bnxt: change receive ring space parameters
  bnxt: add page_pool support for aggregation ring when using xdp
  bnxt: adding bnxt_xdp_build_skb to build skb from multibuffer xdp_buff
  bnxt: support transmit and free of aggregation buffers
  bnxt: XDP multibuffer enablement

 drivers/net/ethernet/broadcom/bnxt/bnxt.c     | 304 +++++++++++++-----
 drivers/net/ethernet/broadcom/bnxt/bnxt.h     |   8 +-
 .../net/ethernet/broadcom/bnxt/bnxt_ethtool.c |   2 +-
 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 205 ++++++++++--
 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h |  15 +-
 5 files changed, 421 insertions(+), 113 deletions(-)

-- 
2.18.1



* [PATCH net-next 01/11] bnxt: refactor bnxt_rx_xdp to separate xdp_init_buff/xdp_prepare_buff
  2022-03-20  6:57 [PATCH net-next 00/11] bnxt: Support XDP multi buffer Michael Chan
@ 2022-03-20  6:57 ` Michael Chan
  2022-03-20  9:47   ` kernel test robot
  2022-03-20  6:57 ` [PATCH net-next 02/11] bnxt: add flag to denote that an xdp program is currently attached Michael Chan
                   ` (9 subsequent siblings)
  10 siblings, 1 reply; 14+ messages in thread
From: Michael Chan @ 2022-03-20  6:57 UTC (permalink / raw)
  To: davem; +Cc: netdev, kuba, gospo


From: Andy Gospodarek <gospo@broadcom.com>

Move initialization of xdp_buff outside of bnxt_rx_xdp to prepare
for allowing bnxt_rx_xdp to operate on multibuffer xdp_buffs.
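
For reference, the two helpers now wrapped by bnxt_xdp_buff_init() are
the generic ones from include/net/xdp.h; a rough sketch of how they are
used here (the values come from the ring configuration and are shown
only to illustrate the arguments):

	struct xdp_buff xdp;

	/* frame_sz is the full buffer size; PAGE_SIZE in bnxt page mode */
	xdp_init_buff(&xdp, PAGE_SIZE, &rxr->xdp_rxq);

	/* hard_start points at the start of the buffer, headroom is
	 * bp->rx_offset and data_len is the received length
	 */
	xdp_prepare_buff(&xdp, data_ptr - offset, offset, len, false);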

Signed-off-by: Andy Gospodarek <gospo@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c     | 11 +++--
 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 44 ++++++++++++++-----
 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h |  7 ++-
 3 files changed, 46 insertions(+), 16 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 92a1a43b3bee..21d5c76b1e70 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1731,6 +1731,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 	u8 *data_ptr, agg_bufs, cmp_type;
 	dma_addr_t dma_addr;
 	struct sk_buff *skb;
+	struct xdp_buff xdp;
 	u32 flags, misc;
 	void *data;
 	int rc = 0;
@@ -1839,11 +1840,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 	len = flags >> RX_CMP_LEN_SHIFT;
 	dma_addr = rx_buf->mapping;
 
-	if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
-		rc = 1;
-		goto next_rx;
+	if (bnxt_xdp_attached(bp, rxr)) {
+		bnxt_xdp_buff_init(bp, rxr, cons, &data_ptr, &len, &xdp);
+		if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &len, event)) {
+			rc = 1;
+			goto next_rx;
+		}
 	}
-
 	if (len <= bp->rx_copy_thresh) {
 		skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
 		bnxt_reuse_rx_data(rxr, cons, data);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 52fad0fdeacf..5073f97722f1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -104,18 +104,44 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
 	}
 }
 
+bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+	struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
+
+	return !!xdp_prog;
+}
+
+void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+			u16 cons, u8 **data_ptr, unsigned int *len,
+			struct xdp_buff *xdp)
+{
+	struct bnxt_sw_rx_bd *rx_buf;
+	struct pci_dev *pdev;
+	dma_addr_t mapping;
+	u32 offset;
+
+	pdev = bp->pdev;
+	rx_buf = &rxr->rx_buf_ring[cons];
+	offset = bp->rx_offset;
+
+	mapping = rx_buf->mapping - bp->rx_dma_offset;
+	dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
+
+	xdp_init_buff(xdp, PAGE_SIZE, &rxr->xdp_rxq);
+	xdp_prepare_buff(xdp, *data_ptr - offset, offset, *len, false);
+}
+
 /* returns the following:
  * true    - packet consumed by XDP and new buffer is allocated.
  * false   - packet should be passed to the stack.
  */
 bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
-		 struct page *page, u8 **data_ptr, unsigned int *len, u8 *event)
+		 struct xdp_buff xdp, struct page *page, unsigned int *len, u8 *event)
 {
 	struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
 	struct bnxt_tx_ring_info *txr;
 	struct bnxt_sw_rx_bd *rx_buf;
 	struct pci_dev *pdev;
-	struct xdp_buff xdp;
 	dma_addr_t mapping;
 	void *orig_data;
 	u32 tx_avail;
@@ -126,16 +152,10 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
 		return false;
 
 	pdev = bp->pdev;
-	rx_buf = &rxr->rx_buf_ring[cons];
 	offset = bp->rx_offset;
 
-	mapping = rx_buf->mapping - bp->rx_dma_offset;
-	dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
-
 	txr = rxr->bnapi->tx_ring;
 	/* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
-	xdp_init_buff(&xdp, PAGE_SIZE, &rxr->xdp_rxq);
-	xdp_prepare_buff(&xdp, *data_ptr - offset, offset, *len, false);
 	orig_data = xdp.data;
 
 	act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -148,15 +168,17 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
 		*event &= ~BNXT_RX_EVENT;
 
 	*len = xdp.data_end - xdp.data;
-	if (orig_data != xdp.data) {
+	if (orig_data != xdp.data)
 		offset = xdp.data - xdp.data_hard_start;
-		*data_ptr = xdp.data_hard_start + offset;
-	}
+
 	switch (act) {
 	case XDP_PASS:
 		return false;
 
 	case XDP_TX:
+		rx_buf = &rxr->rx_buf_ring[cons];
+		mapping = rx_buf->mapping - bp->rx_dma_offset;
+
 		if (tx_avail < 1) {
 			trace_xdp_exception(bp->dev, xdp_prog, act);
 			bnxt_reuse_rx_data(rxr, cons, page);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
index 0df40c3beb05..39690bdb5526 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -15,10 +15,15 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
 				   dma_addr_t mapping, u32 len);
 void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts);
 bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
-		 struct page *page, u8 **data_ptr, unsigned int *len,
+		 struct xdp_buff xdp, struct page *page, unsigned int *len,
 		 u8 *event);
 int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp);
 int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
 		  struct xdp_frame **frames, u32 flags);
 
+bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr);
+
+void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+			u16 cons, u8 **data_ptr, unsigned int *len,
+			struct xdp_buff *xdp);
 #endif
-- 
2.18.1



* [PATCH net-next 02/11] bnxt: add flag to denote that an xdp program is currently attached
  2022-03-20  6:57 [PATCH net-next 00/11] bnxt: Support XDP multi buffer Michael Chan
  2022-03-20  6:57 ` [PATCH net-next 01/11] bnxt: refactor bnxt_rx_xdp to separate xdp_init_buff/xdp_prepare_buff Michael Chan
@ 2022-03-20  6:57 ` Michael Chan
  2022-03-20  6:57 ` [PATCH net-next 03/11] bnxt: refactor bnxt_rx_pages operate on skb_shared_info Michael Chan
                   ` (8 subsequent siblings)
  10 siblings, 0 replies; 14+ messages in thread
From: Michael Chan @ 2022-03-20  6:57 UTC (permalink / raw)
  To: davem; +Cc: netdev, kuba, gospo


From: Andy Gospodarek <gospo@broadcom.com>

This flag will be used to determine whether bnxt_rx_xdp() should be
called, rather than calling it unconditionally.

Signed-off-by: Andy Gospodarek <gospo@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 21d5c76b1e70..b7d7ee775fdc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1729,6 +1729,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 	struct bnxt_sw_rx_bd *rx_buf;
 	unsigned int len;
 	u8 *data_ptr, agg_bufs, cmp_type;
+	bool xdp_active = false;
 	dma_addr_t dma_addr;
 	struct sk_buff *skb;
 	struct xdp_buff xdp;
@@ -1842,11 +1843,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 
 	if (bnxt_xdp_attached(bp, rxr)) {
 		bnxt_xdp_buff_init(bp, rxr, cons, &data_ptr, &len, &xdp);
+		xdp_active = true;
+	}
+
+	/* skip running XDP prog if there are aggregation bufs */
+	if (!agg_bufs && xdp_active) {
 		if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &len, event)) {
 			rc = 1;
 			goto next_rx;
 		}
 	}
+
 	if (len <= bp->rx_copy_thresh) {
 		skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
 		bnxt_reuse_rx_data(rxr, cons, data);
-- 
2.18.1



* [PATCH net-next 03/11] bnxt: refactor bnxt_rx_pages operate on skb_shared_info
  2022-03-20  6:57 [PATCH net-next 00/11] bnxt: Support XDP multi buffer Michael Chan
  2022-03-20  6:57 ` [PATCH net-next 01/11] bnxt: refactor bnxt_rx_xdp to separate xdp_init_buff/xdp_prepare_buff Michael Chan
  2022-03-20  6:57 ` [PATCH net-next 02/11] bnxt: add flag to denote that an xdp program is currently attached Michael Chan
@ 2022-03-20  6:57 ` Michael Chan
  2022-03-20  6:57 ` [PATCH net-next 04/11] bnxt: rename bnxt_rx_pages to bnxt_rx_agg_pages_skb Michael Chan
                   ` (7 subsequent siblings)
  10 siblings, 0 replies; 14+ messages in thread
From: Michael Chan @ 2022-03-20  6:57 UTC (permalink / raw)
  To: davem; +Cc: netdev, kuba, gospo


From: Andy Gospodarek <gospo@broadcom.com>

Rather than operating on an sk_buff, add frags from the aggregation
ring into the frags of an skb_shared_info.  This will allow the
caller to use either an sk_buff or xdp_buff.
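
Both callers end up handing the ring walker the same skb_shared_info;
a hypothetical helper (illustration only, not part of this patch)
makes that explicit:

	static struct skb_shared_info *agg_frag_target(struct sk_buff *skb,
						       struct xdp_buff *xdp)
	{
		return skb ? skb_shinfo(skb) :
			     xdp_get_shared_info_from_buff(xdp);
	}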

Signed-off-by: Andy Gospodarek <gospo@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 50 +++++++++++++++--------
 1 file changed, 33 insertions(+), 17 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index b7d7ee775fdc..ba01a353bb3f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1038,22 +1038,23 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
 	return skb;
 }
 
-static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
-				     struct bnxt_cp_ring_info *cpr,
-				     struct sk_buff *skb, u16 idx,
-				     u32 agg_bufs, bool tpa)
+static u32 __bnxt_rx_pages(struct bnxt *bp,
+			   struct bnxt_cp_ring_info *cpr,
+			   struct skb_shared_info *shinfo,
+			   u16 idx, u32 agg_bufs, bool tpa)
 {
 	struct bnxt_napi *bnapi = cpr->bnapi;
 	struct pci_dev *pdev = bp->pdev;
 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
 	u16 prod = rxr->rx_agg_prod;
+	u32 i, total_frag_len = 0;
 	bool p5_tpa = false;
-	u32 i;
 
 	if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
 		p5_tpa = true;
 
 	for (i = 0; i < agg_bufs; i++) {
+		skb_frag_t *frag = &shinfo->frags[i];
 		u16 cons, frag_len;
 		struct rx_agg_cmp *agg;
 		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
@@ -1069,8 +1070,10 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
 			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
 
 		cons_rx_buf = &rxr->rx_agg_ring[cons];
-		skb_fill_page_desc(skb, i, cons_rx_buf->page,
-				   cons_rx_buf->offset, frag_len);
+		skb_frag_off_set(frag, cons_rx_buf->offset);
+		skb_frag_size_set(frag, frag_len);
+		__skb_frag_set_page(frag, cons_rx_buf->page);
+		shinfo->nr_frags = i + 1;
 		__clear_bit(cons, rxr->rx_agg_bmap);
 
 		/* It is possible for bnxt_alloc_rx_page() to allocate
@@ -1082,15 +1085,10 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
 		cons_rx_buf->page = NULL;
 
 		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
-			struct skb_shared_info *shinfo;
 			unsigned int nr_frags;
 
-			shinfo = skb_shinfo(skb);
 			nr_frags = --shinfo->nr_frags;
 			__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
-
-			dev_kfree_skb(skb);
-
 			cons_rx_buf->page = page;
 
 			/* Update prod since possibly some pages have been
@@ -1098,20 +1096,38 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
 			 */
 			rxr->rx_agg_prod = prod;
 			bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
-			return NULL;
+			return 0;
 		}
 
 		dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
 				     DMA_FROM_DEVICE,
 				     DMA_ATTR_WEAK_ORDERING);
 
-		skb->data_len += frag_len;
-		skb->len += frag_len;
-		skb->truesize += PAGE_SIZE;
-
+		total_frag_len += frag_len;
 		prod = NEXT_RX_AGG(prod);
 	}
 	rxr->rx_agg_prod = prod;
+	return total_frag_len;
+}
+
+static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
+				     struct bnxt_cp_ring_info *cpr,
+				     struct sk_buff *skb, u16 idx,
+				     u32 agg_bufs, bool tpa)
+{
+	struct skb_shared_info *shinfo = skb_shinfo(skb);
+	u32 total_frag_len = 0;
+
+	total_frag_len = __bnxt_rx_pages(bp, cpr, shinfo, idx, agg_bufs, tpa);
+
+	if (!total_frag_len) {
+		dev_kfree_skb(skb);
+		return NULL;
+	}
+
+	skb->data_len += total_frag_len;
+	skb->len += total_frag_len;
+	skb->truesize += PAGE_SIZE * agg_bufs;
 	return skb;
 }
 
-- 
2.18.1



* [PATCH net-next 04/11] bnxt: rename bnxt_rx_pages to bnxt_rx_agg_pages_skb
  2022-03-20  6:57 [PATCH net-next 00/11] bnxt: Support XDP multi buffer Michael Chan
                   ` (2 preceding siblings ...)
  2022-03-20  6:57 ` [PATCH net-next 03/11] bnxt: refactor bnxt_rx_pages operate on skb_shared_info Michael Chan
@ 2022-03-20  6:57 ` Michael Chan
  2022-03-20  6:57 ` [PATCH net-next 05/11] bnxt: adding bnxt_rx_agg_pages_xdp for aggregated xdp Michael Chan
                   ` (6 subsequent siblings)
  10 siblings, 0 replies; 14+ messages in thread
From: Michael Chan @ 2022-03-20  6:57 UTC (permalink / raw)
  To: davem; +Cc: netdev, kuba, gospo


From: Andy Gospodarek <gospo@broadcom.com>

Clarify that this is reading buffers from the aggregation ring.

Signed-off-by: Andy Gospodarek <gospo@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index ba01a353bb3f..3324d0070667 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1038,10 +1038,10 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
 	return skb;
 }
 
-static u32 __bnxt_rx_pages(struct bnxt *bp,
-			   struct bnxt_cp_ring_info *cpr,
-			   struct skb_shared_info *shinfo,
-			   u16 idx, u32 agg_bufs, bool tpa)
+static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
+			       struct bnxt_cp_ring_info *cpr,
+			       struct skb_shared_info *shinfo,
+			       u16 idx, u32 agg_bufs, bool tpa)
 {
 	struct bnxt_napi *bnapi = cpr->bnapi;
 	struct pci_dev *pdev = bp->pdev;
@@ -1110,15 +1110,15 @@ static u32 __bnxt_rx_pages(struct bnxt *bp,
 	return total_frag_len;
 }
 
-static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
-				     struct bnxt_cp_ring_info *cpr,
-				     struct sk_buff *skb, u16 idx,
-				     u32 agg_bufs, bool tpa)
+static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
+					     struct bnxt_cp_ring_info *cpr,
+					     struct sk_buff *skb, u16 idx,
+					     u32 agg_bufs, bool tpa)
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	u32 total_frag_len = 0;
 
-	total_frag_len = __bnxt_rx_pages(bp, cpr, shinfo, idx, agg_bufs, tpa);
+	total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx, agg_bufs, tpa);
 
 	if (!total_frag_len) {
 		dev_kfree_skb(skb);
@@ -1660,7 +1660,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
 	}
 
 	if (agg_bufs) {
-		skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
+		skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
 		if (!skb) {
 			/* Page reuse already handled by bnxt_rx_pages(). */
 			cpr->sw_stats.rx.rx_oom_discards += 1;
@@ -1898,7 +1898,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 	}
 
 	if (agg_bufs) {
-		skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
+		skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
 		if (!skb) {
 			cpr->sw_stats.rx.rx_oom_discards += 1;
 			rc = -ENOMEM;
-- 
2.18.1



* [PATCH net-next 05/11] bnxt: adding bnxt_rx_agg_pages_xdp for aggregated xdp
  2022-03-20  6:57 [PATCH net-next 00/11] bnxt: Support XDP multi buffer Michael Chan
                   ` (3 preceding siblings ...)
  2022-03-20  6:57 ` [PATCH net-next 04/11] bnxt: rename bnxt_rx_pages to bnxt_rx_agg_pages_skb Michael Chan
@ 2022-03-20  6:57 ` Michael Chan
  2022-03-20  6:57 ` [PATCH net-next 06/11] bnxt: set xdp_buff pfmemalloc flag if needed Michael Chan
                   ` (5 subsequent siblings)
  10 siblings, 0 replies; 14+ messages in thread
From: Michael Chan @ 2022-03-20  6:57 UTC (permalink / raw)
  To: davem; +Cc: netdev, kuba, gospo


From: Andy Gospodarek <gospo@broadcom.com>

This patch adds a new function that will read pages from the
aggregation ring and create an xdp_buff with frags based on
the entries in the aggregation ring.
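
Once the fragments are attached, a consumer can walk them through the
shared info.  A rough sketch, where handle_frag() is only a placeholder
for whatever the caller does with each page:

	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(&xdp);
	int i;

	if (xdp_buff_has_frags(&xdp)) {
		for (i = 0; i < sinfo->nr_frags; i++)
			handle_frag(skb_frag_page(&sinfo->frags[i]),
				    skb_frag_off(&sinfo->frags[i]),
				    skb_frag_size(&sinfo->frags[i]));
	}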

Signed-off-by: Andy Gospodarek <gospo@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 31 +++++++++++++++++++++++
 1 file changed, 31 insertions(+)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 3324d0070667..4f42efeddb32 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1131,6 +1131,27 @@ static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
 	return skb;
 }
 
+static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp,
+				 struct bnxt_cp_ring_info *cpr,
+				 struct xdp_buff *xdp, u16 idx,
+				 u32 agg_bufs, bool tpa)
+{
+	struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
+	u32 total_frag_len = 0;
+
+	if (!xdp_buff_has_frags(xdp))
+		shinfo->nr_frags = 0;
+
+	total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx, agg_bufs, tpa);
+
+	if (total_frag_len) {
+		xdp_buff_set_frags_flag(xdp);
+		shinfo->nr_frags = agg_bufs;
+		shinfo->xdp_frags_size = total_frag_len;
+	}
+	return total_frag_len;
+}
+
 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 			       u8 agg_bufs, u32 *raw_cons)
 {
@@ -1859,6 +1880,16 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 
 	if (bnxt_xdp_attached(bp, rxr)) {
 		bnxt_xdp_buff_init(bp, rxr, cons, &data_ptr, &len, &xdp);
+		if (agg_bufs) {
+			u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
+							     cp_cons, agg_bufs,
+							     false);
+			if (!frag_len) {
+				cpr->sw_stats.rx.rx_oom_discards += 1;
+				rc = -ENOMEM;
+				goto next_rx;
+			}
+		}
 		xdp_active = true;
 	}
 
-- 
2.18.1



* [PATCH net-next 06/11] bnxt: set xdp_buff pfmemalloc flag if needed
  2022-03-20  6:57 [PATCH net-next 00/11] bnxt: Support XDP multi buffer Michael Chan
                   ` (4 preceding siblings ...)
  2022-03-20  6:57 ` [PATCH net-next 05/11] bnxt: adding bnxt_rx_agg_pages_xdp for aggregated xdp Michael Chan
@ 2022-03-20  6:57 ` Michael Chan
  2022-03-20  6:57 ` [PATCH net-next 07/11] bnxt: change receive ring space parameters Michael Chan
                   ` (4 subsequent siblings)
  10 siblings, 0 replies; 14+ messages in thread
From: Michael Chan @ 2022-03-20  6:57 UTC (permalink / raw)
  To: davem; +Cc: netdev, kuba, gospo


From: Andy Gospodarek <gospo@broadcom.com>

Set the pfmemalloc flag in the xdp_buff so that it can be copied to
the skb if needed for an XDP_PASS action.
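
The flag set here is the generic xdp_buff one; when the buff is later
converted to an skb, the same information is consumed roughly like the
sketch below (condensed, not the exact driver code):

	/* while filling frags from the aggregation ring */
	if (page_is_pfmemalloc(page))
		xdp_buff_set_frag_pfmemalloc(xdp);

	/* later, when building the skb for XDP_PASS */
	xdp_update_skb_shared_info(skb, sinfo->nr_frags,
				   sinfo->xdp_frags_size,
				   PAGE_SIZE * sinfo->nr_frags,
				   xdp_buff_is_frag_pfmemalloc(xdp));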

Signed-off-by: Andy Gospodarek <gospo@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 4f42efeddb32..05f4b3fbf2e3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1041,7 +1041,8 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
 static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
 			       struct bnxt_cp_ring_info *cpr,
 			       struct skb_shared_info *shinfo,
-			       u16 idx, u32 agg_bufs, bool tpa)
+			       u16 idx, u32 agg_bufs, bool tpa,
+			       struct xdp_buff *xdp)
 {
 	struct bnxt_napi *bnapi = cpr->bnapi;
 	struct pci_dev *pdev = bp->pdev;
@@ -1084,6 +1085,9 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
 		page = cons_rx_buf->page;
 		cons_rx_buf->page = NULL;
 
+		if (xdp && page_is_pfmemalloc(page))
+			xdp_buff_set_frag_pfmemalloc(xdp);
+
 		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
 			unsigned int nr_frags;
 
@@ -1118,8 +1122,8 @@ static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	u32 total_frag_len = 0;
 
-	total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx, agg_bufs, tpa);
-
+	total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
+					     agg_bufs, tpa, NULL);
 	if (!total_frag_len) {
 		dev_kfree_skb(skb);
 		return NULL;
@@ -1142,8 +1146,8 @@ static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp,
 	if (!xdp_buff_has_frags(xdp))
 		shinfo->nr_frags = 0;
 
-	total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx, agg_bufs, tpa);
-
+	total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo,
+					     idx, agg_bufs, tpa, xdp);
 	if (total_frag_len) {
 		xdp_buff_set_frags_flag(xdp);
 		shinfo->nr_frags = agg_bufs;
-- 
2.18.1



* [PATCH net-next 07/11] bnxt: change receive ring space parameters
  2022-03-20  6:57 [PATCH net-next 00/11] bnxt: Support XDP multi buffer Michael Chan
                   ` (5 preceding siblings ...)
  2022-03-20  6:57 ` [PATCH net-next 06/11] bnxt: set xdp_buff pfmemalloc flag if needed Michael Chan
@ 2022-03-20  6:57 ` Michael Chan
  2022-03-20  6:57 ` [PATCH net-next 08/11] bnxt: add page_pool support for aggregation ring when using xdp Michael Chan
                   ` (3 subsequent siblings)
  10 siblings, 0 replies; 14+ messages in thread
From: Michael Chan @ 2022-03-20  6:57 UTC (permalink / raw)
  To: davem; +Cc: netdev, kuba, gospo


From: Andy Gospodarek <gospo@broadcom.com>

Modify the ring header-data-split and jumbo parameters to account for
the XDP multi-buffer design, which places roughly the first 4k of data
in a page and the remaining portions of the packet in the aggregation
ring.
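
In page mode the whole page becomes the buffer, and the usable packet
area is what remains after headroom and shared_info tailroom.  An
annotated copy of the new computation (XDP_PACKET_HEADROOM is 256, so
it normally dominates NET_SKB_PAD):

	rx_space = PAGE_SIZE;
	rx_size = rx_space -
		  SKB_DATA_ALIGN(ETH_HLEN + NET_IP_ALIGN + 8) -	/* hdr + CRC/VLAN */
		  ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -	/* headroom */
		  2 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info));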

Signed-off-by: Andy Gospodarek <gospo@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 44 +++++++++++++++--------
 drivers/net/ethernet/broadcom/bnxt/bnxt.h |  1 +
 2 files changed, 30 insertions(+), 15 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 05f4b3fbf2e3..b635b7ce6ba3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -56,6 +56,7 @@
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
 #include <net/page_pool.h>
+#include <linux/align.h>
 
 #include "bnxt_hsi.h"
 #include "bnxt.h"
@@ -1933,11 +1934,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 	}
 
 	if (agg_bufs) {
-		skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
-		if (!skb) {
-			cpr->sw_stats.rx.rx_oom_discards += 1;
-			rc = -ENOMEM;
-			goto next_rx;
+		if (!xdp_active) {
+			skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
+			if (!skb) {
+				cpr->sw_stats.rx.rx_oom_discards += 1;
+				rc = -ENOMEM;
+				goto next_rx;
+			}
 		}
 	}
 
@@ -3853,7 +3856,7 @@ void bnxt_set_ring_params(struct bnxt *bp)
 	/* 8 for CRC and VLAN */
 	rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
 
-	rx_space = rx_size + NET_SKB_PAD +
+	rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
 	bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
@@ -3894,9 +3897,17 @@ void bnxt_set_ring_params(struct bnxt *bp)
 		}
 		bp->rx_agg_ring_size = agg_ring_size;
 		bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
-		rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
-		rx_space = rx_size + NET_SKB_PAD +
-			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+		if (BNXT_RX_PAGE_MODE(bp)) {
+			rx_space = PAGE_SIZE;
+			rx_size = rx_space - SKB_DATA_ALIGN(ETH_HLEN + NET_IP_ALIGN + 8) -
+				  ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
+				  2 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+		} else {
+			rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
+			rx_space = rx_size + NET_SKB_PAD +
+				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+		}
 	}
 
 	bp->rx_buf_use_size = rx_size;
@@ -5286,12 +5297,15 @@ static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
 	if (rc)
 		return rc;
 
-	req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
-				 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
-				 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
-	req->enables =
-		cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
-			    VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
+	req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
+	req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
+
+	if (BNXT_RX_PAGE_MODE(bp) && !BNXT_RX_JUMBO_MODE(bp)) {
+		req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
+					  VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
+		req->enables |=
+			cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
+	}
 	/* thresholds not implemented in firmware yet */
 	req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
 	req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 447a9406b8a2..9e2dabb58519 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1814,6 +1814,7 @@ struct bnxt {
 #define BNXT_SUPPORTS_TPA(bp)	(!BNXT_CHIP_TYPE_NITRO_A0(bp) &&	\
 				 (!((bp)->flags & BNXT_FLAG_CHIP_P5) ||	\
 				  (bp)->max_tpa_v2) && !is_kdump_kernel())
+#define BNXT_RX_JUMBO_MODE(bp)	((bp)->flags & BNXT_FLAG_JUMBO)
 
 #define BNXT_CHIP_SR2(bp)			\
 	((bp)->chip_num == CHIP_NUM_58818)
-- 
2.18.1



* [PATCH net-next 08/11] bnxt: add page_pool support for aggregation ring when using xdp
  2022-03-20  6:57 [PATCH net-next 00/11] bnxt: Support XDP multi buffer Michael Chan
                   ` (6 preceding siblings ...)
  2022-03-20  6:57 ` [PATCH net-next 07/11] bnxt: change receive ring space parameters Michael Chan
@ 2022-03-20  6:57 ` Michael Chan
  2022-03-20  6:57 ` [PATCH net-next 09/11] bnxt: adding bnxt_xdp_build_skb to build skb from multibuffer xdp_buff Michael Chan
                   ` (2 subsequent siblings)
  10 siblings, 0 replies; 14+ messages in thread
From: Michael Chan @ 2022-03-20  6:57 UTC (permalink / raw)
  To: davem; +Cc: netdev, kuba, gospo


From: Andy Gospodarek <gospo@broadcom.com>

If we are using aggregation rings with XDP enabled, allocate page
buffers for the aggregation rings from the page_pool.
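
A condensed sketch of the resulting allocate/recycle pairing (error
handling and DMA mapping trimmed):

	/* refill: take a page from the pool instead of alloc_page() */
	page = page_pool_dev_alloc_pages(rxr->page_pool);
	if (!page)
		return -ENOMEM;

	/* teardown or XDP completion: hand it straight back to the pool */
	page_pool_recycle_direct(rxr->page_pool, page);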

Signed-off-by: Andy Gospodarek <gospo@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 77 ++++++++++++++---------
 1 file changed, 47 insertions(+), 30 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index b635b7ce6ba3..980c176d7c88 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -739,7 +739,6 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
 		page_pool_recycle_direct(rxr->page_pool, page);
 		return NULL;
 	}
-	*mapping += bp->rx_dma_offset;
 	return page;
 }
 
@@ -781,6 +780,7 @@ int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 		if (!page)
 			return -ENOMEM;
 
+		mapping += bp->rx_dma_offset;
 		rx_buf->data = page;
 		rx_buf->data_ptr = page_address(page) + bp->rx_offset;
 	} else {
@@ -841,33 +841,41 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
 	u16 sw_prod = rxr->rx_sw_agg_prod;
 	unsigned int offset = 0;
 
-	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
-		page = rxr->rx_page;
-		if (!page) {
+	if (BNXT_RX_PAGE_MODE(bp)) {
+		page = __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
+
+		if (!page)
+			return -ENOMEM;
+
+	} else {
+		if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
+			page = rxr->rx_page;
+			if (!page) {
+				page = alloc_page(gfp);
+				if (!page)
+					return -ENOMEM;
+				rxr->rx_page = page;
+				rxr->rx_page_offset = 0;
+			}
+			offset = rxr->rx_page_offset;
+			rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
+			if (rxr->rx_page_offset == PAGE_SIZE)
+				rxr->rx_page = NULL;
+			else
+				get_page(page);
+		} else {
 			page = alloc_page(gfp);
 			if (!page)
 				return -ENOMEM;
-			rxr->rx_page = page;
-			rxr->rx_page_offset = 0;
 		}
-		offset = rxr->rx_page_offset;
-		rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
-		if (rxr->rx_page_offset == PAGE_SIZE)
-			rxr->rx_page = NULL;
-		else
-			get_page(page);
-	} else {
-		page = alloc_page(gfp);
-		if (!page)
-			return -ENOMEM;
-	}
 
-	mapping = dma_map_page_attrs(&pdev->dev, page, offset,
-				     BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
-				     DMA_ATTR_WEAK_ORDERING);
-	if (dma_mapping_error(&pdev->dev, mapping)) {
-		__free_page(page);
-		return -EIO;
+		mapping = dma_map_page_attrs(&pdev->dev, page, offset,
+					     BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
+					     DMA_ATTR_WEAK_ORDERING);
+		if (dma_mapping_error(&pdev->dev, mapping)) {
+			__free_page(page);
+			return -EIO;
+		}
 	}
 
 	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
@@ -1105,7 +1113,7 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
 		}
 
 		dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
-				     DMA_FROM_DEVICE,
+				     bp->rx_dir,
 				     DMA_ATTR_WEAK_ORDERING);
 
 		total_frag_len += frag_len;
@@ -2936,14 +2944,23 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
 		if (!page)
 			continue;
 
-		dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
-				     BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
-				     DMA_ATTR_WEAK_ORDERING);
+		if (BNXT_RX_PAGE_MODE(bp)) {
+			dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
+					     BNXT_RX_PAGE_SIZE, bp->rx_dir,
+					     DMA_ATTR_WEAK_ORDERING);
+			rx_agg_buf->page = NULL;
+			__clear_bit(i, rxr->rx_agg_bmap);
 
-		rx_agg_buf->page = NULL;
-		__clear_bit(i, rxr->rx_agg_bmap);
+			page_pool_recycle_direct(rxr->page_pool, page);
+		} else {
+			dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
+					     BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
+					     DMA_ATTR_WEAK_ORDERING);
+			rx_agg_buf->page = NULL;
+			__clear_bit(i, rxr->rx_agg_bmap);
 
-		__free_page(page);
+			__free_page(page);
+		}
 	}
 
 skip_rx_agg_free:
-- 
2.18.1



* [PATCH net-next 09/11] bnxt: adding bnxt_xdp_build_skb to build skb from multibuffer xdp_buff
  2022-03-20  6:57 [PATCH net-next 00/11] bnxt: Support XDP multi buffer Michael Chan
                   ` (7 preceding siblings ...)
  2022-03-20  6:57 ` [PATCH net-next 08/11] bnxt: add page_pool support for aggregation ring when using xdp Michael Chan
@ 2022-03-20  6:57 ` Michael Chan
  2022-03-20  6:57 ` [PATCH net-next 10/11] bnxt: support transmit and free of aggregation buffers Michael Chan
  2022-03-20  6:57 ` [PATCH net-next 11/11] bnxt: XDP multibuffer enablement Michael Chan
  10 siblings, 0 replies; 14+ messages in thread
From: Michael Chan @ 2022-03-20  6:57 UTC (permalink / raw)
  To: davem; +Cc: netdev, kuba, gospo


From: Andy Gospodarek <gospo@broadcom.com>

Since we have an xdp_buff with frags, there needs to be a way to
convert that into a valid sk_buff in the event that XDP_PASS is the
resulting operation.  This adds a new rx_skb_func for when the netdev
has an MTU that prevents the packets from sitting in a single page.

This also makes sure that GRO/LRO stay disabled even when using
the aggregation ring for large buffers.
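
The conversion follows the usual multi-buffer recipe: build an skb
around the head page, then fold the frag metadata from the xdp_buff's
shared info into the skb.  A rough sketch of that recipe (the real
bnxt_xdp_build_skb() in this patch also copies checksum results from
the completion record):

	skb = build_skb(page_address(page), PAGE_SIZE);
	if (!skb)
		return NULL;

	skb_reserve(skb, headroom);
	__skb_put(skb, len);
	skb_mark_for_recycle(skb);

	if (xdp_buff_has_frags(xdp))
		xdp_update_skb_shared_info(skb, sinfo->nr_frags,
					   sinfo->xdp_frags_size,
					   PAGE_SIZE * sinfo->nr_frags,
					   xdp_buff_is_frag_pfmemalloc(xdp));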

Signed-off-by: Andy Gospodarek <gospo@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c     | 63 ++++++++++++++++---
 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 39 ++++++++++++
 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h |  3 +
 3 files changed, 98 insertions(+), 7 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 980c176d7c88..b92f5ef31132 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -971,6 +971,37 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
 	rxr->rx_sw_agg_prod = sw_prod;
 }
 
+static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
+					      struct bnxt_rx_ring_info *rxr,
+					      u16 cons, void *data, u8 *data_ptr,
+					      dma_addr_t dma_addr,
+					      unsigned int offset_and_len)
+{
+	unsigned int len = offset_and_len & 0xffff;
+	struct page *page = data;
+	u16 prod = rxr->rx_prod;
+	struct sk_buff *skb;
+	int err;
+
+	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
+	if (unlikely(err)) {
+		bnxt_reuse_rx_data(rxr, cons, data);
+		return NULL;
+	}
+	dma_addr -= bp->rx_dma_offset;
+	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
+			     DMA_ATTR_WEAK_ORDERING);
+	skb = build_skb(page_address(page), PAGE_SIZE - bp->rx_dma_offset);
+	if (!skb) {
+		__free_page(page);
+		return NULL;
+	}
+	skb_mark_for_recycle(skb);
+	skb_reserve(skb, bp->rx_dma_offset);
+	__skb_put(skb, len);
+	return skb;
+}
+
 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
 					struct bnxt_rx_ring_info *rxr,
 					u16 cons, void *data, u8 *data_ptr,
@@ -993,7 +1024,6 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
 	dma_addr -= bp->rx_dma_offset;
 	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
 			     DMA_ATTR_WEAK_ORDERING);
-	page_pool_release_page(rxr->page_pool, page);
 
 	if (unlikely(!payload))
 		payload = eth_get_headlen(bp->dev, data_ptr, len);
@@ -1004,6 +1034,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
 		return NULL;
 	}
 
+	skb_mark_for_recycle(skb);
 	off = (void *)data_ptr - page_address(page);
 	skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
 	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
@@ -1949,6 +1980,14 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 				rc = -ENOMEM;
 				goto next_rx;
 			}
+		} else {
+			skb = bnxt_xdp_build_skb(bp, skb, rxr->page_pool, &xdp, rxcmp1);
+			if (!skb) {
+				/* we should be able to free the old skb here */
+				cpr->sw_stats.rx.rx_oom_discards += 1;
+				rc = -ENOMEM;
+				goto next_rx;
+			}
 		}
 	}
 
@@ -3965,14 +4004,21 @@ void bnxt_set_ring_params(struct bnxt *bp)
 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
 {
 	if (page_mode) {
-		if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
-			return -EOPNOTSUPP;
-		bp->dev->max_mtu =
-			min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
 		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
-		bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
+		bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
+
+		if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
+			bp->flags |= BNXT_FLAG_JUMBO;
+			bp->rx_skb_func = bnxt_rx_multi_page_skb;
+			bp->dev->max_mtu =
+				min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
+		} else {
+			bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
+			bp->rx_skb_func = bnxt_rx_page_skb;
+			bp->dev->max_mtu =
+				min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
+		}
 		bp->rx_dir = DMA_BIDIRECTIONAL;
-		bp->rx_skb_func = bnxt_rx_page_skb;
 		/* Disable LRO or GRO_HW */
 		netdev_update_features(bp->dev);
 	} else {
@@ -11116,6 +11162,9 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
 	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
 		features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
 
+	if (!(bp->flags & BNXT_FLAG_TPA))
+		features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
+
 	if (!(features & NETIF_F_GRO))
 		features &= ~NETIF_F_GRO_HW;
 
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 5073f97722f1..3bcdbdd10bfc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -349,3 +349,42 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
 	}
 	return rc;
 }
+
+struct sk_buff *
+bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, struct page_pool *pool,
+		   struct xdp_buff *xdp, struct rx_cmp_ext *rxcmp1)
+{
+	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+	u8 num_frags, i;
+
+	if (unlikely(xdp_buff_has_frags(xdp)))
+		num_frags = sinfo->nr_frags;
+
+	if (!skb)
+		return NULL;
+
+	skb_checksum_none_assert(skb);
+	if (RX_CMP_L4_CS_OK(rxcmp1)) {
+		if (bp->dev->features & NETIF_F_RXCSUM) {
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
+		}
+	}
+
+	if (unlikely(xdp_buff_has_frags(xdp))) {
+		xdp_update_skb_shared_info(skb, sinfo->nr_frags,
+					   sinfo->xdp_frags_size,
+					   PAGE_SIZE * sinfo->nr_frags,
+					   xdp_buff_is_frag_pfmemalloc(xdp));
+	}
+	/* debug frags and number of frags */
+	for (i = 0; i < num_frags; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		skb_frag_set_page(skb, i, skb_frag_page(&sinfo->frags[i]));
+		skb_frag_size_set(frag, skb_frag_size(&sinfo->frags[i]));
+		skb_frag_off_set(frag, skb_frag_off(&sinfo->frags[i]));
+		page_pool_release_page(pool, skb_frag_page(frag));
+	}
+	return skb;
+}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
index 39690bdb5526..45134d299931 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -26,4 +26,7 @@ bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr);
 void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 			u16 cons, u8 **data_ptr, unsigned int *len,
 			struct xdp_buff *xdp);
+struct sk_buff *bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb,
+				   struct page_pool *pool, struct xdp_buff *xdp,
+				   struct rx_cmp_ext *rxcmp1);
 #endif
-- 
2.18.1



* [PATCH net-next 10/11] bnxt: support transmit and free of aggregation buffers
  2022-03-20  6:57 [PATCH net-next 00/11] bnxt: Support XDP multi buffer Michael Chan
                   ` (8 preceding siblings ...)
  2022-03-20  6:57 ` [PATCH net-next 09/11] bnxt: adding bnxt_xdp_build_skb to build skb from multibuffer xdp_buff Michael Chan
@ 2022-03-20  6:57 ` Michael Chan
  2022-03-20 10:47   ` kernel test robot
  2022-03-20  6:57 ` [PATCH net-next 11/11] bnxt: XDP multibuffer enablement Michael Chan
  10 siblings, 1 reply; 14+ messages in thread
From: Michael Chan @ 2022-03-20  6:57 UTC (permalink / raw)
  To: davem; +Cc: netdev, kuba, gospo


From: Andy Gospodarek <gospo@broadcom.com>

This patch adds the following features:
- Support for the XDP_TX and XDP_DROP actions when using an xdp_buff
  with frags (see the sketch after this list)
- Support for freeing all frags attached to an xdp_buff
- Cleanup of TX ring buffers after transmits complete
- Slight change in definition of bnxt_sw_tx_bd since nr_frags
  and RX producer may both need to be used
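
For the first item, the TX-side accounting boils down to needing one
BD per fragment on top of the head buffer; condensed from the XDP_TX
path in this patch:

	u32 tx_needed = 1;

	if (xdp_buff_has_frags(&xdp)) {
		struct skb_shared_info *sinfo =
			xdp_get_shared_info_from_buff(&xdp);

		tx_needed += sinfo->nr_frags;
	}

	if (bnxt_tx_avail(bp, txr) < tx_needed) {
		/* not enough TX descriptors: recycle the frags and
		 * reuse the head buffer instead of transmitting
		 */
	}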

Signed-off-by: Andy Gospodarek <gospo@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c     |  18 ++-
 drivers/net/ethernet/broadcom/bnxt/bnxt.h     |   7 +-
 .../net/ethernet/broadcom/bnxt/bnxt_ethtool.c |   2 +-
 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 117 ++++++++++++++++--
 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h |   5 +-
 5 files changed, 126 insertions(+), 23 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index b92f5ef31132..84c89ee7dc2f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1949,9 +1949,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 		skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
 		bnxt_reuse_rx_data(rxr, cons, data);
 		if (!skb) {
-			if (agg_bufs)
-				bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
-						       agg_bufs, false);
+			if (agg_bufs) {
+				if (!xdp_active)
+					bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
+							       agg_bufs, false);
+				else
+					bnxt_xdp_buff_frags_free(rxr, &xdp);
+			}
 			cpr->sw_stats.rx.rx_oom_discards += 1;
 			rc = -ENOMEM;
 			goto next_rx;
@@ -1984,6 +1988,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 			skb = bnxt_xdp_build_skb(bp, skb, rxr->page_pool, &xdp, rxcmp1);
 			if (!skb) {
 				/* we should be able to free the old skb here */
+				bnxt_xdp_buff_frags_free(rxr, &xdp);
 				cpr->sw_stats.rx.rx_oom_discards += 1;
 				rc = -ENOMEM;
 				goto next_rx;
@@ -2603,10 +2608,13 @@ static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
 	if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
 		struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
 
-		if (bnapi->events & BNXT_AGG_EVENT)
-			bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
 		bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
 	}
+	if (bnapi->events & BNXT_AGG_EVENT) {
+		struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+
+		bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+	}
 	bnapi->events = 0;
 }
 
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 9e2dabb58519..801aa40f602f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -698,13 +698,12 @@ struct bnxt_sw_tx_bd {
 	};
 	DEFINE_DMA_UNMAP_ADDR(mapping);
 	DEFINE_DMA_UNMAP_LEN(len);
+	struct page		*page;
 	u8			is_gso;
 	u8			is_push;
 	u8			action;
-	union {
-		unsigned short		nr_frags;
-		u16			rx_prod;
-	};
+	unsigned short		nr_frags;
+	u16			rx_prod;
 };
 
 struct bnxt_sw_rx_bd {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 22e965e18fbc..b3a48d6675fe 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -3491,7 +3491,7 @@ static int bnxt_run_loopback(struct bnxt *bp)
 		dev_kfree_skb(skb);
 		return -EIO;
 	}
-	bnxt_xmit_bd(bp, txr, map, pkt_size);
+	bnxt_xmit_bd(bp, txr, map, pkt_size, NULL);
 
 	/* Sync BD data before updating doorbell */
 	wmb();
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 3bcdbdd10bfc..42f39e89f6fb 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -22,36 +22,91 @@
 
 struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
 				   struct bnxt_tx_ring_info *txr,
-				   dma_addr_t mapping, u32 len)
+				   dma_addr_t mapping, u32 len,
+				   struct xdp_buff *xdp)
 {
-	struct bnxt_sw_tx_bd *tx_buf;
+	struct skb_shared_info *sinfo;
+	struct bnxt_sw_tx_bd *tx_buf, *first_buf;
 	struct tx_bd *txbd;
+	int num_frags = 0;
 	u32 flags;
 	u16 prod;
+	int i;
+
+	if (xdp && xdp_buff_has_frags(xdp)) {
+		sinfo = xdp_get_shared_info_from_buff(xdp);
+		num_frags = sinfo->nr_frags;
+	}
 
+	/* fill up the first buffer */
 	prod = txr->tx_prod;
 	tx_buf = &txr->tx_buf_ring[prod];
+	first_buf = tx_buf;
+	tx_buf->nr_frags = num_frags;
+	if (xdp)
+		tx_buf->page = virt_to_head_page(xdp->data);
 
 	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
-	flags = (len << TX_BD_LEN_SHIFT) | (1 << TX_BD_FLAGS_BD_CNT_SHIFT) |
-		TX_BD_FLAGS_PACKET_END | bnxt_lhint_arr[len >> 9];
+	flags = ((len) << TX_BD_LEN_SHIFT) | ((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT);
 	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
 	txbd->tx_bd_opaque = prod;
 	txbd->tx_bd_haddr = cpu_to_le64(mapping);
 
+	/* now let us fill up the frags into the next buffers */
+	for (i = 0; i < num_frags ; i++) {
+		skb_frag_t *frag = &sinfo->frags[i];
+		struct bnxt_sw_tx_bd *frag_tx_buf;
+		struct pci_dev *pdev = bp->pdev;
+		dma_addr_t frag_mapping;
+		int frag_len;
+
+		prod = NEXT_TX(prod);
+		txr->tx_prod = prod;
+
+		/* first fill up the first buffer */
+		frag_tx_buf = &txr->tx_buf_ring[prod];
+		frag_tx_buf->page = skb_frag_page(frag);
+
+		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+
+		frag_len = skb_frag_size(frag);
+		frag_mapping = skb_frag_dma_map(&pdev->dev, frag, 0,
+						frag_len, DMA_TO_DEVICE);
+
+		if (unlikely(dma_mapping_error(&pdev->dev, frag_mapping)))
+			return NULL;
+
+		dma_unmap_addr_set(frag_tx_buf, mapping, frag_mapping);
+
+		flags = frag_len << TX_BD_LEN_SHIFT;
+		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+		txbd->tx_bd_opaque = prod;
+		txbd->tx_bd_haddr = cpu_to_le64(frag_mapping);
+
+		len = frag_len;
+	}
+
+	flags &= ~TX_BD_LEN;
+	txbd->tx_bd_len_flags_type = cpu_to_le32(((len) << TX_BD_LEN_SHIFT) | flags |
+			TX_BD_FLAGS_PACKET_END);
+	/* Sync TX BD */
+	wmb();
 	prod = NEXT_TX(prod);
 	txr->tx_prod = prod;
-	return tx_buf;
+
+	return first_buf;
 }
 
 static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
-			    dma_addr_t mapping, u32 len, u16 rx_prod)
+			    dma_addr_t mapping, u32 len, u16 rx_prod,
+			    struct xdp_buff *xdp)
 {
 	struct bnxt_sw_tx_bd *tx_buf;
 
-	tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);
+	tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, xdp);
 	tx_buf->rx_prod = rx_prod;
 	tx_buf->action = XDP_TX;
+
 }
 
 static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
@@ -61,7 +116,7 @@ static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
 {
 	struct bnxt_sw_tx_bd *tx_buf;
 
-	tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);
+	tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, NULL);
 	tx_buf->action = XDP_REDIRECT;
 	tx_buf->xdpf = xdpf;
 	dma_unmap_addr_set(tx_buf, mapping, mapping);
@@ -76,7 +131,7 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
 	struct bnxt_sw_tx_bd *tx_buf;
 	u16 tx_cons = txr->tx_cons;
 	u16 last_tx_cons = tx_cons;
-	int i;
+	int i, j, frags;
 
 	for (i = 0; i < nr_pkts; i++) {
 		tx_buf = &txr->tx_buf_ring[tx_cons];
@@ -94,6 +149,13 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
 		} else if (tx_buf->action == XDP_TX) {
 			rx_doorbell_needed = true;
 			last_tx_cons = tx_cons;
+
+			frags = tx_buf->nr_frags;
+			for (j = 0; j < frags; j++) {
+				tx_cons = NEXT_TX(tx_cons);
+				tx_buf = &txr->tx_buf_ring[tx_cons];
+				page_pool_recycle_direct(rxr->page_pool, tx_buf->page);
+			}
 		}
 		tx_cons = NEXT_TX(tx_cons);
 	}
@@ -101,6 +163,7 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
 	if (rx_doorbell_needed) {
 		tx_buf = &txr->tx_buf_ring[last_tx_cons];
 		bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod);
+
 	}
 }
 
@@ -131,6 +194,23 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 	xdp_prepare_buff(xdp, *data_ptr - offset, offset, *len, false);
 }
 
+void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
+			      struct xdp_buff *xdp)
+{
+	struct skb_shared_info *shinfo;
+	int i;
+
+	if (xdp)
+		shinfo = xdp_get_shared_info_from_buff(xdp);
+
+	for (i = 0; i < shinfo->nr_frags; i++) {
+		struct page *page = skb_frag_page(&shinfo->frags[i]);
+
+		page_pool_recycle_direct(rxr->page_pool, page);
+	}
+	shinfo->nr_frags = 0;
+}
+
 /* returns the following:
  * true    - packet consumed by XDP and new buffer is allocated.
  * false   - packet should be passed to the stack.
@@ -143,6 +223,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
 	struct bnxt_sw_rx_bd *rx_buf;
 	struct pci_dev *pdev;
 	dma_addr_t mapping;
+	u32 tx_needed = 1;
 	void *orig_data;
 	u32 tx_avail;
 	u32 offset;
@@ -178,18 +259,28 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
 	case XDP_TX:
 		rx_buf = &rxr->rx_buf_ring[cons];
 		mapping = rx_buf->mapping - bp->rx_dma_offset;
+		*event = 0;
 
-		if (tx_avail < 1) {
+		if (unlikely(xdp_buff_has_frags(&xdp))) {
+			struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(&xdp);
+
+			tx_needed += sinfo->nr_frags;
+			*event = BNXT_AGG_EVENT;
+		}
+
+		if (tx_avail < tx_needed) {
 			trace_xdp_exception(bp->dev, xdp_prog, act);
+			bnxt_xdp_buff_frags_free(rxr, &xdp);
 			bnxt_reuse_rx_data(rxr, cons, page);
 			return true;
 		}
 
-		*event = BNXT_TX_EVENT;
 		dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
 					   bp->rx_dir);
+
+		*event |= BNXT_TX_EVENT;
 		__bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
-				NEXT_RX(rxr->rx_prod));
+				NEXT_RX(rxr->rx_prod), &xdp);
 		bnxt_reuse_rx_data(rxr, cons, page);
 		return true;
 	case XDP_REDIRECT:
@@ -204,6 +295,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
 		/* if we are unable to allocate a new buffer, abort and reuse */
 		if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
 			trace_xdp_exception(bp->dev, xdp_prog, act);
+			bnxt_xdp_buff_frags_free(rxr, &xdp);
 			bnxt_reuse_rx_data(rxr, cons, page);
 			return true;
 		}
@@ -223,6 +315,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
 		trace_xdp_exception(bp->dev, xdp_prog, act);
 		fallthrough;
 	case XDP_DROP:
+		bnxt_xdp_buff_frags_free(rxr, &xdp);
 		bnxt_reuse_rx_data(rxr, cons, page);
 		break;
 	}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
index 45134d299931..8ac15184bcc8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -12,7 +12,8 @@
 
 struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
 				   struct bnxt_tx_ring_info *txr,
-				   dma_addr_t mapping, u32 len);
+				   dma_addr_t mapping, u32 len,
+				   struct xdp_buff *xdp);
 void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts);
 bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
 		 struct xdp_buff xdp, struct page *page, unsigned int *len,
@@ -26,6 +27,8 @@ bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr);
 void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 			u16 cons, u8 **data_ptr, unsigned int *len,
 			struct xdp_buff *xdp);
+void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
+			      struct xdp_buff *xdp);
 struct sk_buff *bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb,
 				   struct page_pool *pool, struct xdp_buff *xdp,
 				   struct rx_cmp_ext *rxcmp1);
-- 
2.18.1



* [PATCH net-next 11/11] bnxt: XDP multibuffer enablement
  2022-03-20  6:57 [PATCH net-next 00/11] bnxt: Support XDP multi buffer Michael Chan
                   ` (9 preceding siblings ...)
  2022-03-20  6:57 ` [PATCH net-next 10/11] bnxt: support transmit and free of aggregation buffers Michael Chan
@ 2022-03-20  6:57 ` Michael Chan
  10 siblings, 0 replies; 14+ messages in thread
From: Michael Chan @ 2022-03-20  6:57 UTC (permalink / raw)
  To: davem; +Cc: netdev, kuba, gospo


From: Andy Gospodarek <gospo@broadcom.com>

Allow aggregation buffers to be in place in the receive path and
allow XDP programs to be attached when using an MTU larger than 4k.

Signed-off-by: Andy Gospodarek <gospo@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c     | 3 +--
 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 5 -----
 2 files changed, 1 insertion(+), 7 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 84c89ee7dc2f..4f7213af1955 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1937,8 +1937,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 		xdp_active = true;
 	}
 
-	/* skip running XDP prog if there are aggregation bufs */
-	if (!agg_bufs && xdp_active) {
+	if (xdp_active) {
 		if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &len, event)) {
 			rc = 1;
 			goto next_rx;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 42f39e89f6fb..ff0454b70aa3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -375,11 +375,6 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
 	int tx_xdp = 0, rc, tc;
 	struct bpf_prog *old;
 
-	if (prog && bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
-		netdev_warn(dev, "MTU %d larger than largest XDP supported MTU %d.\n",
-			    bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU);
-		return -EOPNOTSUPP;
-	}
 	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) {
 		netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
 		return -EOPNOTSUPP;
-- 
2.18.1
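
Since this lifts the 4K page-mode MTU cap for XDP, a minimal frags-aware
program helps illustrate what can now be attached. The sketch below is not
part of this series; it assumes a libbpf recent enough to recognize the
"xdp.frags" section name and the bpf_xdp_get_buff_len() helper from the
core XDP multi-buffer work:

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical multi-buffer XDP program; intended to be loadable on a
 * bnxt interface with MTU > 4K once this series is applied.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp.frags")
int xdp_mb_pass(struct xdp_md *ctx)
{
	/* Length of the whole frame: linear part plus all fragments. */
	__u64 len = bpf_xdp_get_buff_len(ctx);

	return len ? XDP_PASS : XDP_DROP;
}

char _license[] SEC("license") = "GPL";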


[-- Attachment #2: S/MIME Cryptographic Signature --]
[-- Type: application/pkcs7-signature, Size: 4209 bytes --]

^ permalink raw reply related	[flat|nested] 14+ messages in thread

* Re: [PATCH net-next 01/11] bnxt: refactor bnxt_rx_xdp to separate xdp_init_buff/xdp_prepare_buff
  2022-03-20  6:57 ` [PATCH net-next 01/11] bnxt: refactor bnxt_rx_xdp to separate xdp_init_buff/xdp_prepare_buff Michael Chan
@ 2022-03-20  9:47   ` kernel test robot
  0 siblings, 0 replies; 14+ messages in thread
From: kernel test robot @ 2022-03-20  9:47 UTC (permalink / raw)
  To: Michael Chan, davem; +Cc: llvm, kbuild-all, netdev, kuba, gospo

Hi Michael,

I love your patch! Perhaps something to improve:

[auto build test WARNING on net-next/master]

url:    https://github.com/0day-ci/linux/commits/Michael-Chan/bnxt-Support-XDP-multi-buffer/20220320-150017
base:   https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git 092d992b76ed9d06389af0bc5efd5279d7b1ed9f
config: i386-randconfig-a013 (https://download.01.org/0day-ci/archive/20220320/202203201751.yZWkjAof-lkp@intel.com/config)
compiler: clang version 15.0.0 (https://github.com/llvm/llvm-project 85e9b2687a13d1908aa86d1b89c5ce398a06cd39)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/0day-ci/linux/commit/a00a69739b9d83e22d8fd2404e50a886e4e99944
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Michael-Chan/bnxt-Support-XDP-multi-buffer/20220320-150017
        git checkout a00a69739b9d83e22d8fd2404e50a886e4e99944
        # save the config file to linux build tree
        mkdir build_dir
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=i386 SHELL=/bin/bash drivers/net/ethernet/broadcom/bnxt/

If you fix the issue, kindly add the following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>

All warnings (new ones prefixed by >>):

>> drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c:200:36: warning: variable 'mapping' is uninitialized when used here [-Wuninitialized]
                   dma_unmap_page_attrs(&pdev->dev, mapping,
                                                    ^~~~~~~
   drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c:145:20: note: initialize the variable 'mapping' to silence this warning
           dma_addr_t mapping;
                             ^
                              = 0
   1 warning generated.


vim +/mapping +200 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c

a00a69739b9d83 Andy Gospodarek     2022-03-20  133  
c6d30e8391b85e Michael Chan        2017-02-06  134  /* returns the following:
c6d30e8391b85e Michael Chan        2017-02-06  135   * true    - packet consumed by XDP and new buffer is allocated.
c6d30e8391b85e Michael Chan        2017-02-06  136   * false   - packet should be passed to the stack.
c6d30e8391b85e Michael Chan        2017-02-06  137   */
c6d30e8391b85e Michael Chan        2017-02-06  138  bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
a00a69739b9d83 Andy Gospodarek     2022-03-20  139  		 struct xdp_buff xdp, struct page *page, unsigned int *len, u8 *event)
c6d30e8391b85e Michael Chan        2017-02-06  140  {
c6d30e8391b85e Michael Chan        2017-02-06  141  	struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
38413406277fd0 Michael Chan        2017-02-06  142  	struct bnxt_tx_ring_info *txr;
c6d30e8391b85e Michael Chan        2017-02-06  143  	struct bnxt_sw_rx_bd *rx_buf;
c6d30e8391b85e Michael Chan        2017-02-06  144  	struct pci_dev *pdev;
c6d30e8391b85e Michael Chan        2017-02-06  145  	dma_addr_t mapping;
c6d30e8391b85e Michael Chan        2017-02-06  146  	void *orig_data;
38413406277fd0 Michael Chan        2017-02-06  147  	u32 tx_avail;
c6d30e8391b85e Michael Chan        2017-02-06  148  	u32 offset;
c6d30e8391b85e Michael Chan        2017-02-06  149  	u32 act;
c6d30e8391b85e Michael Chan        2017-02-06  150  
c6d30e8391b85e Michael Chan        2017-02-06  151  	if (!xdp_prog)
c6d30e8391b85e Michael Chan        2017-02-06  152  		return false;
c6d30e8391b85e Michael Chan        2017-02-06  153  
c6d30e8391b85e Michael Chan        2017-02-06  154  	pdev = bp->pdev;
c6d30e8391b85e Michael Chan        2017-02-06  155  	offset = bp->rx_offset;
c6d30e8391b85e Michael Chan        2017-02-06  156  
f18c2b77b2e4ee Andy Gospodarek     2019-07-08  157  	txr = rxr->bnapi->tx_ring;
43b5169d8355cc Lorenzo Bianconi    2020-12-22  158  	/* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
c6d30e8391b85e Michael Chan        2017-02-06  159  	orig_data = xdp.data;
c6d30e8391b85e Michael Chan        2017-02-06  160  
c6d30e8391b85e Michael Chan        2017-02-06  161  	act = bpf_prog_run_xdp(xdp_prog, &xdp);
c6d30e8391b85e Michael Chan        2017-02-06  162  
38413406277fd0 Michael Chan        2017-02-06  163  	tx_avail = bnxt_tx_avail(bp, txr);
38413406277fd0 Michael Chan        2017-02-06  164  	/* If the tx ring is not full, we must not update the rx producer yet
38413406277fd0 Michael Chan        2017-02-06  165  	 * because we may still be transmitting on some BDs.
38413406277fd0 Michael Chan        2017-02-06  166  	 */
38413406277fd0 Michael Chan        2017-02-06  167  	if (tx_avail != bp->tx_ring_size)
38413406277fd0 Michael Chan        2017-02-06  168  		*event &= ~BNXT_RX_EVENT;
38413406277fd0 Michael Chan        2017-02-06  169  
b968e735c79767 Nikita V. Shirokov  2018-04-17  170  	*len = xdp.data_end - xdp.data;
a00a69739b9d83 Andy Gospodarek     2022-03-20  171  	if (orig_data != xdp.data)
c6d30e8391b85e Michael Chan        2017-02-06  172  		offset = xdp.data - xdp.data_hard_start;
a00a69739b9d83 Andy Gospodarek     2022-03-20  173  
c6d30e8391b85e Michael Chan        2017-02-06  174  	switch (act) {
c6d30e8391b85e Michael Chan        2017-02-06  175  	case XDP_PASS:
c6d30e8391b85e Michael Chan        2017-02-06  176  		return false;
c6d30e8391b85e Michael Chan        2017-02-06  177  
38413406277fd0 Michael Chan        2017-02-06  178  	case XDP_TX:
a00a69739b9d83 Andy Gospodarek     2022-03-20  179  		rx_buf = &rxr->rx_buf_ring[cons];
a00a69739b9d83 Andy Gospodarek     2022-03-20  180  		mapping = rx_buf->mapping - bp->rx_dma_offset;
a00a69739b9d83 Andy Gospodarek     2022-03-20  181  
932dbf83ba18bd Michael Chan        2017-04-04  182  		if (tx_avail < 1) {
38413406277fd0 Michael Chan        2017-02-06  183  			trace_xdp_exception(bp->dev, xdp_prog, act);
38413406277fd0 Michael Chan        2017-02-06  184  			bnxt_reuse_rx_data(rxr, cons, page);
38413406277fd0 Michael Chan        2017-02-06  185  			return true;
38413406277fd0 Michael Chan        2017-02-06  186  		}
38413406277fd0 Michael Chan        2017-02-06  187  
38413406277fd0 Michael Chan        2017-02-06  188  		*event = BNXT_TX_EVENT;
38413406277fd0 Michael Chan        2017-02-06  189  		dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
38413406277fd0 Michael Chan        2017-02-06  190  					   bp->rx_dir);
52c0609258658f Andy Gospodarek     2019-07-08  191  		__bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
38413406277fd0 Michael Chan        2017-02-06  192  				NEXT_RX(rxr->rx_prod));
38413406277fd0 Michael Chan        2017-02-06  193  		bnxt_reuse_rx_data(rxr, cons, page);
38413406277fd0 Michael Chan        2017-02-06  194  		return true;
f18c2b77b2e4ee Andy Gospodarek     2019-07-08  195  	case XDP_REDIRECT:
f18c2b77b2e4ee Andy Gospodarek     2019-07-08  196  		/* if we are calling this here then we know that the
f18c2b77b2e4ee Andy Gospodarek     2019-07-08  197  		 * redirect is coming from a frame received by the
f18c2b77b2e4ee Andy Gospodarek     2019-07-08  198  		 * bnxt_en driver.
f18c2b77b2e4ee Andy Gospodarek     2019-07-08  199  		 */
f18c2b77b2e4ee Andy Gospodarek     2019-07-08 @200  		dma_unmap_page_attrs(&pdev->dev, mapping,
f18c2b77b2e4ee Andy Gospodarek     2019-07-08  201  				     PAGE_SIZE, bp->rx_dir,
f18c2b77b2e4ee Andy Gospodarek     2019-07-08  202  				     DMA_ATTR_WEAK_ORDERING);
f18c2b77b2e4ee Andy Gospodarek     2019-07-08  203  
f18c2b77b2e4ee Andy Gospodarek     2019-07-08  204  		/* if we are unable to allocate a new buffer, abort and reuse */
f18c2b77b2e4ee Andy Gospodarek     2019-07-08  205  		if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
f18c2b77b2e4ee Andy Gospodarek     2019-07-08  206  			trace_xdp_exception(bp->dev, xdp_prog, act);
f18c2b77b2e4ee Andy Gospodarek     2019-07-08  207  			bnxt_reuse_rx_data(rxr, cons, page);
f18c2b77b2e4ee Andy Gospodarek     2019-07-08  208  			return true;
f18c2b77b2e4ee Andy Gospodarek     2019-07-08  209  		}
f18c2b77b2e4ee Andy Gospodarek     2019-07-08  210  
f18c2b77b2e4ee Andy Gospodarek     2019-07-08  211  		if (xdp_do_redirect(bp->dev, &xdp, xdp_prog)) {
f18c2b77b2e4ee Andy Gospodarek     2019-07-08  212  			trace_xdp_exception(bp->dev, xdp_prog, act);
322b87ca55f2f3 Andy Gospodarek     2019-07-08  213  			page_pool_recycle_direct(rxr->page_pool, page);
f18c2b77b2e4ee Andy Gospodarek     2019-07-08  214  			return true;
f18c2b77b2e4ee Andy Gospodarek     2019-07-08  215  		}
f18c2b77b2e4ee Andy Gospodarek     2019-07-08  216  
f18c2b77b2e4ee Andy Gospodarek     2019-07-08  217  		*event |= BNXT_REDIRECT_EVENT;
f18c2b77b2e4ee Andy Gospodarek     2019-07-08  218  		break;
c6d30e8391b85e Michael Chan        2017-02-06  219  	default:
c8064e5b4adac5 Paolo Abeni         2021-11-30  220  		bpf_warn_invalid_xdp_action(bp->dev, xdp_prog, act);
df561f6688fef7 Gustavo A. R. Silva 2020-08-23  221  		fallthrough;
c6d30e8391b85e Michael Chan        2017-02-06  222  	case XDP_ABORTED:
c6d30e8391b85e Michael Chan        2017-02-06  223  		trace_xdp_exception(bp->dev, xdp_prog, act);
df561f6688fef7 Gustavo A. R. Silva 2020-08-23  224  		fallthrough;
c6d30e8391b85e Michael Chan        2017-02-06  225  	case XDP_DROP:
c6d30e8391b85e Michael Chan        2017-02-06  226  		bnxt_reuse_rx_data(rxr, cons, page);
c6d30e8391b85e Michael Chan        2017-02-06  227  		break;
c6d30e8391b85e Michael Chan        2017-02-06  228  	}
c6d30e8391b85e Michael Chan        2017-02-06  229  	return true;
c6d30e8391b85e Michael Chan        2017-02-06  230  }
c6d30e8391b85e Michael Chan        2017-02-06  231  
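
One way the uninitialized use flagged above could be avoided, sketched
against the listing shown here (the fix actually applied to the series may
differ), is to derive 'mapping' in the XDP_REDIRECT branch as well instead
of relying on the value computed only in the XDP_TX path:

	case XDP_REDIRECT:
		/* Sketch only: compute the DMA address here too, mirroring
		 * the XDP_TX branch, so 'mapping' is written before it is
		 * read by dma_unmap_page_attrs().
		 */
		rx_buf = &rxr->rx_buf_ring[cons];
		mapping = rx_buf->mapping - bp->rx_dma_offset;

		dma_unmap_page_attrs(&pdev->dev, mapping,
				     PAGE_SIZE, bp->rx_dir,
				     DMA_ATTR_WEAK_ORDERING);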

-- 
0-DAY CI Kernel Test Service
https://01.org/lkp

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH net-next 10/11] bnxt: support transmit and free of aggregation buffers
  2022-03-20  6:57 ` [PATCH net-next 10/11] bnxt: support transmit and free of aggregation buffers Michael Chan
@ 2022-03-20 10:47   ` kernel test robot
  0 siblings, 0 replies; 14+ messages in thread
From: kernel test robot @ 2022-03-20 10:47 UTC (permalink / raw)
  To: Michael Chan, davem; +Cc: llvm, kbuild-all, netdev, kuba, gospo

Hi Michael,

I love your patch! Perhaps something to improve:

[auto build test WARNING on net-next/master]

url:    https://github.com/0day-ci/linux/commits/Michael-Chan/bnxt-Support-XDP-multi-buffer/20220320-150017
base:   https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git 092d992b76ed9d06389af0bc5efd5279d7b1ed9f
config: i386-randconfig-a013 (https://download.01.org/0day-ci/archive/20220320/202203201851.iT6RBOWK-lkp@intel.com/config)
compiler: clang version 15.0.0 (https://github.com/llvm/llvm-project 85e9b2687a13d1908aa86d1b89c5ce398a06cd39)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/0day-ci/linux/commit/01029de5d079c1271b0cdd6f64a6ee7132b1872f
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Michael-Chan/bnxt-Support-XDP-multi-buffer/20220320-150017
        git checkout 01029de5d079c1271b0cdd6f64a6ee7132b1872f
        # save the config file to linux build tree
        mkdir build_dir
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=i386 SHELL=/bin/bash drivers/net/ethernet/broadcom/bnxt/

If you fix the issue, kindly add the following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>

All warnings (new ones prefixed by >>):

>> drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c:203:6: warning: variable 'shinfo' is used uninitialized whenever 'if' condition is false [-Wsometimes-uninitialized]
           if (xdp)
               ^~~
   drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c:206:18: note: uninitialized use occurs here
           for (i = 0; i < shinfo->nr_frags; i++) {
                           ^~~~~~
   drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c:203:2: note: remove the 'if' if its condition is always true
           if (xdp)
           ^~~~~~~~
   drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c:200:32: note: initialize the variable 'shinfo' to silence this warning
           struct skb_shared_info *shinfo;
                                         ^
                                          = NULL
   drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c:291:36: warning: variable 'mapping' is uninitialized when used here [-Wuninitialized]
                   dma_unmap_page_attrs(&pdev->dev, mapping,
                                                    ^~~~~~~
   drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c:225:20: note: initialize the variable 'mapping' to silence this warning
           dma_addr_t mapping;
                             ^
                              = 0
   2 warnings generated.


vim +203 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c

   196	
   197	void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
   198				      struct xdp_buff *xdp)
   199	{
   200		struct skb_shared_info *shinfo;
   201		int i;
   202	
 > 203		if (xdp)
   204			shinfo = xdp_get_shared_info_from_buff(xdp);
   205	
   206		for (i = 0; i < shinfo->nr_frags; i++) {
   207			struct page *page = skb_frag_page(&shinfo->frags[i]);
   208	
   209			page_pool_recycle_direct(rxr->page_pool, page);
   210		}
   211		shinfo->nr_frags = 0;
   212	}
   213	
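
A simple way to address this one, again sketched from the listing above
rather than taken from the applied patch, is to return early when no
xdp_buff was handed in, so 'shinfo' is only read after it has been
assigned:

void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
			      struct xdp_buff *xdp)
{
	struct skb_shared_info *shinfo;
	int i;

	/* Bail out instead of guarding only the lookup, so 'shinfo' can
	 * never be used uninitialized (sketch, not the committed fix).
	 */
	if (!xdp)
		return;

	shinfo = xdp_get_shared_info_from_buff(xdp);
	for (i = 0; i < shinfo->nr_frags; i++) {
		struct page *page = skb_frag_page(&shinfo->frags[i]);

		page_pool_recycle_direct(rxr->page_pool, page);
	}
	shinfo->nr_frags = 0;
}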

-- 
0-DAY CI Kernel Test Service
https://01.org/lkp

^ permalink raw reply	[flat|nested] 14+ messages in thread

end of thread, other threads:[~2022-03-20 10:48 UTC | newest]

Thread overview: 14+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-03-20  6:57 [PATCH net-next 00/11] bnxt: Support XDP multi buffer Michael Chan
2022-03-20  6:57 ` [PATCH net-next 01/11] bnxt: refactor bnxt_rx_xdp to separate xdp_init_buff/xdp_prepare_buff Michael Chan
2022-03-20  9:47   ` kernel test robot
2022-03-20  6:57 ` [PATCH net-next 02/11] bnxt: add flag to denote that an xdp program is currently attached Michael Chan
2022-03-20  6:57 ` [PATCH net-next 03/11] bnxt: refactor bnxt_rx_pages operate on skb_shared_info Michael Chan
2022-03-20  6:57 ` [PATCH net-next 04/11] bnxt: rename bnxt_rx_pages to bnxt_rx_agg_pages_skb Michael Chan
2022-03-20  6:57 ` [PATCH net-next 05/11] bnxt: adding bnxt_rx_agg_pages_xdp for aggregated xdp Michael Chan
2022-03-20  6:57 ` [PATCH net-next 06/11] bnxt: set xdp_buff pfmemalloc flag if needed Michael Chan
2022-03-20  6:57 ` [PATCH net-next 07/11] bnxt: change receive ring space parameters Michael Chan
2022-03-20  6:57 ` [PATCH net-next 08/11] bnxt: add page_pool support for aggregation ring when using xdp Michael Chan
2022-03-20  6:57 ` [PATCH net-next 09/11] bnxt: adding bnxt_xdp_build_skb to build skb from multibuffer xdp_buff Michael Chan
2022-03-20  6:57 ` [PATCH net-next 10/11] bnxt: support transmit and free of aggregation buffers Michael Chan
2022-03-20 10:47   ` kernel test robot
2022-03-20  6:57 ` [PATCH net-next 11/11] bnxt: XDP multibuffer enablement Michael Chan

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).