* [PATCH net-next v2] octeontx2-pf: Add support for page pool
@ 2023-05-18  5:51 Ratheesh Kannoth
  2023-05-19  1:41 ` Yunsheng Lin
  0 siblings, 1 reply; 6+ messages in thread
From: Ratheesh Kannoth @ 2023-05-18  5:51 UTC (permalink / raw)
  To: netdev, linux-kernel
  Cc: sgoutham, davem, edumazet, kuba, pabeni, sbhatta, gakula,
	schalla, hkelam, Ratheesh Kannoth

A page pool for each rx queue enhances rx side performance
by reclaiming buffers back to each queue-specific pool. DMA
mapping is done only for the first allocation of a buffer.
As subsequent buffer allocations avoid DMA mapping,
this results in a performance improvement.
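
In outline, the per-RX-queue pattern this relies on is the following;
a minimal sketch of the page_pool calls used in the diff below, not the
exact driver code (numptrs, rbsize and dev stand in for the driver's
own values):

	struct page_pool_params pp_params = {
		.flags     = PP_FLAG_DMA_MAP | PP_FLAG_PAGE_FRAG,
		.pool_size = numptrs,	/* sized to the RX ring */
		.nid       = NUMA_NO_NODE,
		.dev       = dev,
		.dma_dir   = DMA_FROM_DEVICE,
	};
	struct page_pool *pp = page_pool_create(&pp_params);

	/* refill: the pool DMA-maps a page only on its first allocation */
	unsigned int offset;
	struct page *page = page_pool_alloc_frag(pp, &offset, rbsize,
						 GFP_ATOMIC);
	dma_addr_t dma = page_pool_get_dma_addr(page) + offset;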

Image        |  Performance with Linux kernel Packet Generator
------------ | -----------------------------------------------
Vanilla      |   3Mpps
             |
with this    |   42Mpps
change       |
-------------------------------------------------------------

Signed-off-by: Ratheesh Kannoth <rkannoth@marvell.com>
---

ChangeLog
v1 -> v2:
 * Removed GFP_DMA flag
 * Returned correct err value

v0 -> v1:
 * Removed CONFIG_PAGE_POOL #ifdefs in code
 * Used compound page APIs
 * Replaced page_pool_put_page API with page_pool_put_full_page API
---
 .../net/ethernet/marvell/octeontx2/Kconfig    |  1 +
 .../marvell/octeontx2/nic/otx2_common.c       | 75 ++++++++++++++++---
 .../marvell/octeontx2/nic/otx2_common.h       |  6 +-
 .../ethernet/marvell/octeontx2/nic/otx2_pf.c  | 11 ++-
 .../marvell/octeontx2/nic/otx2_txrx.c         | 19 +++--
 .../marvell/octeontx2/nic/otx2_txrx.h         |  1 +
 .../ethernet/marvell/octeontx2/nic/qos_sq.c   |  2 +-
 7 files changed, 93 insertions(+), 22 deletions(-)

diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig
index 993ac180a5db..a32d85d6f599 100644
--- a/drivers/net/ethernet/marvell/octeontx2/Kconfig
+++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig
@@ -32,6 +32,7 @@ config OCTEONTX2_PF
 	tristate "Marvell OcteonTX2 NIC Physical Function driver"
 	select OCTEONTX2_MBOX
 	select NET_DEVLINK
+	select PAGE_POOL
 	depends on (64BIT && COMPILE_TEST) || ARM64
 	select DIMLIB
 	depends on PCI
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index f9286648e45c..60476fd413e7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -518,11 +518,32 @@ void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx)
 		     (pfvf->hw.cq_ecount_wait - 1));
 }
 
+static int otx2_alloc_pool_buf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+			       dma_addr_t *dma)
+{
+	unsigned int offset = 0;
+	struct page *page;
+	size_t sz;
+
+	sz = SKB_DATA_ALIGN(pool->rbsize);
+	sz = ALIGN(sz, OTX2_ALIGN);
+
+	page = page_pool_alloc_frag(pool->page_pool, &offset, sz, GFP_ATOMIC);
+	if (unlikely(!page))
+		return -ENOMEM;
+
+	*dma = page_pool_get_dma_addr(page) + offset;
+	return 0;
+}
+
 static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
 			     dma_addr_t *dma)
 {
 	u8 *buf;
 
+	if (pool->page_pool)
+		return otx2_alloc_pool_buf(pfvf, pool, dma);
+
 	buf = napi_alloc_frag_align(pool->rbsize, OTX2_ALIGN);
 	if (unlikely(!buf))
 		return -ENOMEM;
@@ -1205,10 +1226,28 @@ void otx2_sq_free_sqbs(struct otx2_nic *pfvf)
 	}
 }
 
+void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool,
+		    u64 iova, int size)
+{
+	u64 pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
+	struct page *page = virt_to_head_page(phys_to_virt(pa));
+
+	if (pool->page_pool) {
+		page_pool_put_full_page(pool->page_pool, page, true);
+	} else {
+		dma_unmap_page_attrs(pfvf->dev, iova, size,
+				     DMA_FROM_DEVICE,
+				     DMA_ATTR_SKIP_CPU_SYNC);
+
+		put_page(page);
+	}
+}
+
 void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type)
 {
 	int pool_id, pool_start = 0, pool_end = 0, size = 0;
-	u64 iova, pa;
+	struct otx2_pool *pool;
+	u64 iova;
 
 	if (type == AURA_NIX_SQ) {
 		pool_start = otx2_get_pool_idx(pfvf, type, 0);
@@ -1224,15 +1263,13 @@ void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type)
 	/* Free SQB and RQB pointers from the aura pool */
 	for (pool_id = pool_start; pool_id < pool_end; pool_id++) {
 		iova = otx2_aura_allocptr(pfvf, pool_id);
+		pool = &pfvf->qset.pool[pool_id];
 		while (iova) {
 			if (type == AURA_NIX_RQ)
 				iova -= OTX2_HEAD_ROOM;
 
-			pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
-			dma_unmap_page_attrs(pfvf->dev, iova, size,
-					     DMA_FROM_DEVICE,
-					     DMA_ATTR_SKIP_CPU_SYNC);
-			put_page(virt_to_page(phys_to_virt(pa)));
+			otx2_free_bufs(pfvf, pool, iova, size);
+
 			iova = otx2_aura_allocptr(pfvf, pool_id);
 		}
 	}
@@ -1250,6 +1287,8 @@ void otx2_aura_pool_free(struct otx2_nic *pfvf)
 		pool = &pfvf->qset.pool[pool_id];
 		qmem_free(pfvf->dev, pool->stack);
 		qmem_free(pfvf->dev, pool->fc_addr);
+		page_pool_destroy(pool->page_pool);
+		pool->page_pool = NULL;
 	}
 	devm_kfree(pfvf->dev, pfvf->qset.pool);
 	pfvf->qset.pool = NULL;
@@ -1333,8 +1372,9 @@ int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
 }
 
 int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
-		   int stack_pages, int numptrs, int buf_size)
+		   int stack_pages, int numptrs, int buf_size, int type)
 {
+	struct page_pool_params pp_params = { 0 };
 	struct npa_aq_enq_req *aq;
 	struct otx2_pool *pool;
 	int err;
@@ -1378,6 +1418,22 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
 	aq->ctype = NPA_AQ_CTYPE_POOL;
 	aq->op = NPA_AQ_INSTOP_INIT;
 
+	if (type != AURA_NIX_RQ) {
+		pool->page_pool = NULL;
+		return 0;
+	}
+
+	pp_params.flags = PP_FLAG_PAGE_FRAG | PP_FLAG_DMA_MAP;
+	pp_params.pool_size = numptrs;
+	pp_params.nid = NUMA_NO_NODE;
+	pp_params.dev = pfvf->dev;
+	pp_params.dma_dir = DMA_FROM_DEVICE;
+	pool->page_pool = page_pool_create(&pp_params);
+	if (IS_ERR(pool->page_pool)) {
+		netdev_err(pfvf->netdev, "Creation of page pool failed\n");
+		return PTR_ERR(pool->page_pool);
+	}
+
 	return 0;
 }
 
@@ -1412,7 +1468,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
 
 		/* Initialize pool context */
 		err = otx2_pool_init(pfvf, pool_id, stack_pages,
-				     num_sqbs, hw->sqb_size);
+				     num_sqbs, hw->sqb_size, AURA_NIX_SQ);
 		if (err)
 			goto fail;
 	}
@@ -1475,7 +1531,7 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
 	}
 	for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
 		err = otx2_pool_init(pfvf, pool_id, stack_pages,
-				     num_ptrs, pfvf->rbsize);
+				     num_ptrs, pfvf->rbsize, AURA_NIX_RQ);
 		if (err)
 			goto fail;
 	}
@@ -1659,7 +1715,6 @@ int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable)
 	req->bpid_per_chan = 0;
 #endif
 
-
 	return otx2_sync_mbox_msg(&pfvf->mbox);
 }
 EXPORT_SYMBOL(otx2_nix_config_bp);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index b2267c8bec37..a9ed15d1793a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -976,7 +976,7 @@ int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
 int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
 void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
 int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
-void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
+void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx);
 void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
 int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura);
 int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
@@ -984,7 +984,7 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
 int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
 		      dma_addr_t *dma);
 int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
-		   int stack_pages, int numptrs, int buf_size);
+		   int stack_pages, int numptrs, int buf_size, int type);
 int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
 		   int pool_id, int numptrs);
 
@@ -1054,6 +1054,8 @@ u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
 int otx2_handle_ntuple_tc_features(struct net_device *netdev,
 				   netdev_features_t features);
 int otx2_smq_flush(struct otx2_nic *pfvf, int smq);
+void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool,
+		    u64 iova, int size);
 
 /* tc support */
 int otx2_init_tc(struct otx2_nic *nic);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index e1883c3edda3..db3fcab1c8cd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1555,7 +1555,9 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
 	struct nix_lf_free_req *free_req;
 	struct mbox *mbox = &pf->mbox;
 	struct otx2_cq_queue *cq;
+	struct otx2_pool *pool;
 	struct msg_req *req;
+	int pool_id;
 	int qidx;
 
 	/* Ensure all SQE are processed */
@@ -1584,7 +1586,7 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
 	for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
 		cq = &qset->cq[qidx];
 		if (cq->cq_type == CQ_RX)
-			otx2_cleanup_rx_cqes(pf, cq);
+			otx2_cleanup_rx_cqes(pf, cq, qidx);
 		else
 			otx2_cleanup_tx_cqes(pf, cq);
 	}
@@ -1594,6 +1596,13 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
 	/* Free RQ buffer pointers*/
 	otx2_free_aura_ptr(pf, AURA_NIX_RQ);
 
+	for (qidx = 0; qidx < pf->hw.rx_queues; qidx++) {
+		pool_id = otx2_get_pool_idx(pf, AURA_NIX_RQ, qidx);
+		pool = &pf->qset.pool[pool_id];
+		page_pool_destroy(pool->page_pool);
+		pool->page_pool = NULL;
+	}
+
 	otx2_free_cq_res(pf);
 
 	/* Free all ingress bandwidth profiles allocated */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index e288f46b23a8..37d4e4b73816 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -217,9 +217,6 @@ static bool otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
 				va - page_address(page) + off,
 				len - off, pfvf->rbsize);
-
-		otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM,
-				    pfvf->rbsize, DMA_FROM_DEVICE);
 		return true;
 	}
 
@@ -382,6 +379,8 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
 	if (pfvf->netdev->features & NETIF_F_RXCSUM)
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 
+	skb_mark_for_recycle(skb);
+
 	napi_gro_frags(napi);
 }
 
@@ -1186,11 +1185,13 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
 }
 EXPORT_SYMBOL(otx2_sq_append_skb);
 
-void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
+void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx)
 {
 	struct nix_cqe_rx_s *cqe;
 	int processed_cqe = 0;
-	u64 iova, pa;
+	struct otx2_pool *pool;
+	u16 pool_id;
+	u64 iova;
 
 	if (pfvf->xdp_prog)
 		xdp_rxq_info_unreg(&cq->xdp_rxq);
@@ -1198,6 +1199,9 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
 	if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
 		return;
 
+	pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx);
+	pool = &pfvf->qset.pool[pool_id];
+
 	while (cq->pend_cqe) {
 		cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq);
 		processed_cqe++;
@@ -1210,9 +1214,8 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
 			continue;
 		}
 		iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
-		pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
-		otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, DMA_FROM_DEVICE);
-		put_page(virt_to_page(phys_to_virt(pa)));
+
+		otx2_free_bufs(pfvf, pool, iova, pfvf->rbsize);
 	}
 
 	/* Free CQEs to HW */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index 7ab6db9a986f..b5d689eeff80 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -118,6 +118,7 @@ struct otx2_cq_poll {
 struct otx2_pool {
 	struct qmem		*stack;
 	struct qmem		*fc_addr;
+	struct page_pool	*page_pool;
 	u16			rbsize;
 };
 
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
index d96ed29c1567..9d887bfc3108 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
@@ -63,7 +63,7 @@ static int otx2_qos_sq_aura_pool_init(struct otx2_nic *pfvf, int qidx)
 
 	/* Initialize pool context */
 	err = otx2_pool_init(pfvf, pool_id, stack_pages,
-			     num_sqbs, hw->sqb_size);
+			     num_sqbs, hw->sqb_size, AURA_NIX_SQ);
 	if (err)
 		goto aura_free;
 
-- 
2.25.1



* Re: [PATCH net-next v2] octeontx2-pf: Add support for page pool
  2023-05-18  5:51 [PATCH net-next v2] octeontx2-pf: Add support for page pool Ratheesh Kannoth
@ 2023-05-19  1:41 ` Yunsheng Lin
  2023-05-19  1:52   ` Ratheesh Kannoth
  0 siblings, 1 reply; 6+ messages in thread
From: Yunsheng Lin @ 2023-05-19  1:41 UTC (permalink / raw)
  To: Ratheesh Kannoth, netdev, linux-kernel
  Cc: sgoutham, davem, edumazet, kuba, pabeni, sbhatta, gakula,
	schalla, hkelam

On 2023/5/18 13:51, Ratheesh Kannoth wrote:
> Page pool for each rx queue enhance rx side performance
> by reclaiming buffers back to each queue specific pool. DMA
> mapping is done only for first allocation of buffers.
> As subsequent buffers allocation avoid DMA mapping,
> it results in performance improvement.
> 
> Image        |  Performance with Linux kernel Packet Generator

Is there any more detailed info for the performance data?
'kernel Packet Generator' means using the pktgen module in
net/core/pktgen.c? It seems pktgen is more for tx; is there
any obvious reason why the page pool optimization for rx has
brought about a tenfold improvement?

> ------------ | -----------------------------------------------
> Vanilla      |   3Mpps
>              |
> with this    |   42Mpps
> change	     |
> -------------------------------------------------------------
> 

...

>  static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
>  			     dma_addr_t *dma)
>  {
>  	u8 *buf;
>  
> +	if (pool->page_pool)
> +		return otx2_alloc_pool_buf(pfvf, pool, dma);
> +
>  	buf = napi_alloc_frag_align(pool->rbsize, OTX2_ALIGN);
>  	if (unlikely(!buf))
>  		return -ENOMEM;

It seems the above is dead code when using 'select PAGE_POOL', as
PAGE_POOL config is always selected by the driver?

> @@ -1205,10 +1226,28 @@ void otx2_sq_free_sqbs(struct otx2_nic *pfvf)
>  	}
>  }
>  

...

> @@ -1659,7 +1715,6 @@ int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable)
>  	req->bpid_per_chan = 0;
>  #endif
>  
> -

Nit: unrelated change here.

>  	return otx2_sync_mbox_msg(&pfvf->mbox);
>  }


* RE:  Re: [PATCH net-next v2] octeontx2-pf: Add support for page pool
  2023-05-19  1:41 ` Yunsheng Lin
@ 2023-05-19  1:52   ` Ratheesh Kannoth
  2023-05-19  2:37     ` Yunsheng Lin
  0 siblings, 1 reply; 6+ messages in thread
From: Ratheesh Kannoth @ 2023-05-19  1:52 UTC (permalink / raw)
  To: Yunsheng Lin, netdev, linux-kernel
  Cc: Sunil Kovvuri Goutham, davem, edumazet, kuba, pabeni,
	Subbaraya Sundeep Bhatta, Geethasowjanya Akula, Srujana Challa,
	Hariprasad Kelam


> -----Original Message-----
> From: Yunsheng Lin <linyunsheng@huawei.com>
> Sent: Friday, May 19, 2023 7:12 AM
> To: Ratheesh Kannoth <rkannoth@marvell.com>; netdev@vger.kernel.org;
> linux-kernel@vger.kernel.org
> Cc: Sunil Kovvuri Goutham <sgoutham@marvell.com>;
> davem@davemloft.net; edumazet@google.com; kuba@kernel.org;
> pabeni@redhat.com; Subbaraya Sundeep Bhatta <sbhatta@marvell.com>;
> Geethasowjanya Akula <gakula@marvell.com>; Srujana Challa
> <schalla@marvell.com>; Hariprasad Kelam <hkelam@marvell.com>
> Subject: [EXT] Re: [PATCH net-next v2] octeontx2-pf: Add support for page
> pool
> 
> External Email
> 
> ----------------------------------------------------------------------
> On 2023/5/18 13:51, Ratheesh Kannoth wrote:
> > Page pool for each rx queue enhance rx side performance by reclaiming
> > buffers back to each queue specific pool. DMA mapping is done only for
> > first allocation of buffers.
> > As subsequent buffers allocation avoid DMA mapping, it results in
> > performance improvement.
> >
> > Image        |  Performance with Linux kernel Packet Generator
> 
> Is there any more detailed info for the performance data?
> 'kernel Packet Generator' means using pktgen module in the
> net/core/pktgen.c? it seems pktgen is more for tx, is there any obvious
> reason why the page pool optimization for rx has brought about a tenfold
> improvement?
We used the packet generator on the TX machine.  The performance data is for the RX DUT.  I will remove
the packet generator text from the commit message, as it gives ambiguous information.
DUT  Rx     <-------------------------     TX  (Linux machine with packet generator)
 (page pool support)

> 
> > ------------ | -----------------------------------------------
> > Vanilla      |   3Mpps
> >              |
> > with this    |   42Mpps
> > change	     |
> > -------------------------------------------------------------
> >
> 
> ...
> 
> >  static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
> >  			     dma_addr_t *dma)
> >  {
> >  	u8 *buf;
> >
> > +	if (pool->page_pool)
> > +		return otx2_alloc_pool_buf(pfvf, pool, dma);
> > +
> >  	buf = napi_alloc_frag_align(pool->rbsize, OTX2_ALIGN);
> >  	if (unlikely(!buf))
> >  		return -ENOMEM;
> 
> It seems the above is dead code when using 'select PAGE_POOL', as
> PAGE_POOL config is always selected by the driver?
__otx2_alloc_rbuf() is common code for RX and TX.  For RX, pool->page_pool != NULL, so allocation is from the page pool.
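In other words, the flow is roughly the below; a sketch for illustration,
not a verbatim copy of the driver code:

	/* RX pools are created with type AURA_NIX_RQ and get a page_pool;
	 * SQ (TX) pools are created with type AURA_NIX_SQ and keep
	 * pool->page_pool == NULL.
	 */
	if (pool->page_pool)		/* RX refill path */
		return otx2_alloc_pool_buf(pfvf, pool, dma);

	/* SQB/TX pools fall through here, so this is not dead code */
	buf = napi_alloc_frag_align(pool->rbsize, OTX2_ALIGN);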


> > @@ -1205,10 +1226,28 @@ void otx2_sq_free_sqbs(struct otx2_nic *pfvf)
> >  	}
> >  }
> >
> 
> ...
> 
> > @@ -1659,7 +1715,6 @@ int otx2_nix_config_bp(struct otx2_nic *pfvf,
> bool enable)
> >  	req->bpid_per_chan = 0;
> >  #endif
> >
> > -
> 
> Nit: unrelated change here.
Sorry, this was caused by a vim script; I will remove it.

> >  	return otx2_sync_mbox_msg(&pfvf->mbox);  }


* Re: [PATCH net-next v2] octeontx2-pf: Add support for page pool
  2023-05-19  1:52   ` Ratheesh Kannoth
@ 2023-05-19  2:37     ` Yunsheng Lin
  2023-05-19  5:19       ` [EXT] " Sunil Kovvuri Goutham
  0 siblings, 1 reply; 6+ messages in thread
From: Yunsheng Lin @ 2023-05-19  2:37 UTC (permalink / raw)
  To: Ratheesh Kannoth, netdev, linux-kernel
  Cc: Sunil Kovvuri Goutham, davem, edumazet, kuba, pabeni,
	Subbaraya Sundeep Bhatta, Geethasowjanya Akula, Srujana Challa,
	Hariprasad Kelam

On 2023/5/19 9:52, Ratheesh Kannoth wrote:
>> ----------------------------------------------------------------------
>> On 2023/5/18 13:51, Ratheesh Kannoth wrote:
>>> Page pool for each rx queue enhance rx side performance by reclaiming
>>> buffers back to each queue specific pool. DMA mapping is done only for
>>> first allocation of buffers.
>>> As subsequent buffers allocation avoid DMA mapping, it results in
>>> performance improvement.
>>>
>>> Image        |  Performance with Linux kernel Packet Generator
>>
>> Is there any more detailed info for the performance data?
>> 'kernel Packet Generator' means using pktgen module in the
>> net/core/pktgen.c? it seems pktgen is more for tx, is there any abvious
>> reason why the page pool optimization for rx have brought about ten times
>> improvement?
> We used the packet generator on the TX machine.  The performance data is for the RX DUT.  I will remove
> the packet generator text from the commit message, as it gives ambiguous information.
> DUT  Rx     <-------------------------     TX  (Linux machine with packet generator)
>  (page pool support) 

Thanks for clarifying.
DUT is for 'Device Under Test'?
What does the DUT do after it receives a packet? XDP DROP?

> 
>>
>>> ------------ | -----------------------------------------------
>>> Vanilla      |   3Mpps
>>>              |
>>> with this    |   42Mpps
>>> change	     |
>>> -------------------------------------------------------------
>>>
>>
>> ...
>>
>>>  static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
>>>  			     dma_addr_t *dma)
>>>  {
>>>  	u8 *buf;
>>>
>>> +	if (pool->page_pool)
>>> +		return otx2_alloc_pool_buf(pfvf, pool, dma);
>>> +
>>>  	buf = napi_alloc_frag_align(pool->rbsize, OTX2_ALIGN);
>>>  	if (unlikely(!buf))
>>>  		return -ENOMEM;
>>
>> It seems the above is dead code when using 'select PAGE_POOL', as
>> PAGE_POOL config is always selected by the driver?
> __otx2_alloc_rbuf() is common code for RX and TX.  For RX, pool->page_pool != NULL, so allocation is from the page pool.
> 

Am I missing something here? 'buf' is dma-mapped with
DMA_FROM_DEVICE; can it be used for TX?

Also, what does the 'r' in __otx2_alloc_rbuf() mean?




* RE: [EXT] Re: [PATCH net-next v2] octeontx2-pf: Add support for page pool
  2023-05-19  2:37     ` Yunsheng Lin
@ 2023-05-19  5:19       ` Sunil Kovvuri Goutham
  2023-05-19  5:30         ` Ratheesh Kannoth
  0 siblings, 1 reply; 6+ messages in thread
From: Sunil Kovvuri Goutham @ 2023-05-19  5:19 UTC (permalink / raw)
  To: Yunsheng Lin, Ratheesh Kannoth, netdev, linux-kernel
  Cc: davem, edumazet, kuba, pabeni, Subbaraya Sundeep Bhatta,
	Geethasowjanya Akula, Srujana Challa, Hariprasad Kelam



> -----Original Message-----
> From: Yunsheng Lin <linyunsheng@huawei.com>
> Sent: Friday, May 19, 2023 8:07 AM
> To: Ratheesh Kannoth <rkannoth@marvell.com>; netdev@vger.kernel.org;
> linux-kernel@vger.kernel.org
> Cc: Sunil Kovvuri Goutham <sgoutham@marvell.com>; davem@davemloft.net;
> edumazet@google.com; kuba@kernel.org; pabeni@redhat.com; Subbaraya
> Sundeep Bhatta <sbhatta@marvell.com>; Geethasowjanya Akula
> <gakula@marvell.com>; Srujana Challa <schalla@marvell.com>; Hariprasad
> Kelam <hkelam@marvell.com>
> Subject: [EXT] Re: [PATCH net-next v2] octeontx2-pf: Add support for page pool
> 
> External Email
> 
> ----------------------------------------------------------------------
> On 2023/5/19 9:52, Ratheesh Kannoth wrote:
> >> ---------------------------------------------------------------------
> >> - On 2023/5/18 13:51, Ratheesh Kannoth wrote:
> >>> Page pool for each rx queue enhance rx side performance by
> >>> reclaiming buffers back to each queue specific pool. DMA mapping is
> >>> done only for first allocation of buffers.
> >>> As subsequent buffers allocation avoid DMA mapping, it results in
> >>> performance improvement.
> >>>
> >>> Image        |  Performance with Linux kernel Packet Generator
> >>
> >> Is there any more detailed info for the performance data?
> >> 'kernel Packet Generator' means using pktgen module in the
> >> net/core/pktgen.c? it seems pktgen is more for tx, is there any
> >> obvious reason why the page pool optimization for rx has brought
> >> about ten times improvement?
> > We used the packet generator on the TX machine.  The performance data is for the RX
> > DUT.  I will remove the packet generator text from the commit message as it gives
> ambiguous information.
> > DUT  Rx     <-------------------------     TX  (Linux machine with packet generator)
> >  (page pool support)
> 
> Thanks for clarifying.
> DUT is for 'Device Under Test'?
> What does the DUT do after it receives a packet? XDP DROP?
> 
> >
> >>
> >>> ------------ | -----------------------------------------------
> >>> Vanilla      |   3Mpps
> >>>              |
> >>> with this    |   42Mpps
> >>> change	     |
> >>> -------------------------------------------------------------
> >>>
> >>
> >> ...
> >>
> >>>  static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
> >>>  			     dma_addr_t *dma)
> >>>  {
> >>>  	u8 *buf;
> >>>
> >>> +	if (pool->page_pool)
> >>> +		return otx2_alloc_pool_buf(pfvf, pool, dma);
> >>> +
> >>>  	buf = napi_alloc_frag_align(pool->rbsize, OTX2_ALIGN);
> >>>  	if (unlikely(!buf))
> >>>  		return -ENOMEM;
> >>
> >> It seems the above is dead code when using 'select PAGE_POOL', as
> >> PAGE_POOL config is always selected by the driver?
> > __otx2_alloc_rbuf() is common code for RX and TX.  For RX, pool->page_pool
> != NULL, so allocation is from the page pool.
> >
> 
> Am I missing something here? 'buf' is dma-mapped with DMA_FROM_DEVICE,
> can it be used for TX?
> 
> Also, what does the 'r' in __otx2_alloc_rbuf() mean?
> 

The HW takes care of cache coherency between the device and the CPU, hence DMA_ATTR_SKIP_CPU_SYNC
was used, and the direction of DMA doesn't matter here. Hence, instead of duplicating the same API,
otx2_alloc_rbuf() was used for both Rx and Tx. The 'r' stands for receive.
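
For illustration, the non-page-pool path maps its buffer along these lines;
a sketch only, the driver's actual helpers may differ:

	buf = napi_alloc_frag_align(pool->rbsize, OTX2_ALIGN);
	/* no CPU cache sync needed: the HW keeps device and CPU coherent */
	*dma = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize,
				    DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);

Since coherency is handled in hardware, the same mapping can back either an
Rx or a Tx buffer.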

Thanks,
Sunil.



* RE:  Re: [PATCH net-next v2] octeontx2-pf: Add support for page pool
  2023-05-19  5:19       ` [EXT] " Sunil Kovvuri Goutham
@ 2023-05-19  5:30         ` Ratheesh Kannoth
  0 siblings, 0 replies; 6+ messages in thread
From: Ratheesh Kannoth @ 2023-05-19  5:30 UTC (permalink / raw)
  To: Sunil Kovvuri Goutham, Yunsheng Lin, netdev, linux-kernel
  Cc: davem, edumazet, kuba, pabeni, Subbaraya Sundeep Bhatta,
	Geethasowjanya Akula, Srujana Challa, Hariprasad Kelam

> -----Original Message-----
> From: Sunil Kovvuri Goutham <sgoutham@marvell.com>
> Sent: Friday, May 19, 2023 10:50 AM
> To: Yunsheng Lin <linyunsheng@huawei.com>; Ratheesh Kannoth
> <rkannoth@marvell.com>; netdev@vger.kernel.org; linux-
> kernel@vger.kernel.org
> Cc: davem@davemloft.net; edumazet@google.com; kuba@kernel.org;
> pabeni@redhat.com; Subbaraya Sundeep Bhatta <sbhatta@marvell.com>;
> Geethasowjanya Akula <gakula@marvell.com>; Srujana Challa
> <schalla@marvell.com>; Hariprasad Kelam <hkelam@marvell.com>
> Subject: RE: [EXT] Re: [PATCH net-next v2] octeontx2-pf: Add support for
> page pool
> 
> >
> > ----------------------------------------------------------------------
> > On 2023/5/19 9:52, Ratheesh Kannoth wrote:
> > >> -------------------------------------------------------------------
> > >> --
> > >> - On 2023/5/18 13:51, Ratheesh Kannoth wrote:
> > >>> Page pool for each rx queue enhance rx side performance by
> > >>> reclaiming buffers back to each queue specific pool. DMA mapping
> > >>> is done only for first allocation of buffers.
> > >>> As subsequent buffers allocation avoid DMA mapping, it results in
> > >>> performance improvement.
> > >>>
> > >>> Image        |  Performance with Linux kernel Packet Generator
> > >>
> > >> Is there any more detailed info for the performance data?
> > >> 'kernel Packet Generator' means using pktgen module in the
> > >> net/core/pktgen.c? it seems pktgen is more for tx, is there any
> > >> obvious reason why the page pool optimization for rx has brought
> > >> about ten times improvement?
> > > We used the packet generator on the TX machine.  The performance data is for the RX
> > > DUT.  I will remove the packet generator text from the commit message as
> > > it gives
> > ambiguous information.
> > > DUT  Rx     <-------------------------     TX  (Linux machine with packet
> generator)
> > >  (page pool support)
> >
> > Thanks for clarifying.
> > DUT is for 'Device Under Test'?
Yes

> > What does the DUT do after it receives a packet? XDP DROP?
We did not use any XDP programs to drop the packets.  The stack drops them, as there are no listeners for these packets.


> > >
> > >>
> > >>> ------------ | -----------------------------------------------
> > >>> Vanilla      |   3Mpps
> > >>>              |
> > >>> with this    |   42Mpps
> > >>> change	     |
> > >>> -------------------------------------------------------------
> > >>>
> > >>
> > >> ...
> > >>
> > >>>  static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool
> *pool,
> > >>>  			     dma_addr_t *dma)
> > >>>  {
> > >>>  	u8 *buf;
> > >>>
> > >>> +	if (pool->page_pool)
> > >>> +		return otx2_alloc_pool_buf(pfvf, pool, dma);
> > >>> +
> > >>>  	buf = napi_alloc_frag_align(pool->rbsize, OTX2_ALIGN);
> > >>>  	if (unlikely(!buf))
> > >>>  		return -ENOMEM;
> > >>
> > >> It seems the above is dead code when using 'select PAGE_POOL', as
> > >> PAGE_POOL config is always selected by the driver?
> > > _otx2_alloc_rbuf() is common code for RX and TX.  For RX,
> > > pool->page_pool
> > != NULL, so allocation is from page pool.
> > >
> >
> > Am I missing something here? 'buf' is dma-mapped with
> DMA_FROM_DEVICE,
> > can it be used for TX?
> >
> > Also, what does the 'r' in __otx2_alloc_rbuf() mean?
> >
> 
> The HW takes care of cache coherency between the device and the CPU, hence
> DMA_ATTR_SKIP_CPU_SYNC was used, and the direction of DMA doesn't matter
> here. Hence, instead of duplicating the same API, otx2_alloc_rbuf() was used
> for both Rx and Tx. The 'r' stands for receive.
> 
> Thanks,
> Sunil.

-Ratheesh

