From: Jesper Dangaard Brouer
Subject: [net-next PATCH RFC 2/8] net: mvneta: use page pool API for sw buffer manager
Date: Fri, 07 Dec 2018 00:25:37 +0100
Message-ID: <154413873712.21735.9487067271289391052.stgit@firesoul>
References: <154413868810.21735.572808840657728172.stgit@firesoul>
In-Reply-To: <154413868810.21735.572808840657728172.stgit@firesoul>
To: netdev@vger.kernel.org, "David S. Miller", Jesper Dangaard Brouer
Cc: Toke Høiland-Jørgensen, ard.biesheuvel@linaro.org, Jason Wang,
    ilias.apalodimas@linaro.org, Björn Töpel, w@1wt.eu, Saeed Mahameed,
    mykyta.iziumtsev@gmail.com, Daniel Borkmann, Alexei Starovoitov,
    Tariq Toukan

From: Ilias Apalodimas

Use the page_pool API for allocations and DMA handling instead of
__dev_alloc_page()/dma_map_page() and __free_page()/dma_unmap_page().

The page_pool API offers buffer recycling capabilities for XDP, but
allocates one page per packet unless the driver splits and manages the
allocated page. Although XDP is not part of the driver yet, the current
implementation already allocates one page per packet, so there is no
performance penalty from using the API.

For now, pages are unmapped via page_pool_unmap_page() before packets
travel into the network stack, as the stack does not have a return hook
yet. Since this call clears the page_pool state, it is safe to let the
page be returned to the normal page allocator.
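A minimal sketch of the consumer-side flow described above, using only
the page_pool calls this patch relies on. Everything here is
illustrative: example_rx_cycle and the pool_size value are placeholders,
not mvneta symbols.

#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <net/page_pool.h>

/* Illustrative only: exercises the alloc -> DMA addr -> unmap
 * lifecycle sketched in the commit message.
 */
static int example_rx_cycle(struct device *dma_dev)
{
	struct page_pool_params params = {
		.order		= 0,			/* one page per packet */
		.flags		= PP_FLAG_DMA_MAP,	/* pool maps pages itself */
		.pool_size	= 256,			/* placeholder ring size */
		.nid		= NUMA_NO_NODE,
		.dev		= dma_dev,		/* device doing the DMA */
		.dma_dir	= DMA_FROM_DEVICE,
	};
	struct page_pool *pool;
	struct page *page;
	dma_addr_t dma;

	pool = page_pool_create(&params);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	page = page_pool_dev_alloc_pages(pool);
	if (page) {
		/* Page was already DMA-mapped by the pool */
		dma = page_pool_get_dma_addr(page);

		/* ... point an RX descriptor at "dma", receive a packet ... */

		/* No return hook in the stack yet: unmap and clear the
		 * page_pool state before the skb leaves the driver, so the
		 * page can go back through the normal page allocator.
		 */
		page_pool_unmap_page(pool, page);
	}

	page_pool_destroy(pool);
	return page ? 0 : -ENOMEM;
}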
Signed-off-by: Ilias Apalodimas
Signed-off-by: Jesper Dangaard Brouer
---
 drivers/net/ethernet/marvell/Kconfig  |    1 +
 drivers/net/ethernet/marvell/mvneta.c |   56 ++++++++++++++++++++++++---------
 2 files changed, 41 insertions(+), 16 deletions(-)

diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index 3238aa7f5dac..3325abe67465 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -60,6 +60,7 @@ config MVNETA
 	depends on ARCH_MVEBU || COMPILE_TEST
 	select MVMDIO
 	select PHYLINK
+	select PAGE_POOL
 	---help---
 	  This driver supports the network interface units in the
 	  Marvell ARMADA XP, ARMADA 370, ARMADA 38x and
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 5bfd349bf41a..2354421fe96f 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -33,6 +33,7 @@
 #include <linux/skbuff.h>
 #include <net/hwbm.h>
 #include "mvneta_bm.h"
+#include <net/page_pool.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
 #include <net/tso.h>
@@ -624,6 +625,9 @@ struct mvneta_rx_queue {
 	struct sk_buff *skb;
 	int left_size;
 
+	/* page pool */
+	struct page_pool *page_pool;
+
 	/* error counters */
 	u32 skb_alloc_err;
 	u32 refill_err;
@@ -1813,17 +1817,11 @@ static int mvneta_rx_refill(struct mvneta_port *pp,
 	dma_addr_t phys_addr;
 	struct page *page;
 
-	page = __dev_alloc_page(gfp_mask);
+	page = page_pool_dev_alloc_pages(rxq->page_pool);
 	if (!page)
 		return -ENOMEM;
 
-	/* map page for use */
-	phys_addr = dma_map_page(pp->dev->dev.parent, page, 0, PAGE_SIZE,
-				 DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
-		__free_page(page);
-		return -ENOMEM;
-	}
+	phys_addr = page_pool_get_dma_addr(page);
 	phys_addr += pp->rx_offset_correction;
 	mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
 
@@ -1892,10 +1890,11 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
 		if (!data || !(rx_desc->buf_phys_addr))
 			continue;
 
-		dma_unmap_page(pp->dev->dev.parent, rx_desc->buf_phys_addr,
-			       PAGE_SIZE, DMA_FROM_DEVICE);
-		__free_page(data);
+		page_pool_put_page(rxq->page_pool, data, false);
 	}
+
+	if (rxq->page_pool)
+		page_pool_destroy(rxq->page_pool);
 }
 
 static inline
@@ -2010,8 +2009,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 			skb_add_rx_frag(rxq->skb, frag_num, page,
 					frag_offset, frag_size,
 					PAGE_SIZE);
-			dma_unmap_page(dev->dev.parent, phys_addr,
-				       PAGE_SIZE, DMA_FROM_DEVICE);
+			page_pool_unmap_page(rxq->page_pool, page);
 			rxq->left_size -= frag_size;
 		}
 	} else {
@@ -2041,8 +2039,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 					frag_offset, frag_size,
 					PAGE_SIZE);
 
-			dma_unmap_page(dev->dev.parent, phys_addr,
-				       PAGE_SIZE, DMA_FROM_DEVICE);
+			page_pool_unmap_page(rxq->page_pool, page);
 
 			rxq->left_size -= frag_size;
 		}
@@ -2828,11 +2825,37 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
 	return rx_done;
 }
 
+static int mvneta_create_page_pool(struct mvneta_port *pp,
+				   struct mvneta_rx_queue *rxq, int num)
+{
+	struct page_pool_params pp_params = { 0 };
+	int err = 0;
+
+	pp_params.order = 0;
+	/* internal DMA mapping in page_pool */
+	pp_params.flags = PP_FLAG_DMA_MAP;
+	pp_params.pool_size = num;
+	pp_params.nid = NUMA_NO_NODE;
+	pp_params.dev = pp->dev->dev.parent;
+	pp_params.dma_dir = DMA_FROM_DEVICE;
+
+	rxq->page_pool = page_pool_create(&pp_params);
+	if (IS_ERR(rxq->page_pool)) {
+		err = PTR_ERR(rxq->page_pool);
+		rxq->page_pool = NULL;
+	}
+
+	return err;
+}
+
 /* Handle rxq fill: allocates rxq skbs; called when initializing a port */
 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
			   int num)
 {
-	int i;
+	int i = 0;
+
+	if (mvneta_create_page_pool(pp, rxq, num))
+		goto out;
 
 	for (i = 0; i < num; i++) {
 		memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
@@ -2848,6 +2871,7 @@ static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 	/* Add this number of RX descriptors as non occupied (ready to
 	 * get packets)
 	 */
+out:
 	mvneta_rxq_non_occup_desc_add(pp, rxq, i);
 
 	return i;
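A note on the error handling in the last two hunks: initializing i to 0
and branching to the new out: label when mvneta_create_page_pool() fails
means mvneta_rxq_non_occup_desc_add() still runs but publishes zero
ready descriptors, and mvneta_rxq_fill() returns 0 allocated buffers to
its caller rather than touching a NULL rxq->page_pool in
mvneta_rx_refill(). Likewise, the NULL check before page_pool_destroy()
in mvneta_rxq_drop_pkts() keeps teardown safe when pool creation never
succeeded.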