From: Jesper Dangaard Brouer <brouer@redhat.com>
To: netdev@vger.kernel.org, "David S. Miller" <davem@davemloft.net>,
Jesper Dangaard Brouer <brouer@redhat.com>
Cc: "Toke Høiland-Jørgensen" <toke@toke.dk>,
ard.biesheuvel@linaro.org, "Jason Wang" <jasowang@redhat.com>,
ilias.apalodimas@linaro.org, "Björn Töpel" <bjorn.topel@intel.com>,
w@1wt.eu, "Saeed Mahameed" <saeedm@mellanox.com>,
mykyta.iziumtsev@gmail.com,
"Daniel Borkmann" <borkmann@iogearbox.net>,
"Alexei Starovoitov" <alexei.starovoitov@gmail.com>,
"Tariq Toukan" <tariqt@mellanox.com>
Subject: [net-next PATCH RFC 2/8] net: mvneta: use page pool API for sw buffer manager
Date: Fri, 07 Dec 2018 00:25:37 +0100
Message-ID: <154413873712.21735.9487067271289391052.stgit@firesoul>
In-Reply-To: <154413868810.21735.572808840657728172.stgit@firesoul>
From: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Use the page_pool API for allocations and DMA handling instead of
__dev_alloc_page()/dma_map_page() and free_page()/dma_unmap_page().
The page_pool API offers buffer recycling capabilities for XDP but
allocates one page per packet, unless the driver splits and manages
the allocated page.
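For readers new to the API, here is a minimal sketch of the create/alloc
pattern this patch adopts; the helper name rx_alloc_sketch and the
pool_size value are illustrative only, not part of the patch:

  #include <net/page_pool.h>

  static int rx_alloc_sketch(struct device *dma_dev, struct page_pool **pool)
  {
          struct page_pool_params pp_params = { 0 };
          struct page *page;
          dma_addr_t phys_addr;

          pp_params.order = 0;                    /* one page per packet */
          pp_params.flags = PP_FLAG_DMA_MAP;      /* pool does dma_map_page() */
          pp_params.pool_size = 128;              /* illustrative: RX ring size */
          pp_params.nid = NUMA_NO_NODE;
          pp_params.dev = dma_dev;
          pp_params.dma_dir = DMA_FROM_DEVICE;

          *pool = page_pool_create(&pp_params);
          if (IS_ERR(*pool))
                  return PTR_ERR(*pool);

          page = page_pool_dev_alloc_pages(*pool); /* was __dev_alloc_page() */
          if (!page)
                  return -ENOMEM;

          /* page is already DMA-mapped by the pool; just read the address */
          phys_addr = page_pool_get_dma_addr(page);

          /* fill the RX descriptor with phys_addr here */
          return 0;
  }

The PP_FLAG_DMA_MAP flag is what moves the dma_map_page() call into the
pool, which is why the refill path in the diff below shrinks.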
Although XDP is not yet part of this driver, the current implementation
already allocates one page per packet, so there is no performance penalty
from using the API.
For now, pages are unmapped via page_pool_unmap_page() before packets
travel into the network stack, as the stack does not have a return hook
yet. Since this call clears the page_pool state, it is safe to let the
page be returned to the normal page allocator.
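Concretely, the hand-off amounts to the following condensed sketch, where
rxq, page, frag_num, frag_offset and frag_size are assumed from the
surrounding receive loop in the diff below:

          skb_add_rx_frag(rxq->skb, frag_num, page,
                          frag_offset, frag_size, PAGE_SIZE);
          /* Unmap the page and clear its page_pool state before the skb
           * leaves the driver, so the skb path frees it via the normal
           * page allocator.
           */
          page_pool_unmap_page(rxq->page_pool, page);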
Signed-off-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
---
drivers/net/ethernet/marvell/Kconfig | 1 +
drivers/net/ethernet/marvell/mvneta.c | 56 ++++++++++++++++++++++++---------
2 files changed, 41 insertions(+), 16 deletions(-)
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index 3238aa7f5dac..3325abe67465 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -60,6 +60,7 @@ config MVNETA
depends on ARCH_MVEBU || COMPILE_TEST
select MVMDIO
select PHYLINK
+ select PAGE_POOL
---help---
This driver supports the network interface units in the
Marvell ARMADA XP, ARMADA 370, ARMADA 38x and
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 5bfd349bf41a..2354421fe96f 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -33,6 +33,7 @@
#include <linux/skbuff.h>
#include <net/hwbm.h>
#include "mvneta_bm.h"
+#include <net/page_pool.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>
@@ -624,6 +625,9 @@ struct mvneta_rx_queue {
struct sk_buff *skb;
int left_size;
+ /* page pool */
+ struct page_pool *page_pool;
+
/* error counters */
u32 skb_alloc_err;
u32 refill_err;
@@ -1813,17 +1817,11 @@ static int mvneta_rx_refill(struct mvneta_port *pp,
dma_addr_t phys_addr;
struct page *page;
- page = __dev_alloc_page(gfp_mask);
+ page = page_pool_dev_alloc_pages(rxq->page_pool);
if (!page)
return -ENOMEM;
- /* map page for use */
- phys_addr = dma_map_page(pp->dev->dev.parent, page, 0, PAGE_SIZE,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
- __free_page(page);
- return -ENOMEM;
- }
+ phys_addr = page_pool_get_dma_addr(page);
phys_addr += pp->rx_offset_correction;
mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
@@ -1892,10 +1890,11 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
if (!data || !(rx_desc->buf_phys_addr))
continue;
- dma_unmap_page(pp->dev->dev.parent, rx_desc->buf_phys_addr,
- PAGE_SIZE, DMA_FROM_DEVICE);
- __free_page(data);
+ page_pool_put_page(rxq->page_pool, data, false);
}
+
+ if (rxq->page_pool)
+ page_pool_destroy(rxq->page_pool);
}
static inline
@@ -2010,8 +2009,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
skb_add_rx_frag(rxq->skb, frag_num, page,
frag_offset, frag_size,
PAGE_SIZE);
- dma_unmap_page(dev->dev.parent, phys_addr,
- PAGE_SIZE, DMA_FROM_DEVICE);
+ page_pool_unmap_page(rxq->page_pool, page);
rxq->left_size -= frag_size;
}
} else {
@@ -2041,8 +2039,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
frag_offset, frag_size,
PAGE_SIZE);
- dma_unmap_page(dev->dev.parent, phys_addr,
- PAGE_SIZE, DMA_FROM_DEVICE);
+ page_pool_unmap_page(rxq->page_pool, page);
rxq->left_size -= frag_size;
}
@@ -2828,11 +2825,37 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
return rx_done;
}
+static int mvneta_create_page_pool(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq, int num)
+{
+ struct page_pool_params pp_params = { 0 };
+ int err = 0;
+
+ pp_params.order = 0;
+ /* internal DMA mapping in page_pool */
+ pp_params.flags = PP_FLAG_DMA_MAP;
+ pp_params.pool_size = num;
+ pp_params.nid = NUMA_NO_NODE;
+ pp_params.dev = pp->dev->dev.parent;
+ pp_params.dma_dir = DMA_FROM_DEVICE;
+
+ rxq->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rxq->page_pool)) {
+ err = PTR_ERR(rxq->page_pool);
+ rxq->page_pool = NULL;
+ }
+
+ return err;
+}
+
/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
int num)
{
- int i;
+ int i = 0;
+
+ if (mvneta_create_page_pool(pp, rxq, num))
+ goto out;
for (i = 0; i < num; i++) {
memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
@@ -2848,6 +2871,7 @@ static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
/* Add this number of RX descriptors as non occupied (ready to
* get packets)
*/
+out:
mvneta_rxq_non_occup_desc_add(pp, rxq, i);
return i;
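Usage note: the pool's lifetime is tied to the RX queue. A condensed
sketch of the teardown path from mvneta_rxq_drop_pkts() above, assuming
the sw-buffer-manager mode where buf_virt_addr[] stores the page pointer:

          for (i = 0; i < rxq->size; i++) {
                  struct mvneta_rx_desc *rx_desc = rxq->descs + i;
                  void *data = rxq->buf_virt_addr[i];

                  if (!data || !rx_desc->buf_phys_addr)
                          continue;

                  /* return the page to the pool; the pool unmaps it */
                  page_pool_put_page(rxq->page_pool, data, false);
          }
          /* all pages returned: release the pool itself */
          if (rxq->page_pool)
                  page_pool_destroy(rxq->page_pool);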
Thread overview: 30+ messages
2018-12-06 23:25 [net-next PATCH RFC 0/8] page_pool DMA handling and allow to recycles frames via SKB Jesper Dangaard Brouer
2018-12-06 23:25 ` [net-next PATCH RFC 1/8] page_pool: add helper functions for DMA Jesper Dangaard Brouer
2018-12-08 7:06 ` David Miller
2018-12-08 7:55 ` Ilias Apalodimas
2018-12-06 23:25 ` Jesper Dangaard Brouer [this message]
2018-12-06 23:25 ` [net-next PATCH RFC 3/8] xdp: reduce size of struct xdp_mem_info Jesper Dangaard Brouer
2018-12-06 23:25 ` [net-next PATCH RFC 4/8] net: core: add recycle capabilities on skbs via page_pool API Jesper Dangaard Brouer
2018-12-08 7:15 ` David Miller
2018-12-08 7:54 ` Ilias Apalodimas
2018-12-08 9:57 ` [net-next, RFC, " Florian Westphal
2018-12-08 11:36 ` Jesper Dangaard Brouer
2018-12-08 20:10 ` David Miller
2018-12-08 12:29 ` Eric Dumazet
2018-12-08 12:34 ` Eric Dumazet
2018-12-08 13:45 ` Jesper Dangaard Brouer
2018-12-08 14:57 ` Ilias Apalodimas
2018-12-08 17:07 ` Andrew Lunn
2018-12-08 19:26 ` Eric Dumazet
2018-12-08 20:11 ` Jesper Dangaard Brouer
2018-12-08 20:14 ` Ilias Apalodimas
2018-12-08 21:06 ` Willy Tarreau
2018-12-10 7:54 ` Ilias Apalodimas
2018-12-08 22:36 ` Eric Dumazet
2018-12-08 20:21 ` David Miller
2018-12-08 20:29 ` Ilias Apalodimas
2018-12-10 9:51 ` Saeed Mahameed
2018-12-06 23:25 ` [net-next PATCH RFC 5/8] net: mvneta: remove copybreak, prefetch and use build_skb Jesper Dangaard Brouer
2018-12-06 23:25 ` [net-next PATCH RFC 6/8] mvneta: activate page recycling via skb using page_pool Jesper Dangaard Brouer
2018-12-06 23:26 ` [net-next PATCH RFC 7/8] xdp: bpf: cpumap redirect must update skb->mem_info Jesper Dangaard Brouer
2018-12-06 23:26 ` [net-next PATCH RFC 8/8] veth: xdp_frames redirected into veth need to transfer xdp_mem_info Jesper Dangaard Brouer