* [PATCH v2] net: mvneta: add basic XDP_DROP support
From: Domagoj Pintaric @ 2018-12-21 13:22 UTC
  To: netdev; +Cc: Domagoj Pintaric, Luka Perkov, Thomas Petazzoni, David S . Miller

Add initial mvneta XDP support for hardware buffer management enabled
devices only.

Signed-off-by: Domagoj Pintaric <domagoj.pintaric@sartura.hr>
CC: Luka Perkov <luka.perkov@sartura.hr>
CC: Thomas Petazzoni <thomas.petazzoni@bootlin.com>
CC: David S. Miller <davem@davemloft.net>
---
v2:
- fixed unused value warning
- added additional hardware buffer management check

---
 drivers/net/ethernet/marvell/mvneta.c | 70 +++++++++++++++++++++++++++
 1 file changed, 70 insertions(+)

diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 61b23497f836..00c890bdf31e 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -31,6 +31,7 @@
 #include <linux/phylink.h>
 #include <linux/platform_device.h>
 #include <linux/skbuff.h>
+#include <linux/bpf.h>
 #include <net/hwbm.h>
 #include "mvneta_bm.h"
 #include <net/ip.h>
@@ -454,6 +455,8 @@ struct mvneta_port {
 	bool neta_armada3700;
 	u16 rx_offset_correction;
 	const struct mbus_dram_target_info *dram_target_info;
+
+	struct bpf_prog *xdp_prog;
 };
 
 /* The mvneta_tx_desc and mvneta_rx_desc structures describe the
@@ -626,6 +629,8 @@ struct mvneta_rx_queue {
 	/* error counters */
 	u32 skb_alloc_err;
 	u32 refill_err;
+
+	struct bpf_prog *xdp_prog;
 };
 
 static enum cpuhp_state online_hpstate;
@@ -2099,6 +2104,7 @@ static int mvneta_rx_hwbm(struct napi_struct *napi,
 	int rx_done;
 	u32 rcvd_pkts = 0;
 	u32 rcvd_bytes = 0;
+	struct bpf_prog *xdp_prog;
 
 	/* Get number of received packets */
 	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
@@ -2140,6 +2146,29 @@ static int mvneta_rx_hwbm(struct napi_struct *napi,
 			continue;
 		}
 
+		xdp_prog = READ_ONCE(rxq->xdp_prog);
+		if (xdp_prog) {
+			struct xdp_buff xdp;
+			enum xdp_action act;
+
+			xdp.data = data + MVNETA_MH_SIZE + NET_SKB_PAD;
+			xdp.data_end = xdp.data + rx_bytes;
+
+			act = bpf_prog_run_xdp(xdp_prog, &xdp);
+			switch (act) {
+			case XDP_PASS:
+				break;
+			default:
+				bpf_warn_invalid_xdp_action(act);
+				/* fall through */
+			case XDP_DROP:
+				/* Return the buffer to the pool */
+				mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
+						      rx_desc->buf_phys_addr);
+				continue;
+			}
+		}
+
 		if (rx_bytes <= rx_copybreak) {
 			/* better copy a small frame and not unmap the DMA region */
 			skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
@@ -2935,6 +2964,8 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
 
 	mvneta_rxq_hw_init(pp, rxq);
 
+	rxq->xdp_prog = pp->xdp_prog;
+
 	return 0;
 }
 
@@ -2961,6 +2992,7 @@ static void mvneta_rxq_deinit(struct mvneta_port *pp,
 	rxq->refill_num        = 0;
 	rxq->skb               = NULL;
 	rxq->left_size         = 0;
+	rxq->xdp_prog          = NULL;
 }
 
 static int mvneta_txq_sw_init(struct mvneta_port *pp,
@@ -3878,6 +3910,43 @@ static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 	return phylink_mii_ioctl(pp->phylink, ifr, cmd);
 }
 
+static int mvneta_xdp_set(struct net_device *dev, struct bpf_prog *xdp_prog)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+	struct bpf_prog *xdp_prog_old;
+	int queue;
+
+	if (!pp->bm_priv)
+		return -EINVAL;
+
+	xdp_prog_old = xchg(&pp->xdp_prog, xdp_prog);
+
+	for (queue = 0; queue < rxq_number; queue++) {
+		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
+		(void)xchg(&rxq->xdp_prog, pp->xdp_prog);
+	}
+
+	if (xdp_prog_old)
+		bpf_prog_put(xdp_prog_old);
+
+	return 0;
+}
+
+static int mvneta_bpf(struct net_device *dev, struct netdev_bpf *bpf)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+
+	switch (bpf->command) {
+	case XDP_SETUP_PROG:
+		return mvneta_xdp_set(dev, bpf->prog);
+	case XDP_QUERY_PROG:
+		bpf->prog_id = pp->xdp_prog ? pp->xdp_prog->aux->id : 0;
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
 /* Ethtool methods */
 
 /* Set link ksettings (phy address, speed) for ethtools */
@@ -4275,6 +4344,7 @@ static const struct net_device_ops mvneta_netdev_ops = {
 	.ndo_fix_features    = mvneta_fix_features,
 	.ndo_get_stats64     = mvneta_get_stats64,
 	.ndo_do_ioctl        = mvneta_ioctl,
+	.ndo_bpf             = mvneta_bpf,
 };
 
 static const struct ethtool_ops mvneta_eth_tool_ops = {
-- 
2.19.1

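For reference, a minimal XDP program that exercises the XDP_DROP path
added by this patch could look like the following sketch (not part of
the patch; file and interface names are illustrative):

  /* xdp_drop.c - drop every frame.
   * Build with: clang -O2 -target bpf -c xdp_drop.c -o xdp_drop.o
   */
  #include <linux/bpf.h>

  #define __section(NAME) __attribute__((section(NAME), used))

  __section("prog")
  int xdp_drop_all(struct xdp_md *ctx)
  {
          /* Every frame is dropped; with this patch, mvneta returns
           * the buffer to the HWBM pool via mvneta_bm_pool_put_bp().
           */
          return XDP_DROP;
  }

  char _license[] __section("license") = "GPL";

It can then be attached with iproute2, e.g.
"ip link set dev eth0 xdp obj xdp_drop.o" (iproute2 loads the "prog"
section by default), where eth0 stands in for an mvneta interface.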

* Re: [PATCH v2] net: mvneta: add basic XDP_DROP support
From: David Miller @ 2018-12-21 17:05 UTC
  To: domagoj.pintaric; +Cc: netdev, luka.perkov, thomas.petazzoni

From: Domagoj Pintaric <domagoj.pintaric@sartura.hr>
Date: Fri, 21 Dec 2018 14:22:23 +0100

> Add initial mvneta XDP support for hardware buffer management enabled
> devices only.
> 
> Signed-off-by: Domagoj Pintaric <domagoj.pintaric@sartura.hr>
> CC: Luka Perkov <luka.perkov@sartura.hr>
> CC: Thomas Petazzoni <thomas.petazzoni@bootlin.com>
> CC: David S. Miller <davem@davemloft.net>
> ---
> v2:
> - fixed unused value warning
> - added additional hardware buffer management check

Since the net-next tree is closed, this new feature will have to wait
until the net-next tree opens back up again.  So please resubmit this
at that time.

Meanwhile you can add support for the rest of the XDP features and do
more testing :-)

Thank you.


* Re: [PATCH v2] net: mvneta: add basic XDP_DROP support
From: Jesper Dangaard Brouer @ 2018-12-22 10:52 UTC
  To: Domagoj Pintaric
  Cc: brouer, netdev, Luka Perkov, Thomas Petazzoni, David S . Miller,
	Ilias Apalodimas, Marcin Wojtas

On Fri, 21 Dec 2018 14:22:23 +0100
Domagoj Pintaric <domagoj.pintaric@sartura.hr> wrote:

> Add initial mvneta XDP support for hardware buffer management enabled
> devices only.

Hi Domagoj,

I would really appreciate it if we could coordinate our work on the
mvneta driver.  Ilias (Cc'ed) and I are also working on adding XDP
support for this driver, although it is the software-buffer side of
the driver that we have functioning now.

You can directly follow our progress here: [1][2]
 [1] https://github.com/xdp-project/xdp-project/blob/master/areas/arm64/board_espressobin04_bench_xdp.org
 [2] https://github.com/apalos/bpf-next/commits/mvneta_04_page_pool_recycle_xdp

Your XDP-setup function is actually more correct than ours[3], as you
handle BPF per queue (which we were planning to fix before upstreaming).

That said, adding XDP_DROP is easy, but I want to see more of the XDP
features/actions added, as those require a lot more work.  I always
worry that a driver will stop at just XDP_DROP, so what are your plans
for adding the harder features? 

Even though XDP_DROP looks easy in this patch, you are actually doing
something wrong, as XDP can also modify frames before doing XDP_PASS:
(1) you have non-standard headroom (MVNETA_MH_SIZE + NET_SKB_PAD),
(2) you don't handle the case where XDP changed the xdp.data header
pointer, and (3) your backing memory comes from either page-fragments
or kmalloc, which also goes against the XDP memory requirements.

[3] https://github.com/apalos/bpf-next/commit/4b567e74552d3cdf55
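
For illustration, here is a rough sketch (untested; the helper name and
calling convention are hypothetical) of the shape the RX hook needs to
take to address points (1) and (2): reserve the standard
XDP_PACKET_HEADROOM in front of the frame, and honor any movement of
xdp.data done by the program through bpf_xdp_adjust_head():

  static u32 mvneta_run_xdp(struct bpf_prog *prog, void *data,
                            int *rx_bytes, int *rx_offset)
  {
          struct xdp_buff xdp;
          u32 act;

          xdp.data_hard_start = data;            /* start of the buffer */
          xdp.data = data + XDP_PACKET_HEADROOM; /* (1) standard headroom */
          xdp.data_end = xdp.data + *rx_bytes;
          xdp_set_data_meta_invalid(&xdp);
          /* a registered struct xdp_rxq_info should also be assigned
           * to xdp.rxq before running the program */

          act = bpf_prog_run_xdp(prog, &xdp);
          if (act == XDP_PASS) {
                  /* (2) the program may have moved xdp.data; recompute
                   * offset and length before the skb is built */
                  *rx_offset = xdp.data - xdp.data_hard_start;
                  *rx_bytes = xdp.data_end - xdp.data;
          }
          return act;
  }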
-- 
Best regards,
  Jesper Dangaard Brouer
  MSc.CS, Principal Kernel Engineer at Red Hat
  LinkedIn: http://www.linkedin.com/in/brouer


* Re: [PATCH v2] net: mvneta: add basic XDP_DROP support
From: Ilias Apalodimas @ 2018-12-24  6:59 UTC
  To: Jesper Dangaard Brouer
  Cc: Domagoj Pintaric, netdev, Luka Perkov, Thomas Petazzoni,
	David S . Miller, Marcin Wojtas

Hi Domagoj,

> > Add initial mvneta XDP support for hardware buffer management enabled
> > devices only.
> 
> Hi Domagoj,
> 
> I would really appreciate it if we could coordinate our work on the
> mvneta driver.  Ilias (Cc'ed) and I are also working on adding XDP
> support for this driver, although it is the software-buffer side of
> the driver that we have functioning now.

Yes, please. Jesper and I didn't have access to hardware with BM, so
let's sync up and try to provide XDP support for both.

> 
> You can directly follow our progress here: [1][2]
>  [1] https://github.com/xdp-project/xdp-project/blob/master/areas/arm64/board_espressobin04_bench_xdp.org
>  [2] https://github.com/apalos/bpf-next/commits/mvneta_04_page_pool_recycle_xdp
> 
> Your XDP-setup function is actually more correct than ours[3], as you
> handle BPF per queue (which we were planning to fix before upstreaming).
> 
> That said, adding XDP_DROP is easy, but I want to see more of the XDP
> features/actions added, as those require a lot more work.  I always
> worry that a driver will stop at just XDP_DROP, so what are your plans
> for adding the harder features? 
> 
> Even though XDP_DROP looks easy in this patch, you are actually doing
> something wrong, as XDP can also modify frames before doing XDP_PASS:
> (1) you have non-standard headroom (MVNETA_MH_SIZE + NET_SKB_PAD),
> (2) you don't handle the case where XDP changed the xdp.data header
> pointer, and (3) your backing memory comes from either page-fragments
> or kmalloc, which also goes against the XDP memory requirements.
> [3] https://github.com/apalos/bpf-next/commit/4b567e74552d3cdf55

Our patches use the page_pool API for this (the mvneta SWBM part of the
driver was already allocating full pages to back its RX descriptors).

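For reference, a sketch of that setup (assuming the page_pool API as it
exists around v4.20; the mvneta field names below are hypothetical):

  #include <net/page_pool.h>
  #include <net/xdp.h>

  static int mvneta_create_page_pool(struct mvneta_rx_queue *rxq,
                                     struct device *dev, int pool_size)
  {
          struct page_pool_params pp_params = {
                  .order = 0,             /* one page per frame */
                  .pool_size = pool_size, /* match the RX ring size */
                  .nid = NUMA_NO_NODE,
                  .dev = dev,
                  .dma_dir = DMA_FROM_DEVICE,
          };

          rxq->page_pool = page_pool_create(&pp_params);
          if (IS_ERR(rxq->page_pool))
                  return PTR_ERR(rxq->page_pool);

          /* Tell the XDP core that frames on this queue come from a
           * page_pool so they can be recycled; assumes rxq->xdp_rxq
           * was registered earlier with xdp_rxq_info_reg().
           */
          return xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
                                            MEM_TYPE_PAGE_POOL,
                                            rxq->page_pool);
  }

Buffers are then taken with page_pool_dev_alloc_pages() instead of the
page allocator, giving whole pages with room for XDP headroom.
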
Thanks!
/Ilias


* Re: [PATCH v2] net: mvneta: add basic XDP_DROP support
From: Luka Perkov @ 2018-12-24 10:16 UTC
  To: Jesper Dangaard Brouer
  Cc: Domagoj Pintaric, netdev, Thomas Petazzoni, David S . Miller,
	Ilias Apalodimas, Marcin Wojtas, damir.samardzic

Hi Jesper,

On Sat, Dec 22, 2018 at 11:52:54AM +0100, Jesper Dangaard Brouer wrote:
> On Fri, 21 Dec 2018 14:22:23 +0100
> Domagoj Pintaric <domagoj.pintaric@sartura.hr> wrote:
> 
> > Add initial mvneta XDP support for hardware buffer management enabled
> > devices only.
> 
> Hi Domagoj,
> 
> I would really appreciate it if we could coordinate our work on the
> mvneta driver.  Ilias (Cc'ed) and I are also working on adding XDP
> support for this driver, although it is the software-buffer side of
> the driver that we have functioning now.
> 
> You can directly follow our progress here: [1][2]
>  [1] https://github.com/xdp-project/xdp-project/blob/master/areas/arm64/board_espressobin04_bench_xdp.org
>  [2] https://github.com/apalos/bpf-next/commits/mvneta_04_page_pool_recycle_xdp
> 
> Your XDP-setup function is actually more correct than ours[3], as you
> handle BPF per queue (which we were planning to fix before upstreaming).
> 
> That said, adding XDP_DROP is easy, but I want to see more of the XDP
> features/actions added, as those require a lot more work.  I always
> worry that a driver will stop at just XDP_DROP, so what are your plans
> for adding the harder features? 

Thank you for pointing this out; we were not aware that you had already
made so much progress. Generally speaking, at Sartura our goal is to
leverage the development done on eBPF and XDP, mainly driven by
datacenter use-cases, and apply the technology to embedded systems,
which are our primary focus. Along those lines, at the OpenWrt Summit
two months ago Damir gave a talk about these technologies [1].

You are correct that we have sent a patch only for the easy part, but
that is all we have implemented so far :) As this was a low-priority
project on our side, we didn't have concrete timelines for the other
features. Looking at what you have done and what we have, I'd be happy
to see if we can divide the efforts in the most effective way.

Besides the mvneta driver (and we have a lot of boards using that
driver), we would also like to collaborate on mainline XDP support for
mvpp2, which I have seen you have in your plans as well.


Thanks,
Luka


[1] https://openwrtsummit.files.wordpress.com/2018/11/20181029-state-of-fast-path-networking-in-linux.pdf
 
> Even though XDP_DROP looks easy in this patch, you are actually doing
> something wrong, as XDP can also modify frames before doing XDP_PASS:
> (1) you have non-standard headroom (MVNETA_MH_SIZE + NET_SKB_PAD),
> (2) you don't handle the case where XDP changed the xdp.data header
> pointer, and (3) your backing memory comes from either page-fragments
> or kmalloc, which also goes against the XDP memory requirements.
> 
> [3] https://github.com/apalos/bpf-next/commit/4b567e74552d3cdf55
> -- 
> Best regards,
>   Jesper Dangaard Brouer
>   MSc.CS, Principal Kernel Engineer at Red Hat
>   LinkedIn: http://www.linkedin.com/in/brouer

