All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support
@ 2018-11-26 16:27 Ioana Ciocoi Radulescu
  2018-11-26 16:27 ` [PATCH v2 net-next 1/8] dpaa2-eth: Add basic " Ioana Ciocoi Radulescu
                   ` (9 more replies)
  0 siblings, 10 replies; 31+ messages in thread
From: Ioana Ciocoi Radulescu @ 2018-11-26 16:27 UTC (permalink / raw)
  To: netdev, davem; +Cc: Ioana Ciornei, dsahern, Camelia Alexandra Groza

Add support for XDP programs. Only XDP_PASS, XDP_DROP and XDP_TX
actions are supported for now. Frame header changes are also
allowed.

v2: - count the XDP packets in the rx/tx interface stats
    - add message with the maximum supported MTU value for XDP

Ioana Radulescu (8):
  dpaa2-eth: Add basic XDP support
  dpaa2-eth: Allow XDP header adjustments
  dpaa2-eth: Move function
  dpaa2-eth: Release buffers back to pool on XDP_DROP
  dpaa2-eth: Map Rx buffers as bidirectional
  dpaa2-eth: Add support for XDP_TX
  dpaa2-eth: Cleanup channel stats
  dpaa2-eth: Add xdp counters

 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c   | 349 +++++++++++++++++++--
 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h   |  20 +-
 .../net/ethernet/freescale/dpaa2/dpaa2-ethtool.c   |  19 +-
 3 files changed, 350 insertions(+), 38 deletions(-)

-- 
2.7.4

^ permalink raw reply	[flat|nested] 31+ messages in thread

* [PATCH v2 net-next 1/8] dpaa2-eth: Add basic XDP support
  2018-11-26 16:27 [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support Ioana Ciocoi Radulescu
@ 2018-11-26 16:27 ` Ioana Ciocoi Radulescu
  2018-11-27 15:45   ` Camelia Alexandra Groza
  2018-11-28 16:11   ` David Ahern
  2018-11-26 16:27 ` [PATCH v2 net-next 2/8] dpaa2-eth: Allow XDP header adjustments Ioana Ciocoi Radulescu
                   ` (8 subsequent siblings)
  9 siblings, 2 replies; 31+ messages in thread
From: Ioana Ciocoi Radulescu @ 2018-11-26 16:27 UTC (permalink / raw)
  To: netdev, davem; +Cc: Ioana Ciornei, dsahern, Camelia Alexandra Groza

We keep one XDP program reference per channel. The only actions
supported for now are XDP_DROP and XDP_PASS.

Until now we didn't enforce a maximum size for Rx frames based
on MTU value. Change that, since for XDP mode we must ensure no
scatter-gather frames can be received.

Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
---
v2: - xdp packets count towards the rx packets and bytes counters
    - add warning message with the maximum supported MTU value for XDP

 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 189 ++++++++++++++++++++++-
 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h |   6 +
 2 files changed, 194 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 640967a..d3cfed4 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -13,7 +13,8 @@
 #include <linux/iommu.h>
 #include <linux/net_tstamp.h>
 #include <linux/fsl/mc.h>
-
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
 #include <net/sock.h>
 
 #include "dpaa2-eth.h"
@@ -199,6 +200,45 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
 	return skb;
 }
 
+static u32 run_xdp(struct dpaa2_eth_priv *priv,
+		   struct dpaa2_eth_channel *ch,
+		   struct dpaa2_fd *fd, void *vaddr)
+{
+	struct bpf_prog *xdp_prog;
+	struct xdp_buff xdp;
+	u32 xdp_act = XDP_PASS;
+
+	rcu_read_lock();
+
+	xdp_prog = READ_ONCE(ch->xdp.prog);
+	if (!xdp_prog)
+		goto out;
+
+	xdp.data = vaddr + dpaa2_fd_get_offset(fd);
+	xdp.data_end = xdp.data + dpaa2_fd_get_len(fd);
+	xdp.data_hard_start = xdp.data;
+	xdp_set_data_meta_invalid(&xdp);
+
+	xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
+	switch (xdp_act) {
+	case XDP_PASS:
+		break;
+	default:
+		bpf_warn_invalid_xdp_action(xdp_act);
+	case XDP_ABORTED:
+		trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
+	case XDP_DROP:
+		ch->buf_count--;
+		free_rx_fd(priv, fd, vaddr);
+		break;
+	}
+
+out:
+	rcu_read_unlock();
+	return xdp_act;
+}
+
 /* Main Rx frame processing routine */
 static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
 			 struct dpaa2_eth_channel *ch,
@@ -215,6 +255,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
 	struct dpaa2_fas *fas;
 	void *buf_data;
 	u32 status = 0;
+	u32 xdp_act;
 
 	/* Tracing point */
 	trace_dpaa2_rx_fd(priv->net_dev, fd);
@@ -231,8 +272,17 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
 	percpu_extras = this_cpu_ptr(priv->percpu_extras);
 
 	if (fd_format == dpaa2_fd_single) {
+		xdp_act = run_xdp(priv, ch, (struct dpaa2_fd *)fd, vaddr);
+		if (xdp_act != XDP_PASS) {
+			percpu_stats->rx_packets++;
+			percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
+			return;
+		}
+
 		skb = build_linear_skb(ch, fd, vaddr);
 	} else if (fd_format == dpaa2_fd_sg) {
+		WARN_ON(priv->xdp_prog);
+
 		skb = build_frag_skb(priv, ch, buf_data);
 		skb_free_frag(vaddr);
 		percpu_extras->rx_sg_frames++;
@@ -1427,6 +1477,141 @@ static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 	return -EINVAL;
 }
 
+static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
+{
+	int mfl, linear_mfl;
+
+	mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
+	linear_mfl = DPAA2_ETH_RX_BUF_SIZE - DPAA2_ETH_RX_HWA_SIZE -
+		     dpaa2_eth_rx_head_room(priv);
+
+	if (mfl > linear_mfl) {
+		netdev_warn(priv->net_dev, "Maximum MTU for XDP is %d\n",
+			    linear_mfl - VLAN_ETH_HLEN);
+		return false;
+	}
+
+	return true;
+}
+
+static int set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp)
+{
+	int mfl, err;
+
+	/* We enforce a maximum Rx frame length based on MTU only if we have
+	 * an XDP program attached (in order to avoid Rx S/G frames).
+	 * Otherwise, we accept all incoming frames as long as they are not
+	 * larger than maximum size supported in hardware
+	 */
+	if (has_xdp)
+		mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
+	else
+		mfl = DPAA2_ETH_MFL;
+
+	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, mfl);
+	if (err) {
+		netdev_err(priv->net_dev, "dpni_set_max_frame_length failed\n");
+		return err;
+	}
+
+	return 0;
+}
+
+static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(dev);
+	int err;
+
+	if (!priv->xdp_prog)
+		goto out;
+
+	if (!xdp_mtu_valid(priv, new_mtu))
+		return -EINVAL;
+
+	err = set_rx_mfl(priv, new_mtu, true);
+	if (err)
+		return err;
+
+out:
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+static int setup_xdp(struct net_device *dev, struct bpf_prog *prog)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(dev);
+	struct dpaa2_eth_channel *ch;
+	struct bpf_prog *old;
+	bool up, need_update;
+	int i, err;
+
+	if (prog && !xdp_mtu_valid(priv, dev->mtu))
+		return -EINVAL;
+
+	if (prog) {
+		prog = bpf_prog_add(prog, priv->num_channels);
+		if (IS_ERR(prog))
+			return PTR_ERR(prog);
+	}
+
+	up = netif_running(dev);
+	need_update = (!!priv->xdp_prog != !!prog);
+
+	if (up)
+		dpaa2_eth_stop(dev);
+
+	/* While in xdp mode, enforce a maximum Rx frame size based on MTU */
+	if (need_update) {
+		err = set_rx_mfl(priv, dev->mtu, !!prog);
+		if (err)
+			goto out_err;
+	}
+
+	old = xchg(&priv->xdp_prog, prog);
+	if (old)
+		bpf_prog_put(old);
+
+	for (i = 0; i < priv->num_channels; i++) {
+		ch = priv->channel[i];
+		old = xchg(&ch->xdp.prog, prog);
+		if (old)
+			bpf_prog_put(old);
+	}
+
+	if (up) {
+		err = dpaa2_eth_open(dev);
+		if (err)
+			return err;
+	}
+
+	return 0;
+
+out_err:
+	if (prog)
+		bpf_prog_sub(prog, priv->num_channels);
+	if (up)
+		dpaa2_eth_open(dev);
+
+	return err;
+}
+
+static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(dev);
+
+	switch (xdp->command) {
+	case XDP_SETUP_PROG:
+		return setup_xdp(dev, xdp->prog);
+	case XDP_QUERY_PROG:
+		xdp->prog_id = priv->xdp_prog ? priv->xdp_prog->aux->id : 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static const struct net_device_ops dpaa2_eth_ops = {
 	.ndo_open = dpaa2_eth_open,
 	.ndo_start_xmit = dpaa2_eth_tx,
@@ -1436,6 +1621,8 @@ static const struct net_device_ops dpaa2_eth_ops = {
 	.ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
 	.ndo_set_features = dpaa2_eth_set_features,
 	.ndo_do_ioctl = dpaa2_eth_ioctl,
+	.ndo_change_mtu = dpaa2_eth_change_mtu,
+	.ndo_bpf = dpaa2_eth_xdp,
 };
 
 static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 16545e9..2873a15 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -283,6 +283,10 @@ struct dpaa2_eth_fq {
 	struct dpaa2_eth_fq_stats stats;
 };
 
+struct dpaa2_eth_ch_xdp {
+	struct bpf_prog *prog;
+};
+
 struct dpaa2_eth_channel {
 	struct dpaa2_io_notification_ctx nctx;
 	struct fsl_mc_device *dpcon;
@@ -294,6 +298,7 @@ struct dpaa2_eth_channel {
 	struct dpaa2_eth_priv *priv;
 	int buf_count;
 	struct dpaa2_eth_ch_stats stats;
+	struct dpaa2_eth_ch_xdp xdp;
 };
 
 struct dpaa2_eth_dist_fields {
@@ -353,6 +358,7 @@ struct dpaa2_eth_priv {
 	u64 rx_hash_fields;
 	struct dpaa2_eth_cls_rule *cls_rules;
 	u8 rx_cls_enabled;
+	struct bpf_prog *xdp_prog;
 };
 
 #define DPAA2_RXH_SUPPORTED	(RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 31+ messages in thread

* [PATCH v2 net-next 2/8] dpaa2-eth: Allow XDP header adjustments
  2018-11-26 16:27 [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support Ioana Ciocoi Radulescu
  2018-11-26 16:27 ` [PATCH v2 net-next 1/8] dpaa2-eth: Add basic " Ioana Ciocoi Radulescu
@ 2018-11-26 16:27 ` Ioana Ciocoi Radulescu
  2018-11-26 16:27 ` [PATCH v2 net-next 4/8] dpaa2-eth: Release buffers back to pool on XDP_DROP Ioana Ciocoi Radulescu
                   ` (7 subsequent siblings)
  9 siblings, 0 replies; 31+ messages in thread
From: Ioana Ciocoi Radulescu @ 2018-11-26 16:27 UTC (permalink / raw)
  To: netdev, davem; +Cc: Ioana Ciornei, dsahern, Camelia Alexandra Groza

Reserve XDP_PACKET_HEADROOM bytes in Rx buffers to allow XDP
programs to increase frame header size.

Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
---
v2: no changes

 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 43 ++++++++++++++++++++++--
 1 file changed, 40 insertions(+), 3 deletions(-)

diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index d3cfed4..008cdf8 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -216,11 +216,15 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
 
 	xdp.data = vaddr + dpaa2_fd_get_offset(fd);
 	xdp.data_end = xdp.data + dpaa2_fd_get_len(fd);
-	xdp.data_hard_start = xdp.data;
+	xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
 	xdp_set_data_meta_invalid(&xdp);
 
 	xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
 
+	/* xdp.data pointer may have changed */
+	dpaa2_fd_set_offset(fd, xdp.data - vaddr);
+	dpaa2_fd_set_len(fd, xdp.data_end - xdp.data);
+
 	switch (xdp_act) {
 	case XDP_PASS:
 		break;
@@ -1483,7 +1487,7 @@ static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
 
 	mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
 	linear_mfl = DPAA2_ETH_RX_BUF_SIZE - DPAA2_ETH_RX_HWA_SIZE -
-		     dpaa2_eth_rx_head_room(priv);
+		     dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM;
 
 	if (mfl > linear_mfl) {
 		netdev_warn(priv->net_dev, "Maximum MTU for XDP is %d\n",
@@ -1537,6 +1541,32 @@ static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
 	return 0;
 }
 
+static int update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
+{
+	struct dpni_buffer_layout buf_layout = {0};
+	int err;
+
+	err = dpni_get_buffer_layout(priv->mc_io, 0, priv->mc_token,
+				     DPNI_QUEUE_RX, &buf_layout);
+	if (err) {
+		netdev_err(priv->net_dev, "dpni_get_buffer_layout failed\n");
+		return err;
+	}
+
+	/* Reserve extra headroom for XDP header size changes */
+	buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv) +
+				    (has_xdp ? XDP_PACKET_HEADROOM : 0);
+	buf_layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
+	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
+				     DPNI_QUEUE_RX, &buf_layout);
+	if (err) {
+		netdev_err(priv->net_dev, "dpni_set_buffer_layout failed\n");
+		return err;
+	}
+
+	return 0;
+}
+
 static int setup_xdp(struct net_device *dev, struct bpf_prog *prog)
 {
 	struct dpaa2_eth_priv *priv = netdev_priv(dev);
@@ -1560,11 +1590,18 @@ static int setup_xdp(struct net_device *dev, struct bpf_prog *prog)
 	if (up)
 		dpaa2_eth_stop(dev);
 
-	/* While in xdp mode, enforce a maximum Rx frame size based on MTU */
+	/* While in xdp mode, enforce a maximum Rx frame size based on MTU.
+	 * Also, when switching between xdp/non-xdp modes we need to reconfigure
+	 * our Rx buffer layout. Buffer pool was drained on dpaa2_eth_stop,
+	 * so we are sure no old format buffers will be used from now on.
+	 */
 	if (need_update) {
 		err = set_rx_mfl(priv, dev->mtu, !!prog);
 		if (err)
 			goto out_err;
+		err = update_rx_buffer_headroom(priv, !!prog);
+		if (err)
+			goto out_err;
 	}
 
 	old = xchg(&priv->xdp_prog, prog);
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 31+ messages in thread

* [PATCH v2 net-next 3/8] dpaa2-eth: Move function
  2018-11-26 16:27 [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support Ioana Ciocoi Radulescu
                   ` (2 preceding siblings ...)
  2018-11-26 16:27 ` [PATCH v2 net-next 4/8] dpaa2-eth: Release buffers back to pool on XDP_DROP Ioana Ciocoi Radulescu
@ 2018-11-26 16:27 ` Ioana Ciocoi Radulescu
  2018-11-26 16:27 ` [PATCH v2 net-next 5/8] dpaa2-eth: Map Rx buffers as bidirectional Ioana Ciocoi Radulescu
                   ` (5 subsequent siblings)
  9 siblings, 0 replies; 31+ messages in thread
From: Ioana Ciocoi Radulescu @ 2018-11-26 16:27 UTC (permalink / raw)
  To: netdev, davem; +Cc: Ioana Ciornei, dsahern, Camelia Alexandra Groza

We'll use function free_bufs() on the XDP path as well, so move
it higher in order to avoid a forward declaration.

Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
---
v2: no changes

 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 34 ++++++++++++------------
 1 file changed, 17 insertions(+), 17 deletions(-)

diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 008cdf8..174c960 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -200,6 +200,23 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
 	return skb;
 }
 
+/* Free buffers acquired from the buffer pool or which were meant to
+ * be released in the pool
+ */
+static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	void *vaddr;
+	int i;
+
+	for (i = 0; i < count; i++) {
+		vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
+		dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
+				 DMA_FROM_DEVICE);
+		skb_free_frag(vaddr);
+	}
+}
+
 static u32 run_xdp(struct dpaa2_eth_priv *priv,
 		   struct dpaa2_eth_channel *ch,
 		   struct dpaa2_fd *fd, void *vaddr)
@@ -797,23 +814,6 @@ static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
 	return 0;
 }
 
-/* Free buffers acquired from the buffer pool or which were meant to
- * be released in the pool
- */
-static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
-{
-	struct device *dev = priv->net_dev->dev.parent;
-	void *vaddr;
-	int i;
-
-	for (i = 0; i < count; i++) {
-		vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
-		dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
-				 DMA_FROM_DEVICE);
-		skb_free_frag(vaddr);
-	}
-}
-
 /* Perform a single release command to add buffers
  * to the specified buffer pool
  */
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 31+ messages in thread

* [PATCH v2 net-next 4/8] dpaa2-eth: Release buffers back to pool on XDP_DROP
  2018-11-26 16:27 [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support Ioana Ciocoi Radulescu
  2018-11-26 16:27 ` [PATCH v2 net-next 1/8] dpaa2-eth: Add basic " Ioana Ciocoi Radulescu
  2018-11-26 16:27 ` [PATCH v2 net-next 2/8] dpaa2-eth: Allow XDP header adjustments Ioana Ciocoi Radulescu
@ 2018-11-26 16:27 ` Ioana Ciocoi Radulescu
  2018-11-26 16:27 ` [PATCH v2 net-next 3/8] dpaa2-eth: Move function Ioana Ciocoi Radulescu
                   ` (6 subsequent siblings)
  9 siblings, 0 replies; 31+ messages in thread
From: Ioana Ciocoi Radulescu @ 2018-11-26 16:27 UTC (permalink / raw)
  To: netdev, davem; +Cc: Ioana Ciornei, dsahern, Camelia Alexandra Groza

Instead of freeing the RX buffers, release them back into the pool.
We wait for the maximum number of buffers supported by a single
release command to accumulate before issuing the command.

Also, don't unmap the Rx buffers at the beginning of the Rx routine
anymore, since that would require remapping them before release.
Instead, just do a DMA sync at first and only unmap if the frame is
meant for the stack.

Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
---
v2: no changes

 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 34 +++++++++++++++++++++---
 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h |  2 ++
 2 files changed, 33 insertions(+), 3 deletions(-)

diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 174c960..ac4cb81 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -217,10 +217,34 @@ static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
 	}
 }
 
+static void xdp_release_buf(struct dpaa2_eth_priv *priv,
+			    struct dpaa2_eth_channel *ch,
+			    dma_addr_t addr)
+{
+	int err;
+
+	ch->xdp.drop_bufs[ch->xdp.drop_cnt++] = addr;
+	if (ch->xdp.drop_cnt < DPAA2_ETH_BUFS_PER_CMD)
+		return;
+
+	while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
+					       ch->xdp.drop_bufs,
+					       ch->xdp.drop_cnt)) == -EBUSY)
+		cpu_relax();
+
+	if (err) {
+		free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt);
+		ch->buf_count -= ch->xdp.drop_cnt;
+	}
+
+	ch->xdp.drop_cnt = 0;
+}
+
 static u32 run_xdp(struct dpaa2_eth_priv *priv,
 		   struct dpaa2_eth_channel *ch,
 		   struct dpaa2_fd *fd, void *vaddr)
 {
+	dma_addr_t addr = dpaa2_fd_get_addr(fd);
 	struct bpf_prog *xdp_prog;
 	struct xdp_buff xdp;
 	u32 xdp_act = XDP_PASS;
@@ -250,8 +274,7 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
 	case XDP_ABORTED:
 		trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
 	case XDP_DROP:
-		ch->buf_count--;
-		free_rx_fd(priv, fd, vaddr);
+		xdp_release_buf(priv, ch, addr);
 		break;
 	}
 
@@ -282,7 +305,8 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
 	trace_dpaa2_rx_fd(priv->net_dev, fd);
 
 	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
-	dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
+	dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+				DMA_FROM_DEVICE);
 
 	fas = dpaa2_get_fas(vaddr, false);
 	prefetch(fas);
@@ -300,10 +324,14 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
 			return;
 		}
 
+		dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+				 DMA_FROM_DEVICE);
 		skb = build_linear_skb(ch, fd, vaddr);
 	} else if (fd_format == dpaa2_fd_sg) {
 		WARN_ON(priv->xdp_prog);
 
+		dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+				 DMA_FROM_DEVICE);
 		skb = build_frag_skb(priv, ch, buf_data);
 		skb_free_frag(vaddr);
 		percpu_extras->rx_sg_frames++;
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 2873a15..23cf9d9 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -285,6 +285,8 @@ struct dpaa2_eth_fq {
 
 struct dpaa2_eth_ch_xdp {
 	struct bpf_prog *prog;
+	u64 drop_bufs[DPAA2_ETH_BUFS_PER_CMD];
+	int drop_cnt;
 };
 
 struct dpaa2_eth_channel {
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 31+ messages in thread

* [PATCH v2 net-next 5/8] dpaa2-eth: Map Rx buffers as bidirectional
  2018-11-26 16:27 [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support Ioana Ciocoi Radulescu
                   ` (3 preceding siblings ...)
  2018-11-26 16:27 ` [PATCH v2 net-next 3/8] dpaa2-eth: Move function Ioana Ciocoi Radulescu
@ 2018-11-26 16:27 ` Ioana Ciocoi Radulescu
  2018-11-26 16:27 ` [PATCH v2 net-next 6/8] dpaa2-eth: Add support for XDP_TX Ioana Ciocoi Radulescu
                   ` (4 subsequent siblings)
  9 siblings, 0 replies; 31+ messages in thread
From: Ioana Ciocoi Radulescu @ 2018-11-26 16:27 UTC (permalink / raw)
  To: netdev, davem; +Cc: Ioana Ciornei, dsahern, Camelia Alexandra Groza

In order to support enqueueing Rx FDs back to hardware, we need to
DMA map Rx buffers as bidirectional.

Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
---
v2: no changes

 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index ac4cb81..c2e880b 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -87,7 +87,7 @@ static void free_rx_fd(struct dpaa2_eth_priv *priv,
 		addr = dpaa2_sg_get_addr(&sgt[i]);
 		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
 		dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
-				 DMA_FROM_DEVICE);
+				 DMA_BIDIRECTIONAL);
 
 		skb_free_frag(sg_vaddr);
 		if (dpaa2_sg_is_final(&sgt[i]))
@@ -145,7 +145,7 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
 		sg_addr = dpaa2_sg_get_addr(sge);
 		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
 		dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
-				 DMA_FROM_DEVICE);
+				 DMA_BIDIRECTIONAL);
 
 		sg_length = dpaa2_sg_get_len(sge);
 
@@ -212,7 +212,7 @@ static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
 	for (i = 0; i < count; i++) {
 		vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
 		dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
-				 DMA_FROM_DEVICE);
+				 DMA_BIDIRECTIONAL);
 		skb_free_frag(vaddr);
 	}
 }
@@ -306,7 +306,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
 
 	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
 	dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
-				DMA_FROM_DEVICE);
+				DMA_BIDIRECTIONAL);
 
 	fas = dpaa2_get_fas(vaddr, false);
 	prefetch(fas);
@@ -325,13 +325,13 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
 		}
 
 		dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
-				 DMA_FROM_DEVICE);
+				 DMA_BIDIRECTIONAL);
 		skb = build_linear_skb(ch, fd, vaddr);
 	} else if (fd_format == dpaa2_fd_sg) {
 		WARN_ON(priv->xdp_prog);
 
 		dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
-				 DMA_FROM_DEVICE);
+				 DMA_BIDIRECTIONAL);
 		skb = build_frag_skb(priv, ch, buf_data);
 		skb_free_frag(vaddr);
 		percpu_extras->rx_sg_frames++;
@@ -865,7 +865,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
 		buf = PTR_ALIGN(buf, priv->rx_buf_align);
 
 		addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
-				      DMA_FROM_DEVICE);
+				      DMA_BIDIRECTIONAL);
 		if (unlikely(dma_mapping_error(dev, addr)))
 			goto err_map;
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 31+ messages in thread

* [PATCH v2 net-next 6/8] dpaa2-eth: Add support for XDP_TX
  2018-11-26 16:27 [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support Ioana Ciocoi Radulescu
                   ` (4 preceding siblings ...)
  2018-11-26 16:27 ` [PATCH v2 net-next 5/8] dpaa2-eth: Map Rx buffers as bidirectional Ioana Ciocoi Radulescu
@ 2018-11-26 16:27 ` Ioana Ciocoi Radulescu
  2018-11-26 16:27 ` [PATCH v2 net-next 7/8] dpaa2-eth: Cleanup channel stats Ioana Ciocoi Radulescu
                   ` (3 subsequent siblings)
  9 siblings, 0 replies; 31+ messages in thread
From: Ioana Ciocoi Radulescu @ 2018-11-26 16:27 UTC (permalink / raw)
  To: netdev, davem; +Cc: Ioana Ciornei, dsahern, Camelia Alexandra Groza

Send frames back on the same port for XDP_TX action.
Since the frame buffers have been allocated by us, we can recycle
them directly into the Rx buffer pool instead of requesting a
confirmation frame upon transmission complete.

Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
---
v2: XDP_TX packets count towards the tx packets and bytes counters

 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 51 +++++++++++++++++++++++-
 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h |  2 +
 2 files changed, 52 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index c2e880b..bc582c4 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -240,14 +240,53 @@ static void xdp_release_buf(struct dpaa2_eth_priv *priv,
 	ch->xdp.drop_cnt = 0;
 }
 
+static int xdp_enqueue(struct dpaa2_eth_priv *priv, struct dpaa2_fd *fd,
+		       void *buf_start, u16 queue_id)
+{
+	struct dpaa2_eth_fq *fq;
+	struct dpaa2_faead *faead;
+	u32 ctrl, frc;
+	int i, err;
+
+	/* Mark the egress frame hardware annotation area as valid */
+	frc = dpaa2_fd_get_frc(fd);
+	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
+	dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);
+
+	/* Instruct hardware to release the FD buffer directly into
+	 * the buffer pool once transmission is completed, instead of
+	 * sending a Tx confirmation frame to us
+	 */
+	ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
+	faead = dpaa2_get_faead(buf_start, false);
+	faead->ctrl = cpu_to_le32(ctrl);
+	faead->conf_fqid = 0;
+
+	fq = &priv->fq[queue_id];
+	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
+		err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
+						  priv->tx_qdid, 0,
+						  fq->tx_qdbin, fd);
+		if (err != -EBUSY)
+			break;
+	}
+
+	return err;
+}
+
 static u32 run_xdp(struct dpaa2_eth_priv *priv,
 		   struct dpaa2_eth_channel *ch,
+		   struct dpaa2_eth_fq *rx_fq,
 		   struct dpaa2_fd *fd, void *vaddr)
 {
 	dma_addr_t addr = dpaa2_fd_get_addr(fd);
+	struct rtnl_link_stats64 *percpu_stats;
 	struct bpf_prog *xdp_prog;
 	struct xdp_buff xdp;
 	u32 xdp_act = XDP_PASS;
+	int err;
+
+	percpu_stats = this_cpu_ptr(priv->percpu_stats);
 
 	rcu_read_lock();
 
@@ -269,6 +308,16 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
 	switch (xdp_act) {
 	case XDP_PASS:
 		break;
+	case XDP_TX:
+		err = xdp_enqueue(priv, fd, vaddr, rx_fq->flowid);
+		if (err) {
+			xdp_release_buf(priv, ch, addr);
+			percpu_stats->tx_errors++;
+		} else {
+			percpu_stats->tx_packets++;
+			percpu_stats->tx_bytes += dpaa2_fd_get_len(fd);
+		}
+		break;
 	default:
 		bpf_warn_invalid_xdp_action(xdp_act);
 	case XDP_ABORTED:
@@ -317,7 +366,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
 	percpu_extras = this_cpu_ptr(priv->percpu_extras);
 
 	if (fd_format == dpaa2_fd_single) {
-		xdp_act = run_xdp(priv, ch, (struct dpaa2_fd *)fd, vaddr);
+		xdp_act = run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
 		if (xdp_act != XDP_PASS) {
 			percpu_stats->rx_packets++;
 			percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 23cf9d9..5530a0e 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -139,7 +139,9 @@ struct dpaa2_faead {
 };
 
 #define DPAA2_FAEAD_A2V			0x20000000
+#define DPAA2_FAEAD_A4V			0x08000000
 #define DPAA2_FAEAD_UPDV		0x00001000
+#define DPAA2_FAEAD_EBDDV		0x00002000
 #define DPAA2_FAEAD_UPD			0x00000010
 
 /* Accessors for the hardware annotation fields that we use */
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 31+ messages in thread

* [PATCH v2 net-next 7/8] dpaa2-eth: Cleanup channel stats
  2018-11-26 16:27 [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support Ioana Ciocoi Radulescu
                   ` (5 preceding siblings ...)
  2018-11-26 16:27 ` [PATCH v2 net-next 6/8] dpaa2-eth: Add support for XDP_TX Ioana Ciocoi Radulescu
@ 2018-11-26 16:27 ` Ioana Ciocoi Radulescu
  2018-11-26 16:27 ` [PATCH v2 net-next 8/8] dpaa2-eth: Add xdp counters Ioana Ciocoi Radulescu
                   ` (2 subsequent siblings)
  9 siblings, 0 replies; 31+ messages in thread
From: Ioana Ciocoi Radulescu @ 2018-11-26 16:27 UTC (permalink / raw)
  To: netdev, davem; +Cc: Ioana Ciornei, dsahern, Camelia Alexandra Groza

Remove unused counter. Reorder fields in channel stats structure
to match the ethtool strings order and make it easier to print them
with ethtool -S.

Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
---
v2: no changes

 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c     |  1 -
 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h     |  6 ++----
 drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c | 16 +++++-----------
 3 files changed, 7 insertions(+), 16 deletions(-)

diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index bc582c4..d2bc5da 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -467,7 +467,6 @@ static int consume_frames(struct dpaa2_eth_channel *ch,
 		return 0;
 
 	fq->stats.frames += cleaned;
-	ch->stats.frames += cleaned;
 
 	/* A dequeue operation only pulls frames from a single queue
 	 * into the store. Return the frame queue as an out param.
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 5530a0e..41a2a0d 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -245,12 +245,10 @@ struct dpaa2_eth_fq_stats {
 struct dpaa2_eth_ch_stats {
 	/* Volatile dequeues retried due to portal busy */
 	__u64 dequeue_portal_busy;
-	/* Number of CDANs; useful to estimate avg NAPI len */
-	__u64 cdan;
-	/* Number of frames received on queues from this channel */
-	__u64 frames;
 	/* Pull errors */
 	__u64 pull_err;
+	/* Number of CDANs; useful to estimate avg NAPI len */
+	__u64 cdan;
 };
 
 /* Maximum number of queues associated with a DPNI */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index 26bd5a2..79eeebe 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -174,8 +174,6 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
 	int j, k, err;
 	int num_cnt;
 	union dpni_statistics dpni_stats;
-	u64 cdan = 0;
-	u64 portal_busy = 0, pull_err = 0;
 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
 	struct dpaa2_eth_drv_stats *extras;
 	struct dpaa2_eth_ch_stats *ch_stats;
@@ -212,16 +210,12 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
 	}
 	i += j;
 
-	for (j = 0; j < priv->num_channels; j++) {
-		ch_stats = &priv->channel[j]->stats;
-		cdan += ch_stats->cdan;
-		portal_busy += ch_stats->dequeue_portal_busy;
-		pull_err += ch_stats->pull_err;
+	/* Per-channel stats */
+	for (k = 0; k < priv->num_channels; k++) {
+		ch_stats = &priv->channel[k]->stats;
+		for (j = 0; j < sizeof(*ch_stats) / sizeof(__u64); j++)
+			*((__u64 *)data + i + j) += *((__u64 *)ch_stats + j);
 	}
-
-	*(data + i++) = portal_busy;
-	*(data + i++) = pull_err;
-	*(data + i++) = cdan;
 }
 
 static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 31+ messages in thread

* [PATCH v2 net-next 8/8] dpaa2-eth: Add xdp counters
  2018-11-26 16:27 [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support Ioana Ciocoi Radulescu
                   ` (6 preceding siblings ...)
  2018-11-26 16:27 ` [PATCH v2 net-next 7/8] dpaa2-eth: Cleanup channel stats Ioana Ciocoi Radulescu
@ 2018-11-26 16:27 ` Ioana Ciocoi Radulescu
  2018-11-28 16:11   ` David Ahern
  2018-11-28  0:24 ` [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support David Miller
  2018-12-05 15:45 ` Jesper Dangaard Brouer
  9 siblings, 1 reply; 31+ messages in thread
From: Ioana Ciocoi Radulescu @ 2018-11-26 16:27 UTC (permalink / raw)
  To: netdev, davem; +Cc: Ioana Ciornei, dsahern, Camelia Alexandra Groza

Add counters for xdp processed frames to the channel statistics.

Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
---
v2: no changes

 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c     | 3 +++
 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h     | 4 ++++
 drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c | 3 +++
 3 files changed, 10 insertions(+)

diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index d2bc5da..be84171 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -313,9 +313,11 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
 		if (err) {
 			xdp_release_buf(priv, ch, addr);
 			percpu_stats->tx_errors++;
+			ch->stats.xdp_tx_err++;
 		} else {
 			percpu_stats->tx_packets++;
 			percpu_stats->tx_bytes += dpaa2_fd_get_len(fd);
+			ch->stats.xdp_tx++;
 		}
 		break;
 	default:
@@ -324,6 +326,7 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
 		trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
 	case XDP_DROP:
 		xdp_release_buf(priv, ch, addr);
+		ch->stats.xdp_drop++;
 		break;
 	}
 
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 41a2a0d..69c965d 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -249,6 +249,10 @@ struct dpaa2_eth_ch_stats {
 	__u64 pull_err;
 	/* Number of CDANs; useful to estimate avg NAPI len */
 	__u64 cdan;
+	/* XDP counters */
+	__u64 xdp_drop;
+	__u64 xdp_tx;
+	__u64 xdp_tx_err;
 };
 
 /* Maximum number of queues associated with a DPNI */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index 79eeebe..0c831bf 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -45,6 +45,9 @@ static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
 	"[drv] dequeue portal busy",
 	"[drv] channel pull errors",
 	"[drv] cdan",
+	"[drv] xdp drop",
+	"[drv] xdp tx",
+	"[drv] xdp tx errors",
 };
 
 #define DPAA2_ETH_NUM_EXTRA_STATS	ARRAY_SIZE(dpaa2_ethtool_extras)
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 31+ messages in thread

* RE: [PATCH v2 net-next 1/8] dpaa2-eth: Add basic XDP support
  2018-11-26 16:27 ` [PATCH v2 net-next 1/8] dpaa2-eth: Add basic " Ioana Ciocoi Radulescu
@ 2018-11-27 15:45   ` Camelia Alexandra Groza
  2018-11-28 16:11   ` David Ahern
  1 sibling, 0 replies; 31+ messages in thread
From: Camelia Alexandra Groza @ 2018-11-27 15:45 UTC (permalink / raw)
  To: Ioana Ciocoi Radulescu, netdev, davem; +Cc: Ioana Ciornei, dsahern

> -----Original Message-----
> From: Ioana Ciocoi Radulescu
> Sent: Monday, November 26, 2018 18:27
> To: netdev@vger.kernel.org; davem@davemloft.net
> Cc: Ioana Ciornei <ioana.ciornei@nxp.com>; dsahern@gmail.com; Camelia
> Alexandra Groza <camelia.groza@nxp.com>
> Subject: [PATCH v2 net-next 1/8] dpaa2-eth: Add basic XDP support
> 
> We keep one XDP program reference per channel. The only actions
> supported for now are XDP_DROP and XDP_PASS.
> 
> Until now we didn't enforce a maximum size for Rx frames based
> on MTU value. Change that, since for XDP mode we must ensure no
> scatter-gather frames can be received.
> 
> Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>

Acked-by: Camelia Groza <camelia.groza@nxp.com>

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support
  2018-11-26 16:27 [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support Ioana Ciocoi Radulescu
                   ` (7 preceding siblings ...)
  2018-11-26 16:27 ` [PATCH v2 net-next 8/8] dpaa2-eth: Add xdp counters Ioana Ciocoi Radulescu
@ 2018-11-28  0:24 ` David Miller
  2018-11-28  9:18   ` Ioana Ciocoi Radulescu
  2018-12-05 15:45 ` Jesper Dangaard Brouer
  9 siblings, 1 reply; 31+ messages in thread
From: David Miller @ 2018-11-28  0:24 UTC (permalink / raw)
  To: ruxandra.radulescu; +Cc: netdev, ioana.ciornei, dsahern, camelia.groza

From: Ioana Ciocoi Radulescu <ruxandra.radulescu@nxp.com>
Date: Mon, 26 Nov 2018 16:27:28 +0000

> Add support for XDP programs. Only XDP_PASS, XDP_DROP and XDP_TX
> actions are supported for now. Frame header changes are also
> allowed.
> 
> v2: - count the XDP packets in the rx/tx interface stats
>     - add message with the maximum supported MTU value for XDP

This doesn't apply cleanly to net-next.

Could you please do a quick respin so I can apply this?

Thanks!

^ permalink raw reply	[flat|nested] 31+ messages in thread

* RE: [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support
  2018-11-28  0:24 ` [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support David Miller
@ 2018-11-28  9:18   ` Ioana Ciocoi Radulescu
  2018-11-28 16:10     ` David Ahern
  2018-11-28 18:57     ` David Miller
  0 siblings, 2 replies; 31+ messages in thread
From: Ioana Ciocoi Radulescu @ 2018-11-28  9:18 UTC (permalink / raw)
  To: David Miller; +Cc: netdev, Ioana Ciornei, dsahern, Camelia Alexandra Groza

> -----Original Message-----
> From: David Miller <davem@davemloft.net>
> Sent: Wednesday, November 28, 2018 2:25 AM
> To: Ioana Ciocoi Radulescu <ruxandra.radulescu@nxp.com>
> Cc: netdev@vger.kernel.org; Ioana Ciornei <ioana.ciornei@nxp.com>;
> dsahern@gmail.com; Camelia Alexandra Groza <camelia.groza@nxp.com>
> Subject: Re: [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support
> 
> From: Ioana Ciocoi Radulescu <ruxandra.radulescu@nxp.com>
> Date: Mon, 26 Nov 2018 16:27:28 +0000
> 
> > Add support for XDP programs. Only XDP_PASS, XDP_DROP and XDP_TX
> > actions are supported for now. Frame header changes are also
> > allowed.
> >
> > v2: - count the XDP packets in the rx/tx interface stats
> >     - add message with the maximum supported MTU value for XDP
> 
> This doesn't apply cleanly to net-next.
> 
> Could you please do a quick respin so I can apply this?

They apply cleanly for me. To doublecheck, I've downloaded the mbox
patches from patchwork and applied them on net-next.git, master branch
(commit 86d1d8b72c).
I'm obviously doing something wrong, but I don't know what.

Thanks,
Ioana

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support
  2018-11-28  9:18   ` Ioana Ciocoi Radulescu
@ 2018-11-28 16:10     ` David Ahern
  2018-11-28 18:57     ` David Miller
  1 sibling, 0 replies; 31+ messages in thread
From: David Ahern @ 2018-11-28 16:10 UTC (permalink / raw)
  To: Ioana Ciocoi Radulescu, David Miller
  Cc: netdev, Ioana Ciornei, Camelia Alexandra Groza

On 11/28/18 2:18 AM, Ioana Ciocoi Radulescu wrote:
>> -----Original Message-----
>> From: David Miller <davem@davemloft.net>
>> Sent: Wednesday, November 28, 2018 2:25 AM
>> To: Ioana Ciocoi Radulescu <ruxandra.radulescu@nxp.com>
>> Cc: netdev@vger.kernel.org; Ioana Ciornei <ioana.ciornei@nxp.com>;
>> dsahern@gmail.com; Camelia Alexandra Groza <camelia.groza@nxp.com>
>> Subject: Re: [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support
>>
>> From: Ioana Ciocoi Radulescu <ruxandra.radulescu@nxp.com>
>> Date: Mon, 26 Nov 2018 16:27:28 +0000
>>
>>> Add support for XDP programs. Only XDP_PASS, XDP_DROP and XDP_TX
>>> actions are supported for now. Frame header changes are also
>>> allowed.
>>>
>>> v2: - count the XDP packets in the rx/tx interface stats
>>>     - add message with the maximum supported MTU value for XDP
>>
>> This doesn't apply cleanly to net-next.
>>
>> Could you please do a quick respin so I can apply this?
> 
> They apply cleanly for me. To doublecheck, I've downloaded the mbox
> patches from patchwork and applied them on net-next.git, master branch
> (commit 86d1d8b72c).
> I'm obviously doing something wrong, but I don't know what.

same here. All patches applied cleanly to net-next.

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH v2 net-next 8/8] dpaa2-eth: Add xdp counters
  2018-11-26 16:27 ` [PATCH v2 net-next 8/8] dpaa2-eth: Add xdp counters Ioana Ciocoi Radulescu
@ 2018-11-28 16:11   ` David Ahern
  0 siblings, 0 replies; 31+ messages in thread
From: David Ahern @ 2018-11-28 16:11 UTC (permalink / raw)
  To: Ioana Ciocoi Radulescu, netdev, davem
  Cc: Ioana Ciornei, Camelia Alexandra Groza

On 11/26/18 9:27 AM, Ioana Ciocoi Radulescu wrote:
> Add counters for xdp processed frames to the channel statistics.
> 
> Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
> ---
> v2: no changes
> 

Reviewed-by: David Ahern <dsahern@gmail.com>

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH v2 net-next 1/8] dpaa2-eth: Add basic XDP support
  2018-11-26 16:27 ` [PATCH v2 net-next 1/8] dpaa2-eth: Add basic " Ioana Ciocoi Radulescu
  2018-11-27 15:45   ` Camelia Alexandra Groza
@ 2018-11-28 16:11   ` David Ahern
  1 sibling, 0 replies; 31+ messages in thread
From: David Ahern @ 2018-11-28 16:11 UTC (permalink / raw)
  To: Ioana Ciocoi Radulescu, netdev, davem
  Cc: Ioana Ciornei, Camelia Alexandra Groza

On 11/26/18 9:27 AM, Ioana Ciocoi Radulescu wrote:
> We keep one XDP program reference per channel. The only actions
> supported for now are XDP_DROP and XDP_PASS.
> 
> Until now we didn't enforce a maximum size for Rx frames based
> on MTU value. Change that, since for XDP mode we must ensure no
> scatter-gather frames can be received.
> 
> Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
> ---
> v2: - xdp packets count towards the rx packets and bytes counters
>     - add warning message with the maximum supported MTU value for XDP
> 
>  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 189 ++++++++++++++++++++++-
>  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h |   6 +
>  2 files changed, 194 insertions(+), 1 deletion(-)
> 

Reviewed-by: David Ahern <dsahern@gmail.com>

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support
  2018-11-28  9:18   ` Ioana Ciocoi Radulescu
  2018-11-28 16:10     ` David Ahern
@ 2018-11-28 18:57     ` David Miller
  1 sibling, 0 replies; 31+ messages in thread
From: David Miller @ 2018-11-28 18:57 UTC (permalink / raw)
  To: ruxandra.radulescu; +Cc: netdev, ioana.ciornei, dsahern, camelia.groza

From: Ioana Ciocoi Radulescu <ruxandra.radulescu@nxp.com>
Date: Wed, 28 Nov 2018 09:18:28 +0000

> They apply cleanly for me.

I figured out what happened.

The patches were mis-ordered (specifically patches #3 and #4) when I added
them to the patchwork bundle, and that is what causes them to fail.

Series applied, thanks!

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support
  2018-11-26 16:27 [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support Ioana Ciocoi Radulescu
                   ` (8 preceding siblings ...)
  2018-11-28  0:24 ` [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support David Miller
@ 2018-12-05 15:45 ` Jesper Dangaard Brouer
  2018-12-07 16:54   ` Ioana Ciocoi Radulescu
  9 siblings, 1 reply; 31+ messages in thread
From: Jesper Dangaard Brouer @ 2018-12-05 15:45 UTC (permalink / raw)
  To: Ioana Ciocoi Radulescu
  Cc: brouer, netdev, davem, Ioana Ciornei, dsahern,
	Camelia Alexandra Groza, Ilias Apalodimas

On Mon, 26 Nov 2018 16:27:28 +0000
Ioana Ciocoi Radulescu <ruxandra.radulescu@nxp.com> wrote:

> Add support for XDP programs. Only XDP_PASS, XDP_DROP and XDP_TX
> actions are supported for now. Frame header changes are also
> allowed.

Do you have any XDP performance benchmarks on this hardware?

Also what boards (and arch's) are using this dpaa2-eth driver?
Any devel board I can buy?


p.s. Ilias and I are coding up page_pool and XDP support for Marvell
mvneta driver, which is avail on a number of avail boards, see here[1]

[1] https://github.com/xdp-project/xdp-project/blob/master/areas/arm64/arm01_selecting_hardware.org
-- 
Best regards,
  Jesper Dangaard Brouer
  MSc.CS, Principal Kernel Engineer at Red Hat
  LinkedIn: http://www.linkedin.com/in/brouer

^ permalink raw reply	[flat|nested] 31+ messages in thread

* RE: [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support
  2018-12-05 15:45 ` Jesper Dangaard Brouer
@ 2018-12-07 16:54   ` Ioana Ciocoi Radulescu
  2018-12-07 17:20     ` Ilias Apalodimas
  0 siblings, 1 reply; 31+ messages in thread
From: Ioana Ciocoi Radulescu @ 2018-12-07 16:54 UTC (permalink / raw)
  To: Jesper Dangaard Brouer
  Cc: netdev, davem, Ioana Ciornei, dsahern, Camelia Alexandra Groza,
	Ilias Apalodimas

> -----Original Message-----
> From: Jesper Dangaard Brouer <brouer@redhat.com>
> Sent: Wednesday, December 5, 2018 5:45 PM
> To: Ioana Ciocoi Radulescu <ruxandra.radulescu@nxp.com>
> Cc: brouer@redhat.com; netdev@vger.kernel.org; davem@davemloft.net;
> Ioana Ciornei <ioana.ciornei@nxp.com>; dsahern@gmail.com; Camelia
> Alexandra Groza <camelia.groza@nxp.com>; Ilias Apalodimas
> <ilias.apalodimas@linaro.org>
> Subject: Re: [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support
> 
> On Mon, 26 Nov 2018 16:27:28 +0000
> Ioana Ciocoi Radulescu <ruxandra.radulescu@nxp.com> wrote:
> 
> > Add support for XDP programs. Only XDP_PASS, XDP_DROP and XDP_TX
> > actions are supported for now. Frame header changes are also
> > allowed.
> 
> Do you have any XDP performance benchmarks on this hardware?

We have some preliminary perf data that doesn't look great,
but we hope to improve it :)

On a LS2088A with A72 cores @2GHz (numbers in Mpps):
				1core		8cores
-------------------------------------------------------------------------
XDP_DROP (no touching data)	5.68		29.6 (linerate)
XDP_DROP (xdp1 sample)	3.46		25.18
XDP_TX(xdp2 sample)		1.71		13.26

For comparison, plain IP forwarding through the stack
is currently around 0.5Mpps (1c) / 3.8Mpps (8c).

>
> Also what boards (and arch's) are using this dpaa2-eth driver?

Currently supported LS2088A, LS1088A, soon LX2160A (all with
ARM64 cores).

> Any devel board I can buy?

I should have an answer for this early next week and will
get back to you.

Thanks,
Ioana

> 
> 
> p.s. Ilias and I are coding up page_pool and XDP support for Marvell
> mvneta driver, which is avail on a number of avail boards, see here[1]
> 
> [1]
> https://emea01.safelinks.protection.outlook.com/?url=https%3A%2F%2Fgit
> hub.com%2Fxdp-project%2Fxdp-
> project%2Fblob%2Fmaster%2Fareas%2Farm64%2Farm01_selecting_hardwar
> e.org&amp;data=02%7C01%7Cruxandra.radulescu%40nxp.com%7C546868ba
> aa074902ded608d65ac8a594%7C686ea1d3bc2b4c6fa92cd99c5c301635%7C0%7
> C0%7C636796215148994553&amp;sdata=za6xUoIrv2jo%2BbvuKjXfpOXeQ3tw
> 96bZZzRB2Vny1iw%3D&amp;reserved=0
> --
> Best regards,
>   Jesper Dangaard Brouer
>   MSc.CS, Principal Kernel Engineer at Red Hat
>   LinkedIn:
> https://emea01.safelinks.protection.outlook.com/?url=http%3A%2F%2Fww
> w.linkedin.com%2Fin%2Fbrouer&amp;data=02%7C01%7Cruxandra.radulescu
> %40nxp.com%7C546868baaa074902ded608d65ac8a594%7C686ea1d3bc2b4c6f
> a92cd99c5c301635%7C0%7C0%7C636796215148994553&amp;sdata=vTe2jd3V
> FXUpEVPLkbGN6i2OyyPfhQ9HacCaPZbm%2Bk8%3D&amp;reserved=0

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support
  2018-12-07 16:54   ` Ioana Ciocoi Radulescu
@ 2018-12-07 17:20     ` Ilias Apalodimas
  2018-12-07 17:42       ` Ioana Ciocoi Radulescu
  0 siblings, 1 reply; 31+ messages in thread
From: Ilias Apalodimas @ 2018-12-07 17:20 UTC (permalink / raw)
  To: Ioana Ciocoi Radulescu
  Cc: Jesper Dangaard Brouer, netdev, davem, Ioana Ciornei, dsahern,
	Camelia Alexandra Groza

Hi Ioana,
> > 
> > > Add support for XDP programs. Only XDP_PASS, XDP_DROP and XDP_TX
> > > actions are supported for now. Frame header changes are also
> > > allowed.

I only did a quick grep around the driver so i might be missing something, 
but i can only see allocations via napi_alloc_frag(). XDP requires pages 
(either a single page per packet or a driver that does the page management of
its own and fits 2 frames in a single page, assuming 4kb pages). 
Am i missing something on the driver? 

> > 
> > Do you have any XDP performance benchmarks on this hardware?
> 
> We have some preliminary perf data that doesn't look great,
> but we hope to improve it :)

As Jesper said we are doing similar work on a cortex a-53 and plan to work on
a-72 as well. We might be able to help out.

/Ilias

^ permalink raw reply	[flat|nested] 31+ messages in thread

* RE: [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support
  2018-12-07 17:20     ` Ilias Apalodimas
@ 2018-12-07 17:42       ` Ioana Ciocoi Radulescu
  2018-12-07 17:51         ` Ilias Apalodimas
  0 siblings, 1 reply; 31+ messages in thread
From: Ioana Ciocoi Radulescu @ 2018-12-07 17:42 UTC (permalink / raw)
  To: Ilias Apalodimas
  Cc: Jesper Dangaard Brouer, netdev, davem, Ioana Ciornei, dsahern,
	Camelia Alexandra Groza



> -----Original Message-----
> From: Ilias Apalodimas <ilias.apalodimas@linaro.org>
> Sent: Friday, December 7, 2018 7:20 PM
> To: Ioana Ciocoi Radulescu <ruxandra.radulescu@nxp.com>
> Cc: Jesper Dangaard Brouer <brouer@redhat.com>;
> netdev@vger.kernel.org; davem@davemloft.net; Ioana Ciornei
> <ioana.ciornei@nxp.com>; dsahern@gmail.com; Camelia Alexandra Groza
> <camelia.groza@nxp.com>
> Subject: Re: [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support
> 
> Hi Ioana,
> > >
> > > > Add support for XDP programs. Only XDP_PASS, XDP_DROP and
> XDP_TX
> > > > actions are supported for now. Frame header changes are also
> > > > allowed.
> 
> I only did a quick grep around the driver so i might be missing something,
> but i can only see allocations via napi_alloc_frag(). XDP requires pages
> (either a single page per packet or a driver that does the page management
> of
> its own and fits 2 frames in a single page, assuming 4kb pages).
> Am i missing something on the driver?

No, I guess I'm the one missing stuff, I didn't realise single page per packet
is a hard requirement for XDP. Could you point me to more info on this?

Thanks,
Ioana

> 
> > >
> > > Do you have any XDP performance benchmarks on this hardware?
> >
> > We have some preliminary perf data that doesn't look great,
> > but we hope to improve it :)
> 
> As Jesper said we are doing similar work on a cortex a-53 and plan to work on
> a-72 as well. We might be able to help out.
> 
> /Ilias

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support
  2018-12-07 17:42       ` Ioana Ciocoi Radulescu
@ 2018-12-07 17:51         ` Ilias Apalodimas
  2018-12-07 18:07           ` Ioana Ciocoi Radulescu
  0 siblings, 1 reply; 31+ messages in thread
From: Ilias Apalodimas @ 2018-12-07 17:51 UTC (permalink / raw)
  To: Ioana Ciocoi Radulescu
  Cc: Jesper Dangaard Brouer, netdev, davem, Ioana Ciornei, dsahern,
	Camelia Alexandra Groza

Hi Ioana,
> > > >
> > I only did a quick grep around the driver so i might be missing something,
> > but i can only see allocations via napi_alloc_frag(). XDP requires pages
> > (either a single page per packet or a driver that does the page management
> > of
> > its own and fits 2 frames in a single page, assuming 4kb pages).
> > Am i missing something on the driver?
> 
> No, I guess I'm the one missing stuff, I didn't realise single page per packet
> is a hard requirement for XDP. Could you point me to more info on this?
> 

Well if you don't have to use 64kb pages you can use the page_pool API (only
used from mlx5 atm) and get the xdp recycling for free. The memory 'waste' for
4kb pages isn't too much if the platforms the driver sits on have decent amounts
of memory  (and the number of descriptors used is not too high).
We still have work in progress with Jesper (just posted an RFC) with improvements
on the API.
Using it is fairly straightforward. This is a patchset on marvell's mvneta
driver with the API changes needed: 
https://www.spinics.net/lists/netdev/msg538285.html

If you need 64kb pages you would have to introduce page recycling and sharing 
like intel/mlx drivers on your driver.

/Ilias

^ permalink raw reply	[flat|nested] 31+ messages in thread

* RE: [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support
  2018-12-07 17:51         ` Ilias Apalodimas
@ 2018-12-07 18:07           ` Ioana Ciocoi Radulescu
  2018-12-13 17:43             ` Ioana Ciocoi Radulescu
  2018-12-21 15:31             ` Explaining the XDP page-requirement (Was: [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support) Jesper Dangaard Brouer
  0 siblings, 2 replies; 31+ messages in thread
From: Ioana Ciocoi Radulescu @ 2018-12-07 18:07 UTC (permalink / raw)
  To: Ilias Apalodimas
  Cc: Jesper Dangaard Brouer, netdev, davem, Ioana Ciornei, dsahern,
	Camelia Alexandra Groza

> -----Original Message-----
> From: Ilias Apalodimas <ilias.apalodimas@linaro.org>
> Sent: Friday, December 7, 2018 7:52 PM
> To: Ioana Ciocoi Radulescu <ruxandra.radulescu@nxp.com>
> Cc: Jesper Dangaard Brouer <brouer@redhat.com>;
> netdev@vger.kernel.org; davem@davemloft.net; Ioana Ciornei
> <ioana.ciornei@nxp.com>; dsahern@gmail.com; Camelia Alexandra Groza
> <camelia.groza@nxp.com>
> Subject: Re: [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support
> 
> Hi Ioana,
> > > > >
> > > I only did a quick grep around the driver so i might be missing something,
> > > but i can only see allocations via napi_alloc_frag(). XDP requires pages
> > > (either a single page per packet or a driver that does the page
> management
> > > of
> > > its own and fits 2 frames in a single page, assuming 4kb pages).
> > > Am i missing something on the driver?
> >
> > No, I guess I'm the one missing stuff, I didn't realise single page per packet
> > is a hard requirement for XDP. Could you point me to more info on this?
> >
> 
> Well if you don't have to use 64kb pages you can use the page_pool API (only
> used from mlx5 atm) and get the xdp recycling for free. The memory 'waste'
> for
> 4kb pages isn't too much if the platforms the driver sits on have decent
> amounts
> of memory  (and the number of descriptors used is not too high).
> We still have work in progress with Jesper (just posted an RFC) with
> improvements
> on the API.
> Using it is fairly straightforward. This is a patchset on marvell's mvneta
> driver with the API changes needed:
> https://www.spinics.net/lists/netdev/msg538285.html
> 
> If you need 64kb pages you would have to introduce page recycling and
> sharing
> like intel/mlx drivers on your driver.

Thanks a lot for the info, will look into this. Do you have any pointers
as to why the full page restriction exists in the first place? Sorry if it's
a dumb question, but I haven't found details on this and I'd really like
to understand it.

Thanks
Ioana

^ permalink raw reply	[flat|nested] 31+ messages in thread

* RE: [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support
  2018-12-07 18:07           ` Ioana Ciocoi Radulescu
@ 2018-12-13 17:43             ` Ioana Ciocoi Radulescu
  2018-12-13 18:47               ` Ilias Apalodimas
  2018-12-21 15:31             ` Explaining the XDP page-requirement (Was: [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support) Jesper Dangaard Brouer
  1 sibling, 1 reply; 31+ messages in thread
From: Ioana Ciocoi Radulescu @ 2018-12-13 17:43 UTC (permalink / raw)
  To: Ilias Apalodimas
  Cc: Jesper Dangaard Brouer, netdev, davem, Ioana Ciornei, dsahern,
	Camelia Alexandra Groza

> -----Original Message-----
> From: Ioana Ciocoi Radulescu
> Sent: Friday, December 7, 2018 8:08 PM
> To: 'Ilias Apalodimas' <ilias.apalodimas@linaro.org>
> Cc: Jesper Dangaard Brouer <brouer@redhat.com>;
> netdev@vger.kernel.org; davem@davemloft.net; Ioana Ciornei
> <ioana.ciornei@nxp.com>; dsahern@gmail.com; Camelia Alexandra Groza
> <camelia.groza@nxp.com>
> Subject: RE: [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support
> 
> > -----Original Message-----
> > From: Ilias Apalodimas <ilias.apalodimas@linaro.org>
> > Sent: Friday, December 7, 2018 7:52 PM
> > To: Ioana Ciocoi Radulescu <ruxandra.radulescu@nxp.com>
> > Cc: Jesper Dangaard Brouer <brouer@redhat.com>;
> > netdev@vger.kernel.org; davem@davemloft.net; Ioana Ciornei
> > <ioana.ciornei@nxp.com>; dsahern@gmail.com; Camelia Alexandra Groza
> > <camelia.groza@nxp.com>
> > Subject: Re: [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support
> >
> > Hi Ioana,
> > > > > >
> > > > I only did a quick grep around the driver so i might be missing
> something,
> > > > but i can only see allocations via napi_alloc_frag(). XDP requires pages
> > > > (either a single page per packet or a driver that does the page
> > management
> > > > of
> > > > its own and fits 2 frames in a single page, assuming 4kb pages).
> > > > Am i missing something on the driver?
> > >
> > > No, I guess I'm the one missing stuff, I didn't realise single page per
> packet
> > > is a hard requirement for XDP. Could you point me to more info on this?
> > >
> >
> > Well if you don't have to use 64kb pages you can use the page_pool API
> (only
> > used from mlx5 atm) and get the xdp recycling for free. The memory
> 'waste'
> > for
> > 4kb pages isn't too much if the platforms the driver sits on have decent
> > amounts
> > of memory  (and the number of descriptors used is not too high).
> > We still have work in progress with Jesper (just posted an RFC) with
> > improvements
> > on the API.
> > Using it is fairly straightforward. This is a patchset on marvell's mvneta
> > driver with the API changes needed:
> > https://www.spinics.net/lists/netdev/msg538285.html
> >
> > If you need 64kb pages you would have to introduce page recycling and
> > sharing
> > like intel/mlx drivers on your driver.
> 
> Thanks a lot for the info, will look into this. Do you have any pointers
> as to why the full page restriction exists in the first place? Sorry if it's
> a dumb question, but I haven't found details on this and I'd really like
> to understand it.

After a quick glance, not sure we can use page_pool API.

The problem is our driver is not ring-based: we have a single
buffer pool used by all Rx queues, so using page_pool allocations
would imply adding a layer of synchronization in our driver.

I'm still trying to figure out how deep is the trouble we're in
for not using single page per packet in our driver, considering
we don't support XDP_REDIRECT yet. Guess I'll wait for Jesper's
answer for this.

Thanks,
Ioana

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support
  2018-12-13 17:43             ` Ioana Ciocoi Radulescu
@ 2018-12-13 18:47               ` Ilias Apalodimas
  0 siblings, 0 replies; 31+ messages in thread
From: Ilias Apalodimas @ 2018-12-13 18:47 UTC (permalink / raw)
  To: Ioana Ciocoi Radulescu
  Cc: Jesper Dangaard Brouer, netdev, davem, Ioana Ciornei, dsahern,
	Camelia Alexandra Groza

Hi Ioana
> > >
> > > Well if you don't have to use 64kb pages you can use the page_pool API
> > (only
> > > used from mlx5 atm) and get the xdp recycling for free. The memory
> > 'waste'
> > > for
> > > 4kb pages isn't too much if the platforms the driver sits on have decent
> > > amounts
> > > of memory  (and the number of descriptors used is not too high).
> > > We still have work in progress with Jesper (just posted an RFC)with
> > > improvements
> > > on the API.
> > > Using it is fairly straightforward. This is a patchset on marvell's mvneta
> > > driver with the API changes needed:
> > > https://www.spinics.net/lists/netdev/msg538285.html
> > >
> > > If you need 64kb pages you would have to introduce page recycling and
> > > sharing
> > > like intel/mlx drivers on your driver.
> > 
> > Thanks a lot for the info, will look into this. Do you have any pointers
> > as to why the full page restriction exists in the first place? Sorry if it's
> > a dumb question, but I haven't found details on this and I'd really like
> > to understand it.
> 
> After a quick glance, not sure we can use page_pool API.
> 
> The problem is our driver is not ring-based: we have a single
> buffer pool used by all Rx queues, so using page_pool allocations
> would imply adding a layer of synchronization in our driver.

We had similar concerns a while ago. Have a look at:
https://www.spinics.net/lists/netdev/msg481494.html
https://www.mail-archive.com/netdev@vger.kernel.org/msg236820.html

Jesper and i have briefly discussed on this and this type of hardware is
something we need to consider for page_pool API.

> 
> I'm still trying to figure out how deep is the trouble we're in
> for not using single page per packet in our driver, considering
> we don't support XDP_REDIRECT yet. Guess I'll wait for Jesper's
> answer for this.
I might be wrong, but I don't think anything apart from performance will
'break', since no memory is sent to userspace (no XDP_REDIRECT implemented).
Jesper will probably be able to think of any corner cases i might be ignoring.

Then again you write a driver, test it and you'll end up rewriting and
re-testing if you ever need the feature. 

/Ilias

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Explaining the XDP page-requirement (Was: [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support)
  2018-12-07 18:07           ` Ioana Ciocoi Radulescu
  2018-12-13 17:43             ` Ioana Ciocoi Radulescu
@ 2018-12-21 15:31             ` Jesper Dangaard Brouer
  2019-01-07 10:34               ` Ioana Ciocoi Radulescu
  2019-01-09 14:22               ` Madalin-cristian Bucur
  1 sibling, 2 replies; 31+ messages in thread
From: Jesper Dangaard Brouer @ 2018-12-21 15:31 UTC (permalink / raw)
  To: Ioana Ciocoi Radulescu
  Cc: Ilias Apalodimas, netdev, davem, Ioana Ciornei, dsahern,
	Camelia Alexandra Groza, brouer

On Fri, 7 Dec 2018 18:07:49 +0000
Ioana Ciocoi Radulescu <ruxandra.radulescu@nxp.com> wrote:

> Thanks a lot for the info, will look into this. Do you have any
> pointers as to why the full page restriction exists in the first
> place? Sorry if it's a dumb question, but I haven't found details on
> this and I'd really like to understand it.

Hi Ioana,

I promised (offlist) that I would get back to you explaining the XDP
page-requirement...

There are several reasons for XDP to require frames are backed by a
page.  It started out with a focus on gaining speed via simplicity.

The overall requirement is: XDP frame in physically contiguous memory
 - which is a requirement from BPF Direct-Access, for validating correctness.
 - Implying you cannot split packet data over several pages.

An important part of the page-requirement is to allow creating SKB's
outside the driver code.  This happens today in both cpumap and veth
(when doing XDP_REDIRECT).  And we need to control and limit the
variations, to avoid having to handle all kind of SKB schemes.
Specifically we need enough tailroom for the skb-shared-info.

In the beginning we had the requirement of: 1-page per XDP frame.
 - Gave us a simplified memory model
 - Allow us to not touch atomic refcnt on page (always 1)
 - Fixed 256 bytes headroom
 - This gave us a lot of tailroom, expanding tail was trivial.

Eventually ixgbe+i40e force us to use a split-page model, allowing two
frames per page.
 - This started to complicate memory model
 - This unfortunately gave issue of unknown tailroom, which killed the
   tailroom expand option.
 - Changes XDP headroom to be variable (192 or 256 bytes)

E.g. I really want to allow bpf_xdp_adjust_tail() to *expand* the
frame size, but after allowing the split-page model, we couldn't allow
this easily.  And SKB alloc in cpumap/veth was also complicated by not
knowing (implicit) xdp_frame "hard-end".  (We might have to extend
xdp_buff with "data_hard_end").

-- 
Best regards,
  Jesper Dangaard Brouer
  MSc.CS, Principal Kernel Engineer at Red Hat
  LinkedIn: http://www.linkedin.com/in/brouer

^ permalink raw reply	[flat|nested] 31+ messages in thread

* RE: Explaining the XDP page-requirement (Was: [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support)
  2018-12-21 15:31             ` Explaining the XDP page-requirement (Was: [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support) Jesper Dangaard Brouer
@ 2019-01-07 10:34               ` Ioana Ciocoi Radulescu
  2019-01-07 10:45                 ` Ilias Apalodimas
  2019-01-09 14:22               ` Madalin-cristian Bucur
  1 sibling, 1 reply; 31+ messages in thread
From: Ioana Ciocoi Radulescu @ 2019-01-07 10:34 UTC (permalink / raw)
  To: Jesper Dangaard Brouer
  Cc: Ilias Apalodimas, netdev, davem, Ioana Ciornei, dsahern,
	Camelia Alexandra Groza

> -----Original Message-----
> From: Jesper Dangaard Brouer <brouer@redhat.com>
> Sent: Friday, December 21, 2018 5:31 PM
> To: Ioana Ciocoi Radulescu <ruxandra.radulescu@nxp.com>
> Cc: Ilias Apalodimas <ilias.apalodimas@linaro.org>; netdev@vger.kernel.org;
> davem@davemloft.net; Ioana Ciornei <ioana.ciornei@nxp.com>;
> dsahern@gmail.com; Camelia Alexandra Groza <camelia.groza@nxp.com>;
> brouer@redhat.com
> Subject: Explaining the XDP page-requirement (Was: [PATCH v2 net-next
> 0/8] dpaa2-eth: Introduce XDP support)
> 
> On Fri, 7 Dec 2018 18:07:49 +0000
> Ioana Ciocoi Radulescu <ruxandra.radulescu@nxp.com> wrote:
> 
> > Thanks a lot for the info, will look into this. Do you have any
> > pointers as to why the full page restriction exists in the first
> > place? Sorry if it's a dumb question, but I haven't found details on
> > this and I'd really like to understand it.
> 
> Hi Ioana,
> 
> I promised (offlist) that I would get back to you explaining the XDP
> page-requirement...
> 
> There are several reasons for XDP to require frames are backed by a
> page.  It started out with a focus on gaining speed via simplicity.
> 
> The overall requirement is: XDP frame in physical contigious memory
>  - which is a requirement from BPF Direct-Access, for validating correcness.
>  - Implying you cannot split packet data over several pages.
> 
> An important part of the page-requirement is to allow creating SKB's
> outside the driver code.  This happen today in both cpumap and veth
> (when doing XDP_REDIRECT).  And we need to control and limit the
> variations, to avoid having to handle all kind of SKB schemes.
> Specifically we need enough tailroom for the skb-shared-info.
> 
> In the beginning we had the requirement of: 1-page per XDP frame.
>  - Gave us a simplified memory model
>  - Allow us to not touch atomic refcnt on page (always 1)
>  - Fixed 256 bytes headroom
>  - This gave us a lot of tailroom, expanding tail was trivial.
> 
> Eventually ixgbe+i40e force us to use a split-page model, allowing two
> frames per page.
>  - This started to complicate memory model
>  - This unfortunately gave issue of unknown tailroom, which killed the
>    tailroom expand option.
>  - Changes XDP headroom to be variable (192 or 256 bytes)
> 
> E.g. I really want to allow bpf_xdp_adjust_tail() to *expand* the
> frame size, but after allowing the split-page model, we couldn't allow
> this easily.  And SKB alloc in cpumap/veth was also complicated by not
> knowing (implicit) xdp_frame "hard-end".  (We might have to extend
> xdp_buff with "data_hard_end").
> 

Thanks a lot, that's great info, especially for someone who hasn't followed
so closely xdp development from its beginning.

I'll look into updating the dpaa2-eth driver to use one page per frame and
see how that goes.

Thanks,
Ioana

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: Explaining the XDP page-requirement (Was: [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support)
  2019-01-07 10:34               ` Ioana Ciocoi Radulescu
@ 2019-01-07 10:45                 ` Ilias Apalodimas
  2019-01-07 10:49                   ` Ioana Ciocoi Radulescu
  0 siblings, 1 reply; 31+ messages in thread
From: Ilias Apalodimas @ 2019-01-07 10:45 UTC (permalink / raw)
  To: Ioana Ciocoi Radulescu
  Cc: Jesper Dangaard Brouer, netdev, davem, Ioana Ciornei, dsahern,
	Camelia Alexandra Groza

Hi Ioana,
> > > Thanks a lot for the info, will look into this. Do you have any
> > > pointers as to why the full page restriction exists in the first
> > > place? Sorry if it's a dumb question, but I haven't found details on
> > > this and I'd really like to understand it.
> > 
> > Hi Ioana,
> > 
> > I promised (offlist) that I would get back to you explaining the XDP
> > page-requirement...
> > 
> > There are several reasons for XDP to require frames are backed by a
> > page.  It started out with a focus on gaining speed via simplicity.
> > 
> > The overall requirement is: XDP frame in physical contigious memory
> >  - which is a requirement from BPF Direct-Access, for validating correcness.
> >  - Implying you cannot split packet data over several pages.
> > 
> > An important part of the page-requirement is to allow creating SKB's
> > outside the driver code.  This happen today in both cpumap and veth
> > (when doing XDP_REDIRECT).  And we need to control and limit the
> > variations, to avoid having to handle all kind of SKB schemes.
> > Specifically we need enough tailroom for the skb-shared-info.
> > 
> > In the beginning we had the requirement of: 1-page per XDP frame.
> >  - Gave us a simplified memory model
> >  - Allow us to not touch atomic refcnt on page (always 1)
> >  - Fixed 256 bytes headroom
> >  - This gave us a lot of tailroom, expanding tail was trivial.
> > 
> > Eventually ixgbe+i40e force us to use a split-page model, allowing two
> > frames per page.
> >  - This started to complicate memory model
> >  - This unfortunately gave issue of unknown tailroom, which killed the
> >    tailroom expand option.
> >  - Changes XDP headroom to be variable (192 or 256 bytes)
> > 
> > E.g. I really want to allow bpf_xdp_adjust_tail() to *expand* the
> > frame size, but after allowing the split-page model, we couldn't allow
> > this easily.  And SKB alloc in cpumap/veth was also complicated by not
> > knowing (implicit) xdp_frame "hard-end".  (We might have to extend
> > xdp_buff with "data_hard_end").
> > 
> 
> Thanks a lot, that's great info, especially for someone who hasn't followed
> so closely xdp development from its beginning.
> 
> I'll look into updating the dpaa2-eth driver to use one page per frame and
> see how that goes.

If you have time, we can discuss merging whatever hardware features are not
supported in the page_pool API and use that to allocate pages?

Regards
/Ilias

^ permalink raw reply	[flat|nested] 31+ messages in thread

* RE: Explaining the XDP page-requirement (Was: [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support)
  2019-01-07 10:45                 ` Ilias Apalodimas
@ 2019-01-07 10:49                   ` Ioana Ciocoi Radulescu
  2019-01-07 10:53                     ` Ilias Apalodimas
  0 siblings, 1 reply; 31+ messages in thread
From: Ioana Ciocoi Radulescu @ 2019-01-07 10:49 UTC (permalink / raw)
  To: Ilias Apalodimas
  Cc: Jesper Dangaard Brouer, netdev, davem, Ioana Ciornei, dsahern,
	Camelia Alexandra Groza

> -----Original Message-----
> From: Ilias Apalodimas <ilias.apalodimas@linaro.org>
> Sent: Monday, January 7, 2019 12:45 PM
> To: Ioana Ciocoi Radulescu <ruxandra.radulescu@nxp.com>
> Cc: Jesper Dangaard Brouer <brouer@redhat.com>;
> netdev@vger.kernel.org; davem@davemloft.net; Ioana Ciornei
> <ioana.ciornei@nxp.com>; dsahern@gmail.com; Camelia Alexandra Groza
> <camelia.groza@nxp.com>
> Subject: Re: Explaining the XDP page-requirement (Was: [PATCH v2 net-next
> 0/8] dpaa2-eth: Introduce XDP support)
> 
> Hi Ioana,
> > > > Thanks a lot for the info, will look into this. Do you have any
> > > > pointers as to why the full page restriction exists in the first
> > > > place? Sorry if it's a dumb question, but I haven't found details on
> > > > this and I'd really like to understand it.
> > >
> > > Hi Ioana,
> > >
> > > I promised (offlist) that I would get back to you explaining the XDP
> > > page-requirement...
> > >
> > > There are several reasons for XDP to require frames are backed by a
> > > page.  It started out with a focus on gaining speed via simplicity.
> > >
> > > The overall requirement is: XDP frame in physical contigious memory
> > >  - which is a requirement from BPF Direct-Access, for validating
> correcness.
> > >  - Implying you cannot split packet data over several pages.
> > >
> > > An important part of the page-requirement is to allow creating SKB's
> > > outside the driver code.  This happen today in both cpumap and veth
> > > (when doing XDP_REDIRECT).  And we need to control and limit the
> > > variations, to avoid having to handle all kind of SKB schemes.
> > > Specifically we need enough tailroom for the skb-shared-info.
> > >
> > > In the beginning we had the requirement of: 1-page per XDP frame.
> > >  - Gave us a simplified memory model
> > >  - Allow us to not touch atomic refcnt on page (always 1)
> > >  - Fixed 256 bytes headroom
> > >  - This gave us a lot of tailroom, expanding tail was trivial.
> > >
> > > Eventually ixgbe+i40e force us to use a split-page model, allowing two
> > > frames per page.
> > >  - This started to complicate memory model
> > >  - This unfortunately gave issue of unknown tailroom, which killed the
> > >    tailroom expand option.
> > >  - Changes XDP headroom to be variable (192 or 256 bytes)
> > >
> > > E.g. I really want to allow bpf_xdp_adjust_tail() to *expand* the
> > > frame size, but after allowing the split-page model, we couldn't allow
> > > this easily.  And SKB alloc in cpumap/veth was also complicated by not
> > > knowing (implicit) xdp_frame "hard-end".  (We might have to extend
> > > xdp_buff with "data_hard_end").
> > >
> >
> > Thanks a lot, that's great info, especially for someone who hasn't followed
> > so closely xdp development from its beginning.
> >
> > I'll look into updating the dpaa2-eth driver to use one page per frame and
> > see how that goes.
> 
> If you have time, we can discuss merging whatever hardware features are
> not
> supported in the page_pool API and use that to allocate pages?

Sure. I'd like to first transition to plain page allocations instead of
napi_alloc_frag() and validate that's ok (I've been meaning to try that for
a while now but haven't got around to it yet), and then we can explore
how that might be integrated in page_pool.

Thanks,
Ioana

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: Explaining the XDP page-requirement (Was: [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support)
  2019-01-07 10:49                   ` Ioana Ciocoi Radulescu
@ 2019-01-07 10:53                     ` Ilias Apalodimas
  0 siblings, 0 replies; 31+ messages in thread
From: Ilias Apalodimas @ 2019-01-07 10:53 UTC (permalink / raw)
  To: Ioana Ciocoi Radulescu
  Cc: Jesper Dangaard Brouer, netdev, davem, Ioana Ciornei, dsahern,
	Camelia Alexandra Groza

Hi Ioana,
> > > Thanks a lot, that's great info, especially for someone who hasn't followed
> > > so closely xdp development from its beginning.
> > >
> > > I'll look into updating the dpaa2-eth driver to use one page per frame and
> > > see how that goes.
> > 
> > If you have time, we can discuss merging whatever hardware features are
> > not
> > supported in the page_pool API and use that to allocate pages?
> 
> Sure. I'd like to first transition to plain page allocations instead of
> napi_alloc_frag() and validate that's ok (I've been meaning to try that for
> a while now but haven't got around to it yet), and then we can explore
> how that might be integrated in page_pool.

Great, ping us if you need anything

Thanks
/Ilias

^ permalink raw reply	[flat|nested] 31+ messages in thread

* RE: Explaining the XDP page-requirement (Was: [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support)
  2018-12-21 15:31             ` Explaining the XDP page-requirement (Was: [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support) Jesper Dangaard Brouer
  2019-01-07 10:34               ` Ioana Ciocoi Radulescu
@ 2019-01-09 14:22               ` Madalin-cristian Bucur
  2019-01-09 14:25                 ` Ilias Apalodimas
  1 sibling, 1 reply; 31+ messages in thread
From: Madalin-cristian Bucur @ 2019-01-09 14:22 UTC (permalink / raw)
  To: Jesper Dangaard Brouer, Ioana Ciocoi Radulescu
  Cc: Ilias Apalodimas, netdev, davem, Ioana Ciornei, dsahern,
	Camelia Alexandra Groza

> -----Original Message-----
> From: netdev-owner@vger.kernel.org <netdev-owner@vger.kernel.org> On
> Behalf Of Jesper Dangaard Brouer
> Sent: Friday, December 21, 2018 5:31 PM
> To: Ioana Ciocoi Radulescu <ruxandra.radulescu@nxp.com>
> Cc: Ilias Apalodimas <ilias.apalodimas@linaro.org>;
> netdev@vger.kernel.org; davem@davemloft.net; Ioana Ciornei
> <ioana.ciornei@nxp.com>; dsahern@gmail.com; Camelia Alexandra Groza
> <camelia.groza@nxp.com>; brouer@redhat.com
> Subject: Explaining the XDP page-requirement (Was: [PATCH v2 net-next 0/8]
> dpaa2-eth: Introduce XDP support)
> 
> On Fri, 7 Dec 2018 18:07:49 +0000
> Ioana Ciocoi Radulescu <ruxandra.radulescu@nxp.com> wrote:
> 
> > Thanks a lot for the info, will look into this. Do you have any
> > pointers as to why the full page restriction exists in the first
> > place? Sorry if it's a dumb question, but I haven't found details on
> > this and I'd really like to understand it.
> 
> Hi Ioana,
> 
> I promised (offlist) that I would get back to you explaining the XDP
> page-requirement...
> 
> There are several reasons for XDP to require frames are backed by a
> page.  It started out with a focus on gaining speed via simplicity.
> 
> The overall requirement is: XDP frame in physical contigious memory
>  - which is a requirement from BPF Direct-Access, for validating
> correcness.
>  - Implying you cannot split packet data over several pages.
> 
> An important part of the page-requirement is to allow creating SKB's
> outside the driver code.  This happen today in both cpumap and veth
> (when doing XDP_REDIRECT).  And we need to control and limit the
> variations, to avoid having to handle all kind of SKB schemes.
> Specifically we need enough tailroom for the skb-shared-info.
> 
> In the beginning we had the requirement of: 1-page per XDP frame.
>  - Gave us a simplified memory model
>  - Allow us to not touch atomic refcnt on page (always 1)
>  - Fixed 256 bytes headroom
>  - This gave us a lot of tailroom, expanding tail was trivial.
> 
> Eventually ixgbe+i40e force us to use a split-page model, allowing two
> frames per page.
>  - This started to complicate memory model
>  - This unfortunately gave issue of unknown tailroom, which killed the
>    tailroom expand option.
>  - Changes XDP headroom to be variable (192 or 256 bytes)

Hi Jesper,

is the split page memory model supported now (with two frames per page)?

Thanks,
Madalin

> E.g. I really want to allow bpf_xdp_adjust_tail() to *expand* the
> frame size, but after allowing the split-page model, we couldn't allow
> this easily.  And SKB alloc in cpumap/veth was also complicated by not
> knowing (implicit) xdp_frame "hard-end".  (We might have to extend
> xdp_buff with "data_hard_end").
> 
> --
> Best regards,
>   Jesper Dangaard Brouer
>   MSc.CS, Principal Kernel Engineer at Red Hat
>   LinkedIn:
> https://emea01.safelinks.protection.outlook.com/?url=http%3A%2F%2Fwww.link
> edin.com%2Fin%2Fbrouer&amp;data=02%7C01%7Cmadalin.bucur%40nxp.com%7C44c593
> 0f8a224fdd063208d66759613b%7C686ea1d3bc2b4c6fa92cd99c5c301635%7C0%7C0%7C63
> 6810030928918215&amp;sdata=PIdwIEvOAPlyWPScMjOdWiauOp2wAI7QXu9FNJ0SHzs%3D&
> amp;reserved=0

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: Explaining the XDP page-requirement (Was: [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support)
  2019-01-09 14:22               ` Madalin-cristian Bucur
@ 2019-01-09 14:25                 ` Ilias Apalodimas
  0 siblings, 0 replies; 31+ messages in thread
From: Ilias Apalodimas @ 2019-01-09 14:25 UTC (permalink / raw)
  To: Madalin-cristian Bucur
  Cc: Jesper Dangaard Brouer, Ioana Ciocoi Radulescu, netdev, davem,
	Ioana Ciornei, dsahern, Camelia Alexandra Groza

Hi Madalin,

> > > Thanks a lot for the info, will look into this. Do you have any
> > > pointers as to why the full page restriction exists in the first
> > > place? Sorry if it's a dumb question, but I haven't found details on
> > > this and I'd really like to understand it.
> > 
> > Hi Ioana,
> > 
> > I promised (offlist) that I would get back to you explaining the XDP
> > page-requirement...
> > 
> > There are several reasons for XDP to require frames are backed by a
> > page.  It started out with a focus on gaining speed via simplicity.
> > 
> > The overall requirement is: XDP frame in physical contigious memory
> >  - which is a requirement from BPF Direct-Access, for validating
> > correcness.
> >  - Implying you cannot split packet data over several pages.
> > 
> > An important part of the page-requirement is to allow creating SKB's
> > outside the driver code.  This happen today in both cpumap and veth
> > (when doing XDP_REDIRECT).  And we need to control and limit the
> > variations, to avoid having to handle all kind of SKB schemes.
> > Specifically we need enough tailroom for the skb-shared-info.
> > 
> > In the beginning we had the requirement of: 1-page per XDP frame.
> >  - Gave us a simplified memory model
> >  - Allow us to not touch atomic refcnt on page (always 1)
> >  - Fixed 256 bytes headroom
> >  - This gave us a lot of tailroom, expanding tail was trivial.
> > 
> > Eventually ixgbe+i40e force us to use a split-page model, allowing two
> > frames per page.
> >  - This started to complicate memory model
> >  - This unfortunately gave issue of unknown tailroom, which killed the
> >    tailroom expand option.
> >  - Changes XDP headroom to be variable (192 or 256 bytes)
> 
> Hi Jesper,
> 
> is the split page memory model supported now (with two frames per page)?

Yes, both Intel on their ixgbe and i40e driver and mellanox on mlx5 support
this.

Cheers
/Ilias

^ permalink raw reply	[flat|nested] 31+ messages in thread

end of thread, other threads:[~2019-01-09 14:25 UTC | newest]

Thread overview: 31+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-11-26 16:27 [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support Ioana Ciocoi Radulescu
2018-11-26 16:27 ` [PATCH v2 net-next 1/8] dpaa2-eth: Add basic " Ioana Ciocoi Radulescu
2018-11-27 15:45   ` Camelia Alexandra Groza
2018-11-28 16:11   ` David Ahern
2018-11-26 16:27 ` [PATCH v2 net-next 2/8] dpaa2-eth: Allow XDP header adjustments Ioana Ciocoi Radulescu
2018-11-26 16:27 ` [PATCH v2 net-next 4/8] dpaa2-eth: Release buffers back to pool on XDP_DROP Ioana Ciocoi Radulescu
2018-11-26 16:27 ` [PATCH v2 net-next 3/8] dpaa2-eth: Move function Ioana Ciocoi Radulescu
2018-11-26 16:27 ` [PATCH v2 net-next 5/8] dpaa2-eth: Map Rx buffers as bidirectional Ioana Ciocoi Radulescu
2018-11-26 16:27 ` [PATCH v2 net-next 6/8] dpaa2-eth: Add support for XDP_TX Ioana Ciocoi Radulescu
2018-11-26 16:27 ` [PATCH v2 net-next 7/8] dpaa2-eth: Cleanup channel stats Ioana Ciocoi Radulescu
2018-11-26 16:27 ` [PATCH v2 net-next 8/8] dpaa2-eth: Add xdp counters Ioana Ciocoi Radulescu
2018-11-28 16:11   ` David Ahern
2018-11-28  0:24 ` [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support David Miller
2018-11-28  9:18   ` Ioana Ciocoi Radulescu
2018-11-28 16:10     ` David Ahern
2018-11-28 18:57     ` David Miller
2018-12-05 15:45 ` Jesper Dangaard Brouer
2018-12-07 16:54   ` Ioana Ciocoi Radulescu
2018-12-07 17:20     ` Ilias Apalodimas
2018-12-07 17:42       ` Ioana Ciocoi Radulescu
2018-12-07 17:51         ` Ilias Apalodimas
2018-12-07 18:07           ` Ioana Ciocoi Radulescu
2018-12-13 17:43             ` Ioana Ciocoi Radulescu
2018-12-13 18:47               ` Ilias Apalodimas
2018-12-21 15:31             ` Explaining the XDP page-requirement (Was: [PATCH v2 net-next 0/8] dpaa2-eth: Introduce XDP support) Jesper Dangaard Brouer
2019-01-07 10:34               ` Ioana Ciocoi Radulescu
2019-01-07 10:45                 ` Ilias Apalodimas
2019-01-07 10:49                   ` Ioana Ciocoi Radulescu
2019-01-07 10:53                     ` Ilias Apalodimas
2019-01-09 14:22               ` Madalin-cristian Bucur
2019-01-09 14:25                 ` Ilias Apalodimas

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.