* [PATCH v2 0/2] dpaa2-eth: add XDP_REDIRECT support
From: Ioana Ciornei @ 2019-03-01 17:47 UTC
  To: netdev, davem
  Cc: Ioana Ciocoi Radulescu, brouer, ilias.apalodimas, toke, Ioana Ciornei

The first patch adds different software annotation types for Tx frames
depending on the frame type; the second adds basic XDP_REDIRECT support.

Changes in v2:
  - add missing xdp_do_flush_map() call

Ioana Radulescu (2):
  dpaa2-eth: Add software annotation types
  dpaa2-eth: add XDP_REDIRECT support

 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c   | 209 ++++++++++++++++++---
 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h   |  38 +++-
 .../net/ethernet/freescale/dpaa2/dpaa2-ethtool.c   |   1 +
 3 files changed, 214 insertions(+), 34 deletions(-)

-- 
1.9.1



* [PATCH v2 1/2] dpaa2-eth: Add software annotation types
From: Ioana Ciornei @ 2019-03-01 17:47 UTC
  To: netdev, davem
  Cc: Ioana Ciocoi Radulescu, brouer, ilias.apalodimas, toke, Ioana Ciornei

From: Ioana Radulescu <ruxandra.radulescu@nxp.com>

We write different metadata in the software annotation area of Tx
frames, depending on the frame type. Make this explicit by introducing
a type field and separate structures for single-buffer and
scatter-gather frames.

Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
---
Changes in v2:
  - none

 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 38 +++++++++++++-----------
 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h | 24 ++++++++++++---
 2 files changed, 40 insertions(+), 22 deletions(-)

diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 87777b0..3acfd8c 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -571,10 +571,11 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv,
 	 * all of them on Tx Conf.
 	 */
 	swa = (struct dpaa2_eth_swa *)sgt_buf;
-	swa->skb = skb;
-	swa->scl = scl;
-	swa->num_sg = num_sg;
-	swa->sgt_size = sgt_buf_size;
+	swa->type = DPAA2_ETH_SWA_SG;
+	swa->sg.skb = skb;
+	swa->sg.scl = scl;
+	swa->sg.num_sg = num_sg;
+	swa->sg.sgt_size = sgt_buf_size;
 
 	/* Separately map the SGT buffer */
 	addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
@@ -609,7 +610,7 @@ static int build_single_fd(struct dpaa2_eth_priv *priv,
 {
 	struct device *dev = priv->net_dev->dev.parent;
 	u8 *buffer_start, *aligned_start;
-	struct sk_buff **skbh;
+	struct dpaa2_eth_swa *swa;
 	dma_addr_t addr;
 
 	buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb);
@@ -626,8 +627,9 @@ static int build_single_fd(struct dpaa2_eth_priv *priv,
 	 * (in the private data area) such that we can release it
 	 * on Tx confirm
 	 */
-	skbh = (struct sk_buff **)buffer_start;
-	*skbh = skb;
+	swa = (struct dpaa2_eth_swa *)buffer_start;
+	swa->type = DPAA2_ETH_SWA_SINGLE;
+	swa->single.skb = skb;
 
 	addr = dma_map_single(dev, buffer_start,
 			      skb_tail_pointer(skb) - buffer_start,
@@ -659,17 +661,17 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv,
 {
 	struct device *dev = priv->net_dev->dev.parent;
 	dma_addr_t fd_addr;
-	struct sk_buff **skbh, *skb;
+	struct sk_buff *skb;
 	unsigned char *buffer_start;
 	struct dpaa2_eth_swa *swa;
 	u8 fd_format = dpaa2_fd_get_format(fd);
 
 	fd_addr = dpaa2_fd_get_addr(fd);
-	skbh = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
+	buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
+	swa = (struct dpaa2_eth_swa *)buffer_start;
 
 	if (fd_format == dpaa2_fd_single) {
-		skb = *skbh;
-		buffer_start = (unsigned char *)skbh;
+		skb = swa->single.skb;
 		/* Accessing the skb buffer is safe before dma unmap, because
 		 * we didn't map the actual skb shell.
 		 */
@@ -677,15 +679,15 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv,
 				 skb_tail_pointer(skb) - buffer_start,
 				 DMA_BIDIRECTIONAL);
 	} else if (fd_format == dpaa2_fd_sg) {
-		swa = (struct dpaa2_eth_swa *)skbh;
-		skb = swa->skb;
+		skb = swa->sg.skb;
 
 		/* Unmap the scatterlist */
-		dma_unmap_sg(dev, swa->scl, swa->num_sg, DMA_BIDIRECTIONAL);
-		kfree(swa->scl);
+		dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg,
+			     DMA_BIDIRECTIONAL);
+		kfree(swa->sg.scl);
 
 		/* Unmap the SGT buffer */
-		dma_unmap_single(dev, fd_addr, swa->sgt_size,
+		dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
 				 DMA_BIDIRECTIONAL);
 	} else {
 		netdev_dbg(priv->net_dev, "Invalid FD format\n");
@@ -695,7 +697,7 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv,
 	/* Get the timestamp value */
 	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
 		struct skb_shared_hwtstamps shhwtstamps;
-		__le64 *ts = dpaa2_get_ts(skbh, true);
+		__le64 *ts = dpaa2_get_ts(buffer_start, true);
 		u64 ns;
 
 		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
@@ -707,7 +709,7 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv,
 
 	/* Free SGT buffer allocated on tx */
 	if (fd_format != dpaa2_fd_single)
-		skb_free_frag(skbh);
+		skb_free_frag(buffer_start);
 
 	/* Move on with skb release */
 	napi_consume_skb(skb, in_napi);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 9510928..423976d 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -89,12 +89,28 @@
  */
 #define DPAA2_ETH_SWA_SIZE		64
 
+/* We store different information in the software annotation area of a Tx frame
+ * based on what type of frame it is
+ */
+enum dpaa2_eth_swa_type {
+	DPAA2_ETH_SWA_SINGLE,
+	DPAA2_ETH_SWA_SG,
+};
+
 /* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */
 struct dpaa2_eth_swa {
-	struct sk_buff *skb;
-	struct scatterlist *scl;
-	int num_sg;
-	int sgt_size;
+	enum dpaa2_eth_swa_type type;
+	union {
+		struct {
+			struct sk_buff *skb;
+		} single;
+		struct {
+			struct sk_buff *skb;
+			struct scatterlist *scl;
+			int num_sg;
+			int sgt_size;
+		} sg;
+	};
 };
 
 /* Annotation valid bits in FD FRC */
-- 
1.9.1


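A brief aside on the invariant noted in the patch: the "Must keep this
struct smaller than DPAA2_ETH_SWA_SIZE" comment can also be enforced at
build time. A minimal sketch, not part of the series, assuming it is
placed inside an existing init or probe function:

	/* Fail the build if the swa union ever outgrows the reserved
	 * software annotation area; BUILD_BUG_ON() comes from
	 * <linux/build_bug.h> and must appear inside a function.
	 */
	BUILD_BUG_ON(sizeof(struct dpaa2_eth_swa) > DPAA2_ETH_SWA_SIZE);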

* [PATCH v2 2/2] dpaa2-eth: add XDP_REDIRECT support
From: Ioana Ciornei @ 2019-03-01 17:47 UTC
  To: netdev, davem
  Cc: Ioana Ciocoi Radulescu, brouer, ilias.apalodimas, toke, Ioana Ciornei

From: Ioana Radulescu <ruxandra.radulescu@nxp.com>

Implement support for the XDP_REDIRECT action.

The redirected frame is transmitted and confirmed on the regular Tx/Tx
conf queues. The frame is marked with the "XDP" type in its software
annotation, since it requires special treatment.

We don't have good hardware support for Tx batching, so the
XDP_XMIT_FLUSH flag makes no difference for now; ndo_xdp_xmit
performs the actual Tx operation on the spot.

Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
---
Changes in v2:
  - add missing xdp_do_flush_map() call

 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c   | 175 +++++++++++++++++++--
 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h   |  14 ++
 .../net/ethernet/freescale/dpaa2/dpaa2-ethtool.c   |   1 +
 3 files changed, 176 insertions(+), 14 deletions(-)

diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 3acfd8c..81085e7 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -296,6 +296,7 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
 	xdp.data_end = xdp.data + dpaa2_fd_get_len(fd);
 	xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
 	xdp_set_data_meta_invalid(&xdp);
+	xdp.rxq = &ch->xdp_rxq;
 
 	xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
 
@@ -328,8 +329,20 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
 		xdp_release_buf(priv, ch, addr);
 		ch->stats.xdp_drop++;
 		break;
+	case XDP_REDIRECT:
+		dma_unmap_page(priv->net_dev->dev.parent, addr,
+			       DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL);
+		ch->buf_count--;
+		xdp.data_hard_start = vaddr;
+		err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
+		if (unlikely(err))
+			ch->stats.xdp_drop++;
+		else
+			ch->stats.xdp_redirect++;
+		break;
 	}
 
+	ch->xdp.res |= xdp_act;
 out:
 	rcu_read_unlock();
 	return xdp_act;
@@ -657,27 +670,35 @@ static int build_single_fd(struct dpaa2_eth_priv *priv,
  * dpaa2_eth_tx().
  */
 static void free_tx_fd(const struct dpaa2_eth_priv *priv,
+		       struct dpaa2_eth_fq *fq,
 		       const struct dpaa2_fd *fd, bool in_napi)
 {
 	struct device *dev = priv->net_dev->dev.parent;
 	dma_addr_t fd_addr;
-	struct sk_buff *skb;
+	struct sk_buff *skb = NULL;
 	unsigned char *buffer_start;
 	struct dpaa2_eth_swa *swa;
 	u8 fd_format = dpaa2_fd_get_format(fd);
+	u32 fd_len = dpaa2_fd_get_len(fd);
 
 	fd_addr = dpaa2_fd_get_addr(fd);
 	buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
 	swa = (struct dpaa2_eth_swa *)buffer_start;
 
 	if (fd_format == dpaa2_fd_single) {
-		skb = swa->single.skb;
-		/* Accessing the skb buffer is safe before dma unmap, because
-		 * we didn't map the actual skb shell.
-		 */
-		dma_unmap_single(dev, fd_addr,
-				 skb_tail_pointer(skb) - buffer_start,
-				 DMA_BIDIRECTIONAL);
+		if (swa->type == DPAA2_ETH_SWA_SINGLE) {
+			skb = swa->single.skb;
+			/* Accessing the skb buffer is safe before dma unmap,
+			 * because we didn't map the actual skb shell.
+			 */
+			dma_unmap_single(dev, fd_addr,
+					 skb_tail_pointer(skb) - buffer_start,
+					 DMA_BIDIRECTIONAL);
+		} else {
+			WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP, "Wrong SWA type");
+			dma_unmap_single(dev, fd_addr, swa->xdp.dma_size,
+					 DMA_BIDIRECTIONAL);
+		}
 	} else if (fd_format == dpaa2_fd_sg) {
 		skb = swa->sg.skb;
 
@@ -694,6 +715,16 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv,
 		return;
 	}
 
+	if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) {
+		fq->dq_frames++;
+		fq->dq_bytes += fd_len;
+	}
+
+	if (swa->type == DPAA2_ETH_SWA_XDP) {
+		xdp_return_frame(swa->xdp.xdpf);
+		return;
+	}
+
 	/* Get the timestamp value */
 	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
 		struct skb_shared_hwtstamps shhwtstamps;
@@ -793,7 +824,7 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
 	if (unlikely(err < 0)) {
 		percpu_stats->tx_errors++;
 		/* Clean up everything, including freeing the skb */
-		free_tx_fd(priv, &fd, false);
+		free_tx_fd(priv, fq, &fd, false);
 	} else {
 		fd_len = dpaa2_fd_get_len(&fd);
 		percpu_stats->tx_packets++;
@@ -830,12 +861,9 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
 	percpu_extras->tx_conf_frames++;
 	percpu_extras->tx_conf_bytes += fd_len;
 
-	fq->dq_frames++;
-	fq->dq_bytes += fd_len;
-
 	/* Check frame errors in the FD field */
 	fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
-	free_tx_fd(priv, fd, true);
+	free_tx_fd(priv, fq, fd, true);
 
 	if (likely(!fd_errors))
 		return;
@@ -1083,6 +1111,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
 	int err;
 
 	ch = container_of(napi, struct dpaa2_eth_channel, napi);
+	ch->xdp.res = 0;
 	priv = ch->priv;
 
 	do {
@@ -1128,7 +1157,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
 	work_done = max(rx_cleaned, 1);
 
 out:
-	if (txc_fq) {
+	if (txc_fq && txc_fq->dq_frames) {
 		nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
 		netdev_tx_completed_queue(nq, txc_fq->dq_frames,
 					  txc_fq->dq_bytes);
@@ -1136,6 +1165,9 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
 		txc_fq->dq_bytes = 0;
 	}
 
+	if (ch->xdp.res & XDP_REDIRECT)
+		xdp_do_flush_map();
+
 	return work_done;
 }
 
@@ -1730,6 +1762,105 @@ static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
 	return 0;
 }
 
+static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev,
+				    struct xdp_frame *xdpf)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	struct device *dev = net_dev->dev.parent;
+	struct rtnl_link_stats64 *percpu_stats;
+	struct dpaa2_eth_drv_stats *percpu_extras;
+	unsigned int needed_headroom;
+	struct dpaa2_eth_swa *swa;
+	struct dpaa2_eth_fq *fq;
+	struct dpaa2_fd fd;
+	void *buffer_start, *aligned_start;
+	dma_addr_t addr;
+	int err, i;
+
+	/* We require a minimum headroom to be able to transmit the frame.
+	 * Otherwise return an error and let the original net_device handle it
+	 */
+	needed_headroom = dpaa2_eth_needed_headroom(priv, NULL);
+	if (xdpf->headroom < needed_headroom)
+		return -EINVAL;
+
+	percpu_stats = this_cpu_ptr(priv->percpu_stats);
+	percpu_extras = this_cpu_ptr(priv->percpu_extras);
+
+	/* Setup the FD fields */
+	memset(&fd, 0, sizeof(fd));
+
+	/* Align FD address, if possible */
+	buffer_start = xdpf->data - needed_headroom;
+	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
+				  DPAA2_ETH_TX_BUF_ALIGN);
+	if (aligned_start >= xdpf->data - xdpf->headroom)
+		buffer_start = aligned_start;
+
+	swa = (struct dpaa2_eth_swa *)buffer_start;
+	/* fill in necessary fields here */
+	swa->type = DPAA2_ETH_SWA_XDP;
+	swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start;
+	swa->xdp.xdpf = xdpf;
+
+	addr = dma_map_single(dev, buffer_start,
+			      swa->xdp.dma_size,
+			      DMA_BIDIRECTIONAL);
+	if (unlikely(dma_mapping_error(dev, addr))) {
+		percpu_stats->tx_dropped++;
+		return -ENOMEM;
+	}
+
+	dpaa2_fd_set_addr(&fd, addr);
+	dpaa2_fd_set_offset(&fd, xdpf->data - buffer_start);
+	dpaa2_fd_set_len(&fd, xdpf->len);
+	dpaa2_fd_set_format(&fd, dpaa2_fd_single);
+	dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA);
+
+	fq = &priv->fq[smp_processor_id()];
+	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
+		err = priv->enqueue(priv, fq, &fd, 0);
+		if (err != -EBUSY)
+			break;
+	}
+	percpu_extras->tx_portal_busy += i;
+	if (unlikely(err < 0)) {
+		percpu_stats->tx_errors++;
+		/* let the Rx device handle the cleanup */
+		return err;
+	}
+
+	percpu_stats->tx_packets++;
+	percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
+
+	return 0;
+}
+
+static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
+			      struct xdp_frame **frames, u32 flags)
+{
+	int drops = 0;
+	int i, err;
+
+	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+		return -EINVAL;
+
+	if (!netif_running(net_dev))
+		return -ENETDOWN;
+
+	for (i = 0; i < n; i++) {
+		struct xdp_frame *xdpf = frames[i];
+
+		err = dpaa2_eth_xdp_xmit_frame(net_dev, xdpf);
+		if (err) {
+			xdp_return_frame_rx_napi(xdpf);
+			drops++;
+		}
+	}
+
+	return n - drops;
+}
+
 static const struct net_device_ops dpaa2_eth_ops = {
 	.ndo_open = dpaa2_eth_open,
 	.ndo_start_xmit = dpaa2_eth_tx,
@@ -1741,6 +1872,7 @@ static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
 	.ndo_do_ioctl = dpaa2_eth_ioctl,
 	.ndo_change_mtu = dpaa2_eth_change_mtu,
 	.ndo_bpf = dpaa2_eth_xdp,
+	.ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
 };
 
 static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
@@ -2353,6 +2485,21 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv,
 		return err;
 	}
 
+	/* xdp_rxq setup */
+	err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev,
+			       fq->flowid);
+	if (err) {
+		dev_err(dev, "xdp_rxq_info_reg failed\n");
+		return err;
+	}
+
+	err = xdp_rxq_info_reg_mem_model(&fq->channel->xdp_rxq,
+					 MEM_TYPE_PAGE_ORDER0, NULL);
+	if (err) {
+		dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n");
+		return err;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 423976d..7879622 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -95,6 +95,7 @@
 enum dpaa2_eth_swa_type {
 	DPAA2_ETH_SWA_SINGLE,
 	DPAA2_ETH_SWA_SG,
+	DPAA2_ETH_SWA_XDP,
 };
 
 /* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */
@@ -110,6 +111,10 @@ struct dpaa2_eth_swa {
 			int num_sg;
 			int sgt_size;
 		} sg;
+		struct {
+			int dma_size;
+			struct xdp_frame *xdpf;
+		} xdp;
 	};
 };
 
@@ -273,6 +278,7 @@ struct dpaa2_eth_ch_stats {
 	__u64 xdp_drop;
 	__u64 xdp_tx;
 	__u64 xdp_tx_err;
+	__u64 xdp_redirect;
 };
 
 /* Maximum number of queues associated with a DPNI */
@@ -312,6 +318,7 @@ struct dpaa2_eth_ch_xdp {
 	struct bpf_prog *prog;
 	u64 drop_bufs[DPAA2_ETH_BUFS_PER_CMD];
 	int drop_cnt;
+	unsigned int res;
 };
 
 struct dpaa2_eth_channel {
@@ -326,6 +333,7 @@ struct dpaa2_eth_channel {
 	int buf_count;
 	struct dpaa2_eth_ch_stats stats;
 	struct dpaa2_eth_ch_xdp xdp;
+	struct xdp_rxq_info xdp_rxq;
 };
 
 struct dpaa2_eth_dist_fields {
@@ -446,6 +454,12 @@ unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv,
 {
 	unsigned int headroom = DPAA2_ETH_SWA_SIZE;
 
+	/* If we don't have an skb (e.g. XDP buffer), we only need space for
+	 * the software annotation area
+	 */
+	if (!skb)
+		return headroom;
+
 	/* For non-linear skbs we have no headroom requirement, as we build a
 	 * SG frame with a newly allocated SGT buffer
 	 */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index a7389e7..591dfcf 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -48,6 +48,7 @@
 	"[drv] xdp drop",
 	"[drv] xdp tx",
 	"[drv] xdp tx errors",
+	"[drv] xdp redirect",
 	/* FQ stats */
 	"[qbman] rx pending frames",
 	"[qbman] rx pending bytes",
-- 
1.9.1


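As a usage note (not part of the series): the Rx-side redirect path
above is exercised by any XDP program attached to the dpaa2 interface
that returns XDP_REDIRECT, and the new ndo_xdp_xmit path by programs
redirecting into the dpaa2 port. A minimal restricted-C sketch in the
old samples/bpf style; EGRESS_IFINDEX is a hypothetical placeholder:

	#include <linux/bpf.h>

	#define SEC(NAME) __attribute__((section(NAME), used))

	/* Classic samples/bpf-style helper declaration; bpf_redirect()
	 * returns XDP_REDIRECT on success.
	 */
	static int (*bpf_redirect)(int ifindex, int flags) =
		(void *) BPF_FUNC_redirect;

	#define EGRESS_IFINDEX 4	/* placeholder: real egress ifindex */

	SEC("xdp")
	int xdp_redirect_all(struct xdp_md *ctx)
	{
		/* Plain ifindex redirect; with bpf_redirect_map(),
		 * frames may be batched until the driver calls
		 * xdp_do_flush_map() at the end of its NAPI poll (the
		 * call added in v2 of this series).
		 */
		return bpf_redirect(EGRESS_IFINDEX, 0);
	}

	char _license[] SEC("license") = "GPL";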

* Re: [PATCH v2 0/2] dpaa2-eth: add XDP_REDIRECT support
From: David Miller @ 2019-03-04  4:41 UTC
  To: ioana.ciornei; +Cc: netdev, ruxandra.radulescu, brouer, ilias.apalodimas, toke

From: Ioana Ciornei <ioana.ciornei@nxp.com>
Date: Fri, 1 Mar 2019 17:47:23 +0000

> The first patch adds different software annotation types for Tx frames
> depending on the frame type; the second adds basic XDP_REDIRECT support.
> 
> Changes in v2:
>   - add missing xdp_do_flush_map() call

Series applied, thanks.


* Re: [PATCH v2 2/2] dpaa2-eth: add XDP_REDIRECT support
From: Jesper Dangaard Brouer @ 2019-03-04 12:29 UTC
  To: Ioana Ciornei
  Cc: netdev, davem, Ioana Ciocoi Radulescu, ilias.apalodimas, toke, brouer

On Fri, 1 Mar 2019 17:47:24 +0000
Ioana Ciornei <ioana.ciornei@nxp.com> wrote:

> +static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev,
> +				    struct xdp_frame *xdpf)
> +{
> +	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
> +	struct device *dev = net_dev->dev.parent;
> +	struct rtnl_link_stats64 *percpu_stats;
> +	struct dpaa2_eth_drv_stats *percpu_extras;
> +	unsigned int needed_headroom;
> +	struct dpaa2_eth_swa *swa;
> +	struct dpaa2_eth_fq *fq;
> +	struct dpaa2_fd fd;
> +	void *buffer_start, *aligned_start;
> +	dma_addr_t addr;
> +	int err, i;
> +
> +	/* We require a minimum headroom to be able to transmit the frame.
> +	 * Otherwise return an error and let the original net_device handle it
> +	 */
> +	needed_headroom = dpaa2_eth_needed_headroom(priv, NULL);
> +	if (xdpf->headroom < needed_headroom)
> +		return -EINVAL;
> +
> +	percpu_stats = this_cpu_ptr(priv->percpu_stats);
> +	percpu_extras = this_cpu_ptr(priv->percpu_extras);
> +
> +	/* Setup the FD fields */
> +	memset(&fd, 0, sizeof(fd));
> +
> +	/* Align FD address, if possible */
> +	buffer_start = xdpf->data - needed_headroom;
> +	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
> +				  DPAA2_ETH_TX_BUF_ALIGN);
> +	if (aligned_start >= xdpf->data - xdpf->headroom)
> +		buffer_start = aligned_start;
> +
> +	swa = (struct dpaa2_eth_swa *)buffer_start;
> +	/* fill in necessary fields here */
> +	swa->type = DPAA2_ETH_SWA_XDP;
> +	swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start;
> +	swa->xdp.xdpf = xdpf;
> +
> +	addr = dma_map_single(dev, buffer_start,
> +			      swa->xdp.dma_size,
> +			      DMA_BIDIRECTIONAL);
> +	if (unlikely(dma_mapping_error(dev, addr))) {
> +		percpu_stats->tx_dropped++;
> +		return -ENOMEM;
> +	}
> +
> +	dpaa2_fd_set_addr(&fd, addr);
> +	dpaa2_fd_set_offset(&fd, xdpf->data - buffer_start);
> +	dpaa2_fd_set_len(&fd, xdpf->len);
> +	dpaa2_fd_set_format(&fd, dpaa2_fd_single);
> +	dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA);
> +
> +	fq = &priv->fq[smp_processor_id()];

Is it guaranteed that you have one FQ per CPU in the system?

> +	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
> +		err = priv->enqueue(priv, fq, &fd, 0);
> +		if (err != -EBUSY)
> +			break;


-- 
Best regards,
  Jesper Dangaard Brouer
  MSc.CS, Principal Kernel Engineer at Red Hat
  LinkedIn: http://www.linkedin.com/in/brouer


* RE: [PATCH v2 2/2] dpaa2-eth: add XDP_REDIRECT support
From: Ioana Ciocoi Radulescu @ 2019-03-04 12:56 UTC
  To: Jesper Dangaard Brouer, Ioana Ciornei
  Cc: netdev, davem, ilias.apalodimas, toke

> -----Original Message-----
> From: Jesper Dangaard Brouer <brouer@redhat.com>
> Sent: Monday, March 4, 2019 2:30 PM
> To: Ioana Ciornei <ioana.ciornei@nxp.com>
> Cc: netdev@vger.kernel.org; davem@davemloft.net; Ioana Ciocoi Radulescu
> <ruxandra.radulescu@nxp.com>; ilias.apalodimas@linaro.org;
> toke@redhat.com; brouer@redhat.com
> Subject: Re: [PATCH v2 2/2] dpaa2-eth: add XDP_REDIRECT support
> 
> On Fri, 1 Mar 2019 17:47:24 +0000
> Ioana Ciornei <ioana.ciornei@nxp.com> wrote:
> 
> > +static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev,
> > +				    struct xdp_frame *xdpf)
> > +{
> > +	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
> > +	struct device *dev = net_dev->dev.parent;
> > +	struct rtnl_link_stats64 *percpu_stats;
> > +	struct dpaa2_eth_drv_stats *percpu_extras;
> > +	unsigned int needed_headroom;
> > +	struct dpaa2_eth_swa *swa;
> > +	struct dpaa2_eth_fq *fq;
> > +	struct dpaa2_fd fd;
> > +	void *buffer_start, *aligned_start;
> > +	dma_addr_t addr;
> > +	int err, i;
> > +
> > +	/* We require a minimum headroom to be able to transmit the frame.
> > +	 * Otherwise return an error and let the original net_device handle it
> > +	 */
> > +	needed_headroom = dpaa2_eth_needed_headroom(priv, NULL);
> > +	if (xdpf->headroom < needed_headroom)
> > +		return -EINVAL;
> > +
> > +	percpu_stats = this_cpu_ptr(priv->percpu_stats);
> > +	percpu_extras = this_cpu_ptr(priv->percpu_extras);
> > +
> > +	/* Setup the FD fields */
> > +	memset(&fd, 0, sizeof(fd));
> > +
> > +	/* Align FD address, if possible */
> > +	buffer_start = xdpf->data - needed_headroom;
> > +	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
> > +				  DPAA2_ETH_TX_BUF_ALIGN);
> > +	if (aligned_start >= xdpf->data - xdpf->headroom)
> > +		buffer_start = aligned_start;
> > +
> > +	swa = (struct dpaa2_eth_swa *)buffer_start;
> > +	/* fill in necessary fields here */
> > +	swa->type = DPAA2_ETH_SWA_XDP;
> > +	swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start;
> > +	swa->xdp.xdpf = xdpf;
> > +
> > +	addr = dma_map_single(dev, buffer_start,
> > +			      swa->xdp.dma_size,
> > +			      DMA_BIDIRECTIONAL);
> > +	if (unlikely(dma_mapping_error(dev, addr))) {
> > +		percpu_stats->tx_dropped++;
> > +		return -ENOMEM;
> > +	}
> > +
> > +	dpaa2_fd_set_addr(&fd, addr);
> > +	dpaa2_fd_set_offset(&fd, xdpf->data - buffer_start);
> > +	dpaa2_fd_set_len(&fd, xdpf->len);
> > +	dpaa2_fd_set_format(&fd, dpaa2_fd_single);
> > +	dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA);
> > +
> > +	fq = &priv->fq[smp_processor_id()];
> 
> Is it guaranteed that you have one FQ per CPU in the system?

Good catch.
We are guaranteed not to have more than one FQ per CPU, but
having fewer queues than CPUs on an interface is a valid (albeit
suboptimal) configuration.

We'll send a fix for this once net-next reopens.

Thanks,
Ioana

> 
> > +	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
> > +		err = priv->enqueue(priv, fq, &fd, 0);
> > +		if (err != -EBUSY)
> > +			break;
> 
> 


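For reference, the fix promised above would plausibly fold the CPU id
onto the number of configured queues rather than indexing priv->fq by
raw CPU id. A minimal sketch (an assumption about the eventual patch,
using the driver's existing dpaa2_eth_queue_count() helper):

	/* Stay within bounds when the interface has fewer Tx queues
	 * than the system has CPUs.
	 */
	unsigned int queue_id;

	queue_id = smp_processor_id() % dpaa2_eth_queue_count(priv);
	fq = &priv->fq[queue_id];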
