* [PATCH net-next v7 1/2] xen networking: add basic XDP support for xen-netfront
@ 2020-05-04  8:37 Denis Kirjanov
  2020-05-04  8:37 ` [PATCH net-next v7 2/2] xen networking: add XDP offset adjustment to xen-netback Denis Kirjanov
  2020-05-05  8:33 ` [PATCH net-next v7 1/2] xen networking: add basic XDP support for xen-netfront Jesper Dangaard Brouer
From: Denis Kirjanov @ 2020-05-04  8:37 UTC (permalink / raw)
  To: netdev; +Cc: jgross, wei.liu, paul, ilias.apalodimas

The patch adds basic XDP processing to the xen-netfront driver.

We run an XDP program on each RX response received from the netback
driver. We also request xen-netback to adjust the data offset so that
bpf_xdp_adjust_head() has headroom available for custom headers.

Synchronization between the frontend and backend parts is done via
xenbus state switching:
Reconfiguring -> Reconfigured -> Connected

The UDP packet drop rate with an XDP program attached is around 310 kpps
(driven by ./pktgen_sample04_many_flows.sh), compared to around 160 kpps
without the patch.
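
A minimal XDP program of the kind used for the drop test could look
like the following (illustrative sketch only, not part of this patch):

/* drop every packet as early as possible (test program sketch) */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_drop_all(struct xdp_md *ctx)
{
	return XDP_DROP;
}

char _license[] SEC("license") = "GPL";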

v7:
- use page_pool_dev_alloc_pages() on page allocation
- remove the leftover break statement from netback_changed

v6:
- added the missing SOB line
- fixed subject

v5:
- split netfront/netback changes
- added a sync point between backend/frontend on switching to XDP
- added pagepool API

v4:
- added verbose patch description
- don't expose the XDP headroom offset to the domU guest
- add a modparam to netback to toggle XDP offset
- don't process jumbo frames for now

v3:
- added XDP_TX support (tested with xdping echoserver)
- added XDP_REDIRECT support (tested with modified xdp_redirect_kern)
- moved xdp negotiation to xen-netback

v2:
- avoid data copying while passing to XDP
- tell xen-netback that we need the headroom space

Signed-off-by: Denis Kirjanov <denis.kirjanov@suse.com>
---
 drivers/net/xen-netfront.c | 300 ++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 296 insertions(+), 4 deletions(-)

diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 482c6c8..7be8ee6 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -44,6 +44,9 @@
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <net/ip.h>
+#include <linux/bpf.h>
+#include <net/page_pool.h>
+#include <linux/bpf_trace.h>
 
 #include <xen/xen.h>
 #include <xen/xenbus.h>
@@ -102,6 +105,8 @@ struct netfront_queue {
 	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
 	struct netfront_info *info;
 
+	struct bpf_prog __rcu *xdp_prog;
+
 	struct napi_struct napi;
 
 	/* Split event channels support, tx_* == rx_* when using
@@ -144,6 +149,9 @@ struct netfront_queue {
 	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
 	grant_ref_t gref_rx_head;
 	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
+
+	struct page_pool *page_pool;
+	struct xdp_rxq_info xdp_rxq;
 };
 
 struct netfront_info {
@@ -159,6 +167,8 @@ struct netfront_info {
 	struct netfront_stats __percpu *rx_stats;
 	struct netfront_stats __percpu *tx_stats;
 
+	bool netback_has_xdp_headroom;
+
 	atomic_t rx_gso_checksum_fixup;
 };
 
@@ -167,6 +177,9 @@ struct netfront_rx_info {
 	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
 };
 
+static int xennet_xdp_xmit(struct net_device *dev, int n,
+			   struct xdp_frame **frames, u32 flags);
+
 static void skb_entry_set_link(union skb_entry *list, unsigned short id)
 {
 	list->link = id;
@@ -265,8 +278,8 @@ static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
 	if (unlikely(!skb))
 		return NULL;
 
-	page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
-	if (!page) {
+	page = page_pool_dev_alloc_pages(queue->page_pool);
+	if (unlikely(!page)) {
 		kfree_skb(skb);
 		return NULL;
 	}
@@ -778,6 +791,53 @@ static int xennet_get_extras(struct netfront_queue *queue,
 	return err;
 }
 
+u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
+		   struct xen_netif_rx_response *rx, struct bpf_prog *prog,
+		   struct xdp_buff *xdp)
+{
+	struct xdp_frame *xdpf;
+	u32 len = rx->status;
+	u32 act = XDP_PASS;
+	int err;
+
+	xdp->data_hard_start = page_address(pdata);
+	xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
+	xdp_set_data_meta_invalid(xdp);
+	xdp->data_end = xdp->data + len;
+	xdp->rxq = &queue->xdp_rxq;
+	xdp->handle = 0;
+
+	act = bpf_prog_run_xdp(prog, xdp);
+	switch (act) {
+	case XDP_TX:
+		get_page(pdata);
+		xdpf = convert_to_xdp_frame(xdp);
+		err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0);
+		if (unlikely(err < 0))
+			trace_xdp_exception(queue->info->netdev, prog, act);
+		break;
+	case XDP_REDIRECT:
+		get_page(pdata);
+		err = xdp_do_redirect(queue->info->netdev, xdp, prog);
+		if (unlikely(err))
+			trace_xdp_exception(queue->info->netdev, prog, act);
+		xdp_do_flush();
+		break;
+	case XDP_PASS:
+	case XDP_DROP:
+		break;
+
+	case XDP_ABORTED:
+		trace_xdp_exception(queue->info->netdev, prog, act);
+		break;
+
+	default:
+		bpf_warn_invalid_xdp_action(act);
+	}
+
+	return act;
+}
+
 static int xennet_get_responses(struct netfront_queue *queue,
 				struct netfront_rx_info *rinfo, RING_IDX rp,
 				struct sk_buff_head *list)
@@ -792,6 +852,9 @@ static int xennet_get_responses(struct netfront_queue *queue,
 	int slots = 1;
 	int err = 0;
 	unsigned long ret;
+	struct bpf_prog *xdp_prog;
+	struct xdp_buff xdp;
+	u32 verdict;
 
 	if (rx->flags & XEN_NETRXF_extra_info) {
 		err = xennet_get_extras(queue, extras, rp);
@@ -827,9 +890,20 @@ static int xennet_get_responses(struct netfront_queue *queue,
 
 		gnttab_release_grant_reference(&queue->gref_rx_head, ref);
 
-		__skb_queue_tail(list, skb);
-
+		rcu_read_lock();
+		xdp_prog = rcu_dereference(queue->xdp_prog);
+		if (xdp_prog && !(rx->flags & XEN_NETRXF_more_data)) {
+			/* currently only a single page contains data */
+			WARN_ON_ONCE(skb_shinfo(skb)->nr_frags != 1);
+			verdict = xennet_run_xdp(queue,
+				       skb_frag_page(&skb_shinfo(skb)->frags[0]),
+				       rx, xdp_prog, &xdp);
+			if (verdict != XDP_PASS)
+				err = -EINVAL;
+		}
+		rcu_read_unlock();
 next:
+		__skb_queue_tail(list, skb);
 		if (!(rx->flags & XEN_NETRXF_more_data))
 			break;
 
@@ -997,6 +1071,7 @@ static int xennet_poll(struct napi_struct *napi, int budget)
 	struct sk_buff_head rxq;
 	struct sk_buff_head errq;
 	struct sk_buff_head tmpq;
+	struct bpf_prog *xdp_prog;
 	int err;
 
 	spin_lock(&queue->rx_lock);
@@ -1014,6 +1089,12 @@ static int xennet_poll(struct napi_struct *napi, int budget)
 		memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
 		memset(extras, 0, sizeof(rinfo.extras));
 
+		rcu_read_lock();
+		xdp_prog = rcu_dereference(queue->xdp_prog);
+		if (xdp_prog)
+			rx->offset = XDP_PACKET_HEADROOM;
+		rcu_read_unlock();
+
 		err = xennet_get_responses(queue, &rinfo, rp, &tmpq);
 
 		if (unlikely(err)) {
@@ -1261,6 +1342,156 @@ static void xennet_poll_controller(struct net_device *dev)
 }
 #endif
 
+#define NETBACK_XDP_HEADROOM_DISABLE	0
+#define NETBACK_XDP_HEADROOM_ENABLE	1
+
+static int talk_to_netback_xdp(struct netfront_info *np, int xdp)
+{
+	int err;
+
+	err = xenbus_printf(XBT_NIL, np->xbdev->nodename,
+			     "feature-xdp", "%u", xdp);
+	if (err)
+		pr_debug("Error writing feature-xdp\n");
+
+	return err;
+}
+
+static int xennet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
+			struct netlink_ext_ack *extack)
+{
+	struct netfront_info *np = netdev_priv(dev);
+	struct bpf_prog *old_prog;
+	unsigned int i, err;
+	unsigned long int max_mtu = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM;
+
+	if (dev->mtu > max_mtu) {
+		netdev_warn(dev, "XDP requires MTU less than %lu\n", max_mtu);
+		return -EINVAL;
+	}
+
+	if (!np->netback_has_xdp_headroom)
+		return 0;
+
+	old_prog = rtnl_dereference(np->queues[0].xdp_prog);
+	if (!old_prog && !prog)
+		return 0;
+
+	if (prog)
+		bpf_prog_add(prog, dev->real_num_tx_queues);
+
+	for (i = 0; i < dev->real_num_tx_queues; ++i)
+		rcu_assign_pointer(np->queues[i].xdp_prog, prog);
+
+	if (old_prog)
+		for (i = 0; i < dev->real_num_tx_queues; ++i)
+			bpf_prog_put(old_prog);
+
+	xenbus_switch_state(np->xbdev, XenbusStateReconfiguring);
+
+	err = talk_to_netback_xdp(np, prog ? NETBACK_XDP_HEADROOM_ENABLE:
+				  NETBACK_XDP_HEADROOM_DISABLE);
+	if (err)
+		return err;
+
+	/* avoid race with XDP headroom adjustment */
+	wait_event(module_wq,
+		   xenbus_read_driver_state(np->xbdev->otherend) ==
+		   XenbusStateReconfigured);
+	xenbus_switch_state(np->xbdev, XenbusStateConnected);
+
+	return 0;
+}
+
+static u32 xennet_xdp_query(struct net_device *dev)
+{
+	struct netfront_info *np = netdev_priv(dev);
+	unsigned int num_queues = dev->real_num_tx_queues;
+	unsigned int i;
+	struct netfront_queue *queue;
+	const struct bpf_prog *xdp_prog;
+
+	for (i = 0; i < num_queues; ++i) {
+		queue = &np->queues[i];
+		xdp_prog = rtnl_dereference(queue->xdp_prog);
+		if (xdp_prog)
+			return xdp_prog->aux->id;
+	}
+
+	return 0;
+}
+
+static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+	switch (xdp->command) {
+	case XDP_SETUP_PROG:
+		return xennet_xdp_set(dev, xdp->prog, xdp->extack);
+	case XDP_QUERY_PROG:
+		xdp->prog_id = xennet_xdp_query(dev);
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int xennet_xdp_xmit_one(struct net_device *dev, struct xdp_frame *xdpf)
+{
+	struct netfront_info *np = netdev_priv(dev);
+	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
+	struct netfront_queue *queue = NULL;
+	unsigned int num_queues = dev->real_num_tx_queues;
+	unsigned long flags;
+	int notify;
+	struct xen_netif_tx_request *tx;
+
+	queue = &np->queues[smp_processor_id() % num_queues];
+
+	spin_lock_irqsave(&queue->tx_lock, flags);
+
+	tx = xennet_make_first_txreq(queue, NULL,
+				     virt_to_page(xdpf->data),
+				     offset_in_page(xdpf->data),
+				     xdpf->len);
+
+	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
+	if (notify)
+		notify_remote_via_irq(queue->tx_irq);
+
+	u64_stats_update_begin(&tx_stats->syncp);
+	tx_stats->bytes += xdpf->len;
+	tx_stats->packets++;
+	u64_stats_update_end(&tx_stats->syncp);
+
+	xennet_tx_buf_gc(queue);
+
+	spin_unlock_irqrestore(&queue->tx_lock, flags);
+	return 0;
+}
+
+static int xennet_xdp_xmit(struct net_device *dev, int n,
+			   struct xdp_frame **frames, u32 flags)
+{
+	int drops = 0;
+	int i, err;
+
+	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+		return -EINVAL;
+
+	for (i = 0; i < n; i++) {
+		struct xdp_frame *xdpf = frames[i];
+
+		if (!xdpf)
+			continue;
+		err = xennet_xdp_xmit_one(dev, xdpf);
+		if (err) {
+			xdp_return_frame_rx_napi(xdpf);
+			drops++;
+		}
+	}
+
+	return n - drops;
+}
+
 static const struct net_device_ops xennet_netdev_ops = {
 	.ndo_open            = xennet_open,
 	.ndo_stop            = xennet_close,
@@ -1272,6 +1503,8 @@ static void xennet_poll_controller(struct net_device *dev)
 	.ndo_fix_features    = xennet_fix_features,
 	.ndo_set_features    = xennet_set_features,
 	.ndo_select_queue    = xennet_select_queue,
+	.ndo_bpf            = xennet_xdp,
+	.ndo_xdp_xmit	    = xennet_xdp_xmit,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = xennet_poll_controller,
 #endif
@@ -1419,6 +1652,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
 		queue->rx_ring_ref = GRANT_INVALID_REF;
 		queue->tx.sring = NULL;
 		queue->rx.sring = NULL;
+
+		page_pool_destroy(queue->page_pool);
 	}
 }
 
@@ -1754,6 +1989,51 @@ static void xennet_destroy_queues(struct netfront_info *info)
 	info->queues = NULL;
 }
 
+
+
+static int xennet_create_page_pool(struct netfront_queue *queue)
+{
+	int err;
+	struct page_pool_params pp_params = {
+		.order = 0,
+		.flags = 0,
+		.pool_size = NET_RX_RING_SIZE,
+		.nid = NUMA_NO_NODE,
+		.dev = &queue->info->netdev->dev,
+		.offset = XDP_PACKET_HEADROOM,
+		.max_len = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
+	};
+
+	queue->page_pool = page_pool_create(&pp_params);
+	if (IS_ERR(queue->page_pool)) {
+		 err = PTR_ERR(queue->page_pool);
+		 queue->page_pool = NULL;
+		 return err;
+	}
+
+	err = xdp_rxq_info_reg(&queue->xdp_rxq, queue->info->netdev,
+			       queue->id);
+	if (err) {
+		netdev_err(queue->info->netdev, "xdp_rxq_info_reg failed\n");
+		goto err_free_pp;
+	}
+
+	err = xdp_rxq_info_reg_mem_model(&queue->xdp_rxq,
+					 MEM_TYPE_PAGE_ORDER0, NULL);
+	if (err) {
+		netdev_err(queue->info->netdev, "xdp_rxq_info_reg_mem_model failed\n");
+		goto err_unregister_rxq;
+	}
+	return 0;
+
+err_unregister_rxq:
+	xdp_rxq_info_unreg(&queue->xdp_rxq);
+err_free_pp:
+	page_pool_destroy(queue->page_pool);
+	queue->page_pool = NULL;
+	return err;
+}
+
 static int xennet_create_queues(struct netfront_info *info,
 				unsigned int *num_queues)
 {
@@ -1779,6 +2059,14 @@ static int xennet_create_queues(struct netfront_info *info,
 			break;
 		}
 
+		/* use page pool recycling instead of buddy allocator */
+		ret = xennet_create_page_pool(queue);
+		if (ret < 0) {
+			dev_err(&info->xbdev->dev, "can't allocate page pool\n");
+			*num_queues = i;
+			return ret;
+		}
+
 		netif_napi_add(queue->info->netdev, &queue->napi,
 			       xennet_poll, 64);
 		if (netif_running(info->netdev))
@@ -1825,6 +2113,8 @@ static int talk_to_netback(struct xenbus_device *dev,
 		goto out_unlocked;
 	}
 
+	info->netback_has_xdp_headroom = xenbus_read_unsigned(info->xbdev->otherend,
+							      "feature-xdp-headroom", 0);
 	rtnl_lock();
 	if (info->queues)
 		xennet_destroy_queues(info);
@@ -1959,6 +2249,8 @@ static int xennet_connect(struct net_device *dev)
 	err = talk_to_netback(np->xbdev, np);
 	if (err)
 		return err;
+	if (np->netback_has_xdp_headroom)
+		pr_info("backend supports XDP headroom\n");
 
 	/* talk_to_netback() sets the correct number of queues */
 	num_queues = dev->real_num_tx_queues;
-- 
1.8.3.1


* [PATCH net-next v7 2/2] xen networking: add XDP offset adjustment to xen-netback
  2020-05-04  8:37 [PATCH net-next v7 1/2] xen networking: add basic XDP support for xen-netfront Denis Kirjanov
@ 2020-05-04  8:37 ` Denis Kirjanov
  2020-05-05 14:19   ` Paul Durrant
  2020-05-05  8:33 ` [PATCH net-next v7 1/2] xen networking: add basic XDP support for xen-netfront Jesper Dangaard Brouer
From: Denis Kirjanov @ 2020-05-04  8:37 UTC (permalink / raw)
  To: netdev; +Cc: jgross, wei.liu, paul, ilias.apalodimas

The patch adds the RX data offset adjustment and the reading of the
frontend's XDP state over xenbus needed to make XDP work on the
netfront side.

Signed-off-by: Denis Kirjanov <denis.kirjanov@suse.com>
---
 drivers/net/xen-netback/common.h  |  2 ++
 drivers/net/xen-netback/netback.c |  7 +++++++
 drivers/net/xen-netback/rx.c      |  7 ++++++-
 drivers/net/xen-netback/xenbus.c  | 28 ++++++++++++++++++++++++++++
 4 files changed, 43 insertions(+), 1 deletion(-)

diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 05847eb..4a148d6 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -280,6 +280,7 @@ struct xenvif {
 	u8 ip_csum:1;
 	u8 ipv6_csum:1;
 	u8 multicast_control:1;
+	u8 xdp_enabled:1;
 
 	/* Is this interface disabled? True when backend discovers
 	 * frontend is rogue.
@@ -395,6 +396,7 @@ static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
 irqreturn_t xenvif_interrupt(int irq, void *dev_id);
 
 extern bool separate_tx_rx_irq;
+extern bool provides_xdp_headroom;
 
 extern unsigned int rx_drain_timeout_msecs;
 extern unsigned int rx_stall_timeout_msecs;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 315dfc6..6dfca72 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -96,6 +96,13 @@
 module_param_named(hash_cache_size, xenvif_hash_cache_size, uint, 0644);
 MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache");
 
+/* The module parameter tells that we have to put data
+ * for xen-netfront with the XDP_PACKET_HEADROOM offset
+ * needed for XDP processing
+ */
+bool provides_xdp_headroom = true;
+module_param(provides_xdp_headroom, bool, 0644);
+
 static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 			       u8 status);
 
diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
index ef58870..1c0cf8a 100644
--- a/drivers/net/xen-netback/rx.c
+++ b/drivers/net/xen-netback/rx.c
@@ -33,6 +33,11 @@
 #include <xen/xen.h>
 #include <xen/events.h>
 
+static inline int xenvif_rx_xdp_offset(struct xenvif *vif)
+{
+	return vif->xdp_enabled ? XDP_PACKET_HEADROOM : 0;
+}
+
 static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
 {
 	RING_IDX prod, cons;
@@ -356,7 +361,7 @@ static void xenvif_rx_data_slot(struct xenvif_queue *queue,
 				struct xen_netif_rx_request *req,
 				struct xen_netif_rx_response *rsp)
 {
-	unsigned int offset = 0;
+	unsigned int offset = xenvif_rx_xdp_offset(queue->vif);
 	unsigned int flags;
 
 	do {
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 286054b..7c0450e 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -393,6 +393,20 @@ static void set_backend_state(struct backend_info *be,
 	}
 }
 
+static void read_xenbus_frontend_xdp(struct backend_info *be,
+				      struct xenbus_device *dev)
+{
+	struct xenvif *vif = be->vif;
+	unsigned int val;
+	int err;
+
+	err = xenbus_scanf(XBT_NIL, dev->otherend,
+			   "feature-xdp", "%u", &val);
+	if (err < 0)
+		return;
+	vif->xdp_enabled = val;
+}
+
 /**
  * Callback received when the frontend's state changes.
  */
@@ -417,6 +431,11 @@ static void frontend_changed(struct xenbus_device *dev,
 		set_backend_state(be, XenbusStateConnected);
 		break;
 
+	case XenbusStateReconfiguring:
+		read_xenbus_frontend_xdp(be, dev);
+		xenbus_switch_state(dev, XenbusStateReconfigured);
+		break;
+
 	case XenbusStateClosing:
 		set_backend_state(be, XenbusStateClosing);
 		break;
@@ -1036,6 +1055,15 @@ static int netback_probe(struct xenbus_device *dev,
 			goto abort_transaction;
 		}
 
+		/* we can adjust a headroom for netfront XDP processing */
+		err = xenbus_printf(xbt, dev->nodename,
+				    "feature-xdp-headroom", "%d",
+				    !!provides_xdp_headroom);
+		if (err) {
+			message = "writing feature-xdp-headroom";
+			goto abort_transaction;
+		}
+
 		/* We don't support rx-flip path (except old guests who
 		 * don't grok this feature flag).
 		 */
-- 
1.8.3.1


* Re: [PATCH net-next v7 1/2] xen networking: add basic XDP support for xen-netfront
  2020-05-04  8:37 [PATCH net-next v7 1/2] xen networking: add basic XDP support for xen-netfront Denis Kirjanov
  2020-05-04  8:37 ` [PATCH net-next v7 2/2] xen networking: add XDP offset adjustment to xen-netback Denis Kirjanov
@ 2020-05-05  8:33 ` Jesper Dangaard Brouer
From: Jesper Dangaard Brouer @ 2020-05-05  8:33 UTC (permalink / raw)
  To: Denis Kirjanov; +Cc: brouer, netdev, jgross, wei.liu, paul, ilias.apalodimas

On Mon,  4 May 2020 11:37:53 +0300
Denis Kirjanov <kda@linux-powerpc.org> wrote:

> +static int xennet_create_page_pool(struct netfront_queue *queue)
> +{
> +	int err;
> +	struct page_pool_params pp_params = {
> +		.order = 0,
> +		.flags = 0,
> +		.pool_size = NET_RX_RING_SIZE,
> +		.nid = NUMA_NO_NODE,
> +		.dev = &queue->info->netdev->dev,
> +		.offset = XDP_PACKET_HEADROOM,
> +		.max_len = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
> +	};
> +
> +	queue->page_pool = page_pool_create(&pp_params);
> +	if (IS_ERR(queue->page_pool)) {
> +		 err = PTR_ERR(queue->page_pool);
> +		 queue->page_pool = NULL;
> +		 return err;
> +	}
> +
> +	err = xdp_rxq_info_reg(&queue->xdp_rxq, queue->info->netdev,
> +			       queue->id);
> +	if (err) {
> +		netdev_err(queue->info->netdev, "xdp_rxq_info_reg failed\n");
> +		goto err_free_pp;
> +	}
> +
> +	err = xdp_rxq_info_reg_mem_model(&queue->xdp_rxq,
> +					 MEM_TYPE_PAGE_ORDER0, NULL);

What!?! - You are creating a page_pool, but registering a MEM_TYPE_PAGE_ORDER0.

Have you even tested this?  The page_pool in-flight accounting will be
completely off.  This should show up when removing the page_pool again.

To help driver developers diagnose and catch stuff like this, we have
some bpftrace scripts that can help troubleshoot, here[1]:

[1] https://github.com/xdp-project/xdp-project/tree/master/areas/mem/bpftrace
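
Presumably something along these lines is needed instead, so that the
pool itself is registered as the allocator (untested sketch):

	err = xdp_rxq_info_reg_mem_model(&queue->xdp_rxq,
					 MEM_TYPE_PAGE_POOL,
					 queue->page_pool);
	if (err) {
		netdev_err(queue->info->netdev,
			   "xdp_rxq_info_reg_mem_model failed\n");
		goto err_unregister_rxq;
	}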

-- 
Best regards,
  Jesper Dangaard Brouer
  MSc.CS, Principal Kernel Engineer at Red Hat
  LinkedIn: http://www.linkedin.com/in/brouer


* RE: [PATCH net-next v7 2/2] xen networking: add XDP offset adjustment to xen-netback
  2020-05-04  8:37 ` [PATCH net-next v7 2/2] xen networking: add XDP offset adjustment to xen-netback Denis Kirjanov
@ 2020-05-05 14:19   ` Paul Durrant
  2020-05-05 15:57     ` Denis Kirjanov
From: Paul Durrant @ 2020-05-05 14:19 UTC (permalink / raw)
  To: 'Denis Kirjanov', netdev; +Cc: jgross, wei.liu, ilias.apalodimas

> -----Original Message-----
> From: Denis Kirjanov <kda@linux-powerpc.org>
> Sent: 04 May 2020 09:38
> To: netdev@vger.kernel.org
> Cc: jgross@suse.com; wei.liu@kernel.org; paul@xen.org; ilias.apalodimas@linaro.org
> Subject: [PATCH net-next v7 2/2] xen networking: add XDP offset adjustment to xen-netback
> 
> the patch basically adds the offset adjustment and netfront
> state reading to make XDP work on netfront side.
> 
> Signed-off-by: Denis Kirjanov <denis.kirjanov@suse.com>
> ---
>  drivers/net/xen-netback/common.h  |  2 ++
>  drivers/net/xen-netback/netback.c |  7 +++++++
>  drivers/net/xen-netback/rx.c      |  7 ++++++-
>  drivers/net/xen-netback/xenbus.c  | 28 ++++++++++++++++++++++++++++
>  4 files changed, 43 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
> index 05847eb..4a148d6 100644
> --- a/drivers/net/xen-netback/common.h
> +++ b/drivers/net/xen-netback/common.h
> @@ -280,6 +280,7 @@ struct xenvif {
>  	u8 ip_csum:1;
>  	u8 ipv6_csum:1;
>  	u8 multicast_control:1;
> +	u8 xdp_enabled:1;
> 
>  	/* Is this interface disabled? True when backend discovers
>  	 * frontend is rogue.
> @@ -395,6 +396,7 @@ static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
>  irqreturn_t xenvif_interrupt(int irq, void *dev_id);
> 
>  extern bool separate_tx_rx_irq;
> +extern bool provides_xdp_headroom;
> 
>  extern unsigned int rx_drain_timeout_msecs;
>  extern unsigned int rx_stall_timeout_msecs;
> diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
> index 315dfc6..6dfca72 100644
> --- a/drivers/net/xen-netback/netback.c
> +++ b/drivers/net/xen-netback/netback.c
> @@ -96,6 +96,13 @@
>  module_param_named(hash_cache_size, xenvif_hash_cache_size, uint, 0644);
>  MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache");
> 
> +/* The module parameter tells that we have to put data
> + * for xen-netfront with the XDP_PACKET_HEADROOM offset
> + * needed for XDP processing
> + */
> +bool provides_xdp_headroom = true;
> +module_param(provides_xdp_headroom, bool, 0644);
> +
>  static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
>  			       u8 status);
> 
> diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
> index ef58870..1c0cf8a 100644
> --- a/drivers/net/xen-netback/rx.c
> +++ b/drivers/net/xen-netback/rx.c
> @@ -33,6 +33,11 @@
>  #include <xen/xen.h>
>  #include <xen/events.h>
> 
> +static inline int xenvif_rx_xdp_offset(struct xenvif *vif)
> +{
> +	return vif->xdp_enabled ? XDP_PACKET_HEADROOM : 0;
> +}
> +
>  static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
>  {
>  	RING_IDX prod, cons;
> @@ -356,7 +361,7 @@ static void xenvif_rx_data_slot(struct xenvif_queue *queue,
>  				struct xen_netif_rx_request *req,
>  				struct xen_netif_rx_response *rsp)
>  {
> -	unsigned int offset = 0;
> +	unsigned int offset = xenvif_rx_xdp_offset(queue->vif);
>  	unsigned int flags;
> 
>  	do {
> diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
> index 286054b..7c0450e 100644
> --- a/drivers/net/xen-netback/xenbus.c
> +++ b/drivers/net/xen-netback/xenbus.c
> @@ -393,6 +393,20 @@ static void set_backend_state(struct backend_info *be,
>  	}
>  }
> 
> +static void read_xenbus_frontend_xdp(struct backend_info *be,
> +				      struct xenbus_device *dev)
> +{
> +	struct xenvif *vif = be->vif;
> +	unsigned int val;
> +	int err;
> +
> +	err = xenbus_scanf(XBT_NIL, dev->otherend,
> +			   "feature-xdp", "%u", &val);
> +	if (err < 0)

xenbus_scanf() returns the number of successfully parsed values, so you ought to be checking for != 1 here.
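
I.e. something like (sketch):

	err = xenbus_scanf(XBT_NIL, dev->otherend,
			   "feature-xdp", "%u", &val);
	if (err != 1)
		return;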

> +		return;
> +	vif->xdp_enabled = val;
> +}
> +
>  /**
>   * Callback received when the frontend's state changes.
>   */
> @@ -417,6 +431,11 @@ static void frontend_changed(struct xenbus_device *dev,
>  		set_backend_state(be, XenbusStateConnected);
>  		break;
> 
> +	case XenbusStateReconfiguring:
> +		read_xenbus_frontend_xdp(be, dev);

Is the frontend always expected to trigger a re-configure, or could feature-xdp already be enabled prior to connection?

> +		xenbus_switch_state(dev, XenbusStateReconfigured);
> +		break;
> +
>  	case XenbusStateClosing:
>  		set_backend_state(be, XenbusStateClosing);
>  		break;
> @@ -1036,6 +1055,15 @@ static int netback_probe(struct xenbus_device *dev,
>  			goto abort_transaction;
>  		}
> 
> +		/* we can adjust a headroom for netfront XDP processing */
> +		err = xenbus_printf(xbt, dev->nodename,
> +				    "feature-xdp-headroom", "%d",
> +				    !!provides_xdp_headroom);

provides_xdp_headroom is bool so the !! ought to be unnecessary.
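
I.e. simply (sketch):

	err = xenbus_printf(xbt, dev->nodename,
			    "feature-xdp-headroom", "%d",
			    provides_xdp_headroom);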

  Paul

> +		if (err) {
> +			message = "writing feature-xdp-headroom";
> +			goto abort_transaction;
> +		}
> +
>  		/* We don't support rx-flip path (except old guests who
>  		 * don't grok this feature flag).
>  		 */
> --
> 1.8.3.1



* Re: [PATCH net-next v7 2/2] xen networking: add XDP offset adjustment to xen-netback
  2020-05-05 14:19   ` Paul Durrant
@ 2020-05-05 15:57     ` Denis Kirjanov
  2020-05-05 16:28       ` Paul Durrant
From: Denis Kirjanov @ 2020-05-05 15:57 UTC (permalink / raw)
  To: paul; +Cc: netdev, jgross, wei.liu, ilias.apalodimas

On 5/5/20, Paul Durrant <xadimgnik@gmail.com> wrote:
>> -----Original Message-----
>> From: Denis Kirjanov <kda@linux-powerpc.org>
>> Sent: 04 May 2020 09:38
>> To: netdev@vger.kernel.org
>> Cc: jgross@suse.com; wei.liu@kernel.org; paul@xen.org;
>> ilias.apalodimas@linaro.org
>> Subject: [PATCH net-next v7 2/2] xen networking: add XDP offset adjustment
>> to xen-netback
>>
>> the patch basically adds the offset adjustment and netfront
>> state reading to make XDP work on netfront side.
>>
>> Signed-off-by: Denis Kirjanov <denis.kirjanov@suse.com>
>> ---
>>  drivers/net/xen-netback/common.h  |  2 ++
>>  drivers/net/xen-netback/netback.c |  7 +++++++
>>  drivers/net/xen-netback/rx.c      |  7 ++++++-
>>  drivers/net/xen-netback/xenbus.c  | 28 ++++++++++++++++++++++++++++
>>  4 files changed, 43 insertions(+), 1 deletion(-)
>>
>> diff --git a/drivers/net/xen-netback/common.h
>> b/drivers/net/xen-netback/common.h
>> index 05847eb..4a148d6 100644
>> --- a/drivers/net/xen-netback/common.h
>> +++ b/drivers/net/xen-netback/common.h
>> @@ -280,6 +280,7 @@ struct xenvif {
>>  	u8 ip_csum:1;
>>  	u8 ipv6_csum:1;
>>  	u8 multicast_control:1;
>> +	u8 xdp_enabled:1;
>>
>>  	/* Is this interface disabled? True when backend discovers
>>  	 * frontend is rogue.
>> @@ -395,6 +396,7 @@ static inline pending_ring_idx_t
>> nr_pending_reqs(struct xenvif_queue *queue)
>>  irqreturn_t xenvif_interrupt(int irq, void *dev_id);
>>
>>  extern bool separate_tx_rx_irq;
>> +extern bool provides_xdp_headroom;
>>
>>  extern unsigned int rx_drain_timeout_msecs;
>>  extern unsigned int rx_stall_timeout_msecs;
>> diff --git a/drivers/net/xen-netback/netback.c
>> b/drivers/net/xen-netback/netback.c
>> index 315dfc6..6dfca72 100644
>> --- a/drivers/net/xen-netback/netback.c
>> +++ b/drivers/net/xen-netback/netback.c
>> @@ -96,6 +96,13 @@
>>  module_param_named(hash_cache_size, xenvif_hash_cache_size, uint, 0644);
>>  MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache");
>>
>> +/* The module parameter tells that we have to put data
>> + * for xen-netfront with the XDP_PACKET_HEADROOM offset
>> + * needed for XDP processing
>> + */
>> +bool provides_xdp_headroom = true;
>> +module_param(provides_xdp_headroom, bool, 0644);
>> +
>>  static void xenvif_idx_release(struct xenvif_queue *queue, u16
>> pending_idx,
>>  			       u8 status);
>>
>> diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
>> index ef58870..1c0cf8a 100644
>> --- a/drivers/net/xen-netback/rx.c
>> +++ b/drivers/net/xen-netback/rx.c
>> @@ -33,6 +33,11 @@
>>  #include <xen/xen.h>
>>  #include <xen/events.h>
>>
>> +static inline int xenvif_rx_xdp_offset(struct xenvif *vif)
>> +{
>> +	return vif->xdp_enabled ? XDP_PACKET_HEADROOM : 0;
>> +}
>> +
>>  static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
>>  {
>>  	RING_IDX prod, cons;
>> @@ -356,7 +361,7 @@ static void xenvif_rx_data_slot(struct xenvif_queue
>> *queue,
>>  				struct xen_netif_rx_request *req,
>>  				struct xen_netif_rx_response *rsp)
>>  {
>> -	unsigned int offset = 0;
>> +	unsigned int offset = xenvif_rx_xdp_offset(queue->vif);
>>  	unsigned int flags;
>>
>>  	do {
>> diff --git a/drivers/net/xen-netback/xenbus.c
>> b/drivers/net/xen-netback/xenbus.c
>> index 286054b..7c0450e 100644
>> --- a/drivers/net/xen-netback/xenbus.c
>> +++ b/drivers/net/xen-netback/xenbus.c
>> @@ -393,6 +393,20 @@ static void set_backend_state(struct backend_info
>> *be,
>>  	}
>>  }
>>
>> +static void read_xenbus_frontend_xdp(struct backend_info *be,
>> +				      struct xenbus_device *dev)
>> +{
>> +	struct xenvif *vif = be->vif;
>> +	unsigned int val;
>> +	int err;
>> +
>> +	err = xenbus_scanf(XBT_NIL, dev->otherend,
>> +			   "feature-xdp", "%u", &val);
>> +	if (err < 0)
>
> xenbus_scanf() returns the number of successfully parsed values so you ought
> to be checking for != 1 here.

right, makes sense.

>
>> +		return;
>> +	vif->xdp_enabled = val;
>> +}
>> +
>>  /**
>>   * Callback received when the frontend's state changes.
>>   */
>> @@ -417,6 +431,11 @@ static void frontend_changed(struct xenbus_device
>> *dev,
>>  		set_backend_state(be, XenbusStateConnected);
>>  		break;
>>
>> +	case XenbusStateReconfiguring:
>> +		read_xenbus_frontend_xdp(be, dev);
>
> Is the frontend always expected to trigger a re-configure, or could
> feature-xdp already be enabled prior to connection?

Yes, feature-xdp is set by the frontend when an XDP program is loaded.

>
>> +		xenbus_switch_state(dev, XenbusStateReconfigured);
>> +		break;
>> +
>>  	case XenbusStateClosing:
>>  		set_backend_state(be, XenbusStateClosing);
>>  		break;
>> @@ -1036,6 +1055,15 @@ static int netback_probe(struct xenbus_device
>> *dev,
>>  			goto abort_transaction;
>>  		}
>>
>> +		/* we can adjust a headroom for netfront XDP processing */
>> +		err = xenbus_printf(xbt, dev->nodename,
>> +				    "feature-xdp-headroom", "%d",
>> +				    !!provides_xdp_headroom);
>
> provides_xdp_headroom is bool so the !! ought to be unnecessary.

Ok, will post the updated version shortly.

Thanks for review!

>
>   Paul
>
>> +		if (err) {
>> +			message = "writing feature-xdp-headroom";
>> +			goto abort_transaction;
>> +		}
>> +
>>  		/* We don't support rx-flip path (except old guests who
>>  		 * don't grok this feature flag).
>>  		 */
>> --
>> 1.8.3.1
>
>
>

* RE: [PATCH net-next v7 2/2] xen networking: add XDP offset adjustment to xen-netback
  2020-05-05 15:57     ` Denis Kirjanov
@ 2020-05-05 16:28       ` Paul Durrant
  2020-05-06 17:45         ` Denis Kirjanov
From: Paul Durrant @ 2020-05-05 16:28 UTC (permalink / raw)
  To: 'Denis Kirjanov'; +Cc: netdev, jgross, wei.liu, ilias.apalodimas

> -----Original Message-----
> >> @@ -417,6 +431,11 @@ static void frontend_changed(struct xenbus_device
> >> *dev,
> >>  		set_backend_state(be, XenbusStateConnected);
> >>  		break;
> >>
> >> +	case XenbusStateReconfiguring:
> >> +		read_xenbus_frontend_xdp(be, dev);
> >
> > Is the frontend always expected to trigger a re-configure, or could
> > feature-xdp already be enabled prior to connection?
> 
> Yes, feature-xdp is set by the frontend when  xdp code is loaded.
> 

That's still ambiguous... what I'm getting at is whether you also need to read the xdp state when transitioning into Connected as well as Reconfiguring?

  Paul


* Re: [PATCH net-next v7 2/2] xen networking: add XDP offset adjustment to xen-netback
  2020-05-05 16:28       ` Paul Durrant
@ 2020-05-06 17:45         ` Denis Kirjanov
  2020-05-07  7:18           ` Paul Durrant
From: Denis Kirjanov @ 2020-05-06 17:45 UTC (permalink / raw)
  To: paul; +Cc: netdev, jgross, wei.liu, ilias.apalodimas

On 5/5/20, Paul Durrant <xadimgnik@gmail.com> wrote:
>> -----Original Message-----
>> >> @@ -417,6 +431,11 @@ static void frontend_changed(struct xenbus_device
>> >> *dev,
>> >>  		set_backend_state(be, XenbusStateConnected);
>> >>  		break;
>> >>
>> >> +	case XenbusStateReconfiguring:
>> >> +		read_xenbus_frontend_xdp(be, dev);
>> >
>> > Is the frontend always expected to trigger a re-configure, or could
>> > feature-xdp already be enabled prior to connection?
>>
>> Yes, feature-xdp is set by the frontend when  xdp code is loaded.
>>
>
> That's still ambiguous... what I'm getting at is whether you also need to
> read the xdp state when transitioning into Connected as well as
> Reconfiguring?

I have to read the state only during the Reconfiguring state, since
that's where an XDP program is loaded / unloaded, and then we transition
from Reconfigured to Connected.

>
>   Paul
>
>

* RE: [PATCH net-next v7 2/2] xen networking: add XDP offset adjustment to xen-netback
  2020-05-06 17:45         ` Denis Kirjanov
@ 2020-05-07  7:18           ` Paul Durrant
  2020-05-07 11:30             ` Denis Kirjanov
From: Paul Durrant @ 2020-05-07  7:18 UTC (permalink / raw)
  To: 'Denis Kirjanov'; +Cc: netdev, jgross, wei.liu, ilias.apalodimas

> -----Original Message-----
> From: Denis Kirjanov <kda@linux-powerpc.org>
> Sent: 06 May 2020 18:45
> To: paul@xen.org
> Cc: netdev@vger.kernel.org; jgross@suse.com; wei.liu@kernel.org; ilias.apalodimas@linaro.org
> Subject: Re: [PATCH net-next v7 2/2] xen networking: add XDP offset adjustment to xen-netback
> 
> On 5/5/20, Paul Durrant <xadimgnik@gmail.com> wrote:
> >> -----Original Message-----
> >> >> @@ -417,6 +431,11 @@ static void frontend_changed(struct xenbus_device
> >> >> *dev,
> >> >>  		set_backend_state(be, XenbusStateConnected);
> >> >>  		break;
> >> >>
> >> >> +	case XenbusStateReconfiguring:
> >> >> +		read_xenbus_frontend_xdp(be, dev);
> >> >
> >> > Is the frontend always expected to trigger a re-configure, or could
> >> > feature-xdp already be enabled prior to connection?
> >>
> >> Yes, feature-xdp is set by the frontend when  xdp code is loaded.
> >>
> >
> > That's still ambiguous... what I'm getting at is whether you also need to
> > read the xdp state when transitioning into Connected as well as
> > Reconfiguring?
> 
> I have to read the state only during the Reconfiguring state since
> that's where an XDP program is loaded / unloaded and then we transition
> from Reconfigred to Connected
> 

Ok, but what about netback re-connection? It is possible that netback can be disconnected, unloaded, reloaded and re-attached to a running frontend. In this case XDP would be active so I still think read_xenbus_frontend_xdp() needs to form part of ring connection (if only in this case).

  Paul


* Re: [PATCH net-next v7 2/2] xen networking: add XDP offset adjustment to xen-netback
  2020-05-07  7:18           ` Paul Durrant
@ 2020-05-07 11:30             ` Denis Kirjanov
From: Denis Kirjanov @ 2020-05-07 11:30 UTC (permalink / raw)
  To: paul; +Cc: netdev, jgross, wei.liu, ilias.apalodimas

On 5/7/20, Paul Durrant <xadimgnik@gmail.com> wrote:
>> -----Original Message-----
>> From: Denis Kirjanov <kda@linux-powerpc.org>
>> Sent: 06 May 2020 18:45
>> To: paul@xen.org
>> Cc: netdev@vger.kernel.org; jgross@suse.com; wei.liu@kernel.org;
>> ilias.apalodimas@linaro.org
>> Subject: Re: [PATCH net-next v7 2/2] xen networking: add XDP offset
>> adjustment to xen-netback
>>
>> On 5/5/20, Paul Durrant <xadimgnik@gmail.com> wrote:
>> >> -----Original Message-----
>> >> >> @@ -417,6 +431,11 @@ static void frontend_changed(struct
>> >> >> xenbus_device
>> >> >> *dev,
>> >> >>  		set_backend_state(be, XenbusStateConnected);
>> >> >>  		break;
>> >> >>
>> >> >> +	case XenbusStateReconfiguring:
>> >> >> +		read_xenbus_frontend_xdp(be, dev);
>> >> >
>> >> > Is the frontend always expected to trigger a re-configure, or could
>> >> > feature-xdp already be enabled prior to connection?
>> >>
>> >> Yes, feature-xdp is set by the frontend when  xdp code is loaded.
>> >>
>> >
>> > That's still ambiguous... what I'm getting at is whether you also need
>> > to
>> > read the xdp state when transitioning into Connected as well as
>> > Reconfiguring?
>>
>> I have to read the state only during the Reconfiguring state since
>> that's where an XDP program is loaded / unloaded and then we transition
>> from Reconfigred to Connected
>>
>
Ok, but what about netback re-connection? It is possible that netback can be
disconnected, unloaded, reloaded and re-attached to a running frontend. In
this case XDP would be active, so I still think read_xenbus_frontend_xdp()
needs to form part of the ring connection (if only in this case).

I made a change to xen-netfront to keep the XDP state and then just
pass the saved state in talk_to_netback().
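
Roughly along these lines (untested sketch; the field name and exact
placement are illustrative only, the real change may look different):

	/* in struct netfront_info */
	bool netfront_xdp_enabled;

	/* in talk_to_netback(), re-announce the saved state */
	err = talk_to_netback_xdp(info, info->netfront_xdp_enabled ?
				  NETBACK_XDP_HEADROOM_ENABLE :
				  NETBACK_XDP_HEADROOM_DISABLE);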

>
>   Paul
>
>
