From: "Björn Töpel" <bjorn.topel@gmail.com>
To: bjorn.topel@gmail.com, magnus.karlsson@intel.com,
	magnus.karlsson@gmail.com, ast@kernel.org, daniel@iogearbox.net,
	netdev@vger.kernel.org
Cc: "Björn Töpel" <bjorn.topel@intel.com>,
	brouer@redhat.com, u9012063@gmail.com, qi.z.zhang@intel.com,
	jakub.kicinski@netronome.com, andrew@lunn.ch
Subject: [PATCH bpf-next v2 3/7] bpf: add bpf_xsk_redirect function
Date: Mon, 17 Dec 2018 11:24:57 +0100	[thread overview]
Message-ID: <20181217102501.22019-4-bjorn.topel@gmail.com> (raw)
In-Reply-To: <20181217102501.22019-1-bjorn.topel@gmail.com>

From: Björn Töpel <bjorn.topel@intel.com>

The bpf_xsk_redirect function is a new BPF redirect helper,
complementing bpf_redirect/bpf_redirect_map. If an XDP socket has
been attached to a netdev Rx queue via the XDP_ATTACH bind() option
and bpf_xsk_redirect is called, the packet will be redirected to the
attached socket.
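
For reference, the userspace side of the attach step is sketched
below. This is a minimal, hypothetical example (error handling
omitted): XDP_ATTACH is the bind() flag introduced in patch 2/7 of
this series, and AF_XDP is assumed to be available from the system
headers.

  #include <sys/socket.h>
  #include <net/if.h>          /* if_nametoindex() */
  #include <linux/if_xdp.h>    /* struct sockaddr_xdp, XDP_ATTACH */

  /* Bind an AF_XDP socket to a given netdev Rx queue and attach it
   * (XDP_ATTACH), so that bpf_xsk_redirect() can find it there.
   */
  static int xsk_bind_attached(int xsk_fd, const char *ifname,
                               unsigned int queue_id)
  {
          struct sockaddr_xdp sxdp = {
                  .sxdp_family   = AF_XDP,
                  .sxdp_ifindex  = if_nametoindex(ifname),
                  .sxdp_queue_id = queue_id,
                  .sxdp_flags    = XDP_ATTACH,
          };

          return bind(xsk_fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
  }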

The bpf_xsk_redirect function returns XDP_REDIRECT if an XDP socket
is attached to the Rx queue the packet was received on, otherwise
XDP_PASS.

This commit also adds the corresponding tracepoints for the redirect
call.
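
From the BPF program side, usage could look like the following
minimal sketch (it assumes bpf_xsk_redirect() is declared in an
updated bpf_helpers.h; the program and section names are made up
for illustration):

  #include <linux/bpf.h>
  #include "bpf_helpers.h"

  SEC("xdp")
  int xdp_xsk_redirect_prog(struct xdp_md *ctx)
  {
          /* XDP_REDIRECT if an XDP socket is attached to the Rx
           * queue this packet arrived on, XDP_PASS otherwise.
           */
          return bpf_xsk_redirect(ctx);
  }

  char _license[] SEC("license") = "GPL";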

Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
---
 include/linux/filter.h     |   4 ++
 include/trace/events/xdp.h |  61 ++++++++++++++++++++++
 include/uapi/linux/bpf.h   |  14 +++++-
 net/core/filter.c          | 100 +++++++++++++++++++++++++++++++++++++
 4 files changed, 178 insertions(+), 1 deletion(-)

diff --git a/include/linux/filter.h b/include/linux/filter.h
index 537e9e7c6e6f..ee3efca4477e 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -532,6 +532,10 @@ struct bpf_redirect_info {
 	u32 flags;
 	struct bpf_map *map;
 	struct bpf_map *map_to_flush;
+#ifdef CONFIG_XDP_SOCKETS
+	struct xdp_sock *xsk;
+	struct xdp_sock *xsk_to_flush;
+#endif
 	u32 kern_flags;
 };
 
diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h
index e95cb86b65cf..30f399bd462b 100644
--- a/include/trace/events/xdp.h
+++ b/include/trace/events/xdp.h
@@ -158,6 +158,67 @@ struct _bpf_dtab_netdev {
 	 trace_xdp_redirect_map_err(dev, xdp, devmap_ifindex(fwd, map),	\
 				    err, map, idx)
 
+DECLARE_EVENT_CLASS(xsk_redirect_template,
+
+	TP_PROTO(const struct net_device *dev,
+		 const struct bpf_prog *xdp,
+		 int err,
+		 struct xdp_buff *xbuff),
+
+	TP_ARGS(dev, xdp, err, xbuff),
+
+	TP_STRUCT__entry(
+		__field(int, prog_id)
+		__field(u32, act)
+		__field(int, ifindex)
+		__field(int, err)
+		__field(u32, queue_index)
+		__field(enum xdp_mem_type, mem_type)
+	),
+
+	TP_fast_assign(
+		__entry->prog_id	= xdp->aux->id;
+		__entry->act		= XDP_REDIRECT;
+		__entry->ifindex	= dev->ifindex;
+		__entry->err		= err;
+		__entry->queue_index	= xbuff->rxq->queue_index;
+		__entry->mem_type	= xbuff->rxq->mem.type;
+	),
+
+	TP_printk("prog_id=%d action=%s ifindex=%d err=%d queue_index=%d"
+		  " mem_type=%d",
+		  __entry->prog_id,
+		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
+		  __entry->ifindex,
+		  __entry->err,
+		  __entry->queue_index,
+		  __entry->mem_type)
+);
+
+DEFINE_EVENT(xsk_redirect_template, xsk_redirect,
+	TP_PROTO(const struct net_device *dev,
+		 const struct bpf_prog *xdp,
+		 int err,
+		 struct xdp_buff *xbuff),
+
+	TP_ARGS(dev, xdp, err, xbuff)
+);
+
+DEFINE_EVENT(xsk_redirect_template, xsk_redirect_err,
+	TP_PROTO(const struct net_device *dev,
+		 const struct bpf_prog *xdp,
+		 int err,
+		 struct xdp_buff *xbuff),
+
+	TP_ARGS(dev, xdp, err, xbuff)
+);
+
+#define _trace_xsk_redirect(dev, xdp, xbuff)		\
+	 trace_xsk_redirect(dev, xdp, 0, xbuff)
+
+#define _trace_xsk_redirect_err(dev, xdp, xbuff, err)	\
+	 trace_xsk_redirect_err(dev, xdp, err, xbuff)
+
 TRACE_EVENT(xdp_cpumap_kthread,
 
 	TP_PROTO(int map_id, unsigned int processed,  unsigned int drops,
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index e7d57e89f25f..2568039b6265 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -2319,6 +2319,17 @@ union bpf_attr {
  *		"**y**".
  *	Return
  *		0
+ *
+ * int bpf_xsk_redirect(struct xdp_buff *xdp_md)
+ *	Description
+ *		Redirect the packet to the attached XDP socket, if any.
+ *		An XDP socket can be attached to a network interface Rx
+ *		queue by passing the XDP_ATTACH option to bind() for
+ *		the socket.
+ *
+ *	Return
+ *		**XDP_REDIRECT** if there is an XDP socket attached to the Rx
+ *		queue receiving the frame, otherwise **XDP_PASS**.
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -2413,7 +2424,8 @@ union bpf_attr {
 	FN(map_peek_elem),		\
 	FN(msg_push_data),		\
 	FN(msg_pop_data),		\
-	FN(rc_pointer_rel),
+	FN(rc_pointer_rel),		\
+	FN(xsk_redirect),
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
diff --git a/net/core/filter.c b/net/core/filter.c
index f9348806e843..93ed82cd741f 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3415,6 +3415,17 @@ static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
 	return 0;
 }
 
+static void xdp_do_flush_xsk(struct bpf_redirect_info *ri)
+{
+#ifdef CONFIG_XDP_SOCKETS
+	struct xdp_sock *xsk = ri->xsk_to_flush;
+
+	ri->xsk_to_flush = NULL;
+	if (xsk)
+		xsk_flush(xsk);
+#endif
+}
+
 void xdp_do_flush_map(void)
 {
 	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
@@ -3436,6 +3447,8 @@ void xdp_do_flush_map(void)
 			break;
 		}
 	}
+
+	xdp_do_flush_xsk(ri);
 }
 EXPORT_SYMBOL_GPL(xdp_do_flush_map);
 
@@ -3501,6 +3514,30 @@ static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
 	return err;
 }
 
+#ifdef CONFIG_XDP_SOCKETS
+static int xdp_do_xsk_redirect(struct net_device *dev, struct xdp_buff *xdp,
+			       struct bpf_prog *xdp_prog,
+			       struct bpf_redirect_info *ri)
+{
+	struct xdp_sock *xsk = ri->xsk;
+	int err;
+
+	ri->xsk = NULL;
+	ri->xsk_to_flush = xsk;
+
+	err = xsk_attached_rcv(xsk, xdp);
+	if (unlikely(err))
+		goto err;
+
+	_trace_xsk_redirect(dev, xdp_prog, xdp);
+	return 0;
+
+err:
+	_trace_xsk_redirect_err(dev, xdp_prog, xdp, err);
+	return err;
+}
+#endif /* CONFIG_XDP_SOCKETS */
+
 int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
 		    struct bpf_prog *xdp_prog)
 {
@@ -3510,6 +3547,10 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
 	if (likely(map))
 		return xdp_do_redirect_map(dev, xdp, xdp_prog, map, ri);
 
+#ifdef CONFIG_XDP_SOCKETS
+	if (ri->xsk)
+		return xdp_do_xsk_redirect(dev, xdp, xdp_prog, ri);
+#endif
 	return xdp_do_redirect_slow(dev, xdp, xdp_prog, ri);
 }
 EXPORT_SYMBOL_GPL(xdp_do_redirect);
@@ -3560,6 +3601,33 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
 	return err;
 }
 
+#ifdef CONFIG_XDP_SOCKETS
+static int xdp_do_generic_xsk_redirect(struct net_device *dev,
+				       struct xdp_buff *xdp,
+				       struct bpf_prog *xdp_prog,
+				       struct sk_buff *skb)
+{
+	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+	struct xdp_sock *xsk = ri->xsk;
+	int err;
+
+	ri->xsk = NULL;
+	ri->xsk_to_flush = NULL;
+
+	err = xsk_generic_attached_rcv(xsk, xdp);
+	if (err)
+		goto err;
+
+	consume_skb(skb);
+	_trace_xsk_redirect(dev, xdp_prog, xdp);
+	return 0;
+
+err:
+	_trace_xsk_redirect_err(dev, xdp_prog, xdp, err);
+	return err;
+}
+#endif /* CONFIG_XDP_SOCKETS */
+
 int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
 			    struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
 {
@@ -3572,6 +3640,11 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
 	if (map)
 		return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog,
 						   map);
+#ifdef CONFIG_XDP_SOCKETS
+	if (ri->xsk)
+		return xdp_do_generic_xsk_redirect(dev, xdp, xdp_prog, skb);
+#endif
+
 	ri->ifindex = 0;
 	fwd = dev_get_by_index_rcu(dev_net(dev), index);
 	if (unlikely(!fwd)) {
@@ -3639,6 +3712,29 @@ static const struct bpf_func_proto bpf_xdp_redirect_map_proto = {
 	.arg3_type      = ARG_ANYTHING,
 };
 
+#ifdef CONFIG_XDP_SOCKETS
+BPF_CALL_1(bpf_xdp_xsk_redirect, struct xdp_buff *, xdp)
+{
+	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+	struct xdp_sock *xsk;
+
+	xsk = READ_ONCE(xdp->rxq->dev->_rx[xdp->rxq->queue_index].xsk);
+	if (xsk) {
+		ri->xsk = xsk;
+		return XDP_REDIRECT;
+	}
+
+	return XDP_PASS;
+}
+
+static const struct bpf_func_proto bpf_xdp_xsk_redirect_proto = {
+	.func           = bpf_xdp_xsk_redirect,
+	.gpl_only       = false,
+	.ret_type       = RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+};
+#endif /* CONFIG_XDP_SOCKETS */
+
 static unsigned long bpf_skb_copy(void *dst_buff, const void *skb,
 				  unsigned long off, unsigned long len)
 {
@@ -5511,6 +5607,10 @@ xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_xdp_sk_lookup_tcp_proto;
 	case BPF_FUNC_sk_release:
 		return &bpf_sk_release_proto;
+#endif
+#ifdef CONFIG_XDP_SOCKETS
+	case BPF_FUNC_xsk_redirect:
+		return &bpf_xdp_xsk_redirect_proto;
 #endif
 	default:
 		return bpf_base_func_proto(func_id);
-- 
2.19.1

Thread overview: 20+ messages
2018-12-17 10:24 [PATCH bpf-next v2 0/7] Add support for XDP_ATTACH Björn Töpel
2018-12-17 10:24 ` [PATCH bpf-next v2 1/7] xsk: simplify AF_XDP socket teardown Björn Töpel
2018-12-17 10:24 ` [PATCH bpf-next v2 2/7] xsk: add XDP_ATTACH bind() flag Björn Töpel
2018-12-17 10:24 ` [PATCH bpf-next v2 3/7] bpf: add bpf_xsk_redirect function Björn Töpel [this message]
2018-12-17 10:24 ` [PATCH bpf-next v2 4/7] tools/bpf: sync kernel uapi bpf.h to tools directory Björn Töpel
2018-12-17 10:24 ` [PATCH bpf-next v2 5/7] libbpf: initial support for builtin BPF programs Björn Töpel
2018-12-17 10:25 ` [PATCH bpf-next v2 6/7] samples: bpf: simplify/cleanup xdpsock Björn Töpel
2018-12-17 10:25 ` [PATCH bpf-next v2 7/7] samples: bpf: add support for XDP_ATTACH to xdpsock Björn Töpel
2018-12-17 12:50 ` [PATCH bpf-next v2 0/7] Add support for XDP_ATTACH Jesper Dangaard Brouer
2018-12-17 13:39   ` Björn Töpel
2018-12-17 14:10     ` Jesper Dangaard Brouer
2018-12-17 14:55       ` Magnus Karlsson
2018-12-17 15:30         ` Björn Töpel
2018-12-18 23:04           ` Alexei Starovoitov
2018-12-19  9:34             ` Björn Töpel
2018-12-19 16:23               ` Jesper Dangaard Brouer
2018-12-20  0:48               ` Alexei Starovoitov
2018-12-18  2:36     ` Jakub Kicinski
2018-12-18  8:59       ` Björn Töpel
2018-12-18 18:18         ` Jakub Kicinski
