netdev.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: "Björn Töpel" <bjorn.topel@gmail.com>
To: bjorn.topel@gmail.com, magnus.karlsson@intel.com,
	magnus.karlsson@gmail.com, ast@kernel.org, daniel@iogearbox.net,
	netdev@vger.kernel.org
Cc: "Björn Töpel" <bjorn.topel@intel.com>,
	brouer@redhat.com, u9012063@gmail.com, qi.z.zhang@intel.com
Subject: [PATCH bpf-next 3/7] bpf: add bpf_xsk_redirect function
Date: Fri,  7 Dec 2018 12:44:27 +0100	[thread overview]
Message-ID: <20181207114431.18038-4-bjorn.topel@gmail.com> (raw)
In-Reply-To: <20181207114431.18038-1-bjorn.topel@gmail.com>

From: Björn Töpel <bjorn.topel@intel.com>

The bpf_xsk_redirect function is a new redirect bpf function, in
addition to bpf_redirect/bpf_redirect_map. If an XDP socket has been
attached to a netdev Rx queue via the XDP_ATTACH bind() option and
bpf_xsk_redirect is called, the packet will be redirected to the
attached socket.

The bpf_xsk_redirect function returns XDP_REDIRECT if there is a
socket attached to the originating queue, otherwise XDP_PASS.

This commit also adds the corresponding trace points for the redirect
call.

Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
---
 include/linux/filter.h     |   4 ++
 include/trace/events/xdp.h |  61 ++++++++++++++++++++++
 include/uapi/linux/bpf.h   |  14 +++++-
 net/core/filter.c          | 100 +++++++++++++++++++++++++++++++++++++
 4 files changed, 178 insertions(+), 1 deletion(-)

diff --git a/include/linux/filter.h b/include/linux/filter.h
index d16deead65c6..691b5c1003c8 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -525,6 +525,10 @@ struct bpf_redirect_info {
 	u32 flags;
 	struct bpf_map *map;
 	struct bpf_map *map_to_flush;
+#ifdef CONFIG_XDP_SOCKETS
+	struct xdp_sock *xsk;
+	struct xdp_sock *xsk_to_flush;
+#endif
 	u32 kern_flags;
 };
 
diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h
index e95cb86b65cf..30f399bd462b 100644
--- a/include/trace/events/xdp.h
+++ b/include/trace/events/xdp.h
@@ -158,6 +158,67 @@ struct _bpf_dtab_netdev {
 	 trace_xdp_redirect_map_err(dev, xdp, devmap_ifindex(fwd, map),	\
 				    err, map, idx)
 
+DECLARE_EVENT_CLASS(xsk_redirect_template,
+
+	TP_PROTO(const struct net_device *dev,
+		 const struct bpf_prog *xdp,
+		 int err,
+		 struct xdp_buff *xbuff),
+
+	TP_ARGS(dev, xdp, err, xbuff),
+
+	TP_STRUCT__entry(
+		__field(int, prog_id)
+		__field(u32, act)
+		__field(int, ifindex)
+		__field(int, err)
+		__field(u32, queue_index)
+		__field(enum xdp_mem_type, mem_type)
+	),
+
+	TP_fast_assign(
+		__entry->prog_id	= xdp->aux->id;
+		__entry->act		= XDP_REDIRECT;
+		__entry->ifindex	= dev->ifindex;
+		__entry->err		= err;
+		__entry->queue_index	= xbuff->rxq->queue_index;
+		__entry->mem_type	= xbuff->rxq->mem.type;
+	),
+
+	TP_printk("prog_id=%d action=%s ifindex=%d err=%d queue_index=%d"
+		  " mem_type=%d",
+		  __entry->prog_id,
+		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
+		  __entry->ifindex,
+		  __entry->err,
+		  __entry->queue_index,
+		  __entry->mem_type)
+);
+
+DEFINE_EVENT(xsk_redirect_template, xsk_redirect,
+	TP_PROTO(const struct net_device *dev,
+		 const struct bpf_prog *xdp,
+		 int err,
+		 struct xdp_buff *xbuff),
+
+	TP_ARGS(dev, xdp, err, xbuff)
+);
+
+DEFINE_EVENT(xsk_redirect_template, xsk_redirect_err,
+	TP_PROTO(const struct net_device *dev,
+		 const struct bpf_prog *xdp,
+		 int err,
+		 struct xdp_buff *xbuff),
+
+	TP_ARGS(dev, xdp, err, xbuff)
+);
+
+#define _trace_xsk_redirect(dev, xdp, xbuff)		\
+	 trace_xsk_redirect(dev, xdp, 0, xbuff)
+
+#define _trace_xsk_redirect_err(dev, xdp, xbuff, err)	\
+	 trace_xsk_redirect_err(dev, xdp, err, xbuff)
+
 TRACE_EVENT(xdp_cpumap_kthread,
 
 	TP_PROTO(int map_id, unsigned int processed,  unsigned int drops,
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index a84fd232d934..2912d87a39ba 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -2298,6 +2298,17 @@ union bpf_attr {
  *		payload and/or *pop* value being to large.
  *	Return
  *		0 on success, or a negative error in case of failure.
+ *
+ * int bpf_xsk_redirect(struct xdp_buff *xdp_md)
+ *	 Description
+ *		Redirect the packet to the attached XDP socket, if any.
+ *		An XDP socket can be attached to a network interface Rx
+ *		queue by passing the XDP_ATTACH option at bind point of
+ *		the socket.
+ *
+ *	Return
+ *		**XDP_REDIRECT** if there is an XDP socket attached to the Rx
+ *		queue receiving the frame, otherwise **XDP_PASS**.
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -2391,7 +2402,8 @@ union bpf_attr {
 	FN(map_pop_elem),		\
 	FN(map_peek_elem),		\
 	FN(msg_push_data),		\
-	FN(msg_pop_data),
+	FN(msg_pop_data),		\
+	FN(xsk_redirect),
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
diff --git a/net/core/filter.c b/net/core/filter.c
index 3d54af4c363d..86c5fe5a9ec0 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3415,6 +3415,17 @@ static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
 	return 0;
 }
 
+static void xdp_do_flush_xsk(struct bpf_redirect_info *ri)
+{
+#ifdef CONFIG_XDP_SOCKETS
+	struct xdp_sock *xsk = ri->xsk_to_flush;
+
+	ri->xsk_to_flush = NULL;
+	if (xsk)
+		xsk_flush(xsk);
+#endif
+}
+
 void xdp_do_flush_map(void)
 {
 	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
@@ -3436,6 +3447,8 @@ void xdp_do_flush_map(void)
 			break;
 		}
 	}
+
+	xdp_do_flush_xsk(ri);
 }
 EXPORT_SYMBOL_GPL(xdp_do_flush_map);
 
@@ -3501,6 +3514,30 @@ static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
 	return err;
 }
 
+#ifdef CONFIG_XDP_SOCKETS
+static int xdp_do_xsk_redirect(struct net_device *dev, struct xdp_buff *xdp,
+			       struct bpf_prog *xdp_prog,
+			       struct bpf_redirect_info *ri)
+{
+	struct xdp_sock *xsk = ri->xsk;
+	int err;
+
+	ri->xsk = NULL;
+	ri->xsk_to_flush = xsk;
+
+	err = xsk_attached_rcv(xsk, xdp);
+	if (unlikely(err))
+		goto err;
+
+	_trace_xsk_redirect(dev, xdp_prog, xdp);
+	return 0;
+
+err:
+	_trace_xsk_redirect_err(dev, xdp_prog, xdp, err);
+	return err;
+}
+#endif /* CONFIG_XDP_SOCKETS */
+
 int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
 		    struct bpf_prog *xdp_prog)
 {
@@ -3510,6 +3547,10 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
 	if (likely(map))
 		return xdp_do_redirect_map(dev, xdp, xdp_prog, map, ri);
 
+#ifdef CONFIG_XDP_SOCKETS
+	if (ri->xsk)
+		return xdp_do_xsk_redirect(dev, xdp, xdp_prog, ri);
+#endif
 	return xdp_do_redirect_slow(dev, xdp, xdp_prog, ri);
 }
 EXPORT_SYMBOL_GPL(xdp_do_redirect);
@@ -3560,6 +3601,33 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
 	return err;
 }
 
+#ifdef CONFIG_XDP_SOCKETS
+static int xdp_do_generic_xsk_redirect(struct net_device *dev,
+				       struct xdp_buff *xdp,
+				       struct bpf_prog *xdp_prog,
+				       struct sk_buff *skb)
+{
+	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+	struct xdp_sock *xsk = ri->xsk;
+	int err;
+
+	ri->xsk = NULL;
+	ri->xsk_to_flush = NULL;
+
+	err = xsk_generic_attached_rcv(xsk, xdp);
+	if (err)
+		goto err;
+
+	consume_skb(skb);
+	_trace_xsk_redirect(dev, xdp_prog, xdp);
+	return 0;
+
+err:
+	_trace_xsk_redirect_err(dev, xdp_prog, xdp, err);
+	return err;
+}
+#endif /* CONFIG_XDP_SOCKETS */
+
 int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
 			    struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
 {
@@ -3572,6 +3640,11 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
 	if (map)
 		return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog,
 						   map);
+#ifdef CONFIG_XDP_SOCKETS
+	if (ri->xsk)
+		return xdp_do_generic_xsk_redirect(dev, xdp, xdp_prog, skb);
+#endif
+
 	ri->ifindex = 0;
 	fwd = dev_get_by_index_rcu(dev_net(dev), index);
 	if (unlikely(!fwd)) {
@@ -3639,6 +3712,29 @@ static const struct bpf_func_proto bpf_xdp_redirect_map_proto = {
 	.arg3_type      = ARG_ANYTHING,
 };
 
+#ifdef CONFIG_XDP_SOCKETS
+BPF_CALL_1(bpf_xdp_xsk_redirect, struct xdp_buff *, xdp)
+{
+	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+	struct xdp_sock *xsk;
+
+	xsk = READ_ONCE(xdp->rxq->dev->_rx[xdp->rxq->queue_index].xsk);
+	if (xsk) {
+		ri->xsk = xsk;
+		return XDP_REDIRECT;
+	}
+
+	return XDP_PASS;
+}
+
+static const struct bpf_func_proto bpf_xdp_xsk_redirect_proto = {
+	.func           = bpf_xdp_xsk_redirect,
+	.gpl_only       = false,
+	.ret_type       = RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+};
+#endif /* CONFIG_XDP_SOCKETS */
+
 static unsigned long bpf_skb_copy(void *dst_buff, const void *skb,
 				  unsigned long off, unsigned long len)
 {
@@ -5510,6 +5606,10 @@ xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_xdp_sk_lookup_tcp_proto;
 	case BPF_FUNC_sk_release:
 		return &bpf_sk_release_proto;
+#endif
+#ifdef CONFIG_XDP_SOCKETS
+	case BPF_FUNC_xsk_redirect:
+		return &bpf_xdp_xsk_redirect_proto;
 #endif
 	default:
 		return bpf_base_func_proto(func_id);
-- 
2.19.1

  parent reply	other threads:[~2018-12-07 11:45 UTC|newest]

Thread overview: 24+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2018-12-07 11:44 [PATCH bpf-next 0/7] Add XDP_ATTACH bind() flag to AF_XDP sockets Björn Töpel
2018-12-07 11:44 ` [PATCH bpf-next 1/7] xsk: simplify AF_XDP socket teardown Björn Töpel
2018-12-07 11:44 ` [PATCH bpf-next 2/7] xsk: add XDP_ATTACH bind() flag Björn Töpel
2018-12-07 11:44 ` Björn Töpel [this message]
2018-12-07 11:44 ` [PATCH bpf-next 4/7] bpf: prepare for builtin bpf program Björn Töpel
2018-12-07 11:44 ` [PATCH bpf-next 5/7] bpf: add function to load builtin BPF program Björn Töpel
2018-12-07 11:44 ` [PATCH bpf-next 6/7] xsk: load a builtin XDP program on XDP_ATTACH Björn Töpel
2018-12-10  2:17   ` Jakub Kicinski
2018-12-10  7:29     ` Björn Töpel
2018-12-07 13:42 ` [PATCH bpf-next 0/7] Add XDP_ATTACH bind() flag to AF_XDP sockets Jesper Dangaard Brouer
2018-12-07 14:01   ` Björn Töpel
2018-12-08 14:52     ` Jesper Dangaard Brouer
2018-12-08 18:43       ` Björn Töpel
2018-12-08 20:42         ` Jesper Dangaard Brouer
2018-12-07 15:27   ` Björn Töpel
2018-12-07 21:21 ` Alexei Starovoitov
2018-12-08  9:31   ` Björn Töpel
2018-12-08 15:12   ` Jesper Dangaard Brouer
2018-12-08 16:55     ` Andrew Lunn
2018-12-08 20:37       ` Jesper Dangaard Brouer
2018-12-08 20:48         ` Andrew Lunn
2018-12-08 18:50     ` Björn Töpel
     [not found] ` <20181207114431.18038-8-bjorn.topel@gmail.com>
2018-12-08 10:08   ` [PATCH bpf-next 7/7] samples: bpf: add support for XDP_ATTACH to xdpsock Zhang, Qi Z
2018-12-10 14:01 ` [PATCH bpf-next 0/7] Add XDP_ATTACH bind() flag to AF_XDP sockets Björn Töpel

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20181207114431.18038-4-bjorn.topel@gmail.com \
    --to=bjorn.topel@gmail.com \
    --cc=ast@kernel.org \
    --cc=bjorn.topel@intel.com \
    --cc=brouer@redhat.com \
    --cc=daniel@iogearbox.net \
    --cc=magnus.karlsson@gmail.com \
    --cc=magnus.karlsson@intel.com \
    --cc=netdev@vger.kernel.org \
    --cc=qi.z.zhang@intel.com \
    --cc=u9012063@gmail.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).