From: <sameehj@amazon.com>
To: <davem@davemloft.net>, <netdev@vger.kernel.org>
Cc: Sameeh Jubran <sameehj@amazon.com>, <dwmw@amazon.com>,
	<zorik@amazon.com>, <matua@amazon.com>, <saeedb@amazon.com>,
	<msw@amazon.com>, <aliguori@amazon.com>, <nafea@amazon.com>,
	<gtzalik@amazon.com>, <netanel@amazon.com>, <alisaidi@amazon.com>,
	<benh@amazon.com>, <akiyano@amazon.com>, <ndagan@amazon.com>,
	<ast@kernel.org>, <daniel@iogearbox.net>, <kafai@fb.com>,
	<songliubraving@fb.com>, <yhs@fb.com>, <andriin@fb.com>,
	<john.fastabend@gmail.com>, <kpsingh@chromium.org>,
	<kuba@kernel.org>, <hawk@kernel.org>, <shayagr@amazon.com>,
	<lorenzo@kernel.org>
Subject: [PATCH RFC net-next 1/2] xdp: helpers: add multibuffer support
Date: Mon, 27 Jul 2020 12:56:52 +0000
Message-ID: <20200727125653.31238-2-sameehj@amazon.com>
In-Reply-To: <20200727125653.31238-1-sameehj@amazon.com>

From: Sameeh Jubran <sameehj@amazon.com>

The implementation is based on the multi-buffer design draft [0] by
Jesper Dangaard Brouer.

Provide two helpers; a usage sketch and the assumed buffer layout follow below:

* bpf_xdp_get_frag()
* bpf_xdp_get_frag_count()
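
As an illustration, a minimal XDP program using the two helpers could look
like the sketch below. This is not part of the patch: it assumes the helper
prototypes are generated into bpf_helper_defs.h once the series lands, and
the program name, section name and MAX_FRAGS bound are made up for the
example.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define MAX_FRAGS 17	/* upper bound for the verifier; MAX_SKB_FRAGS on common configs */

SEC("xdp")
int xdp_frag_walk(struct xdp_md *ctx)
{
	__u32 size = 0, offset = 0, total = 0;
	int i, nr = bpf_xdp_get_frag_count(ctx);

	for (i = 0; i < MAX_FRAGS; i++) {	/* bounded loop for the verifier */
		if (i >= nr)
			break;
		if (bpf_xdp_get_frag(ctx, i, &size, &offset))
			break;
		total += size;	/* bytes carried outside the linear area */
	}

	bpf_printk("frags=%d extra bytes=%u", nr, total);
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";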

[0] xdp mb design - https://github.com/xdp-project/xdp-project/blob/master/areas/core/xdp-multi-buffer01-design.org
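
The helpers assume the buffer layout described in [0]: the driver places a
struct skb_shared_info at xdp_data_hard_end(xdp), fills one frag per extra
buffer, and marks the xdp_buff as multi-buffer. A minimal producer-side
sketch is shown below; the mb bit comes from the companion multi-buffer
series and example_add_frag() is an illustrative name, neither is added by
this patch.

static void example_add_frag(struct xdp_buff *xdp, struct page *page,
			     unsigned int off, unsigned int len)
{
	/* nr_frags must have been zeroed when the first buffer was set up */
	struct skb_shared_info *skb_info = xdp_data_hard_end(xdp);
	skb_frag_t *frag = &skb_info->frags[skb_info->nr_frags++];

	__skb_frag_set_page(frag, page);
	skb_frag_off_set(frag, off);
	skb_frag_size_set(frag, len);

	xdp->mb = 1;	/* mark the xdp_buff as multi-buffer */
}
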
Signed-off-by: Sameeh Jubran <sameehj@amazon.com>
---
 include/uapi/linux/bpf.h       | 13 +++++++++
 net/core/filter.c              | 60 ++++++++++++++++++++++++++++++++++++++++++
 tools/include/uapi/linux/bpf.h | 13 +++++++++
 3 files changed, 86 insertions(+)

diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 5e3863899..3484e481a 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -3320,6 +3320,17 @@ union bpf_attr {
  *		A non-negative value equal to or less than *size* on success,
  *		or a negative error in case of failure.
  *
+ * int bpf_xdp_get_frag(struct xdp_md *xdp_md, u32 frag_index, u32 *size, u32 *offset)
+ * 	Description
+ * 		Store the size and page offset of frag *frag_index* in *size* and *offset*.
+ * 	Return
+ * 		0 on success, or a negative error in case of failure.
+ *
+ * int bpf_xdp_get_frag_count(struct xdp_md *xdp_md)
+ * 	Description
+ * 		Get the total number of frags for a given packet.
+ * 	Return
+ * 		The number of frags, or 0 if the packet is not multi-buffer.
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -3464,6 +3475,8 @@ union bpf_attr {
 	FN(skc_to_tcp_request_sock),	\
 	FN(skc_to_udp6_sock),		\
 	FN(get_task_stack),		\
+	FN(xdp_get_frag),		\
+	FN(xdp_get_frag_count),		\
 	/* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
diff --git a/net/core/filter.c b/net/core/filter.c
index bdd2382e6..93e790d7b 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3452,6 +3452,62 @@ static const struct bpf_func_proto bpf_xdp_adjust_head_proto = {
 	.arg2_type	= ARG_ANYTHING,
 };
 
+static inline bool __bpf_xdp_has_frags(struct xdp_buff *xdp)
+{
+	return xdp->mb != 0;
+}
+
+BPF_CALL_1(bpf_xdp_get_frag_count, struct xdp_buff *, xdp)
+{
+	return __bpf_xdp_has_frags(xdp) ?
+		((struct skb_shared_info *)xdp_data_hard_end(xdp))->nr_frags : 0;
+}
+
+const struct bpf_func_proto bpf_xdp_get_frag_count_proto = {
+	.func		= bpf_xdp_get_frag_count,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+};
+
+BPF_CALL_4(bpf_xdp_get_frag, struct xdp_buff *, xdp, u32, frag_index,
+	   u32 *, size, u32 *, offset)
+{
+	struct skb_shared_info *skb_info;
+	skb_frag_t *frags;
+	u32 frag_size;
+	u16 nr_frags;
+
+	if (!__bpf_xdp_has_frags(xdp))
+		return -EINVAL;
+
+	skb_info = xdp_data_hard_end(xdp);
+	frags = skb_info->frags;
+	nr_frags = skb_info->nr_frags;
+
+	if (frag_index >= nr_frags)
+		return -EINVAL;
+
+	frag_size = skb_frag_size(&frags[frag_index]);
+
+	if (size)
+		*size = frag_size;
+	if (offset)
+		*offset = skb_frag_off(&frags[frag_index]);
+
+	return 0;
+}
+
+const struct bpf_func_proto bpf_xdp_get_frag_proto = {
+	.func		= bpf_xdp_get_frag,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_ANYTHING,
+	.arg3_type	= ARG_PTR_TO_INT,
+	.arg4_type	= ARG_PTR_TO_INT,
+};
+
 BPF_CALL_2(bpf_xdp_adjust_tail, struct xdp_buff *, xdp, int, offset)
 {
 	void *data_hard_end = xdp_data_hard_end(xdp); /* use xdp->frame_sz */
@@ -6475,6 +6531,10 @@ xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_xdp_redirect_map_proto;
 	case BPF_FUNC_xdp_adjust_tail:
 		return &bpf_xdp_adjust_tail_proto;
+	case BPF_FUNC_xdp_get_frag_count:
+		return &bpf_xdp_get_frag_count_proto;
+	case BPF_FUNC_xdp_get_frag:
+		return &bpf_xdp_get_frag_proto;
 	case BPF_FUNC_fib_lookup:
 		return &bpf_xdp_fib_lookup_proto;
 #ifdef CONFIG_INET
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 5e3863899..3484e481a 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -3320,6 +3320,17 @@ union bpf_attr {
  *		A non-negative value equal to or less than *size* on success,
  *		or a negative error in case of failure.
  *
+ * int bpf_xdp_get_frag(struct xdp_md *xdp_md, u32 frag_index, u32 *size, u32 *offset)
+ * 	Description
+ * 		Store the size and page offset of frag *frag_index* in *size* and *offset*.
+ * 	Return
+ * 		0 on success, or a negative error in case of failure.
+ *
+ * int bpf_xdp_get_frag_count(struct xdp_md *xdp_md)
+ * 	Description
+ * 		Get the total number of frags for a given packet.
+ * 	Return
+ * 		The number of frags, or 0 if the packet is not multi-buffer.
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -3464,6 +3475,8 @@ union bpf_attr {
 	FN(skc_to_tcp_request_sock),	\
 	FN(skc_to_udp6_sock),		\
 	FN(get_task_stack),		\
+	FN(xdp_get_frag),		\
+	FN(xdp_get_frag_count),		\
 	/* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
-- 
2.16.6

