From: Lorenzo Bianconi <lorenzo@kernel.org>
To: bpf@vger.kernel.org, netdev@vger.kernel.org
Cc: lorenzo.bianconi@redhat.com, davem@davemloft.net,
	kuba@kernel.org, ast@kernel.org, daniel@iogearbox.net,
	shayagr@amazon.com, john.fastabend@gmail.com, dsahern@kernel.org,
	brouer@redhat.com, echaudro@redhat.com, jasowang@redhat.com,
	alexander.duyck@gmail.com, saeed@kernel.org,
	maciej.fijalkowski@intel.com, magnus.karlsson@intel.com,
	tirthendu.sarkar@intel.com, toke@redhat.com
Subject: [PATCH v12 bpf-next 10/18] bpf: add multi-buff support to the bpf_xdp_adjust_tail() API
Date: Fri, 20 Aug 2021 17:40:23 +0200	[thread overview]
Message-ID: <a86c03ed84a6ca275e65d44aef8abaff890f7e3f.1629473233.git.lorenzo@kernel.org> (raw)
In-Reply-To: <cover.1629473233.git.lorenzo@kernel.org>

From: Eelco Chaudron <echaudro@redhat.com>

This change adds support for growing and shrinking the tail of XDP multi-buff packets.

When called on a multi-buffer packet with a grow request, it will always
work on the last fragment of the packet. So the maximum grow size is the
last fragment's tailroom, i.e. no new buffer will be allocated.

When shrinking, it works from the last fragment down towards the base
buffer, depending on the requested shrink size. Note that any fragment
emptied by a shrink is freed, so it is not possible to grow back to the
original size afterwards.
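
For illustration only (not part of this patch), a minimal XDP program
sketch showing how the helper behaves with this change; the program name,
section names and the 4-byte deltas are arbitrary example values:

  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  SEC("xdp")
  int xdp_adjust_tail_example(struct xdp_md *ctx)
  {
  	/* Shrink the packet by 4 bytes: on a multi-buff packet this
  	 * starts from the last fragment and frees any fragment that
  	 * becomes empty.
  	 */
  	if (bpf_xdp_adjust_tail(ctx, -4))
  		return XDP_DROP;

  	/* Grow the tail back by 4 bytes: this only succeeds if the
  	 * last remaining fragment (or the base buffer) still has
  	 * enough tailroom, since no new buffer is allocated.
  	 */
  	if (bpf_xdp_adjust_tail(ctx, 4))
  		return XDP_DROP;

  	return XDP_PASS;
  }

  char _license[] SEC("license") = "GPL";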

Co-developed-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Eelco Chaudron <echaudro@redhat.com>
---
 include/net/xdp.h |  9 +++++++
 net/core/filter.c | 60 +++++++++++++++++++++++++++++++++++++++++++++++
 net/core/xdp.c    |  5 ++--
 3 files changed, 72 insertions(+), 2 deletions(-)

diff --git a/include/net/xdp.h b/include/net/xdp.h
index d66e9877d773..cdaecf8d4d61 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -145,6 +145,13 @@ xdp_get_shared_info_from_buff(struct xdp_buff *xdp)
 	return (struct skb_shared_info *)xdp_data_hard_end(xdp);
 }
 
+static inline unsigned int xdp_get_frag_tailroom(const skb_frag_t *frag)
+{
+	struct page *page = skb_frag_page(frag);
+
+	return page_size(page) - skb_frag_size(frag) - skb_frag_off(frag);
+}
+
 struct xdp_frame {
 	void *data;
 	u16 len;
@@ -290,6 +297,8 @@ struct xdp_frame *xdp_convert_buff_to_frame(struct xdp_buff *xdp)
 	return xdp_frame;
 }
 
+void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
+		  struct xdp_buff *xdp);
 void xdp_return_frame(struct xdp_frame *xdpf);
 void xdp_return_frame_rx_napi(struct xdp_frame *xdpf);
 void xdp_return_buff(struct xdp_buff *xdp);
diff --git a/net/core/filter.c b/net/core/filter.c
index 59b8f5050180..e6a589705813 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3818,11 +3818,71 @@ static const struct bpf_func_proto bpf_xdp_adjust_head_proto = {
 	.arg2_type	= ARG_ANYTHING,
 };
 
+static int bpf_xdp_mb_adjust_tail(struct xdp_buff *xdp, int offset)
+{
+	struct skb_shared_info *sinfo;
+
+	sinfo = xdp_get_shared_info_from_buff(xdp);
+	if (offset >= 0) {
+		skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags - 1];
+		int size;
+
+		if (unlikely(offset > xdp_get_frag_tailroom(frag)))
+			return -EINVAL;
+
+		size = skb_frag_size(frag);
+		memset(skb_frag_address(frag) + size, 0, offset);
+		skb_frag_size_set(frag, size + offset);
+		sinfo->xdp_frags_size += offset;
+	} else {
+		int i, n_frags_free = 0, len_free = 0, tlen_free = 0;
+
+		offset = abs(offset);
+		if (unlikely(offset > ((int)(xdp->data_end - xdp->data) +
+				       sinfo->xdp_frags_size - ETH_HLEN)))
+			return -EINVAL;
+
+		for (i = sinfo->nr_frags - 1; i >= 0 && offset > 0; i--) {
+			skb_frag_t *frag = &sinfo->frags[i];
+			int size = skb_frag_size(frag);
+			int shrink = min_t(int, offset, size);
+
+			len_free += shrink;
+			offset -= shrink;
+
+			if (unlikely(size == shrink)) {
+				struct page *page = skb_frag_page(frag);
+
+				__xdp_return(page_address(page), &xdp->rxq->mem,
+					     false, NULL);
+				tlen_free += page_size(page);
+				n_frags_free++;
+			} else {
+				skb_frag_size_set(frag, size - shrink);
+				break;
+			}
+		}
+		sinfo->nr_frags -= n_frags_free;
+		sinfo->xdp_frags_size -= len_free;
+		sinfo->xdp_frags_tsize -= tlen_free;
+
+		if (unlikely(offset > 0)) {
+			xdp_buff_clear_mb(xdp);
+			xdp->data_end -= offset;
+		}
+	}
+
+	return 0;
+}
+
 BPF_CALL_2(bpf_xdp_adjust_tail, struct xdp_buff *, xdp, int, offset)
 {
 	void *data_hard_end = xdp_data_hard_end(xdp); /* use xdp->frame_sz */
 	void *data_end = xdp->data_end + offset;
 
+	if (unlikely(xdp_buff_is_mb(xdp)))
+		return bpf_xdp_mb_adjust_tail(xdp, offset);
+
 	/* Notice that xdp_data_hard_end have reserved some tailroom */
 	if (unlikely(data_end > data_hard_end))
 		return -EINVAL;
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 1346fb8b3f50..a71cdea75306 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -339,8 +339,8 @@ EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
  * is used for those calls sites.  Thus, allowing for faster recycling
  * of xdp_frames/pages in those cases.
  */
-static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
-			 struct xdp_buff *xdp)
+void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
+		  struct xdp_buff *xdp)
 {
 	struct xdp_mem_allocator *xa;
 	struct page *page;
@@ -373,6 +373,7 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
 		break;
 	}
 }
+EXPORT_SYMBOL_GPL(__xdp_return);
 
 void xdp_return_frame(struct xdp_frame *xdpf)
 {
-- 
2.31.1

