All of lore.kernel.org
 help / color / mirror / Atom feed
From: Jakub Kicinski <kuba@kernel.org>
To: davem@davemloft.net
Cc: netdev@vger.kernel.org, edumazet@google.com, pabeni@redhat.com,
	willemb@google.com, fw@strlen.de,
	Jakub Kicinski <kuba@kernel.org>
Subject: [PATCH net-next 2/3] net: skbuff: cache one skb_ext for use by GRO
Date: Tue, 14 Feb 2023 19:43:54 -0800	[thread overview]
Message-ID: <20230215034355.481925-3-kuba@kernel.org> (raw)
In-Reply-To: <20230215034355.481925-1-kuba@kernel.org>

On the driver -> GRO path we can avoid thrashing the kmemcache
by holding onto one skb_ext.

Drivers usually report static data, so don't bother trying to
hold onto the skb_ext if the ext has contents which require
a destructor.

With a single flow and SW GRO adding a tc_skb_ext to every
frame costs around 16.6% of performance (21.2Gbps -> 17.6Gbps,
yes it's a relatively slow CPU). Using the cache reduces
the loss to 9.3% (-> 19.2Gbps), although obviously in real
life the recycling will be less effective.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
 include/linux/skbuff.h |  1 +
 net/core/skbuff.c      | 79 +++++++++++++++++++++++++++++++++++++++---
 2 files changed, 75 insertions(+), 5 deletions(-)

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index d5602b15c714..e68cb0a777b9 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -4622,6 +4622,7 @@ struct skb_ext *__skb_ext_alloc(gfp_t flags);
 void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
 		    struct skb_ext *ext);
 void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id);
+void *napi_skb_ext_add(struct sk_buff *skb, enum skb_ext_id id);
 void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id);
 void __skb_ext_put(struct skb_ext *ext);
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 6f0fc1f09536..feb5034b13ad 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -224,6 +224,9 @@ static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp_mask)
 struct napi_alloc_cache {
 	struct page_frag_cache page;
 	struct page_frag_1k page_small;
+#ifdef CONFIG_SKB_EXTENSIONS
+	struct skb_ext *ext;
+#endif
 	unsigned int skb_count;
 	void *skb_cache[NAPI_SKB_CACHE_SIZE];
 };
@@ -1228,6 +1231,43 @@ static void napi_skb_cache_put(struct sk_buff *skb)
 	}
 }
 
+static bool skb_ext_needs_destruct(const struct skb_ext *ext)
+{
+	bool needs_destruct = false;
+
+#ifdef CONFIG_XFRM
+	needs_destruct |= __skb_ext_exist(ext, SKB_EXT_SEC_PATH);
+#endif
+#ifdef CONFIG_MCTP_FLOWS
+	needs_destruct |= __skb_ext_exist(ext, SKB_EXT_MCTP);
+#endif
+
+	return needs_destruct;
+}
+
+static void napi_skb_ext_put(struct sk_buff *skb)
+{
+#ifdef CONFIG_SKB_EXTENSIONS
+	struct skb_ext *ext;
+
+	if (!skb->active_extensions)
+		return;
+
+	ext = skb->extensions;
+	if (!skb_ext_needs_destruct(ext)) {
+		struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+
+		if (refcount_read(&ext->refcnt) == 1 && !nc->ext) {
+			kasan_poison_object_data(skbuff_ext_cache, ext);
+			nc->ext = ext;
+			return;
+		}
+	}
+
+	__skb_ext_put(ext);
+#endif
+}
+
 void __kfree_skb_defer(struct sk_buff *skb)
 {
 	skb_release_all(skb, SKB_DROP_REASON_NOT_SPECIFIED);
@@ -1239,7 +1279,7 @@ void napi_skb_free_stolen_head(struct sk_buff *skb)
 	if (unlikely(skb->slow_gro)) {
 		nf_reset_ct(skb);
 		skb_dst_drop(skb);
-		skb_ext_put(skb);
+		napi_skb_ext_put(skb);
 		skb_orphan(skb);
 		skb->slow_gro = 0;
 	}
@@ -6599,6 +6639,12 @@ static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id)
 	return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE);
 }
 
+static void skb_ext_init(struct skb_ext *new)
+{
+	memset(new->offset, 0, sizeof(new->offset));
+	refcount_set(&new->refcnt, 1);
+}
+
 /**
  * __skb_ext_alloc - allocate a new skb extensions storage
  *
@@ -6612,10 +6658,8 @@ struct skb_ext *__skb_ext_alloc(gfp_t flags)
 {
 	struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, flags);
 
-	if (new) {
-		memset(new->offset, 0, sizeof(new->offset));
-		refcount_set(&new->refcnt, 1);
-	}
+	if (new)
+		skb_ext_init(new);
 
 	return new;
 }
@@ -6731,6 +6775,31 @@ void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id)
 }
 EXPORT_SYMBOL(skb_ext_add);
 
+void *napi_skb_ext_add(struct sk_buff *skb, enum skb_ext_id id)
+{
+	struct skb_ext *new = NULL;
+
+	if (!skb->active_extensions) {
+		struct napi_alloc_cache *nc;
+
+		nc = this_cpu_ptr(&napi_alloc_cache);
+		new = nc->ext;
+		if (new) {
+			kasan_unpoison_object_data(skbuff_ext_cache, new);
+			nc->ext = NULL;
+		} else {
+			new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC);
+			if (!new)
+				return NULL;
+		}
+
+		skb_ext_init(new);
+	}
+
+	return skb_ext_add_finalize(skb, id, new);
+}
+EXPORT_SYMBOL(napi_skb_ext_add);
+
 #ifdef CONFIG_XFRM
 static void skb_ext_put_sp(struct sec_path *sp)
 {
-- 
2.39.1


  parent reply	other threads:[~2023-02-15  3:44 UTC|newest]

Thread overview: 19+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-02-15  3:43 [PATCH net-next 0/3] net: skbuff: cache one skb_ext for use by GRO Jakub Kicinski
2023-02-15  3:43 ` [PATCH net-next 1/3] net: skb: carve the allocation out of skb_ext_add() Jakub Kicinski
2023-02-15  3:43 ` Jakub Kicinski [this message]
2023-02-15  8:41   ` [PATCH net-next 2/3] net: skbuff: cache one skb_ext for use by GRO Paolo Abeni
2023-02-15 17:45     ` Jakub Kicinski
2023-02-15 18:08       ` Alexander Lobakin
2023-02-15 19:08         ` Paolo Abeni
2023-02-15 15:37   ` Edward Cree
2023-02-15 16:17     ` Alexander Lobakin
2023-02-15 17:52       ` Jakub Kicinski
2023-02-15 18:01         ` Alexander Lobakin
2023-02-15 18:20           ` Jakub Kicinski
2023-02-16 12:04             ` Alexander Lobakin
2023-02-15  3:43 ` [PATCH net-next 3/3] net: create and use NAPI version of tc_skb_ext_alloc() Jakub Kicinski
2023-02-15 16:50   ` Jamal Hadi Salim
2023-02-15 17:03     ` Jiri Pirko
2023-02-15 18:36       ` Jamal Hadi Salim
2023-02-15 17:35     ` Jakub Kicinski
2023-02-15 18:38       ` Jamal Hadi Salim

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20230215034355.481925-3-kuba@kernel.org \
    --to=kuba@kernel.org \
    --cc=davem@davemloft.net \
    --cc=edumazet@google.com \
    --cc=fw@strlen.de \
    --cc=netdev@vger.kernel.org \
    --cc=pabeni@redhat.com \
    --cc=willemb@google.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.