* [PATCH net-next] net: switch to storing KCOV handle directly in sk_buff
@ 2020-11-25 17:34 Marco Elver
  2020-11-25 20:43 ` Jakub Kicinski
  0 siblings, 1 reply; 6+ messages in thread
From: Marco Elver @ 2020-11-25 17:34 UTC (permalink / raw)
  To: elver, kuba, davem
  Cc: johannes, a.nogikh, andreyknvl, dvyukov, linux-kernel, netdev,
	linux-wireless, idosch, fw, willemb

It turns out that usage of skb extensions can cause memory leaks. Ido
Schimmel reported: "[...] there are instances that blindly overwrite
'skb->extensions' by invoking skb_copy_header() after __alloc_skb()."

Therefore, give up on using skb extensions for the KCOV handle, and
instead store kcov_handle directly in sk_buff.

Fixes: 6370cc3bbd8a ("net: add kcov handle to skb extensions")
Fixes: 85ce50d337d1 ("net: kcov: don't select SKB_EXTENSIONS when there is no NET")
Fixes: 97f53a08cba1 ("net: linux/skbuff.h: combine SKB_EXTENSIONS + KCOV handling")
Link: https://lore.kernel.org/linux-wireless/20201121160941.GA485907@shredder.lan/
Reported-by: Ido Schimmel <idosch@idosch.org>
Signed-off-by: Marco Elver <elver@google.com>
---
 include/linux/skbuff.h | 37 +++++++++++++------------------------
 lib/Kconfig.debug      |  1 -
 net/core/skbuff.c      | 12 +-----------
 3 files changed, 14 insertions(+), 36 deletions(-)

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 0a1239819fd2..333bcdc39635 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -701,6 +701,7 @@ typedef unsigned char *sk_buff_data_t;
  *	@transport_header: Transport layer header
  *	@network_header: Network layer header
  *	@mac_header: Link layer header
+ *	@kcov_handle: KCOV remote handle for remote coverage collection
  *	@tail: Tail pointer
  *	@end: End pointer
  *	@head: Head of buffer
@@ -904,6 +905,10 @@ struct sk_buff {
 	__u16			network_header;
 	__u16			mac_header;
 
+#ifdef CONFIG_KCOV
+	u64			kcov_handle;
+#endif
+
 	/* private: */
 	__u32			headers_end[0];
 	/* public: */
@@ -4150,9 +4155,6 @@ enum skb_ext_id {
 #endif
 #if IS_ENABLED(CONFIG_MPTCP)
 	SKB_EXT_MPTCP,
-#endif
-#if IS_ENABLED(CONFIG_KCOV)
-	SKB_EXT_KCOV_HANDLE,
 #endif
 	SKB_EXT_NUM, /* must be last */
 };
@@ -4608,35 +4610,22 @@ static inline void skb_reset_redirect(struct sk_buff *skb)
 #endif
 }
 
-#if IS_ENABLED(CONFIG_KCOV) && IS_ENABLED(CONFIG_SKB_EXTENSIONS)
 static inline void skb_set_kcov_handle(struct sk_buff *skb,
 				       const u64 kcov_handle)
 {
-	/* Do not allocate skb extensions only to set kcov_handle to zero
-	 * (as it is zero by default). However, if the extensions are
-	 * already allocated, update kcov_handle anyway since
-	 * skb_set_kcov_handle can be called to zero a previously set
-	 * value.
-	 */
-	if (skb_has_extensions(skb) || kcov_handle) {
-		u64 *kcov_handle_ptr = skb_ext_add(skb, SKB_EXT_KCOV_HANDLE);
-
-		if (kcov_handle_ptr)
-			*kcov_handle_ptr = kcov_handle;
-	}
+#ifdef CONFIG_KCOV
+	skb->kcov_handle = kcov_handle;
+#endif
 }
 
 static inline u64 skb_get_kcov_handle(struct sk_buff *skb)
 {
-	u64 *kcov_handle = skb_ext_find(skb, SKB_EXT_KCOV_HANDLE);
-
-	return kcov_handle ? *kcov_handle : 0;
-}
+#ifdef CONFIG_KCOV
+	return skb->kcov_handle;
 #else
-static inline void skb_set_kcov_handle(struct sk_buff *skb,
-				       const u64 kcov_handle) { }
-static inline u64 skb_get_kcov_handle(struct sk_buff *skb) { return 0; }
-#endif /* CONFIG_KCOV && CONFIG_SKB_EXTENSIONS */
+	return 0;
+#endif
+}
 
 #endif	/* __KERNEL__ */
 #endif	/* _LINUX_SKBUFF_H */
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 826a205ffd1c..1d15cdaf1b89 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1879,7 +1879,6 @@ config KCOV
 	depends on CC_HAS_SANCOV_TRACE_PC || GCC_PLUGINS
 	select DEBUG_FS
 	select GCC_PLUGIN_SANCOV if !CC_HAS_SANCOV_TRACE_PC
-	select SKB_EXTENSIONS if NET
 	help
 	  KCOV exposes kernel code coverage information in a form suitable
 	  for coverage-guided fuzzing (randomized testing).
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index ffe3dcc0ebea..070b1077d976 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -233,6 +233,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	skb->end = skb->tail + size;
 	skb->mac_header = (typeof(skb->mac_header))~0U;
 	skb->transport_header = (typeof(skb->transport_header))~0U;
+	skb_set_kcov_handle(skb, kcov_common_handle());
 
 	/* make sure we initialize shinfo sequentially */
 	shinfo = skb_shinfo(skb);
@@ -249,9 +250,6 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 
 		fclones->skb2.fclone = SKB_FCLONE_CLONE;
 	}
-
-	skb_set_kcov_handle(skb, kcov_common_handle());
-
 out:
 	return skb;
 nodata:
@@ -285,8 +283,6 @@ static struct sk_buff *__build_skb_around(struct sk_buff *skb,
 	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
 	atomic_set(&shinfo->dataref, 1);
 
-	skb_set_kcov_handle(skb, kcov_common_handle());
-
 	return skb;
 }
 
@@ -4208,9 +4204,6 @@ static const u8 skb_ext_type_len[] = {
 #if IS_ENABLED(CONFIG_MPTCP)
 	[SKB_EXT_MPTCP] = SKB_EXT_CHUNKSIZEOF(struct mptcp_ext),
 #endif
-#if IS_ENABLED(CONFIG_KCOV)
-	[SKB_EXT_KCOV_HANDLE] = SKB_EXT_CHUNKSIZEOF(u64),
-#endif
 };
 
 static __always_inline unsigned int skb_ext_total_length(void)
@@ -4227,9 +4220,6 @@ static __always_inline unsigned int skb_ext_total_length(void)
 #endif
 #if IS_ENABLED(CONFIG_MPTCP)
 		skb_ext_type_len[SKB_EXT_MPTCP] +
-#endif
-#if IS_ENABLED(CONFIG_KCOV)
-		skb_ext_type_len[SKB_EXT_KCOV_HANDLE] +
 #endif
 		0;
 }

base-commit: 470dfd808ac4135f313967f9d3e107b87fc6a0b3
-- 
2.29.2.454.gaff20da3a2-goog



* Re: [PATCH net-next] net: switch to storing KCOV handle directly in sk_buff
  2020-11-25 17:34 [PATCH net-next] net: switch to storing KCOV handle directly in sk_buff Marco Elver
@ 2020-11-25 20:43 ` Jakub Kicinski
  2020-11-25 22:42   ` Marco Elver
  0 siblings, 1 reply; 6+ messages in thread
From: Jakub Kicinski @ 2020-11-25 20:43 UTC (permalink / raw)
  To: Marco Elver
  Cc: davem, johannes, a.nogikh, andreyknvl, dvyukov, linux-kernel,
	netdev, linux-wireless, idosch, fw, willemb

On Wed, 25 Nov 2020 18:34:36 +0100 Marco Elver wrote:
> diff --git a/net/core/skbuff.c b/net/core/skbuff.c
> index ffe3dcc0ebea..070b1077d976 100644
> --- a/net/core/skbuff.c
> +++ b/net/core/skbuff.c
> @@ -233,6 +233,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
>  	skb->end = skb->tail + size;
>  	skb->mac_header = (typeof(skb->mac_header))~0U;
>  	skb->transport_header = (typeof(skb->transport_header))~0U;
> +	skb_set_kcov_handle(skb, kcov_common_handle());
>  
>  	/* make sure we initialize shinfo sequentially */
>  	shinfo = skb_shinfo(skb);
> @@ -249,9 +250,6 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
>  
>  		fclones->skb2.fclone = SKB_FCLONE_CLONE;
>  	}
> -
> -	skb_set_kcov_handle(skb, kcov_common_handle());

Why the move?

>  out:
>  	return skb;
>  nodata:
> @@ -285,8 +283,6 @@ static struct sk_buff *__build_skb_around(struct sk_buff *skb,
>  	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
>  	atomic_set(&shinfo->dataref, 1);
>  
> -	skb_set_kcov_handle(skb, kcov_common_handle());
> -
>  	return skb;
>  }

And why are we dropping this?

If this was omitted in earlier versions, it's just an independent bug.
I don't think build_skb() will call __alloc_skb(), so we need to
set the handle here.
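
Roughly speaking (just a sketch based on the hunk removed above; the
exact placement in v2 is up to you), __build_skb_around() would keep
ending with:

	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	/* build_skb() does not go through __alloc_skb(), so the KCOV
	 * handle has to be attached here as well.
	 */
	skb_set_kcov_handle(skb, kcov_common_handle());

	return skb;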


* Re: [PATCH net-next] net: switch to storing KCOV handle directly in sk_buff
  2020-11-25 20:43 ` Jakub Kicinski
@ 2020-11-25 22:42   ` Marco Elver
  2020-11-26 16:34     ` Willem de Bruijn
  0 siblings, 1 reply; 6+ messages in thread
From: Marco Elver @ 2020-11-25 22:42 UTC (permalink / raw)
  To: Jakub Kicinski
  Cc: David S. Miller, Johannes Berg, Aleksandr Nogikh,
	Andrey Konovalov, Dmitry Vyukov, LKML, Netdev, linux-wireless,
	Ido Schimmel, Florian Westphal, Willem de Bruijn

On Wed, 25 Nov 2020 at 21:43, Jakub Kicinski <kuba@kernel.org> wrote:
>
> On Wed, 25 Nov 2020 18:34:36 +0100 Marco Elver wrote:
> > diff --git a/net/core/skbuff.c b/net/core/skbuff.c
> > index ffe3dcc0ebea..070b1077d976 100644
> > --- a/net/core/skbuff.c
> > +++ b/net/core/skbuff.c
> > @@ -233,6 +233,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
> >       skb->end = skb->tail + size;
> >       skb->mac_header = (typeof(skb->mac_header))~0U;
> >       skb->transport_header = (typeof(skb->transport_header))~0U;
> > +     skb_set_kcov_handle(skb, kcov_common_handle());
> >
> >       /* make sure we initialize shinfo sequentially */
> >       shinfo = skb_shinfo(skb);
> > @@ -249,9 +250,6 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
> >
> >               fclones->skb2.fclone = SKB_FCLONE_CLONE;
> >       }
> > -
> > -     skb_set_kcov_handle(skb, kcov_common_handle());
>
> Why the move?

v2 of the original series had it above. I frankly don't mind.

1. Group it with the other fields above?

2. Leave it at the end here?

> >  out:
> >       return skb;
> >  nodata:
> > @@ -285,8 +283,6 @@ static struct sk_buff *__build_skb_around(struct sk_buff *skb,
> >       memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
> >       atomic_set(&shinfo->dataref, 1);
> >
> > -     skb_set_kcov_handle(skb, kcov_common_handle());
> > -
> >       return skb;
> >  }
>
> And why are we dropping this?

It wasn't here originally.

> If this was omitted in earlier versions, it's just an independent bug.
> I don't think build_skb() will call __alloc_skb(), so we need to
> set the handle here.

Correct, that was an original omission.

Will send v2.


* Re: [PATCH net-next] net: switch to storing KCOV handle directly in sk_buff
  2020-11-25 22:42   ` Marco Elver
@ 2020-11-26 16:34     ` Willem de Bruijn
  2020-11-27 12:26       ` Marco Elver
  0 siblings, 1 reply; 6+ messages in thread
From: Willem de Bruijn @ 2020-11-26 16:34 UTC (permalink / raw)
  To: Marco Elver
  Cc: Jakub Kicinski, David S. Miller, Johannes Berg, Aleksandr Nogikh,
	Andrey Konovalov, Dmitry Vyukov, LKML, Netdev, linux-wireless,
	Ido Schimmel, Florian Westphal, Willem de Bruijn

On Thu, Nov 26, 2020 at 3:19 AM Marco Elver <elver@google.com> wrote:
>
> On Wed, 25 Nov 2020 at 21:43, Jakub Kicinski <kuba@kernel.org> wrote:
> >
> > On Wed, 25 Nov 2020 18:34:36 +0100 Marco Elver wrote:
> > > diff --git a/net/core/skbuff.c b/net/core/skbuff.c
> > > index ffe3dcc0ebea..070b1077d976 100644
> > > --- a/net/core/skbuff.c
> > > +++ b/net/core/skbuff.c
> > > @@ -233,6 +233,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
> > >       skb->end = skb->tail + size;
> > >       skb->mac_header = (typeof(skb->mac_header))~0U;
> > >       skb->transport_header = (typeof(skb->transport_header))~0U;
> > > +     skb_set_kcov_handle(skb, kcov_common_handle());
> > >
> > >       /* make sure we initialize shinfo sequentially */
> > >       shinfo = skb_shinfo(skb);
> > > @@ -249,9 +250,6 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
> > >
> > >               fclones->skb2.fclone = SKB_FCLONE_CLONE;
> > >       }
> > > -
> > > -     skb_set_kcov_handle(skb, kcov_common_handle());
> >
> > Why the move?
>
> v2 of the original series had it above. I frankly don't mind.
>
> 1. Group it with the other fields above?
>
> 2. Leave it at the end here?
>
> > >  out:
> > >       return skb;
> > >  nodata:
> > > @@ -285,8 +283,6 @@ static struct sk_buff *__build_skb_around(struct sk_buff *skb,
> > >       memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
> > >       atomic_set(&shinfo->dataref, 1);
> > >
> > > -     skb_set_kcov_handle(skb, kcov_common_handle());
> > > -
> > >       return skb;
> > >  }
> >
> > And why are we dropping this?
>
> It wasn't here originally.
>
> > If this was omitted in earlier versions, it's just an independent bug.
> > I don't think build_skb() will call __alloc_skb(), so we need to
> > set the handle here.
>
> Correct, that was an original omission.
>
> Will send v2.

Does it make more sense to revert the patch that added the extensions
and the follow-on fixes and add a separate new patch instead?

If adding a new field to the skb, even if only in debug builds,
please check with pahole how it affects struct layout if you
haven't yet.

The skb_extensions idea was mine. Apologies for steering
this into an apparently unsuccessful direction. Adding new fields
to the skb is very rare because it's potentially problematic wrt allocation.


* Re: [PATCH net-next] net: switch to storing KCOV handle directly in sk_buff
  2020-11-26 16:34     ` Willem de Bruijn
@ 2020-11-27 12:26       ` Marco Elver
  2020-11-27 16:50         ` Willem de Bruijn
  0 siblings, 1 reply; 6+ messages in thread
From: Marco Elver @ 2020-11-27 12:26 UTC (permalink / raw)
  To: Willem de Bruijn
  Cc: Jakub Kicinski, David S. Miller, Johannes Berg, Aleksandr Nogikh,
	Andrey Konovalov, Dmitry Vyukov, LKML, Netdev, linux-wireless,
	Ido Schimmel, Florian Westphal, Willem de Bruijn

On Thu, 26 Nov 2020 at 17:35, Willem de Bruijn
<willemdebruijn.kernel@gmail.com> wrote:
> On Thu, Nov 26, 2020 at 3:19 AM Marco Elver <elver@google.com> wrote:
[...]
> > Will send v2.
>
> Does it make more sense to revert the patch that added the extensions
> and the follow-on fixes and add a separate new patch instead?

That doesn't work, because then we'll end up with a build-broken
commit in between the reverts and the new version, because mac80211
uses skb_get_kcov_handle().

> If adding a new field to the skb, even if only in debug builds,
> please check with pahole how it affects struct layout if you
> haven't yet.

Without KCOV:

        /* size: 224, cachelines: 4, members: 72 */
        /* sum members: 217, holes: 1, sum holes: 2 */
        /* sum bitfield members: 36 bits, bit holes: 2, sum bit holes: 4 bits */
        /* forced alignments: 2 */
        /* last cacheline: 32 bytes */

With KCOV:

        /* size: 232, cachelines: 4, members: 73 */
        /* sum members: 225, holes: 1, sum holes: 2 */
        /* sum bitfield members: 36 bits, bit holes: 2, sum bit holes: 4 bits */
        /* forced alignments: 2 */
        /* last cacheline: 40 bytes */


> The skb_extensions idea was mine. Apologies for steering
> this into an apparently unsuccessful direction. Adding new fields
> to skb is very rare because possibly problematic wrt allocation.


* Re: [PATCH net-next] net: switch to storing KCOV handle directly in sk_buff
  2020-11-27 12:26       ` Marco Elver
@ 2020-11-27 16:50         ` Willem de Bruijn
  0 siblings, 0 replies; 6+ messages in thread
From: Willem de Bruijn @ 2020-11-27 16:50 UTC (permalink / raw)
  To: Marco Elver
  Cc: Willem de Bruijn, Jakub Kicinski, David S. Miller, Johannes Berg,
	Aleksandr Nogikh, Andrey Konovalov, Dmitry Vyukov, LKML, Netdev,
	linux-wireless, Ido Schimmel, Florian Westphal

On Fri, Nov 27, 2020 at 7:26 AM Marco Elver <elver@google.com> wrote:
>
> On Thu, 26 Nov 2020 at 17:35, Willem de Bruijn
> <willemdebruijn.kernel@gmail.com> wrote:
> > On Thu, Nov 26, 2020 at 3:19 AM Marco Elver <elver@google.com> wrote:
> [...]
> > > Will send v2.
> >
> > Does it make more sense to revert the patch that added the extensions
> > and the follow-on fixes and add a separate new patch instead?
>
> That doesn't work, because then we'll end up with a build-broken
> commit in between the reverts and the new version, because mac80211
> uses skb_get_kcov_handle().
>
> > If adding a new field to the skb, even if only in debug builds,
> > please check with pahole how it affects struct layout if you
> > haven't yet.
>
> Without KCOV:
>
>         /* size: 224, cachelines: 4, members: 72 */
>         /* sum members: 217, holes: 1, sum holes: 2 */
>         /* sum bitfield members: 36 bits, bit holes: 2, sum bit holes: 4 bits */
>         /* forced alignments: 2 */
>         /* last cacheline: 32 bytes */
>
> With KCOV:
>
>         /* size: 232, cachelines: 4, members: 73 */
>         /* sum members: 225, holes: 1, sum holes: 2 */
>         /* sum bitfield members: 36 bits, bit holes: 2, sum bit holes: 4 bits */
>         /* forced alignments: 2 */
>         /* last cacheline: 40 bytes */

Thanks. defconfig leaves some symbols disabled, but manually enabling
them just fills a hole, so 232 is indeed the worst case allocation.

I recall a firm edict against growing skb, but I don't know of a
hard limit at exactly 224.

There is a limit at 2048 - sizeof(struct skb_shared_info) == 1728B
when using pages for two ETH_FRAME_LEN (1514) allocations.

This would leave 1728 - 1514 == 214B if also squeezing the skb itself
in with the same allocation.

But I have no idea if this is used anywhere. Certainly have no example
ready. And as you show, the previous default is already at 224.

If no one else knows of a hard limit at 224 or below, I suppose the
next technical limit is just 256 for kmem cache purposes.

My understanding was that skb_extensions was supposed to solve this
problem of extending the skb without growing the main structure. Not
for this patch, but I wonder if we can resolve the issues exposed here
and make it usable in more conditions.
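
(As a quick sanity check of the arithmetic above -- just the numbers
quoted in this thread plugged into a throwaway userspace program, not
authoritative, since the real values depend on config:)

	#include <stdio.h>

	int main(void)
	{
		/* Numbers quoted in this thread; real values depend on config. */
		const unsigned int half_page   = 2048;
		const unsigned int data_budget = 1728; /* 2048 - sizeof(struct skb_shared_info) */
		const unsigned int eth_frame   = 1514; /* ETH_FRAME_LEN */
		const unsigned int skb_nokcov  = 224;  /* pahole output above, !CONFIG_KCOV */
		const unsigned int skb_kcov    = 232;  /* pahole output above, CONFIG_KCOV=y */
		const unsigned int leftover    = data_budget - eth_frame; /* 214 */

		printf("implied skb_shared_info size: %u\n", half_page - data_budget);
		printf("leftover after one frame:     %u\n", leftover);
		printf("sk_buff w/o KCOV fits: %s\n", skb_nokcov <= leftover ? "yes" : "no");
		printf("sk_buff w/  KCOV fits: %s\n", skb_kcov <= leftover ? "yes" : "no");
		return 0;
	}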


