All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH net-next] once: add DO_ONCE_SLOW() for sleepable contexts
@ 2022-10-01 20:51 Eric Dumazet
  2022-10-01 21:15 ` Willy Tarreau
                   ` (3 more replies)
  0 siblings, 4 replies; 13+ messages in thread
From: Eric Dumazet @ 2022-10-01 20:51 UTC (permalink / raw)
  To: David S . Miller, Jakub Kicinski, Paolo Abeni
  Cc: netdev, Eric Dumazet, Eric Dumazet, Christophe Leroy, Willy Tarreau

From: Eric Dumazet <edumazet@google.com>

Christophe Leroy reported a ~80ms latency spike
happening at first TCP connect() time.

This is because __inet_hash_connect() uses get_random_once()
to populate a perturbation table which became quite big
after commit 4c2c8f03a5ab ("tcp: increase source port perturb table to 2^16")

get_random_once() uses DO_ONCE(), which blocks hard irqs for the duration
of the operation.

This patch adds DO_ONCE_SLOW() which uses a mutex instead of a spinlock
for operations where we prefer to stay in process context.

Then __inet_hash_connect() can use get_random_slow_once()
to populate its perturbation table.

Fixes: 4c2c8f03a5ab ("tcp: increase source port perturb table to 2^16")
Fixes: 190cc82489f4 ("tcp: change source port randomizarion at connect() time")
Reported-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Link: https://lore.kernel.org/netdev/CANn89iLAEYBaoYajy0Y9UmGFff5GPxDUoG-ErVB2jDdRNQ5Tug@mail.gmail.com/T/#t
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Willy Tarreau <w@1wt.eu>
---
 include/linux/once.h       | 28 ++++++++++++++++++++++++++++
 lib/once.c                 | 30 ++++++++++++++++++++++++++++++
 net/ipv4/inet_hashtables.c |  4 ++--
 3 files changed, 60 insertions(+), 2 deletions(-)

diff --git a/include/linux/once.h b/include/linux/once.h
index b14d8b309d52b198bb144689fe67d9ed235c2b3e..176ab75b42df740a738d04d8480821a0b3b65ba9 100644
--- a/include/linux/once.h
+++ b/include/linux/once.h
@@ -5,10 +5,18 @@
 #include <linux/types.h>
 #include <linux/jump_label.h>
 
+/* Helpers used from arbitrary contexts.
+ * Hard irqs are blocked, be cautious.
+ */
 bool __do_once_start(bool *done, unsigned long *flags);
 void __do_once_done(bool *done, struct static_key_true *once_key,
 		    unsigned long *flags, struct module *mod);
 
+/* Variant for process contexts only. */
+bool __do_once_slow_start(bool *done);
+void __do_once_slow_done(bool *done, struct static_key_true *once_key,
+			 struct module *mod);
+
 /* Call a function exactly once. The idea of DO_ONCE() is to perform
  * a function call such as initialization of random seeds, etc, only
  * once, where DO_ONCE() can live in the fast-path. After @func has
@@ -52,7 +60,27 @@ void __do_once_done(bool *done, struct static_key_true *once_key,
 		___ret;							     \
 	})
 
+/* Variant of DO_ONCE() for process/sleepable contexts. */
+#define DO_ONCE_SLOW(func, ...)						     \
+	({								     \
+		bool ___ret = false;					     \
+		static bool __section(".data.once") ___done = false;	     \
+		static DEFINE_STATIC_KEY_TRUE(___once_key);		     \
+		if (static_branch_unlikely(&___once_key)) {		     \
+			___ret = __do_once_slow_start(&___done);	     \
+			if (unlikely(___ret)) {				     \
+				func(__VA_ARGS__);			     \
+				__do_once_slow_done(&___done, &___once_key,  \
+						    THIS_MODULE);	     \
+			}						     \
+		}							     \
+		___ret;							     \
+	})
+
 #define get_random_once(buf, nbytes)					     \
 	DO_ONCE(get_random_bytes, (buf), (nbytes))
 
+#define get_random_slow_once(buf, nbytes)				     \
+	DO_ONCE_SLOW(get_random_bytes, (buf), (nbytes))
+
 #endif /* _LINUX_ONCE_H */
diff --git a/lib/once.c b/lib/once.c
index 59149bf3bfb4a97e4fa7febee737155d700bae48..351f66aad310a47f17d0636da0ed5b2b4460522d 100644
--- a/lib/once.c
+++ b/lib/once.c
@@ -66,3 +66,33 @@ void __do_once_done(bool *done, struct static_key_true *once_key,
 	once_disable_jump(once_key, mod);
 }
 EXPORT_SYMBOL(__do_once_done);
+
+static DEFINE_MUTEX(once_mutex);
+
+bool __do_once_slow_start(bool *done)
+	__acquires(once_mutex)
+{
+	mutex_lock(&once_mutex);
+	if (*done) {
+		mutex_unlock(&once_mutex);
+		/* Keep sparse happy by restoring an even lock count on
+		 * this mutex. In case we return here, we don't call into
+		 * __do_once_done but return early in the DO_ONCE_SLOW() macro.
+		 */
+		__acquire(once_mutex);
+		return false;
+	}
+
+	return true;
+}
+EXPORT_SYMBOL(__do_once_slow_start);
+
+void __do_once_slow_done(bool *done, struct static_key_true *once_key,
+			 struct module *mod)
+	__releases(once_mutex)
+{
+	*done = true;
+	mutex_unlock(&once_mutex);
+	once_disable_jump(once_key, mod);
+}
+EXPORT_SYMBOL(__do_once_slow_done);
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 49db8c597eea83a27e91edc429c2c4779b0a5cd7..dc1c5629cd0d61716d6d99131c57b49717785709 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -958,8 +958,8 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
 	if (likely(remaining > 1))
 		remaining &= ~1U;
 
-	net_get_random_once(table_perturb,
-			    INET_TABLE_PERTURB_SIZE * sizeof(*table_perturb));
+	get_random_slow_once(table_perturb,
+			     INET_TABLE_PERTURB_SIZE * sizeof(*table_perturb));
 	index = port_offset & (INET_TABLE_PERTURB_SIZE - 1);
 
 	offset = READ_ONCE(table_perturb[index]) + (port_offset >> 32);
-- 
2.38.0.rc1.362.ged0d419d3c-goog


^ permalink raw reply related	[flat|nested] 13+ messages in thread

* Re: [PATCH net-next] once: add DO_ONCE_SLOW() for sleepable contexts
  2022-10-01 20:51 [PATCH net-next] once: add DO_ONCE_SLOW() for sleepable contexts Eric Dumazet
@ 2022-10-01 21:15 ` Willy Tarreau
  2022-10-01 22:50   ` Jason A. Donenfeld
  2022-10-01 22:44 ` Jason A. Donenfeld
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 13+ messages in thread
From: Willy Tarreau @ 2022-10-01 21:15 UTC (permalink / raw)
  To: Eric Dumazet
  Cc: David S . Miller, Jakub Kicinski, Paolo Abeni, netdev,
	Eric Dumazet, Christophe Leroy

Hi Eric,

On Sat, Oct 01, 2022 at 01:51:02PM -0700, Eric Dumazet wrote:
> From: Eric Dumazet <edumazet@google.com>
> 
> Christophe Leroy reported a ~80ms latency spike
> happening at first TCP connect() time.

Seeing Christophe's message also made me wonder if we didn't break
something back then :-/

> This is because __inet_hash_connect() uses get_random_once()
> to populate a perturbation table which became quite big
> after commit 4c2c8f03a5ab ("tcp: increase source port perturb table to 2^16")
> 
> get_random_once() uses DO_ONCE(), which block hard irqs for the duration
> of the operation.
> 
> This patch adds DO_ONCE_SLOW() which uses a mutex instead of a spinlock
> for operations where we prefer to stay in process context.

That's a nice improvement I think. I was wondering if, for this special
case, we *really* need an exclusive DO_ONCE(). I mean, we're getting
random bytes, we really do not care if two CPUs change them in parallel
provided that none uses them before the table is entirely filled. Thus
that could probably end up as something like:

    if (!atomic_read(&done)) {
        get_random_bytes(array);
        atomic_set(&done, 1);
    }

In any case, your solution remains cleaner and more robust, though.

Thanks,
Willy

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH net-next] once: add DO_ONCE_SLOW() for sleepable contexts
  2022-10-01 20:51 [PATCH net-next] once: add DO_ONCE_SLOW() for sleepable contexts Eric Dumazet
  2022-10-01 21:15 ` Willy Tarreau
@ 2022-10-01 22:44 ` Jason A. Donenfeld
  2022-10-01 22:50   ` Eric Dumazet
  2022-10-03 17:25   ` Jakub Kicinski
  2022-10-02  8:58 ` [PATCH net-next] once: add DO_ONCE_SLOW() for sleepable contexts Christophe Leroy
  2022-10-03 12:40 ` patchwork-bot+netdevbpf
  3 siblings, 2 replies; 13+ messages in thread
From: Jason A. Donenfeld @ 2022-10-01 22:44 UTC (permalink / raw)
  To: Eric Dumazet
  Cc: David S . Miller, Jakub Kicinski, Paolo Abeni, netdev,
	Eric Dumazet, Christophe Leroy, Willy Tarreau

On Sat, Oct 01, 2022 at 01:51:02PM -0700, Eric Dumazet wrote:
> From: Eric Dumazet <edumazet@google.com>
> 
> Christophe Leroy reported a ~80ms latency spike
> happening at first TCP connect() time.
> 
> This is because __inet_hash_connect() uses get_random_once()
> to populate a perturbation table which became quite big
> after commit 4c2c8f03a5ab ("tcp: increase source port perturb table to 2^16")
> 
> get_random_once() uses DO_ONCE(), which block hard irqs for the duration
> of the operation.
> 
> This patch adds DO_ONCE_SLOW() which uses a mutex instead of a spinlock
> for operations where we prefer to stay in process context.
> 
> Then __inet_hash_connect() can use get_random_slow_once()
> to populate its perturbation table.
> 
> Fixes: 4c2c8f03a5ab ("tcp: increase source port perturb table to 2^16")
> Fixes: 190cc82489f4 ("tcp: change source port randomizarion at connect() time")
> Reported-by: Christophe Leroy <christophe.leroy@csgroup.eu>
> Link: https://lore.kernel.org/netdev/CANn89iLAEYBaoYajy0Y9UmGFff5GPxDUoG-ErVB2jDdRNQ5Tug@mail.gmail.com/T/#t
> Signed-off-by: Eric Dumazet <edumazet@google.com>
> Cc: Willy Tarreau <w@1wt.eu>
> ---
>  include/linux/once.h       | 28 ++++++++++++++++++++++++++++
>  lib/once.c                 | 30 ++++++++++++++++++++++++++++++
>  net/ipv4/inet_hashtables.c |  4 ++--
>  3 files changed, 60 insertions(+), 2 deletions(-)
> 
> diff --git a/include/linux/once.h b/include/linux/once.h
> index b14d8b309d52b198bb144689fe67d9ed235c2b3e..176ab75b42df740a738d04d8480821a0b3b65ba9 100644
> --- a/include/linux/once.h
> +++ b/include/linux/once.h
> @@ -5,10 +5,18 @@
>  #include <linux/types.h>
>  #include <linux/jump_label.h>
>  
> +/* Helpers used from arbitrary contexts.
> + * Hard irqs are blocked, be cautious.
> + */
>  bool __do_once_start(bool *done, unsigned long *flags);
>  void __do_once_done(bool *done, struct static_key_true *once_key,
>  		    unsigned long *flags, struct module *mod);
>  
> +/* Variant for process contexts only. */
> +bool __do_once_slow_start(bool *done);
> +void __do_once_slow_done(bool *done, struct static_key_true *once_key,
> +			 struct module *mod);
> +
>  /* Call a function exactly once. The idea of DO_ONCE() is to perform
>   * a function call such as initialization of random seeds, etc, only
>   * once, where DO_ONCE() can live in the fast-path. After @func has
> @@ -52,7 +60,27 @@ void __do_once_done(bool *done, struct static_key_true *once_key,
>  		___ret;							     \
>  	})
>  
> +/* Variant of DO_ONCE() for process/sleepable contexts. */
> +#define DO_ONCE_SLOW(func, ...)						     \
> +	({								     \
> +		bool ___ret = false;					     \
> +		static bool __section(".data.once") ___done = false;	     \
> +		static DEFINE_STATIC_KEY_TRUE(___once_key);		     \
> +		if (static_branch_unlikely(&___once_key)) {		     \
> +			___ret = __do_once_slow_start(&___done);	     \
> +			if (unlikely(___ret)) {				     \
> +				func(__VA_ARGS__);			     \
> +				__do_once_slow_done(&___done, &___once_key,  \
> +						    THIS_MODULE);	     \
> +			}						     \
> +		}							     \
> +		___ret;							     \
> +	})
> +

Hmm, I dunno about this macro-choice explosion here. The whole thing
with DO_ONCE() is that the static branch makes it zero cost most of the
time while being somewhat expensive the rest of the time, but who cares,
because "the rest" is just once.

So instead, why not just branch on whether or not we can sleep here, if
that can be worked out dynamically? If not, and if you really do need
two sets of macros and functions, at least you can call the new one
something other than "slow"? Maybe something about being _SLEEPABLE()
instead?

Also, the __do_once_slow_done() function misses a really nice
optimization, which is that the static branch can be changed
synchronously instead of having to allocate and fire off that workqueue,
since by definition we're in sleepable context here.

Jason

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH net-next] once: add DO_ONCE_SLOW() for sleepable contexts
  2022-10-01 22:44 ` Jason A. Donenfeld
@ 2022-10-01 22:50   ` Eric Dumazet
  2022-10-03 17:25   ` Jakub Kicinski
  1 sibling, 0 replies; 13+ messages in thread
From: Eric Dumazet @ 2022-10-01 22:50 UTC (permalink / raw)
  To: Jason A. Donenfeld
  Cc: Eric Dumazet, David S . Miller, Jakub Kicinski, Paolo Abeni,
	netdev, Christophe Leroy, Willy Tarreau

On Sat, Oct 1, 2022 at 3:44 PM Jason A. Donenfeld <Jason@zx2c4.com> wrote:
>
> On Sat, Oct 01, 2022 at 01:51:02PM -0700, Eric Dumazet wrote:
> > From: Eric Dumazet <edumazet@google.com>
> >
> > Christophe Leroy reported a ~80ms latency spike
> > happening at first TCP connect() time.
> >
> > This is because __inet_hash_connect() uses get_random_once()
> > to populate a perturbation table which became quite big
> > after commit 4c2c8f03a5ab ("tcp: increase source port perturb table to 2^16")
> >
> > get_random_once() uses DO_ONCE(), which block hard irqs for the duration
> > of the operation.
> >
> > This patch adds DO_ONCE_SLOW() which uses a mutex instead of a spinlock
> > for operations where we prefer to stay in process context.
> >
> > Then __inet_hash_connect() can use get_random_slow_once()
> > to populate its perturbation table.
> >
> > Fixes: 4c2c8f03a5ab ("tcp: increase source port perturb table to 2^16")
> > Fixes: 190cc82489f4 ("tcp: change source port randomizarion at connect() time")
> > Reported-by: Christophe Leroy <christophe.leroy@csgroup.eu>
> > Link: https://lore.kernel.org/netdev/CANn89iLAEYBaoYajy0Y9UmGFff5GPxDUoG-ErVB2jDdRNQ5Tug@mail.gmail.com/T/#t
> > Signed-off-by: Eric Dumazet <edumazet@google.com>
> > Cc: Willy Tarreau <w@1wt.eu>
> > ---
> >  include/linux/once.h       | 28 ++++++++++++++++++++++++++++
> >  lib/once.c                 | 30 ++++++++++++++++++++++++++++++
> >  net/ipv4/inet_hashtables.c |  4 ++--
> >  3 files changed, 60 insertions(+), 2 deletions(-)
> >
> > diff --git a/include/linux/once.h b/include/linux/once.h
> > index b14d8b309d52b198bb144689fe67d9ed235c2b3e..176ab75b42df740a738d04d8480821a0b3b65ba9 100644
> > --- a/include/linux/once.h
> > +++ b/include/linux/once.h
> > @@ -5,10 +5,18 @@
> >  #include <linux/types.h>
> >  #include <linux/jump_label.h>
> >
> > +/* Helpers used from arbitrary contexts.
> > + * Hard irqs are blocked, be cautious.
> > + */
> >  bool __do_once_start(bool *done, unsigned long *flags);
> >  void __do_once_done(bool *done, struct static_key_true *once_key,
> >                   unsigned long *flags, struct module *mod);
> >
> > +/* Variant for process contexts only. */
> > +bool __do_once_slow_start(bool *done);
> > +void __do_once_slow_done(bool *done, struct static_key_true *once_key,
> > +                      struct module *mod);
> > +
> >  /* Call a function exactly once. The idea of DO_ONCE() is to perform
> >   * a function call such as initialization of random seeds, etc, only
> >   * once, where DO_ONCE() can live in the fast-path. After @func has
> > @@ -52,7 +60,27 @@ void __do_once_done(bool *done, struct static_key_true *once_key,
> >               ___ret;                                                      \
> >       })
> >
> > +/* Variant of DO_ONCE() for process/sleepable contexts. */
> > +#define DO_ONCE_SLOW(func, ...)                                                   \
> > +     ({                                                                   \
> > +             bool ___ret = false;                                         \
> > +             static bool __section(".data.once") ___done = false;         \
> > +             static DEFINE_STATIC_KEY_TRUE(___once_key);                  \
> > +             if (static_branch_unlikely(&___once_key)) {                  \
> > +                     ___ret = __do_once_slow_start(&___done);             \
> > +                     if (unlikely(___ret)) {                              \
> > +                             func(__VA_ARGS__);                           \
> > +                             __do_once_slow_done(&___done, &___once_key,  \
> > +                                                 THIS_MODULE);            \
> > +                     }                                                    \
> > +             }                                                            \
> > +             ___ret;                                                      \
> > +     })
> > +
>
> Hmm, I dunno about this macro-choice explosion here. The whole thing
> with DO_ONCE() is that the static branch makes it zero cost most of the
> time while being somewhat expensive the rest of the time, but who cares,
> because "the rest" is just once.
>
> So instead, why not just branch on whether or not we can sleep here, if
> that can be worked out dynamically? If not, and if you really do need
> two sets of macros and functions, at least you can call the new one
> something other than "slow"? Maybe something about being _SLEEPABLE()
> instead?

No idea what you mean. I do not want to over-engineer code that has yet to be
adopted by other callers. If you think you want to spend weekend time on this,
feel free to take over at this point.

>
> Also, the __do_once_slow_done() function misses a really nice
> optimization, which is that the static branch can be changed
> synchronously instead of having to allocate and fire off that workqueue,
> since by definition we're in sleepable context here.
>

This was deliberate. We already spent a lot of time in the called function,
better just return to the caller as fast as possible.

This really does not matter, the work queue is fired once by definition.

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH net-next] once: add DO_ONCE_SLOW() for sleepable contexts
  2022-10-01 21:15 ` Willy Tarreau
@ 2022-10-01 22:50   ` Jason A. Donenfeld
  2022-10-02  5:38     ` Willy Tarreau
  0 siblings, 1 reply; 13+ messages in thread
From: Jason A. Donenfeld @ 2022-10-01 22:50 UTC (permalink / raw)
  To: Willy Tarreau
  Cc: Eric Dumazet, David S . Miller, Jakub Kicinski, Paolo Abeni,
	netdev, Eric Dumazet, Christophe Leroy

On Sat, Oct 01, 2022 at 11:15:29PM +0200, Willy Tarreau wrote:
> Hi Eric,
> 
> On Sat, Oct 01, 2022 at 01:51:02PM -0700, Eric Dumazet wrote:
> > From: Eric Dumazet <edumazet@google.com>
> > 
> > Christophe Leroy reported a ~80ms latency spike
> > happening at first TCP connect() time.
> 
> Seeing Christophe's message also made me wonder if we didn't break
> something back then :-/
> 
> > This is because __inet_hash_connect() uses get_random_once()
> > to populate a perturbation table which became quite big
> > after commit 4c2c8f03a5ab ("tcp: increase source port perturb table to 2^16")
> > 
> > get_random_once() uses DO_ONCE(), which block hard irqs for the duration
> > of the operation.
> > 
> > This patch adds DO_ONCE_SLOW() which uses a mutex instead of a spinlock
> > for operations where we prefer to stay in process context.
> 
> That's a nice improvement I think. I was wondering if, for this special
> case, we *really* need an exclusive DO_ONCE(). I mean, we're getting
> random bytes, we really do not care if two CPUs change them in parallel
> provided that none uses them before the table is entirely filled. Thus
> that could probably end up as something like:
> 
>     if (!atomic_read(&done)) {
>         get_random_bytes(array);
>         atomic_set(&done, 1);
>     }

If you don't care about the tables being consistent between CPUs, then
yea, sure, that seems like a reasonable approach, and I like not
polluting once.{c,h} with some _SLOW() special cases. If you don't want
the atomic read in there you could also do the same pattern with a
static branch, like what DO_ONCE() does:

   if (static_branch_unlikely(&need_bytes)) {
      get_random_bytes(array);
      static_branch_disable(&need_bytes);
   }

Anyway, same thing as your suggestion more or less.

Jason

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH net-next] once: add DO_ONCE_SLOW() for sleepable contexts
  2022-10-01 22:50   ` Jason A. Donenfeld
@ 2022-10-02  5:38     ` Willy Tarreau
  0 siblings, 0 replies; 13+ messages in thread
From: Willy Tarreau @ 2022-10-02  5:38 UTC (permalink / raw)
  To: Jason A. Donenfeld
  Cc: Eric Dumazet, David S . Miller, Jakub Kicinski, Paolo Abeni,
	netdev, Eric Dumazet, Christophe Leroy

On Sun, Oct 02, 2022 at 12:50:38AM +0200, Jason A. Donenfeld wrote:
> > > This patch adds DO_ONCE_SLOW() which uses a mutex instead of a spinlock
> > > for operations where we prefer to stay in process context.
> > 
> > That's a nice improvement I think. I was wondering if, for this special
> > case, we *really* need an exclusive DO_ONCE(). I mean, we're getting
> > random bytes, we really do not care if two CPUs change them in parallel
> > provided that none uses them before the table is entirely filled. Thus
> > that could probably end up as something like:
> > 
> >     if (!atomic_read(&done)) {
> >         get_random_bytes(array);
> >         atomic_set(&done, 1);
> >     }
> 
> If you don't care about the tables being consistent between CPUs, then
> yea, sure, that seems like a reasonable approach, and I like not
> polluting once.{c,h} with some _SLOW() special cases.

I don't see this as pollution, it possibly is a nice addition for certain
use cases or early fast paths where the risk of contention is high.

> If you don't want
> the atomic read in there you could also do the same pattern with a
> static branch, like what DO_ONCE() does:
> 
>    if (static_branch_unlikely(&need_bytes)) {
>       get_random_bytes(array);
>       static_branch_disable(&need_bytes);
>    }
> 
> Anyway, same thing as your suggestion more or less.

What I don't know in fact is if the code patching itself can be
responsible for a measurable part of the extra time Christophe noticed.
Anyway at least Christophe now has a few approaches to try, let's first
see if any of them fixes the regression.

Willy

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH net-next] once: add DO_ONCE_SLOW() for sleepable contexts
  2022-10-01 20:51 [PATCH net-next] once: add DO_ONCE_SLOW() for sleepable contexts Eric Dumazet
  2022-10-01 21:15 ` Willy Tarreau
  2022-10-01 22:44 ` Jason A. Donenfeld
@ 2022-10-02  8:58 ` Christophe Leroy
  2022-10-03 12:40 ` patchwork-bot+netdevbpf
  3 siblings, 0 replies; 13+ messages in thread
From: Christophe Leroy @ 2022-10-02  8:58 UTC (permalink / raw)
  To: Eric Dumazet, David S . Miller, Jakub Kicinski, Paolo Abeni
  Cc: netdev, Eric Dumazet, Willy Tarreau



Le 01/10/2022 à 22:51, Eric Dumazet a écrit :
> From: Eric Dumazet <edumazet@google.com>
> 
> Christophe Leroy reported a ~80ms latency spike
> happening at first TCP connect() time.
> 
> This is because __inet_hash_connect() uses get_random_once()
> to populate a perturbation table which became quite big
> after commit 4c2c8f03a5ab ("tcp: increase source port perturb table to 2^16")
> 
> get_random_once() uses DO_ONCE(), which block hard irqs for the duration
> of the operation.
> 
> This patch adds DO_ONCE_SLOW() which uses a mutex instead of a spinlock
> for operations where we prefer to stay in process context.
> 
> Then __inet_hash_connect() can use get_random_slow_once()
> to populate its perturbation table.

Many thanks for your quick answer and your patch.

It works great, now the irqsoff tracer reports a 2ms latency in an SPI 
transfer. So the issue with TCP connect is gone.

> 
> Fixes: 4c2c8f03a5ab ("tcp: increase source port perturb table to 2^16")
> Fixes: 190cc82489f4 ("tcp: change source port randomizarion at connect() time")
> Reported-by: Christophe Leroy <christophe.leroy@csgroup.eu>
> Link: https://lore.kernel.org/netdev/CANn89iLAEYBaoYajy0Y9UmGFff5GPxDUoG-ErVB2jDdRNQ5Tug@mail.gmail.com/T/#t
> Signed-off-by: Eric Dumazet <edumazet@google.com>
> Cc: Willy Tarreau <w@1wt.eu>

Tested-by: Christophe Leroy <christophe.leroy@csgroup.eu>


> ---
>   include/linux/once.h       | 28 ++++++++++++++++++++++++++++
>   lib/once.c                 | 30 ++++++++++++++++++++++++++++++
>   net/ipv4/inet_hashtables.c |  4 ++--
>   3 files changed, 60 insertions(+), 2 deletions(-)
> 
> diff --git a/include/linux/once.h b/include/linux/once.h
> index b14d8b309d52b198bb144689fe67d9ed235c2b3e..176ab75b42df740a738d04d8480821a0b3b65ba9 100644
> --- a/include/linux/once.h
> +++ b/include/linux/once.h
> @@ -5,10 +5,18 @@
>   #include <linux/types.h>
>   #include <linux/jump_label.h>
> 
> +/* Helpers used from arbitrary contexts.
> + * Hard irqs are blocked, be cautious.
> + */
>   bool __do_once_start(bool *done, unsigned long *flags);
>   void __do_once_done(bool *done, struct static_key_true *once_key,
>                      unsigned long *flags, struct module *mod);
> 
> +/* Variant for process contexts only. */
> +bool __do_once_slow_start(bool *done);
> +void __do_once_slow_done(bool *done, struct static_key_true *once_key,
> +                        struct module *mod);
> +
>   /* Call a function exactly once. The idea of DO_ONCE() is to perform
>    * a function call such as initialization of random seeds, etc, only
>    * once, where DO_ONCE() can live in the fast-path. After @func has
> @@ -52,7 +60,27 @@ void __do_once_done(bool *done, struct static_key_true *once_key,
>                  ___ret;                                                      \
>          })
> 
> +/* Variant of DO_ONCE() for process/sleepable contexts. */
> +#define DO_ONCE_SLOW(func, ...)                                                     \
> +       ({                                                                   \
> +               bool ___ret = false;                                         \
> +               static bool __section(".data.once") ___done = false;         \
> +               static DEFINE_STATIC_KEY_TRUE(___once_key);                  \
> +               if (static_branch_unlikely(&___once_key)) {                  \
> +                       ___ret = __do_once_slow_start(&___done);             \
> +                       if (unlikely(___ret)) {                              \
> +                               func(__VA_ARGS__);                           \
> +                               __do_once_slow_done(&___done, &___once_key,  \
> +                                                   THIS_MODULE);            \
> +                       }                                                    \
> +               }                                                            \
> +               ___ret;                                                      \
> +       })
> +
>   #define get_random_once(buf, nbytes)                                        \
>          DO_ONCE(get_random_bytes, (buf), (nbytes))
> 
> +#define get_random_slow_once(buf, nbytes)                                   \
> +       DO_ONCE_SLOW(get_random_bytes, (buf), (nbytes))
> +
>   #endif /* _LINUX_ONCE_H */
> diff --git a/lib/once.c b/lib/once.c
> index 59149bf3bfb4a97e4fa7febee737155d700bae48..351f66aad310a47f17d0636da0ed5b2b4460522d 100644
> --- a/lib/once.c
> +++ b/lib/once.c
> @@ -66,3 +66,33 @@ void __do_once_done(bool *done, struct static_key_true *once_key,
>          once_disable_jump(once_key, mod);
>   }
>   EXPORT_SYMBOL(__do_once_done);
> +
> +static DEFINE_MUTEX(once_mutex);
> +
> +bool __do_once_slow_start(bool *done)
> +       __acquires(once_mutex)
> +{
> +       mutex_lock(&once_mutex);
> +       if (*done) {
> +               mutex_unlock(&once_mutex);
> +               /* Keep sparse happy by restoring an even lock count on
> +                * this mutex. In case we return here, we don't call into
> +                * __do_once_done but return early in the DO_ONCE_SLOW() macro.
> +                */
> +               __acquire(once_mutex);
> +               return false;
> +       }
> +
> +       return true;
> +}
> +EXPORT_SYMBOL(__do_once_slow_start);
> +
> +void __do_once_slow_done(bool *done, struct static_key_true *once_key,
> +                        struct module *mod)
> +       __releases(once_mutex)
> +{
> +       *done = true;
> +       mutex_unlock(&once_mutex);
> +       once_disable_jump(once_key, mod);
> +}
> +EXPORT_SYMBOL(__do_once_slow_done);
> diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
> index 49db8c597eea83a27e91edc429c2c4779b0a5cd7..dc1c5629cd0d61716d6d99131c57b49717785709 100644
> --- a/net/ipv4/inet_hashtables.c
> +++ b/net/ipv4/inet_hashtables.c
> @@ -958,8 +958,8 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
>          if (likely(remaining > 1))
>                  remaining &= ~1U;
> 
> -       net_get_random_once(table_perturb,
> -                           INET_TABLE_PERTURB_SIZE * sizeof(*table_perturb));
> +       get_random_slow_once(table_perturb,
> +                            INET_TABLE_PERTURB_SIZE * sizeof(*table_perturb));
>          index = port_offset & (INET_TABLE_PERTURB_SIZE - 1);
> 
>          offset = READ_ONCE(table_perturb[index]) + (port_offset >> 32);
> --
> 2.38.0.rc1.362.ged0d419d3c-goog
> 

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH net-next] once: add DO_ONCE_SLOW() for sleepable contexts
  2022-10-01 20:51 [PATCH net-next] once: add DO_ONCE_SLOW() for sleepable contexts Eric Dumazet
                   ` (2 preceding siblings ...)
  2022-10-02  8:58 ` [PATCH net-next] once: add DO_ONCE_SLOW() for sleepable contexts Christophe Leroy
@ 2022-10-03 12:40 ` patchwork-bot+netdevbpf
  3 siblings, 0 replies; 13+ messages in thread
From: patchwork-bot+netdevbpf @ 2022-10-03 12:40 UTC (permalink / raw)
  To: Eric Dumazet; +Cc: davem, kuba, pabeni, netdev, edumazet, christophe.leroy, w

Hello:

This patch was applied to netdev/net-next.git (master)
by David S. Miller <davem@davemloft.net>:

On Sat,  1 Oct 2022 13:51:02 -0700 you wrote:
> From: Eric Dumazet <edumazet@google.com>
> 
> Christophe Leroy reported a ~80ms latency spike
> happening at first TCP connect() time.
> 
> This is because __inet_hash_connect() uses get_random_once()
> to populate a perturbation table which became quite big
> after commit 4c2c8f03a5ab ("tcp: increase source port perturb table to 2^16")
> 
> [...]

Here is the summary with links:
  - [net-next] once: add DO_ONCE_SLOW() for sleepable contexts
    https://git.kernel.org/netdev/net-next/c/62c07983bef9

You are awesome, thank you!
-- 
Deet-doot-dot, I am a bot.
https://korg.docs.kernel.org/patchwork/pwbot.html



^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH net-next] once: add DO_ONCE_SLOW() for sleepable contexts
  2022-10-01 22:44 ` Jason A. Donenfeld
  2022-10-01 22:50   ` Eric Dumazet
@ 2022-10-03 17:25   ` Jakub Kicinski
  2022-10-03 17:43     ` Jason A. Donenfeld
  1 sibling, 1 reply; 13+ messages in thread
From: Jakub Kicinski @ 2022-10-03 17:25 UTC (permalink / raw)
  To: Jason A. Donenfeld
  Cc: Eric Dumazet, David S . Miller, Paolo Abeni, netdev,
	Eric Dumazet, Christophe Leroy, Willy Tarreau

On Sun, 2 Oct 2022 00:44:28 +0200 Jason A. Donenfeld wrote:
> So instead, why not just branch on whether or not we can sleep here, if
> that can be worked out dynamically? 

IDK if we can dynamically work out if _all_ _possible_ callers are 
in a specific context, can we?

> If not, and if you really do need two sets of macros and functions,
> at least you can call the new one something other than "slow"? Maybe
> something about being _SLEEPABLE() instead?

+1 for s/SLOW/SLEEPABLE/. I was about to suggest s/SLOW/TASK/.
But I guess it's already applied..

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH net-next] once: add DO_ONCE_SLOW() for sleepable contexts
  2022-10-03 17:25   ` Jakub Kicinski
@ 2022-10-03 17:43     ` Jason A. Donenfeld
  2022-10-03 18:14         ` [PATCH net-next] " Jason A. Donenfeld
  0 siblings, 1 reply; 13+ messages in thread
From: Jason A. Donenfeld @ 2022-10-03 17:43 UTC (permalink / raw)
  To: Jakub Kicinski
  Cc: Eric Dumazet, David S . Miller, Paolo Abeni, netdev,
	Eric Dumazet, Christophe Leroy, Willy Tarreau

On Mon, Oct 3, 2022 at 7:25 PM Jakub Kicinski <kuba@kernel.org> wrote:
>
> On Sun, 2 Oct 2022 00:44:28 +0200 Jason A. Donenfeld wrote:
> > So instead, why not just branch on whether or not we can sleep here, if
> > that can be worked out dynamically?
>
> IDK if we can dynamically work out if _all_ _possible_ callers are
> in a specific context, can we?
>
> > If not, and if you really do need two sets of macros and functions,
> > at least you can call the new one something other than "slow"? Maybe
> > something about being _SLEEPABLE() instead?
>
> +1 for s/SLOW/SLEEPABLE/. I was about to suggest s/SLOW/TASK/.
> But I guess it's already applied..

I'll send a patch to change it in a minute.

^ permalink raw reply	[flat|nested] 13+ messages in thread

* [PATCH] once: rename _SLOW to _SLEEPABLE
  2022-10-03 17:43     ` Jason A. Donenfeld
@ 2022-10-03 18:14         ` Jason A. Donenfeld
  0 siblings, 0 replies; 13+ messages in thread
From: Jason A. Donenfeld @ 2022-10-03 18:14 UTC (permalink / raw)
  To: netdev, linux-kernel, kuba
  Cc: Jason A. Donenfeld, Eric Dumazet, David S . Miller, Eric Dumazet,
	Christophe Leroy

The _SLOW designation wasn't really descriptive of anything. This is
meant to be called from process context when it's possible to sleep. So
name this more aptly _SLEEPABLE, which better fits its intended use.

Fixes: 62c07983bef9 ("once: add DO_ONCE_SLOW() for sleepable contexts")
Cc: Jakub Kicinski <kuba@kernel.org>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
---
 include/linux/once.h       | 38 +++++++++++++++++++-------------------
 lib/once.c                 | 10 +++++-----
 net/ipv4/inet_hashtables.c |  4 ++--
 3 files changed, 26 insertions(+), 26 deletions(-)

diff --git a/include/linux/once.h b/include/linux/once.h
index 176ab75b42df..bc714d414448 100644
--- a/include/linux/once.h
+++ b/include/linux/once.h
@@ -13,9 +13,9 @@ void __do_once_done(bool *done, struct static_key_true *once_key,
 		    unsigned long *flags, struct module *mod);
 
 /* Variant for process contexts only. */
-bool __do_once_slow_start(bool *done);
-void __do_once_slow_done(bool *done, struct static_key_true *once_key,
-			 struct module *mod);
+bool __do_once_sleepable_start(bool *done);
+void __do_once_sleepable_done(bool *done, struct static_key_true *once_key,
+			      struct module *mod);
 
 /* Call a function exactly once. The idea of DO_ONCE() is to perform
  * a function call such as initialization of random seeds, etc, only
@@ -61,26 +61,26 @@ void __do_once_slow_done(bool *done, struct static_key_true *once_key,
 	})
 
 /* Variant of DO_ONCE() for process/sleepable contexts. */
-#define DO_ONCE_SLOW(func, ...)						     \
-	({								     \
-		bool ___ret = false;					     \
-		static bool __section(".data.once") ___done = false;	     \
-		static DEFINE_STATIC_KEY_TRUE(___once_key);		     \
-		if (static_branch_unlikely(&___once_key)) {		     \
-			___ret = __do_once_slow_start(&___done);	     \
-			if (unlikely(___ret)) {				     \
-				func(__VA_ARGS__);			     \
-				__do_once_slow_done(&___done, &___once_key,  \
-						    THIS_MODULE);	     \
-			}						     \
-		}							     \
-		___ret;							     \
+#define DO_ONCE_SLEEPABLE(func, ...)						\
+	({									\
+		bool ___ret = false;						\
+		static bool __section(".data.once") ___done = false;		\
+		static DEFINE_STATIC_KEY_TRUE(___once_key);			\
+		if (static_branch_unlikely(&___once_key)) {			\
+			___ret = __do_once_sleepable_start(&___done);		\
+			if (unlikely(___ret)) {					\
+				func(__VA_ARGS__);				\
+				__do_once_sleepable_done(&___done, &___once_key,\
+						    THIS_MODULE);		\
+			}							\
+		}								\
+		___ret;								\
 	})
 
 #define get_random_once(buf, nbytes)					     \
 	DO_ONCE(get_random_bytes, (buf), (nbytes))
 
-#define get_random_slow_once(buf, nbytes)				     \
-	DO_ONCE_SLOW(get_random_bytes, (buf), (nbytes))
+#define get_random_sleepable_once(buf, nbytes)				     \
+	DO_ONCE_SLEEPABLE(get_random_bytes, (buf), (nbytes))
 
 #endif /* _LINUX_ONCE_H */
diff --git a/lib/once.c b/lib/once.c
index 351f66aad310..2c306f0e891e 100644
--- a/lib/once.c
+++ b/lib/once.c
@@ -69,7 +69,7 @@ EXPORT_SYMBOL(__do_once_done);
 
 static DEFINE_MUTEX(once_mutex);
 
-bool __do_once_slow_start(bool *done)
+bool __do_once_sleepable_start(bool *done)
 	__acquires(once_mutex)
 {
 	mutex_lock(&once_mutex);
@@ -77,7 +77,7 @@ bool __do_once_slow_start(bool *done)
 		mutex_unlock(&once_mutex);
 		/* Keep sparse happy by restoring an even lock count on
 		 * this mutex. In case we return here, we don't call into
-		 * __do_once_done but return early in the DO_ONCE_SLOW() macro.
+		 * __do_once_done but return early in the DO_ONCE_SLEEPABLE() macro.
 		 */
 		__acquire(once_mutex);
 		return false;
@@ -85,9 +85,9 @@ bool __do_once_slow_start(bool *done)
 
 	return true;
 }
-EXPORT_SYMBOL(__do_once_slow_start);
+EXPORT_SYMBOL(__do_once_sleepable_start);
 
-void __do_once_slow_done(bool *done, struct static_key_true *once_key,
+void __do_once_sleepable_done(bool *done, struct static_key_true *once_key,
 			 struct module *mod)
 	__releases(once_mutex)
 {
@@ -95,4 +95,4 @@ void __do_once_slow_done(bool *done, struct static_key_true *once_key,
 	mutex_unlock(&once_mutex);
 	once_disable_jump(once_key, mod);
 }
-EXPORT_SYMBOL(__do_once_slow_done);
+EXPORT_SYMBOL(__do_once_sleepable_done);
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index dc1c5629cd0d..a0ad34e4f044 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -958,8 +958,8 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
 	if (likely(remaining > 1))
 		remaining &= ~1U;
 
-	get_random_slow_once(table_perturb,
-			     INET_TABLE_PERTURB_SIZE * sizeof(*table_perturb));
+	get_random_sleepable_once(table_perturb,
+				  INET_TABLE_PERTURB_SIZE * sizeof(*table_perturb));
 	index = port_offset & (INET_TABLE_PERTURB_SIZE - 1);
 
 	offset = READ_ONCE(table_perturb[index]) + (port_offset >> 32);
-- 
2.37.3


^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [PATCH net-next] once: rename _SLOW to _SLEEPABLE
@ 2022-10-03 18:14         ` Jason A. Donenfeld
  0 siblings, 0 replies; 13+ messages in thread
From: Jason A. Donenfeld @ 2022-10-03 18:14 UTC (permalink / raw)
  To: netdev, linux-kernel, kuba
  Cc: Jason A. Donenfeld, Eric Dumazet, David S . Miller, Eric Dumazet,
	Christophe Leroy

The _SLOW designation wasn't really descriptive of anything. This is
meant to be called from process context when it's possible to sleep. So
name this more aptly _SLEEPABLE, which better fits its intended use.

Fixes: 62c07983bef9 ("once: add DO_ONCE_SLOW() for sleepable contexts")
Cc: Jakub Kicinski <kuba@kernel.org>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
---
 include/linux/once.h       | 38 +++++++++++++++++++-------------------
 lib/once.c                 | 10 +++++-----
 net/ipv4/inet_hashtables.c |  4 ++--
 3 files changed, 26 insertions(+), 26 deletions(-)

diff --git a/include/linux/once.h b/include/linux/once.h
index 176ab75b42df..bc714d414448 100644
--- a/include/linux/once.h
+++ b/include/linux/once.h
@@ -13,9 +13,9 @@ void __do_once_done(bool *done, struct static_key_true *once_key,
 		    unsigned long *flags, struct module *mod);
 
 /* Variant for process contexts only. */
-bool __do_once_slow_start(bool *done);
-void __do_once_slow_done(bool *done, struct static_key_true *once_key,
-			 struct module *mod);
+bool __do_once_sleepable_start(bool *done);
+void __do_once_sleepable_done(bool *done, struct static_key_true *once_key,
+			      struct module *mod);
 
 /* Call a function exactly once. The idea of DO_ONCE() is to perform
  * a function call such as initialization of random seeds, etc, only
@@ -61,26 +61,26 @@ void __do_once_slow_done(bool *done, struct static_key_true *once_key,
 	})
 
 /* Variant of DO_ONCE() for process/sleepable contexts. */
-#define DO_ONCE_SLOW(func, ...)						     \
-	({								     \
-		bool ___ret = false;					     \
-		static bool __section(".data.once") ___done = false;	     \
-		static DEFINE_STATIC_KEY_TRUE(___once_key);		     \
-		if (static_branch_unlikely(&___once_key)) {		     \
-			___ret = __do_once_slow_start(&___done);	     \
-			if (unlikely(___ret)) {				     \
-				func(__VA_ARGS__);			     \
-				__do_once_slow_done(&___done, &___once_key,  \
-						    THIS_MODULE);	     \
-			}						     \
-		}							     \
-		___ret;							     \
+#define DO_ONCE_SLEEPABLE(func, ...)						\
+	({									\
+		bool ___ret = false;						\
+		static bool __section(".data.once") ___done = false;		\
+		static DEFINE_STATIC_KEY_TRUE(___once_key);			\
+		if (static_branch_unlikely(&___once_key)) {			\
+			___ret = __do_once_sleepable_start(&___done);		\
+			if (unlikely(___ret)) {					\
+				func(__VA_ARGS__);				\
+				__do_once_sleepable_done(&___done, &___once_key,\
+						    THIS_MODULE);		\
+			}							\
+		}								\
+		___ret;								\
 	})
 
 #define get_random_once(buf, nbytes)					     \
 	DO_ONCE(get_random_bytes, (buf), (nbytes))
 
-#define get_random_slow_once(buf, nbytes)				     \
-	DO_ONCE_SLOW(get_random_bytes, (buf), (nbytes))
+#define get_random_sleepable_once(buf, nbytes)				     \
+	DO_ONCE_SLEEPABLE(get_random_bytes, (buf), (nbytes))
 
 #endif /* _LINUX_ONCE_H */
diff --git a/lib/once.c b/lib/once.c
index 351f66aad310..2c306f0e891e 100644
--- a/lib/once.c
+++ b/lib/once.c
@@ -69,7 +69,7 @@ EXPORT_SYMBOL(__do_once_done);
 
 static DEFINE_MUTEX(once_mutex);
 
-bool __do_once_slow_start(bool *done)
+bool __do_once_sleepable_start(bool *done)
 	__acquires(once_mutex)
 {
 	mutex_lock(&once_mutex);
@@ -77,7 +77,7 @@ bool __do_once_slow_start(bool *done)
 		mutex_unlock(&once_mutex);
 		/* Keep sparse happy by restoring an even lock count on
 		 * this mutex. In case we return here, we don't call into
-		 * __do_once_done but return early in the DO_ONCE_SLOW() macro.
+		 * __do_once_done but return early in the DO_ONCE_SLEEPABLE() macro.
 		 */
 		__acquire(once_mutex);
 		return false;
@@ -85,9 +85,9 @@ bool __do_once_slow_start(bool *done)
 
 	return true;
 }
-EXPORT_SYMBOL(__do_once_slow_start);
+EXPORT_SYMBOL(__do_once_sleepable_start);
 
-void __do_once_slow_done(bool *done, struct static_key_true *once_key,
+void __do_once_sleepable_done(bool *done, struct static_key_true *once_key,
 			 struct module *mod)
 	__releases(once_mutex)
 {
@@ -95,4 +95,4 @@ void __do_once_slow_done(bool *done, struct static_key_true *once_key,
 	mutex_unlock(&once_mutex);
 	once_disable_jump(once_key, mod);
 }
-EXPORT_SYMBOL(__do_once_slow_done);
+EXPORT_SYMBOL(__do_once_sleepable_done);
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index dc1c5629cd0d..a0ad34e4f044 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -958,8 +958,8 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
 	if (likely(remaining > 1))
 		remaining &= ~1U;
 
-	get_random_slow_once(table_perturb,
-			     INET_TABLE_PERTURB_SIZE * sizeof(*table_perturb));
+	get_random_sleepable_once(table_perturb,
+				  INET_TABLE_PERTURB_SIZE * sizeof(*table_perturb));
 	index = port_offset & (INET_TABLE_PERTURB_SIZE - 1);
 
 	offset = READ_ONCE(table_perturb[index]) + (port_offset >> 32);
-- 
2.37.3


^ permalink raw reply related	[flat|nested] 13+ messages in thread

* Re: [PATCH] once: rename _SLOW to _SLEEPABLE
  2022-10-03 18:14         ` [PATCH net-next] " Jason A. Donenfeld
  (?)
@ 2022-10-03 22:50         ` Eric Dumazet
  -1 siblings, 0 replies; 13+ messages in thread
From: Eric Dumazet @ 2022-10-03 22:50 UTC (permalink / raw)
  To: Jason A. Donenfeld
  Cc: netdev, LKML, Jakub Kicinski, Eric Dumazet, David S . Miller,
	Christophe Leroy

On Mon, Oct 3, 2022 at 11:14 AM Jason A. Donenfeld <Jason@zx2c4.com> wrote:
>
> The _SLOW designation wasn't really descriptive of anything. This is
> meant to be called from process context when it's possible to sleep. So
> name this more aptly _SLEEPABLE, which better fits its intended use.
>
> Fixes: 62c07983bef9 ("once: add DO_ONCE_SLOW() for sleepable contexts")

Yes, this works for me, thank you.

Reviewed-by: Eric Dumazet <edumazet@google.com>

^ permalink raw reply	[flat|nested] 13+ messages in thread

end of thread, other threads:[~2022-10-03 22:50 UTC | newest]

Thread overview: 13+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-10-01 20:51 [PATCH net-next] once: add DO_ONCE_SLOW() for sleepable contexts Eric Dumazet
2022-10-01 21:15 ` Willy Tarreau
2022-10-01 22:50   ` Jason A. Donenfeld
2022-10-02  5:38     ` Willy Tarreau
2022-10-01 22:44 ` Jason A. Donenfeld
2022-10-01 22:50   ` Eric Dumazet
2022-10-03 17:25   ` Jakub Kicinski
2022-10-03 17:43     ` Jason A. Donenfeld
2022-10-03 18:14       ` [PATCH] once: rename _SLOW to _SLEEPABLE Jason A. Donenfeld
2022-10-03 18:14         ` [PATCH net-next] " Jason A. Donenfeld
2022-10-03 22:50         ` [PATCH] " Eric Dumazet
2022-10-02  8:58 ` [PATCH net-next] once: add DO_ONCE_SLOW() for sleepable contexts Christophe Leroy
2022-10-03 12:40 ` patchwork-bot+netdevbpf

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.