* [PATCH] mm/kfence: select random number before taking raw lock
@ 2022-06-09 12:17 Jason A. Donenfeld
  2022-06-09 12:27 ` Marco Elver
  2022-06-09 12:31 ` [PATCH] " Geert Uytterhoeven
From: Jason A. Donenfeld @ 2022-06-09 12:17 UTC (permalink / raw)
  To: linux-mm, linux-kernel
  Cc: Jason A. Donenfeld, John Ogness, Alexander Potapenko,
	Marco Elver, Dmitry Vyukov, Geert Uytterhoeven

The RNG uses vanilla spinlocks, not raw spinlocks, so kfence should pick
its random numbers before taking its raw spinlocks. This also has the
nice effect of doing less work inside the lock. It should fix a splat
that Geert saw with CONFIG_PROVE_RAW_LOCK_NESTING:

     dump_backtrace.part.0+0x98/0xc0
     show_stack+0x14/0x28
     dump_stack_lvl+0xac/0xec
     dump_stack+0x14/0x2c
     __lock_acquire+0x388/0x10a0
     lock_acquire+0x190/0x2c0
     _raw_spin_lock_irqsave+0x6c/0x94
     crng_make_state+0x148/0x1e4
     _get_random_bytes.part.0+0x4c/0xe8
     get_random_u32+0x4c/0x140
     __kfence_alloc+0x460/0x5c4
     kmem_cache_alloc_trace+0x194/0x1dc
     __kthread_create_on_node+0x5c/0x1a8
     kthread_create_on_node+0x58/0x7c
     printk_start_kthread.part.0+0x34/0xa8
     printk_activate_kthreads+0x4c/0x54
     do_one_initcall+0xec/0x278
     kernel_init_freeable+0x11c/0x214
     kernel_init+0x24/0x124
     ret_from_fork+0x10/0x20

Cc: John Ogness <john.ogness@linutronix.de>
Cc: Alexander Potapenko <glider@google.com>
Cc: Marco Elver <elver@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
---
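A simplified sketch of the broken ordering, for context (the lock name
here is just a stand-in for kfence's raw spinlocks; the real change is
in kfence_guarded_alloc() in the diff below):

	/* Before: prandom_u32_max() bottoms out in get_random_u32(),
	 * whose crng_make_state() takes a vanilla spinlock -- and
	 * taking a vanilla spinlock under a raw one is the invalid
	 * nesting shown in the splat above.
	 */
	raw_spin_lock_irqsave(&lock, flags);
	if (prandom_u32_max(2)) {	/* uniform 0 or 1: a coin flip */
		/* ... */
	}
	raw_spin_unlock_irqrestore(&lock, flags);

	/* After: flip the coin first, with no raw lock held. */
	bool random_right_allocate = prandom_u32_max(2);

	raw_spin_lock_irqsave(&lock, flags);
	if (random_right_allocate) {
		/* ... */
	}
	raw_spin_unlock_irqrestore(&lock, flags);
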
 mm/kfence/core.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 4e7cd4c8e687..6322b7729b50 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -360,6 +360,9 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
 	unsigned long flags;
 	struct slab *slab;
 	void *addr;
+	bool random_right_allocate = prandom_u32_max(2);
+	bool random_fault = CONFIG_KFENCE_STRESS_TEST_FAULTS &&
+			    !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS);
 
 	/* Try to obtain a free object. */
 	raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
@@ -404,7 +407,7 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
 	 * is that the out-of-bounds accesses detected are deterministic for
 	 * such allocations.
 	 */
-	if (prandom_u32_max(2)) {
+	if (random_right_allocate) {
 		/* Allocate on the "right" side, re-calculate address. */
 		meta->addr += PAGE_SIZE - size;
 		meta->addr = ALIGN_DOWN(meta->addr, cache->align);
@@ -444,7 +447,7 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
 	if (cache->ctor)
 		cache->ctor(addr);
 
-	if (CONFIG_KFENCE_STRESS_TEST_FAULTS && !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS))
+	if (random_fault)
 		kfence_protect(meta->addr); /* Random "faults" by protecting the object. */
 
 	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]);
-- 
2.35.1



* Re: [PATCH] mm/kfence: select random number before taking raw lock
  2022-06-09 12:17 [PATCH] mm/kfence: select random number before taking raw lock Jason A. Donenfeld
@ 2022-06-09 12:27 ` Marco Elver
  2022-06-09 12:29   ` Jason A. Donenfeld
  2022-06-09 12:31 ` [PATCH] " Geert Uytterhoeven
From: Marco Elver @ 2022-06-09 12:27 UTC (permalink / raw)
  To: Jason A. Donenfeld
  Cc: linux-mm, linux-kernel, John Ogness, Alexander Potapenko,
	Dmitry Vyukov, Geert Uytterhoeven

On Thu, 9 Jun 2022 at 14:17, Jason A. Donenfeld <Jason@zx2c4.com> wrote:
>
> The RNG uses vanilla spinlocks, not raw spinlocks, so kfence should pick
> its random numbers before taking its raw spinlocks. This also has the
> nice effect of doing less work inside the lock. It should fix a splat
> that Geert saw with CONFIG_PROVE_RAW_LOCK_NESTING:
>
>      dump_backtrace.part.0+0x98/0xc0
>      show_stack+0x14/0x28
>      dump_stack_lvl+0xac/0xec
>      dump_stack+0x14/0x2c
>      __lock_acquire+0x388/0x10a0
>      lock_acquire+0x190/0x2c0
>      _raw_spin_lock_irqsave+0x6c/0x94
>      crng_make_state+0x148/0x1e4
>      _get_random_bytes.part.0+0x4c/0xe8
>      get_random_u32+0x4c/0x140
>      __kfence_alloc+0x460/0x5c4
>      kmem_cache_alloc_trace+0x194/0x1dc
>      __kthread_create_on_node+0x5c/0x1a8
>      kthread_create_on_node+0x58/0x7c
>      printk_start_kthread.part.0+0x34/0xa8
>      printk_activate_kthreads+0x4c/0x54
>      do_one_initcall+0xec/0x278
>      kernel_init_freeable+0x11c/0x214
>      kernel_init+0x24/0x124
>      ret_from_fork+0x10/0x20
>
> Cc: John Ogness <john.ogness@linutronix.de>
> Cc: Alexander Potapenko <glider@google.com>
> Cc: Marco Elver <elver@google.com>
> Cc: Dmitry Vyukov <dvyukov@google.com>
> Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
> Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
> ---
>  mm/kfence/core.c | 7 +++++--
>  1 file changed, 5 insertions(+), 2 deletions(-)
>
> diff --git a/mm/kfence/core.c b/mm/kfence/core.c
> index 4e7cd4c8e687..6322b7729b50 100644
> --- a/mm/kfence/core.c
> +++ b/mm/kfence/core.c
> @@ -360,6 +360,9 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
>         unsigned long flags;
>         struct slab *slab;
>         void *addr;
> +       bool random_right_allocate = prandom_u32_max(2);
> +       bool random_fault = CONFIG_KFENCE_STRESS_TEST_FAULTS &&
> +                           !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS);
>
>         /* Try to obtain a free object. */
>         raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
> @@ -404,7 +407,7 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
>          * is that the out-of-bounds accesses detected are deterministic for
>          * such allocations.
>          */
> -       if (prandom_u32_max(2)) {
> +       if (random_right_allocate) {
>                 /* Allocate on the "right" side, re-calculate address. */
>                 meta->addr += PAGE_SIZE - size;
>                 meta->addr = ALIGN_DOWN(meta->addr, cache->align);
> @@ -444,7 +447,7 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
>         if (cache->ctor)
>                 cache->ctor(addr);
>
> -       if (CONFIG_KFENCE_STRESS_TEST_FAULTS && !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS))
> +       if (random_fault)

The compiler should elide this branch entirely if
CONFIG_KFENCE_STRESS_TEST_FAULTS=0, but I'm not sure it'll always do
so now. My suggestion is to make both new bools const, to help out
the compiler a little.

Otherwise looks good, thanks for the quick fix!


* Re: [PATCH] mm/kfence: select random number before taking raw lock
  2022-06-09 12:27 ` Marco Elver
@ 2022-06-09 12:29   ` Jason A. Donenfeld
  2022-06-09 12:33     ` [PATCH v2] " Jason A. Donenfeld
From: Jason A. Donenfeld @ 2022-06-09 12:29 UTC (permalink / raw)
  To: Marco Elver
  Cc: Linux-MM, LKML, John Ogness, Alexander Potapenko, Dmitry Vyukov,
	Geert Uytterhoeven

On Thu, Jun 9, 2022 at 2:27 PM Marco Elver <elver@google.com> wrote:
> The compiler should elide this branch entirely if
> CONFIG_KFENCE_STRESS_TEST_FAULTS=0, but I'm not sure it'll always do
> so now. My suggestion is to make both new bools const, to help out
> the compiler a little.
>
> Otherwise looks good, thanks for the quick fix!

Disassembling reveals it still does elide the branch (gcc 11.3), but
I'll make them const just in case and send a v2.

Jason


* Re: [PATCH] mm/kfence: select random number before taking raw lock
  2022-06-09 12:17 [PATCH] mm/kfence: select random number before taking raw lock Jason A. Donenfeld
  2022-06-09 12:27 ` Marco Elver
@ 2022-06-09 12:31 ` Geert Uytterhoeven
From: Geert Uytterhoeven @ 2022-06-09 12:31 UTC (permalink / raw)
  To: Jason A. Donenfeld
  Cc: Linux MM, Linux Kernel Mailing List, John Ogness,
	Alexander Potapenko, Marco Elver, Dmitry Vyukov

Hi Jason,

On Thu, Jun 9, 2022 at 2:17 PM Jason A. Donenfeld <Jason@zx2c4.com> wrote:
> The RNG uses vanilla spinlocks, not raw spinlocks, so kfence should pick
> its random numbers before taking its raw spinlocks. This also has the
> nice effect of doing less work inside the lock. It should fix a splat
> that Geert saw with CONFIG_PROVE_RAW_LOCK_NESTING:
>
>      dump_backtrace.part.0+0x98/0xc0
>      show_stack+0x14/0x28
>      dump_stack_lvl+0xac/0xec
>      dump_stack+0x14/0x2c
>      __lock_acquire+0x388/0x10a0
>      lock_acquire+0x190/0x2c0
>      _raw_spin_lock_irqsave+0x6c/0x94
>      crng_make_state+0x148/0x1e4
>      _get_random_bytes.part.0+0x4c/0xe8
>      get_random_u32+0x4c/0x140
>      __kfence_alloc+0x460/0x5c4
>      kmem_cache_alloc_trace+0x194/0x1dc
>      __kthread_create_on_node+0x5c/0x1a8
>      kthread_create_on_node+0x58/0x7c
>      printk_start_kthread.part.0+0x34/0xa8
>      printk_activate_kthreads+0x4c/0x54
>      do_one_initcall+0xec/0x278
>      kernel_init_freeable+0x11c/0x214
>      kernel_init+0x24/0x124
>      ret_from_fork+0x10/0x20
>
> Cc: John Ogness <john.ogness@linutronix.de>
> Cc: Alexander Potapenko <glider@google.com>
> Cc: Marco Elver <elver@google.com>
> Cc: Dmitry Vyukov <dvyukov@google.com>
> Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
> Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>

Thank you, the splat is gone.

Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>

Gr{oetje,eeting}s,

                        Geert

--
Geert Uytterhoeven -- There's lots of Linux beyond ia32 -- geert@linux-m68k.org

In personal conversations with technical people, I call myself a hacker. But
when I'm talking to journalists I just say "programmer" or something like that.
                                -- Linus Torvalds


* [PATCH v2] mm/kfence: select random number before taking raw lock
  2022-06-09 12:29   ` Jason A. Donenfeld
@ 2022-06-09 12:33     ` Jason A. Donenfeld
  2022-06-09 12:41       ` Marco Elver
  2022-06-09 12:52       ` Petr Mladek
From: Jason A. Donenfeld @ 2022-06-09 12:33 UTC (permalink / raw)
  To: linux-mm, linux-kernel, kasan-dev
  Cc: Jason A. Donenfeld, John Ogness, Alexander Potapenko,
	Marco Elver, Dmitry Vyukov, Geert Uytterhoeven,
	Geert Uytterhoeven

The RNG uses vanilla spinlocks, not raw spinlocks, so kfence should pick
its random numbers before taking its raw spinlocks. This also has the
nice effect of doing less work inside the lock. It should fix a splat
that Geert saw with CONFIG_PROVE_RAW_LOCK_NESTING:

     dump_backtrace.part.0+0x98/0xc0
     show_stack+0x14/0x28
     dump_stack_lvl+0xac/0xec
     dump_stack+0x14/0x2c
     __lock_acquire+0x388/0x10a0
     lock_acquire+0x190/0x2c0
     _raw_spin_lock_irqsave+0x6c/0x94
     crng_make_state+0x148/0x1e4
     _get_random_bytes.part.0+0x4c/0xe8
     get_random_u32+0x4c/0x140
     __kfence_alloc+0x460/0x5c4
     kmem_cache_alloc_trace+0x194/0x1dc
     __kthread_create_on_node+0x5c/0x1a8
     kthread_create_on_node+0x58/0x7c
     printk_start_kthread.part.0+0x34/0xa8
     printk_activate_kthreads+0x4c/0x54
     do_one_initcall+0xec/0x278
     kernel_init_freeable+0x11c/0x214
     kernel_init+0x24/0x124
     ret_from_fork+0x10/0x20

Cc: John Ogness <john.ogness@linutronix.de>
Cc: Alexander Potapenko <glider@google.com>
Cc: Marco Elver <elver@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
---
Changes v1->v2:
- Make the bools const to help the compiler elide the branch when
  possible, as suggested by Marco.

 mm/kfence/core.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 4e7cd4c8e687..4b5e5a3d3a63 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -360,6 +360,9 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
 	unsigned long flags;
 	struct slab *slab;
 	void *addr;
+	const bool random_right_allocate = prandom_u32_max(2);
+	const bool random_fault = CONFIG_KFENCE_STRESS_TEST_FAULTS &&
+				  !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS);
 
 	/* Try to obtain a free object. */
 	raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
@@ -404,7 +407,7 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
 	 * is that the out-of-bounds accesses detected are deterministic for
 	 * such allocations.
 	 */
-	if (prandom_u32_max(2)) {
+	if (random_right_allocate) {
 		/* Allocate on the "right" side, re-calculate address. */
 		meta->addr += PAGE_SIZE - size;
 		meta->addr = ALIGN_DOWN(meta->addr, cache->align);
@@ -444,7 +447,7 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
 	if (cache->ctor)
 		cache->ctor(addr);
 
-	if (CONFIG_KFENCE_STRESS_TEST_FAULTS && !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS))
+	if (random_fault)
 		kfence_protect(meta->addr); /* Random "faults" by protecting the object. */
 
 	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]);
-- 
2.35.1



* Re: [PATCH v2] mm/kfence: select random number before taking raw lock
  2022-06-09 12:33     ` [PATCH v2] " Jason A. Donenfeld
@ 2022-06-09 12:41       ` Marco Elver
  2022-06-09 12:52       ` Petr Mladek
From: Marco Elver @ 2022-06-09 12:41 UTC (permalink / raw)
  To: Jason A. Donenfeld, Andrew Morton
  Cc: linux-mm, linux-kernel, kasan-dev, John Ogness,
	Alexander Potapenko, Dmitry Vyukov, Geert Uytterhoeven,
	Geert Uytterhoeven

On Thu, Jun 09, 2022 at 02:33PM +0200, Jason A. Donenfeld wrote:
> The RNG uses vanilla spinlocks, not raw spinlocks, so kfence should pick
> its random numbers before taking its raw spinlocks. This also has the
> nice effect of doing less work inside the lock. It should fix a splat
> that Geert saw with CONFIG_PROVE_RAW_LOCK_NESTING:
> 
>      dump_backtrace.part.0+0x98/0xc0
>      show_stack+0x14/0x28
>      dump_stack_lvl+0xac/0xec
>      dump_stack+0x14/0x2c
>      __lock_acquire+0x388/0x10a0
>      lock_acquire+0x190/0x2c0
>      _raw_spin_lock_irqsave+0x6c/0x94
>      crng_make_state+0x148/0x1e4
>      _get_random_bytes.part.0+0x4c/0xe8
>      get_random_u32+0x4c/0x140
>      __kfence_alloc+0x460/0x5c4
>      kmem_cache_alloc_trace+0x194/0x1dc
>      __kthread_create_on_node+0x5c/0x1a8
>      kthread_create_on_node+0x58/0x7c
>      printk_start_kthread.part.0+0x34/0xa8
>      printk_activate_kthreads+0x4c/0x54
>      do_one_initcall+0xec/0x278
>      kernel_init_freeable+0x11c/0x214
>      kernel_init+0x24/0x124
>      ret_from_fork+0x10/0x20
> 
> Cc: John Ogness <john.ogness@linutronix.de>
> Cc: Alexander Potapenko <glider@google.com>
> Cc: Marco Elver <elver@google.com>
> Cc: Dmitry Vyukov <dvyukov@google.com>
> Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
> Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
> Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>

Reviewed-by: Marco Elver <elver@google.com>

Thank you.

> ---
> Changes v1->v2:
> - Make the bools const to help the compiler elide the branch when
>   possible, as suggested by Marco.
> 
>  mm/kfence/core.c | 7 +++++--
>  1 file changed, 5 insertions(+), 2 deletions(-)
> 
> diff --git a/mm/kfence/core.c b/mm/kfence/core.c
> index 4e7cd4c8e687..4b5e5a3d3a63 100644
> --- a/mm/kfence/core.c
> +++ b/mm/kfence/core.c
> @@ -360,6 +360,9 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
>  	unsigned long flags;
>  	struct slab *slab;
>  	void *addr;
> +	const bool random_right_allocate = prandom_u32_max(2);
> +	const bool random_fault = CONFIG_KFENCE_STRESS_TEST_FAULTS &&
> +				  !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS);
>  
>  	/* Try to obtain a free object. */
>  	raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
> @@ -404,7 +407,7 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
>  	 * is that the out-of-bounds accesses detected are deterministic for
>  	 * such allocations.
>  	 */
> -	if (prandom_u32_max(2)) {
> +	if (random_right_allocate) {
>  		/* Allocate on the "right" side, re-calculate address. */
>  		meta->addr += PAGE_SIZE - size;
>  		meta->addr = ALIGN_DOWN(meta->addr, cache->align);
> @@ -444,7 +447,7 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
>  	if (cache->ctor)
>  		cache->ctor(addr);
>  
> -	if (CONFIG_KFENCE_STRESS_TEST_FAULTS && !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS))
> +	if (random_fault)
>  		kfence_protect(meta->addr); /* Random "faults" by protecting the object. */
>  
>  	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]);
> -- 
> 2.35.1
> 


* Re: [PATCH v2] mm/kfence: select random number before taking raw lock
  2022-06-09 12:33     ` [PATCH v2] " Jason A. Donenfeld
  2022-06-09 12:41       ` Marco Elver
@ 2022-06-09 12:52       ` Petr Mladek
From: Petr Mladek @ 2022-06-09 12:52 UTC (permalink / raw)
  To: Jason A. Donenfeld
  Cc: linux-mm, linux-kernel, kasan-dev, John Ogness,
	Alexander Potapenko, Marco Elver, Dmitry Vyukov,
	Geert Uytterhoeven, Geert Uytterhoeven

On Thu 2022-06-09 14:33:19, Jason A. Donenfeld wrote:
> The RNG uses vanilla spinlocks, not raw spinlocks, so kfence should pick
> its random numbers before taking its raw spinlocks. This also has the
> nice effect of doing less work inside the lock. It should fix a splat
> that Geert saw with CONFIG_PROVE_RAW_LOCK_NESTING:
> 
>      dump_backtrace.part.0+0x98/0xc0
>      show_stack+0x14/0x28
>      dump_stack_lvl+0xac/0xec
>      dump_stack+0x14/0x2c
>      __lock_acquire+0x388/0x10a0
>      lock_acquire+0x190/0x2c0
>      _raw_spin_lock_irqsave+0x6c/0x94
>      crng_make_state+0x148/0x1e4
>      _get_random_bytes.part.0+0x4c/0xe8
>      get_random_u32+0x4c/0x140
>      __kfence_alloc+0x460/0x5c4
>      kmem_cache_alloc_trace+0x194/0x1dc
>      __kthread_create_on_node+0x5c/0x1a8
>      kthread_create_on_node+0x58/0x7c
>      printk_start_kthread.part.0+0x34/0xa8
>      printk_activate_kthreads+0x4c/0x54
>      do_one_initcall+0xec/0x278
>      kernel_init_freeable+0x11c/0x214
>      kernel_init+0x24/0x124
>      ret_from_fork+0x10/0x20
> 
> Cc: John Ogness <john.ogness@linutronix.de>
> Cc: Alexander Potapenko <glider@google.com>
> Cc: Marco Elver <elver@google.com>
> Cc: Dmitry Vyukov <dvyukov@google.com>
> Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
> Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
> Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>

Reviewed-by: Petr Mladek <pmladek@suse.com>

Thanks a lot for fixing this. It is great to know that the printk
kthreads were not the culprit here ;-)

Best Regards,
Petr
