* [PATCH REPOST] random: Add a spinlock_t to struct batched_entropy
@ 2019-04-26 10:34 Sebastian Andrzej Siewior
  2019-04-28 18:29 ` Theodore Ts'o
From: Sebastian Andrzej Siewior @ 2019-04-26 10:34 UTC (permalink / raw)
  To: Theodore Ts'o, Andrew Morton
  Cc: linux-kernel, tglx, Sebastian Andrzej Siewior

The per-CPU variable batched_entropy_uXX is protected by get_cpu_var().
This is just a preempt_disable(), which protects against concurrent
access from process context on the local CPU. It does not protect
against access from another context on the same CPU: it is possible
that a preemptible context reads slot 0, an interrupt occurs, and the
same value is read again from hard or soft IRQ context.
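
For illustration, a minimal sketch of the pre-patch fast path,
simplified from get_random_u64() (the crng_ready()/arch RNG handling
and the old use_lock logic are omitted):

	u64 get_random_u64(void)
	{
		u64 ret;
		struct batched_entropy *batch;

		/* get_cpu_var() only disables preemption */
		batch = &get_cpu_var(batched_entropy_u64);
		if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
			extract_crng((u8 *)batch->entropy_u64);
			batch->position = 0;
		}
		/* A hard or soft IRQ arriving here can run this same code
		 * and read entropy_u64[position] before the increment below
		 * is stored, handing out the same value twice. */
		ret = batch->entropy_u64[batch->position++];
		put_cpu_var(batched_entropy_u64);
		return ret;
	}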

The above scenario is confirmed by lockdep if we add a spinlock:
| ================================
| WARNING: inconsistent lock state
| 5.1.0-rc3+ #42 Not tainted
| --------------------------------
| inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W} usage.
| ksoftirqd/9/56 [HC0[0]:SC1[1]:HE0:SE0] takes:
| (____ptrval____) (batched_entropy_u32.lock){+.?.}, at: get_random_u32+0x3e/0xe0
| {SOFTIRQ-ON-W} state was registered at:
|   _raw_spin_lock+0x2a/0x40
|   get_random_u32+0x3e/0xe0
|   new_slab+0x15c/0x7b0
|   ___slab_alloc+0x492/0x620
|   __slab_alloc.isra.73+0x53/0xa0
|   kmem_cache_alloc_node+0xaf/0x2a0
|   copy_process.part.41+0x1e1/0x2370
|   _do_fork+0xdb/0x6d0
|   kernel_thread+0x20/0x30
|   kthreadd+0x1ba/0x220
|   ret_from_fork+0x3a/0x50
…
| other info that might help us debug this:
|  Possible unsafe locking scenario:
|
|        CPU0
|        ----
|   lock(batched_entropy_u32.lock);
|   <Interrupt>
|     lock(batched_entropy_u32.lock);
|
|  *** DEADLOCK ***
|
| stack backtrace:
| Call Trace:
…
|  kmem_cache_alloc_trace+0x20e/0x270
|  ipmi_alloc_recv_msg+0x16/0x40
…
|  __do_softirq+0xec/0x48d
|  run_ksoftirqd+0x37/0x60
|  smpboot_thread_fn+0x191/0x290
|  kthread+0xfe/0x130
|  ret_from_fork+0x3a/0x50

Add a spinlock_t to the batched_entropy data structure and acquire the
lock while accessing it. Acquire the lock with interrupts disabled,
because these functions may be used from interrupt context.
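
In isolation, the locking pattern is (sketch only; the complete change
is in the diff below):

	spin_lock_irqsave(&batch->batch_lock, flags); /* disables local IRQs */
	/* ... consume or refill batch->entropy_uXX / batch->position ... */
	spin_unlock_irqrestore(&batch->batch_lock, flags);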

Remove the global batched_entropy_reset_lock. Now that each batch has
its own lock, the data structure can also be accessed safely from a
remote CPU.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
RFC pointing out the problem 2019-04-04:
	https://lkml.kernel.org/r/20190404170528.tlzai3jclzcbns3z@linutronix.de

Proper patch sent 2019-04-16:
	https://lkml.kernel.org/r/20190416140858.31980-1-bigeasy@linutronix.de

 drivers/char/random.c | 52 ++++++++++++++++++++++---------------------
 1 file changed, 27 insertions(+), 25 deletions(-)

diff --git a/drivers/char/random.c b/drivers/char/random.c
index 38c6d1af6d1c0..8d25c7b1f99bc 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -2211,8 +2211,8 @@ struct batched_entropy {
 		u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)];
 	};
 	unsigned int position;
+	spinlock_t batch_lock;
 };
-static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
 
 /*
  * Get a random word for internal kernel use only. The quality of the random
@@ -2222,12 +2222,14 @@ static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_
  * wait_for_random_bytes() should be called and return 0 at least once
  * at any point prior.
  */
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
+	.batch_lock	= __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
+};
+
 u64 get_random_u64(void)
 {
 	u64 ret;
-	bool use_lock;
-	unsigned long flags = 0;
+	unsigned long flags;
 	struct batched_entropy *batch;
 	static void *previous;
 
@@ -2242,28 +2244,25 @@ u64 get_random_u64(void)
 
 	warn_unseeded_randomness(&previous);
 
-	use_lock = READ_ONCE(crng_init) < 2;
-	batch = &get_cpu_var(batched_entropy_u64);
-	if (use_lock)
-		read_lock_irqsave(&batched_entropy_reset_lock, flags);
+	batch = raw_cpu_ptr(&batched_entropy_u64);
+	spin_lock_irqsave(&batch->batch_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
 		extract_crng((u8 *)batch->entropy_u64);
 		batch->position = 0;
 	}
 	ret = batch->entropy_u64[batch->position++];
-	if (use_lock)
-		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
-	put_cpu_var(batched_entropy_u64);
+	spin_unlock_irqrestore(&batch->batch_lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(get_random_u64);
 
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
+	.batch_lock	= __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
+};
 u32 get_random_u32(void)
 {
 	u32 ret;
-	bool use_lock;
-	unsigned long flags = 0;
+	unsigned long flags;
 	struct batched_entropy *batch;
 	static void *previous;
 
@@ -2272,18 +2271,14 @@ u32 get_random_u32(void)
 
 	warn_unseeded_randomness(&previous);
 
-	use_lock = READ_ONCE(crng_init) < 2;
-	batch = &get_cpu_var(batched_entropy_u32);
-	if (use_lock)
-		read_lock_irqsave(&batched_entropy_reset_lock, flags);
+	batch = raw_cpu_ptr(&batched_entropy_u32);
+	spin_lock_irqsave(&batch->batch_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
 		extract_crng((u8 *)batch->entropy_u32);
 		batch->position = 0;
 	}
 	ret = batch->entropy_u32[batch->position++];
-	if (use_lock)
-		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
-	put_cpu_var(batched_entropy_u32);
+	spin_unlock_irqrestore(&batch->batch_lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(get_random_u32);
@@ -2297,12 +2292,19 @@ static void invalidate_batched_entropy(void)
 	int cpu;
 	unsigned long flags;
 
-	write_lock_irqsave(&batched_entropy_reset_lock, flags);
 	for_each_possible_cpu (cpu) {
-		per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
-		per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
+		struct batched_entropy *batched_entropy;
+
+		batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
+		spin_lock_irqsave(&batched_entropy->batch_lock, flags);
+		batched_entropy->position = 0;
+		spin_unlock(&batched_entropy->batch_lock);
+
+		batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
+		spin_lock(&batched_entropy->batch_lock);
+		batched_entropy->position = 0;
+		spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
 	}
-	write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
 }
 
 /**
-- 
2.20.1



* Re: [PATCH REPOST] random: Add a spinlock_t to struct batched_entropy
  2019-04-26 10:34 [PATCH REPOST] random: Add a spinlock_t to struct batched_entropy Sebastian Andrzej Siewior
@ 2019-04-28 18:29 ` Theodore Ts'o
  2019-04-30  9:02   ` Sebastian Andrzej Siewior
From: Theodore Ts'o @ 2019-04-28 18:29 UTC (permalink / raw)
  To: Sebastian Andrzej Siewior; +Cc: Andrew Morton, linux-kernel, tglx

On Fri, Apr 26, 2019 at 12:34:38PM +0200, Sebastian Andrzej Siewior wrote:
> The per-CPU variable batched_entropy_uXX is protected by get_cpu_var().
> This is just a preempt_disable(), which protects against concurrent
> access from process context on the local CPU. It does not protect
> against access from another context on the same CPU: it is possible
> that a preemptible context reads slot 0, an interrupt occurs, and the
> same value is read again from hard or soft IRQ context.

This patch is already on the random.git tree.  (It wasn't in
linux-next because I had forgotten that linux-next pulls from the dev
branch, as opposed to the master branch.)

Cheers,

					- Ted


* Re: [PATCH REPOST] random: Add a spinlock_t to struct batched_entropy
  2019-04-28 18:29 ` Theodore Ts'o
@ 2019-04-30  9:02   ` Sebastian Andrzej Siewior
From: Sebastian Andrzej Siewior @ 2019-04-30  9:02 UTC (permalink / raw)
  To: Theodore Ts'o, Andrew Morton, linux-kernel, tglx

On 2019-04-28 14:29:28 [-0400], Theodore Ts'o wrote:
> This patch is already on the random.git tree.  (It wasn't in
> linux-next because I had forgotten that linux-next pulls from the dev
> branch, as opposed to the master branch.)

Oh, thank you!

> Cheers,
> 
> 					- Ted

Sebastian

