From: "Jason A. Donenfeld" <Jason@zx2c4.com>
To: Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
	LKML <linux-kernel@vger.kernel.org>
Cc: "Jason A. Donenfeld" <Jason@zx2c4.com>,
	"Thomas Gleixner" <tglx@linutronix.de>,
	"Peter Zijlstra" <peterz@infradead.org>,
	"Theodore Ts'o" <tytso@mit.edu>,
	"Sultan Alsawaf" <sultan@kerneltoast.com>,
	"Jonathan Neuschäfer" <j.neuschaefer@gmx.net>
Subject: [PATCH v2] random: defer fast pool mixing to worker
Date: Sat,  5 Feb 2022 00:09:57 +0100
Message-ID: <20220204230957.220277-1-Jason@zx2c4.com>
In-Reply-To: <CAHmME9oYj6tp+THaz_74SWikpTLSzukH-DynypB+7SZ56hucog@mail.gmail.com>

On PREEMPT_RT, it's problematic to take spinlocks from hard irq
handlers. We can fix this by deferring the dumping of the fast pool
into the input pool to a work queue.

We accomplish this with some careful rules on fast_pool->count:

  - When it's incremented to >= 64, we schedule the work.
  - If the top bit is set, we never schedule the work, even if >= 64.
  - The worker is responsible for setting it back to 0 when it's done.

In the worst case, an irq handler is mixing a new irq into the pool at
the same time as the worker is dumping it into the input pool. In this
case, we only ever set the count back to 0 _after_ we're done, so that
subsequent cycles will require a full 64 to dump it in again. In other
words, the result of this race is only ever adding a little bit more
information than normal, but never less, and never crediting any more
for this partial additional information.
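
Roughly sketched, the resulting protocol looks like this (an
illustrative condensation of the patch below; INIT_WORK and the
crng_init == 0 path are omitted for brevity):

  /* hard irq handler (sketch) */
  fast_mix(fast_pool);
  new_count = __this_cpu_inc_return(irq_randomness.count);
  if (new_count >= 64 && new_count < FAST_POOL_MIX_INFLIGHT &&
      time_after(now, fast_pool->last + HZ)) {
          __this_cpu_or(irq_randomness.count, FAST_POOL_MIX_INFLIGHT);
          queue_work_on(raw_smp_processor_id(), system_highpri_wq,
                        &fast_pool->mix);
  }

  /* worker (sketch) */
  mix_pool_bytes(&fast_pool->pool, sizeof(fast_pool->pool));
  /* zero the count (and thus the inflight bit) only after reading the pool */
  WRITE_ONCE(fast_pool->count, 0);
  credit_entropy_bits(1);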

Note that this doesn't deal with the spinlocks in crng_fast_load(),
which will have to be dealt with some other way.

Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Sultan Alsawaf <sultan@kerneltoast.com>
Cc: Jonathan Neuschäfer <j.neuschaefer@gmx.net>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
---
 drivers/char/random.c         | 54 ++++++++++++++++++++---------------
 include/trace/events/random.h |  6 ----
 2 files changed, 31 insertions(+), 29 deletions(-)

diff --git a/drivers/char/random.c b/drivers/char/random.c
index 5d7d6e01bbc4..575616de2e16 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -383,12 +383,6 @@ static void _mix_pool_bytes(const void *in, int nbytes)
 	blake2s_update(&input_pool.hash, in, nbytes);
 }
 
-static void __mix_pool_bytes(const void *in, int nbytes)
-{
-	trace_mix_pool_bytes_nolock(nbytes, _RET_IP_);
-	_mix_pool_bytes(in, nbytes);
-}
-
 static void mix_pool_bytes(const void *in, int nbytes)
 {
 	unsigned long flags;
@@ -400,11 +394,13 @@ static void mix_pool_bytes(const void *in, int nbytes)
 }
 
 struct fast_pool {
-	u32 pool[4];
+	struct work_struct mix;
 	unsigned long last;
+	u32 pool[4];
+	unsigned int count;
 	u16 reg_idx;
-	u8 count;
 };
+#define FAST_POOL_MIX_INFLIGHT (1U << 31)
 
 /*
  * This is a fast mixing routine used by the interrupt randomness
@@ -434,7 +430,6 @@ static void fast_mix(struct fast_pool *f)
 
 	f->pool[0] = a;  f->pool[1] = b;
 	f->pool[2] = c;  f->pool[3] = d;
-	f->count++;
 }
 
 static void process_random_ready_list(void)
@@ -985,12 +980,30 @@ static u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
 	return *ptr;
 }
 
+static void mix_interrupt_randomness(struct work_struct *work)
+{
+	struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
+
+	/*
+	 * Since this is the result of a trip through the scheduler, xor in
+	 * a cycle counter. It can't hurt, and might help.
+	 */
+	fast_pool->pool[3] ^= random_get_entropy();
+
+	mix_pool_bytes(&fast_pool->pool, sizeof(fast_pool->pool));
+	/* We take care to zero out the count only after we're done reading the pool. */
+	WRITE_ONCE(fast_pool->count, 0);
+	fast_pool->last = jiffies;
+	credit_entropy_bits(1);
+}
+
 void add_interrupt_randomness(int irq)
 {
 	struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
 	struct pt_regs *regs = get_irq_regs();
 	unsigned long now = jiffies;
 	cycles_t cycles = random_get_entropy();
+	unsigned int new_count;
 	u32 c_high, j_high;
 	u64 ip;
 
@@ -1007,9 +1020,10 @@ void add_interrupt_randomness(int irq)
 
 	fast_mix(fast_pool);
 	add_interrupt_bench(cycles);
+	new_count = __this_cpu_inc_return(irq_randomness.count);
 
 	if (unlikely(crng_init == 0)) {
-		if ((fast_pool->count >= 64) &&
+		if (new_count >= 64 &&
 		    crng_fast_load((u8 *)fast_pool->pool, sizeof(fast_pool->pool)) > 0) {
 			fast_pool->count = 0;
 			fast_pool->last = now;
@@ -1017,20 +1031,14 @@ void add_interrupt_randomness(int irq)
 		return;
 	}
 
-	if ((fast_pool->count < 64) && !time_after(now, fast_pool->last + HZ))
-		return;
-
-	if (!spin_trylock(&input_pool.lock))
-		return;
-
-	fast_pool->last = now;
-	__mix_pool_bytes(&fast_pool->pool, sizeof(fast_pool->pool));
-	spin_unlock(&input_pool.lock);
-
-	fast_pool->count = 0;
+	if (new_count >= 64 && new_count < FAST_POOL_MIX_INFLIGHT &&
+	    time_after(now, fast_pool->last + HZ)) {
+		if (unlikely(!fast_pool->mix.func))
+			INIT_WORK(&fast_pool->mix, mix_interrupt_randomness);
+		__this_cpu_or(irq_randomness.count, FAST_POOL_MIX_INFLIGHT);
+		queue_work_on(raw_smp_processor_id(), system_highpri_wq, &fast_pool->mix);
 
-	/* award one bit for the contents of the fast pool */
-	credit_entropy_bits(1);
+	}
 }
 EXPORT_SYMBOL_GPL(add_interrupt_randomness);
 
diff --git a/include/trace/events/random.h b/include/trace/events/random.h
index ad149aeaf42c..833f42afc70f 100644
--- a/include/trace/events/random.h
+++ b/include/trace/events/random.h
@@ -52,12 +52,6 @@ DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes,
 	TP_ARGS(bytes, IP)
 );
 
-DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes_nolock,
-	TP_PROTO(int bytes, unsigned long IP),
-
-	TP_ARGS(bytes, IP)
-);
-
 TRACE_EVENT(credit_entropy_bits,
 	TP_PROTO(int bits, int entropy_count, unsigned long IP),
 
-- 
2.35.0


Thread overview: 31+ messages
2021-12-07 12:17 [PATCH 0/5] random: Add PREEMPT_RT support Sebastian Andrzej Siewior
2021-12-07 12:17 ` [PATCH 1/5] random: Remove unused irq_flags argument from add_interrupt_randomness() Sebastian Andrzej Siewior
2021-12-07 12:44   ` Wei Liu
2021-12-07 17:35   ` Jason A. Donenfeld
2021-12-07 12:17 ` [PATCH 2/5] irq: Remove unsued flags argument from __handle_irq_event_percpu() Sebastian Andrzej Siewior
2021-12-07 17:41   ` Jason A. Donenfeld
2021-12-11 22:39     ` Thomas Gleixner
2021-12-12 21:13       ` Jason A. Donenfeld
2021-12-07 12:17 ` [PATCH 3/5] random: Split add_interrupt_randomness() Sebastian Andrzej Siewior
2021-12-07 12:17 ` [PATCH 4/5] random: Move the fast_pool reset into the caller Sebastian Andrzej Siewior
2021-12-07 12:17 ` [PATCH 5/5] random: Defer processing of randomness on PREEMPT_RT Sebastian Andrzej Siewior
2021-12-07 18:14   ` Jason A. Donenfeld
2021-12-07 20:10     ` Sebastian Andrzej Siewior
2021-12-13 15:16       ` Sebastian Andrzej Siewior
2021-12-17  2:23       ` Herbert Xu
2021-12-17  9:00         ` Sebastian Andrzej Siewior
2021-12-18  3:24           ` Herbert Xu
2021-12-19  9:48             ` Sebastian Andrzej Siewior
2021-12-20  2:56               ` Herbert Xu
2021-12-20 14:38       ` Jason A. Donenfeld
2022-01-12 17:49         ` Sebastian Andrzej Siewior
2022-01-30 22:55           ` Jason A. Donenfeld
2022-01-31 16:33             ` Sebastian Andrzej Siewior
2022-02-04 15:31               ` [PATCH RFC v1] random: do not take spinlocks in irq handler Jason A. Donenfeld
2022-02-04 15:58                 ` Jason A. Donenfeld
2022-02-04 20:53                   ` Sebastian Andrzej Siewior
2022-02-04 20:47                 ` Sebastian Andrzej Siewior
2022-02-04 22:08                   ` Jason A. Donenfeld
2022-02-04 23:09                     ` Jason A. Donenfeld [this message]
2022-02-05  4:02                   ` Sultan Alsawaf
2022-02-05 12:50                     ` Jason A. Donenfeld
