linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH v4 0/2] cpuhp enhancements
@ 2022-02-15 21:22 Jason A. Donenfeld
  2022-02-15 21:22 ` [PATCH v4 1/2] random: set fast pool count to zero in cpuhp prepare Jason A. Donenfeld
  2022-02-15 21:22 ` [PATCH v4 2/2] random: invalidate crngs and batches " Jason A. Donenfeld
  0 siblings, 2 replies; 4+ messages in thread
From: Jason A. Donenfeld @ 2022-02-15 21:22 UTC (permalink / raw)
  To: linux-kernel, bigeasy, linux, sultan; +Cc: Jason A. Donenfeld

This series combines the two uses thus far identified for cpuhp in
random.c. v3 uses prepare instead of teardown. v4 guards the added
function with CONFIG_SMP, since it's not used on !CONFIG_SMP.

Jason A. Donenfeld (2):
  random: set fast pool count to zero in cpuhp prepare
  random: invalidate crngs and batches in cpuhp prepare

 drivers/char/random.c      | 47 ++++++++++++++++++++++++++------------
 include/linux/cpuhotplug.h |  1 +
 include/linux/random.h     |  4 ++++
 kernel/cpu.c               |  6 +++++
 4 files changed, 43 insertions(+), 15 deletions(-)

-- 
2.35.0


^ permalink raw reply	[flat|nested] 4+ messages in thread

* [PATCH v4 1/2] random: set fast pool count to zero in cpuhp prepare
  2022-02-15 21:22 [PATCH v4 0/2] cpuhp enhancements Jason A. Donenfeld
@ 2022-02-15 21:22 ` Jason A. Donenfeld
  2022-02-17 12:01   ` Jason A. Donenfeld
  2022-02-15 21:22 ` [PATCH v4 2/2] random: invalidate crngs and batches " Jason A. Donenfeld
  1 sibling, 1 reply; 4+ messages in thread
From: Jason A. Donenfeld @ 2022-02-15 21:22 UTC (permalink / raw)
  To: linux-kernel, bigeasy, linux, sultan
  Cc: Jason A. Donenfeld, Thomas Gleixner, Peter Zijlstra, Theodore Ts'o

Rather than having to use expensive atomics, which were visibly the most
expensive thing in the entire irq handler, simply take care of the
extreme edge case of resetting count to 0 in the cpuhp prepare handler,
before irqs have started. This simplifies the code a bit and lets us use
vanilla variables rather than atomics, and performance should be
improved.

Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Sultan Alsawaf <sultan@kerneltoast.com>
Cc: Dominik Brodowski <linux@dominikbrodowski.net>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
---
 drivers/char/random.c      | 39 +++++++++++++++++++++++---------------
 include/linux/cpuhotplug.h |  1 +
 include/linux/random.h     |  4 ++++
 kernel/cpu.c               |  6 ++++++
 4 files changed, 35 insertions(+), 15 deletions(-)

diff --git a/drivers/char/random.c b/drivers/char/random.c
index ee21f301ff16..a3cc147406b0 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1182,7 +1182,7 @@ struct fast_pool {
 	};
 	struct work_struct mix;
 	unsigned long last;
-	atomic_t count;
+	unsigned int count;
 	u16 reg_idx;
 };
 
@@ -1218,6 +1218,25 @@ static void fast_mix(u32 pool[4])
 
 static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
 
+#ifdef CONFIG_SMP
+int random_prepare_cpu(unsigned int cpu)
+{
+	/*
+	 * We want to reset a few things when a CPU comes online, in case
+	 * it was previously offlined and therefore has stale information.
+	 */
+
+	/*
+	 * Set irq randomness count to zero so that new accumulated
+	 * irqs are fresh, and more importantly, so that its worker
+	 * is permitted to schedule again when it comes back online,
+	 * since the MIX_INFLIGHT flag will be cleared.
+	 */
+	per_cpu_ptr(&irq_randomness, cpu)->count = 0;
+	return 0;
+}
+#endif
+
 static u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
 {
 	u32 *ptr = (u32 *)regs;
@@ -1242,15 +1261,6 @@ static void mix_interrupt_randomness(struct work_struct *work)
 	local_irq_disable();
 	if (fast_pool != this_cpu_ptr(&irq_randomness)) {
 		local_irq_enable();
-		/*
-		 * If we are unlucky enough to have been moved to another CPU,
-		 * during CPU hotplug while the CPU was shutdown then we set
-		 * our count to zero atomically so that when the CPU comes
-		 * back online, it can enqueue work again. The _release here
-		 * pairs with the atomic_inc_return_acquire in
-		 * add_interrupt_randomness().
-		 */
-		atomic_set_release(&fast_pool->count, 0);
 		return;
 	}
 
@@ -1259,7 +1269,7 @@ static void mix_interrupt_randomness(struct work_struct *work)
 	 * consistent view, before we reenable irqs again.
 	 */
 	memcpy(pool, fast_pool->pool32, sizeof(pool));
-	atomic_set(&fast_pool->count, 0);
+	fast_pool->count = 0;
 	fast_pool->last = jiffies;
 	local_irq_enable();
 
@@ -1295,14 +1305,13 @@ void add_interrupt_randomness(int irq)
 	}
 
 	fast_mix(fast_pool->pool32);
-	/* The _acquire here pairs with the atomic_set_release in mix_interrupt_randomness(). */
-	new_count = (unsigned int)atomic_inc_return_acquire(&fast_pool->count);
+	new_count = ++fast_pool->count;
 
 	if (unlikely(crng_init == 0)) {
 		if (new_count >= 64 &&
 		    crng_pre_init_inject(fast_pool->pool32, sizeof(fast_pool->pool32),
 					 true, true) > 0) {
-			atomic_set(&fast_pool->count, 0);
+			fast_pool->count = 0;
 			fast_pool->last = now;
 			if (spin_trylock(&input_pool.lock)) {
 				_mix_pool_bytes(&fast_pool->pool32, sizeof(fast_pool->pool32));
@@ -1320,7 +1329,7 @@ void add_interrupt_randomness(int irq)
 
 	if (unlikely(!fast_pool->mix.func))
 		INIT_WORK(&fast_pool->mix, mix_interrupt_randomness);
-	atomic_or(MIX_INFLIGHT, &fast_pool->count);
+	fast_pool->count |= MIX_INFLIGHT;
 	queue_work_on(raw_smp_processor_id(), system_highpri_wq, &fast_pool->mix);
 }
 EXPORT_SYMBOL_GPL(add_interrupt_randomness);
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 411a428ace4d..38294af566e4 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -127,6 +127,7 @@ enum cpuhp_state {
 	CPUHP_MM_ZSWP_POOL_PREPARE,
 	CPUHP_KVM_PPC_BOOK3S_PREPARE,
 	CPUHP_ZCOMP_PREPARE,
+	CPUHP_RANDOM_PREPARE,
 	CPUHP_TIMERS_PREPARE,
 	CPUHP_MIPS_SOC_PREPARE,
 	CPUHP_BP_PREPARE_DYN,
diff --git a/include/linux/random.h b/include/linux/random.h
index d7354de9351e..41da6628e838 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -35,6 +35,10 @@ extern void add_interrupt_randomness(int irq) __latent_entropy;
 extern void add_hwgenerator_randomness(const void *buffer, size_t count,
 				       size_t entropy);
 
+#ifdef CONFIG_SMP
+extern int random_prepare_cpu(unsigned int cpu);
+#endif
+
 extern void get_random_bytes(void *buf, size_t nbytes);
 extern int wait_for_random_bytes(void);
 extern int __init rand_initialize(void);
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 407a2568f35e..8da392a6dc80 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -34,6 +34,7 @@
 #include <linux/scs.h>
 #include <linux/percpu-rwsem.h>
 #include <linux/cpuset.h>
+#include <linux/random.h>
 
 #include <trace/events/power.h>
 #define CREATE_TRACE_POINTS
@@ -1689,6 +1690,11 @@ static struct cpuhp_step cpuhp_hp_states[] = {
 		.startup.single		= rcutree_prepare_cpu,
 		.teardown.single	= rcutree_dead_cpu,
 	},
+	[CPUHP_RANDOM_PREPARE] = {
+		.name			= "random:prepare",
+		.startup.single		= random_prepare_cpu,
+		.teardown.single	= NULL,
+	},
 	/*
 	 * On the tear-down path, timers_dead_cpu() must be invoked
 	 * before blk_mq_queue_reinit_notify() from notify_dead(),
-- 
2.35.0


^ permalink raw reply related	[flat|nested] 4+ messages in thread

* [PATCH v4 2/2] random: invalidate crngs and batches in cpuhp prepare
  2022-02-15 21:22 [PATCH v4 0/2] cpuhp enhancements Jason A. Donenfeld
  2022-02-15 21:22 ` [PATCH v4 1/2] random: set fast pool count to zero in cpuhp prepare Jason A. Donenfeld
@ 2022-02-15 21:22 ` Jason A. Donenfeld
  1 sibling, 0 replies; 4+ messages in thread
From: Jason A. Donenfeld @ 2022-02-15 21:22 UTC (permalink / raw)
  To: linux-kernel, bigeasy, linux, sultan
  Cc: Jason A. Donenfeld, Theodore Ts'o

Now that we have a cpuhp prepare notifier, we can invalidate the keys
used by the per-cpu crngs and the batches used by per-cpu batched
entropy, so that if the cpus come back online, and the generation
counter happens to have cycled all the way around to where it was
before, it doesn't mistakenly use the old data. The chances of this
happening are exceedingly rare, but since we now have the notifier
setup, doing this is basically free.

Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Sultan Alsawaf <sultan@kerneltoast.com>
Cc: Dominik Brodowski <linux@dominikbrodowski.net>
Cc: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
---
 drivers/char/random.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/drivers/char/random.c b/drivers/char/random.c
index a3cc147406b0..41188a49d43e 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1233,6 +1233,14 @@ int random_prepare_cpu(unsigned int cpu)
 	 * since the MIX_INFLIGHT flag will be cleared.
 	 */
 	per_cpu_ptr(&irq_randomness, cpu)->count = 0;
+
+	/*
+	 * We also want to invalidate per-cpu crngs and batches, so
+	 * that we always use fresh entropy.
+	 */
+	per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX;
+	per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX;
+	per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX;
 	return 0;
 }
 #endif
-- 
2.35.0


^ permalink raw reply related	[flat|nested] 4+ messages in thread

* Re: [PATCH v4 1/2] random: set fast pool count to zero in cpuhp prepare
  2022-02-15 21:22 ` [PATCH v4 1/2] random: set fast pool count to zero in cpuhp prepare Jason A. Donenfeld
@ 2022-02-17 12:01   ` Jason A. Donenfeld
  0 siblings, 0 replies; 4+ messages in thread
From: Jason A. Donenfeld @ 2022-02-17 12:01 UTC (permalink / raw)
  To: LKML, Sebastian Siewior, Dominik Brodowski, Sultan Alsawaf
  Cc: Thomas Gleixner, Peter Zijlstra, Theodore Ts'o

Alas, I've finally realized that the rollback logic makes this
inoperable too: those workers are unbound at one phase, but then we
never hit this stage, so we never zero them back. So, a v5 is coming
up, where I'll implement basically your exact suggestion from before.

^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2022-02-17 12:02 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-02-15 21:22 [PATCH v4 0/2] cpuhp enhancements Jason A. Donenfeld
2022-02-15 21:22 ` [PATCH v4 1/2] random: set fast pool count to zero in cpuhp prepare Jason A. Donenfeld
2022-02-17 12:01   ` Jason A. Donenfeld
2022-02-15 21:22 ` [PATCH v4 2/2] random: invalidate crngs and batches " Jason A. Donenfeld

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).