From: "tip-bot2 for Thomas Gleixner" <tip-bot2@linutronix.de>
To: linux-tip-commits@vger.kernel.org
Cc: Thomas Gleixner <tglx@linutronix.de>,
	"Borislav Petkov (AMD)" <bp@alien8.de>,
	x86@kernel.org, linux-kernel@vger.kernel.org
Subject: [tip: x86/microcode] x86/microcode: Rendezvous and load in NMI
Date: Fri, 20 Oct 2023 11:37:48 -0000
Message-ID: <169780186816.3135.11385598800295455799.tip-bot2@tip-bot2>
In-Reply-To: <20231002115903.489900814@linutronix.de>

The following commit has been merged into the x86/microcode branch of tip:

Commit-ID:     8858b12182058468542cb5d0a4587895f6ec473c
Gitweb:        https://git.kernel.org/tip/8858b12182058468542cb5d0a4587895f6ec473c
Author:        Thomas Gleixner <tglx@linutronix.de>
AuthorDate:    Mon, 02 Oct 2023 14:00:05 +02:00
Committer:     Borislav Petkov (AMD) <bp@alien8.de>
CommitterDate: Thu, 19 Oct 2023 15:59:38 +02:00

x86/microcode: Rendezvous and load in NMI

stop_machine() does not prevent the spin-waiting sibling from handling
an NMI, which obviously violates the whole concept of rendezvous.

Implement a static branch right at the beginning of the NMI handler
which is nopped out except when enabled by the late loading mechanism.

The late loader enables the static branch before stop_machine() is
invoked. Each CPU has an nmi_enabled flag in its control structure
which indicates whether the CPU should go into the update routine.

This is required to bridge the gap between enabling the branch and the
point where the CPU actually has to enter the loader wait loop.

Each CPU which arrives in the stopper thread function sets that flag
and issues a self NMI right after that. If the NMI function sees the
flag clear, it returns. If it's set, it clears the flag and enters the
rendezvous.
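
As a userspace analogy of that handshake (a minimal sketch with
SIGUSR1 standing in for the self NMI; illustration only, not code
from the patch):

  #include <signal.h>
  #include <stdio.h>

  static volatile sig_atomic_t nmi_enabled;

  /* Stand-in for microcode_nmi_handler(): act only when armed */
  static void nmi_stand_in(int sig)
  {
          (void)sig;
          if (!nmi_enabled)
                  return;         /* not armed: "return false" */
          nmi_enabled = 0;        /* consume the event exactly once */
          /* the rendezvous/update work would run here */
  }

  int main(void)
  {
          struct sigaction sa = { 0 };

          sa.sa_handler = nmi_stand_in;
          sigaction(SIGUSR1, &sa, NULL);

          nmi_enabled = 1;        /* arm the flag ... */
          raise(SIGUSR1);         /* ... then interrupt yourself */

          raise(SIGUSR1);         /* flag already clear: ignored */
          puts("handshake done");
          return 0;
  }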

This is safe against a real NMI which hits in the window between
setting the flag and sending the NMI to itself. The real NMI is
swallowed by the microcode update and the self NMI then lets normal
processing continue; otherwise it would end up as a spurious NMI.
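
Schematically, with the names from the patch below, the benign race
is:

    this_cpu_write(ucode_ctrl.nmi_enabled, true);
       <- a real NMI (e.g. the watchdog) hitting here sees the flag
          set, clears it and enters the rendezvous, i.e. the real NMI
          is consumed by the microcode update
    apic->send_IPI(smp_processor_id(), NMI_VECTOR);
       <- the self NMI arrives after the update, sees the flag clear
          and returns false, so it falls through to the regular NMI
          handlers which take care of the work still pending from the
          real NMI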

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20231002115903.489900814@linutronix.de
---
 arch/x86/include/asm/microcode.h         | 12 +++++++-
 arch/x86/kernel/cpu/microcode/core.c     | 42 ++++++++++++++++++++---
 arch/x86/kernel/cpu/microcode/intel.c    |  1 +-
 arch/x86/kernel/cpu/microcode/internal.h |  3 +-
 arch/x86/kernel/nmi.c                    |  4 ++-
 5 files changed, 57 insertions(+), 5 deletions(-)

diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 78f1eb2..8292482 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -72,4 +72,16 @@ static inline u32 intel_get_microcode_revision(void)
 }
 #endif /* !CONFIG_CPU_SUP_INTEL */
 
+bool microcode_nmi_handler(void);
+
+#ifdef CONFIG_MICROCODE_LATE_LOADING
+DECLARE_STATIC_KEY_FALSE(microcode_nmi_handler_enable);
+static __always_inline bool microcode_nmi_handler_enabled(void)
+{
+	return static_branch_unlikely(&microcode_nmi_handler_enable);
+}
+#else
+static __always_inline bool microcode_nmi_handler_enabled(void) { return false; }
+#endif
+
 #endif /* _ASM_X86_MICROCODE_H */
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 1c2710b..7b8ade5 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -23,6 +23,7 @@
 #include <linux/miscdevice.h>
 #include <linux/capability.h>
 #include <linux/firmware.h>
+#include <linux/cpumask.h>
 #include <linux/kernel.h>
 #include <linux/delay.h>
 #include <linux/mutex.h>
@@ -31,6 +32,7 @@
 #include <linux/fs.h>
 #include <linux/mm.h>
 
+#include <asm/apic.h>
 #include <asm/cpu_device_id.h>
 #include <asm/perf_event.h>
 #include <asm/processor.h>
@@ -265,8 +267,10 @@ struct microcode_ctrl {
 	enum sibling_ctrl	ctrl;
 	enum ucode_state	result;
 	unsigned int		ctrl_cpu;
+	bool			nmi_enabled;
 };
 
+DEFINE_STATIC_KEY_FALSE(microcode_nmi_handler_enable);
 static DEFINE_PER_CPU(struct microcode_ctrl, ucode_ctrl);
 static atomic_t late_cpus_in;
 
@@ -282,7 +286,8 @@ static bool wait_for_cpus(atomic_t *cnt)
 
 		udelay(1);
 
-		if (!(timeout % USEC_PER_MSEC))
+		/* If invoked directly, tickle the NMI watchdog */
+		if (!microcode_ops->use_nmi && !(timeout % USEC_PER_MSEC))
 			touch_nmi_watchdog();
 	}
 	/* Prevent the late comers from making progress and let them time out */
@@ -298,7 +303,8 @@ static bool wait_for_ctrl(void)
 		if (this_cpu_read(ucode_ctrl.ctrl) != SCTRL_WAIT)
 			return true;
 		udelay(1);
-		if (!(timeout % 1000))
+		/* If invoked directly, tickle the NMI watchdog */
+		if (!microcode_ops->use_nmi && !(timeout % 1000))
 			touch_nmi_watchdog();
 	}
 	return false;
@@ -374,7 +380,7 @@ static void load_primary(unsigned int cpu)
 	}
 }
 
-static int load_cpus_stopped(void *unused)
+static bool microcode_update_handler(void)
 {
 	unsigned int cpu = smp_processor_id();
 
@@ -383,7 +389,29 @@ static int load_cpus_stopped(void *unused)
 	else
 		load_secondary(cpu);
 
-	/* No point to wait here. The CPUs will all wait in stop_machine(). */
+	touch_nmi_watchdog();
+	return true;
+}
+
+bool microcode_nmi_handler(void)
+{
+	if (!this_cpu_read(ucode_ctrl.nmi_enabled))
+		return false;
+
+	this_cpu_write(ucode_ctrl.nmi_enabled, false);
+	return microcode_update_handler();
+}
+
+static int load_cpus_stopped(void *unused)
+{
+	if (microcode_ops->use_nmi) {
+		/* Enable the NMI handler and raise NMI */
+		this_cpu_write(ucode_ctrl.nmi_enabled, true);
+		apic->send_IPI(smp_processor_id(), NMI_VECTOR);
+	} else {
+		/* Just invoke the handler directly */
+		microcode_update_handler();
+	}
 	return 0;
 }
 
@@ -404,8 +432,14 @@ static int load_late_stop_cpus(void)
 	 */
 	store_cpu_caps(&prev_info);
 
+	if (microcode_ops->use_nmi)
+		static_branch_enable_cpuslocked(&microcode_nmi_handler_enable);
+
 	stop_machine_cpuslocked(load_cpus_stopped, NULL, cpu_online_mask);
 
+	if (microcode_ops->use_nmi)
+		static_branch_disable_cpuslocked(&microcode_nmi_handler_enable);
+
 	/* Analyze the results */
 	for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) {
 		switch (per_cpu(ucode_ctrl.result, cpu)) {
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 9dffb0c..5c4c2b6 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -595,6 +595,7 @@ static struct microcode_ops microcode_intel_ops = {
 	.collect_cpu_info	= collect_cpu_info,
 	.apply_microcode	= apply_microcode_late,
 	.finalize_late_load	= finalize_late_load,
+	.use_nmi		= IS_ENABLED(CONFIG_X86_64),
 };
 
 static __init void calc_llc_size_per_core(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h
index c699043..627d238 100644
--- a/arch/x86/kernel/cpu/microcode/internal.h
+++ b/arch/x86/kernel/cpu/microcode/internal.h
@@ -31,7 +31,8 @@ struct microcode_ops {
 	enum ucode_state	(*apply_microcode)(int cpu);
 	int			(*collect_cpu_info)(int cpu, struct cpu_signature *csig);
 	void			(*finalize_late_load)(int result);
-	unsigned int		nmi_safe	: 1;
+	unsigned int		nmi_safe	: 1,
+				use_nmi		: 1;
 };
 
 extern struct ucode_cpu_info ucode_cpu_info[];
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index a0c5518..a87d856 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -33,6 +33,7 @@
 #include <asm/reboot.h>
 #include <asm/cache.h>
 #include <asm/nospec-branch.h>
+#include <asm/microcode.h>
 #include <asm/sev.h>
 
 #define CREATE_TRACE_POINTS
@@ -343,6 +344,9 @@ static noinstr void default_do_nmi(struct pt_regs *regs)
 
 	instrumentation_begin();
 
+	if (microcode_nmi_handler_enabled() && microcode_nmi_handler())
+		goto out;
+
 	handled = nmi_handle(NMI_LOCAL, regs);
 	__this_cpu_add(nmi_stats.normal, handled);
 	if (handled) {

Thread overview: 102+ messages
2023-10-02 11:59 [patch V4 00/30] x86/microcode: Cleanup and late loading enhancements Thomas Gleixner
2023-10-02 11:59 ` [patch V4 01/30] x86/microcode/32: Move early loading after paging enable Thomas Gleixner
2023-10-02 11:59 ` [patch V4 02/30] x86/boot/32: Disable stackprotector and tracing for mk_early_pgtbl_32() Thomas Gleixner
2023-10-09 12:29   ` [tip: x86/microcode] " tip-bot2 for Thomas Gleixner
2023-10-20 11:38   ` tip-bot2 for Thomas Gleixner
2023-10-02 11:59 ` [patch V4 03/30] x86/microcode/intel: Rip out mixed stepping support for Intel CPUs Thomas Gleixner
2023-10-09 12:29   ` [tip: x86/microcode] " tip-bot2 for Ashok Raj
2023-10-02 11:59 ` [patch V4 04/30] x86/microcode/intel: Simplify scan_microcode() Thomas Gleixner
2023-10-09 12:29   ` [tip: x86/microcode] " tip-bot2 for Thomas Gleixner
2023-10-20 11:38   ` tip-bot2 for Thomas Gleixner
2023-10-02 11:59 ` [patch V4 05/30] x86/microcode/intel: Simplify and rename generic_load_microcode() Thomas Gleixner
2023-10-09 12:29   ` [tip: x86/microcode] " tip-bot2 for Thomas Gleixner
2023-10-20 11:38   ` tip-bot2 for Thomas Gleixner
2023-10-02 11:59 ` [patch V4 06/30] x86/microcode/intel: Cleanup code further Thomas Gleixner
2023-10-09 12:29   ` [tip: x86/microcode] " tip-bot2 for Thomas Gleixner
2023-10-20 11:38   ` tip-bot2 for Thomas Gleixner
2023-10-02 11:59 ` [patch V4 07/30] x86/microcode/intel: Simplify early loading Thomas Gleixner
2023-10-09 12:29   ` [tip: x86/microcode] " tip-bot2 for Thomas Gleixner
2023-10-02 11:59 ` [patch V4 08/30] x86/microcode/intel: Save the microcode only after a successful late-load Thomas Gleixner
2023-10-09 12:29   ` [tip: x86/microcode] " tip-bot2 for Thomas Gleixner
2023-10-20 11:37   ` tip-bot2 for Thomas Gleixner
2023-10-24 13:21   ` tip-bot2 for Thomas Gleixner
2023-10-02 11:59 ` [patch V4 09/30] x86/microcode/intel: Switch to kvmalloc() Thomas Gleixner
2023-10-09 12:29   ` [tip: x86/microcode] " tip-bot2 for Thomas Gleixner
2023-10-20 11:37   ` tip-bot2 for Thomas Gleixner
2023-10-24 13:21   ` tip-bot2 for Thomas Gleixner
2023-10-02 11:59 ` [patch V4 10/30] x86/microcode/intel: Unify microcode apply() functions Thomas Gleixner
2023-10-09 12:29   ` [tip: x86/microcode] " tip-bot2 for Thomas Gleixner
2023-10-02 11:59 ` [patch V4 11/30] x86/microcode/intel: Rework intel_cpu_collect_info() Thomas Gleixner
2023-10-09 12:29   ` [tip: x86/microcode] " tip-bot2 for Thomas Gleixner
2023-10-02 11:59 ` [patch V4 12/30] x86/microcode/intel: Reuse intel_cpu_collect_info() Thomas Gleixner
2023-10-09 12:29   ` [tip: x86/microcode] " tip-bot2 for Thomas Gleixner
2023-10-20 11:37   ` tip-bot2 for Thomas Gleixner
2023-10-24 13:21   ` tip-bot2 for Thomas Gleixner
2023-10-02 11:59 ` [patch V4 13/30] x86/microcode/intel: Rework intel_find_matching_signature() Thomas Gleixner
2023-10-09 12:29   ` [tip: x86/microcode] " tip-bot2 for Thomas Gleixner
2023-10-20 11:37   ` tip-bot2 for Thomas Gleixner
2023-10-24 13:21   ` tip-bot2 for Thomas Gleixner
2023-10-02 11:59 ` [patch V4 14/30] x86/microcode/amd: Read revision from hardware in collect_cpu_info_amd() Thomas Gleixner
2023-10-04  8:32   ` Borislav Petkov
2023-10-02 11:59 ` [patch V4 15/30] x86/microcode: Remove pointless apply() invocation Thomas Gleixner
2023-10-06 13:26   ` Borislav Petkov
2023-10-09 12:29   ` [tip: x86/microcode] " tip-bot2 for Thomas Gleixner
2023-10-02 11:59 ` [patch V4 16/30] x86/microcode: Get rid of the schedule work indirection Thomas Gleixner
2023-10-09 12:29   ` [tip: x86/microcode] " tip-bot2 for Thomas Gleixner
2023-10-02 11:59 ` [patch V4 17/30] x86/microcode: Clean up mc_cpu_down_prep() Thomas Gleixner
2023-10-09 12:29   ` [tip: x86/microcode] " tip-bot2 for Thomas Gleixner
2023-10-20 11:37   ` tip-bot2 for Thomas Gleixner
2023-10-24 13:20   ` tip-bot2 for Thomas Gleixner
2023-10-02 11:59 ` [patch V4 18/30] x86/microcode: Handle "nosmt" correctly Thomas Gleixner
2023-10-09 12:29   ` [tip: x86/microcode] " tip-bot2 for Thomas Gleixner
2023-10-20 11:37   ` tip-bot2 for Thomas Gleixner
2023-10-24 13:20   ` tip-bot2 for Thomas Gleixner
2023-10-02 11:59 ` [patch V4 19/30] x86/microcode: Clarify the late load logic Thomas Gleixner
2023-10-09 12:29   ` [tip: x86/microcode] " tip-bot2 for Thomas Gleixner
2023-10-20 11:37   ` tip-bot2 for Thomas Gleixner
2023-10-24 13:20   ` tip-bot2 for Thomas Gleixner
2023-10-02 11:59 ` [patch V4 20/30] x86/microcode: Sanitize __wait_for_cpus() Thomas Gleixner
2023-10-09 12:29   ` [tip: x86/microcode] " tip-bot2 for Thomas Gleixner
2023-10-20 11:37   ` tip-bot2 for Thomas Gleixner
2023-10-24 13:20   ` tip-bot2 for Thomas Gleixner
2023-10-02 12:00 ` [patch V4 21/30] x86/microcode: Add per CPU result state Thomas Gleixner
2023-10-09 12:29   ` [tip: x86/microcode] " tip-bot2 for Thomas Gleixner
2023-10-02 12:00 ` [patch V4 22/30] x86/microcode: Add per CPU control field Thomas Gleixner
2023-10-09 12:29   ` [tip: x86/microcode] " tip-bot2 for Thomas Gleixner
2023-10-20 11:37   ` tip-bot2 for Thomas Gleixner
2023-10-24 13:20   ` tip-bot2 for Thomas Gleixner
2023-10-02 12:00 ` [patch V4 23/30] x86/microcode: Provide new control functions Thomas Gleixner
2023-10-09 12:29   ` [tip: x86/microcode] " tip-bot2 for Thomas Gleixner
2023-10-20 11:37   ` tip-bot2 for Thomas Gleixner
2023-10-24 13:20   ` tip-bot2 for Thomas Gleixner
2023-10-02 12:00 ` [patch V4 24/30] x86/microcode: Replace the all in one rendevouz handler Thomas Gleixner
2023-10-09 12:29   ` [tip: x86/microcode] x86/microcode: Replace the all-in-one rendevous handler tip-bot2 for Thomas Gleixner
2023-10-20 11:37   ` tip-bot2 for Thomas Gleixner
2023-10-24 13:20   ` tip-bot2 for Thomas Gleixner
2023-10-02 12:00 ` [patch V4 25/30] x86/microcode: Rendezvous and load in NMI Thomas Gleixner
2023-10-09 12:29   ` [tip: x86/microcode] " tip-bot2 for Thomas Gleixner
2023-10-20 11:37   ` tip-bot2 for Thomas Gleixner [this message]
2023-10-24 13:20   ` tip-bot2 for Thomas Gleixner
2023-10-02 12:00 ` [patch V4 26/30] x86/microcode: Protect against instrumentation Thomas Gleixner
2023-10-09 12:29   ` [tip: x86/microcode] " tip-bot2 for Thomas Gleixner
2023-10-20 11:37   ` tip-bot2 for Thomas Gleixner
2023-10-24 13:20   ` tip-bot2 for Thomas Gleixner
2023-10-02 12:00 ` [patch V4 27/30] x86/apic: Provide apic_force_nmi_on_cpu() Thomas Gleixner
2023-10-09 12:29   ` [tip: x86/microcode] " tip-bot2 for Thomas Gleixner
2023-10-20 11:37   ` tip-bot2 for Thomas Gleixner
2023-10-24 13:20   ` tip-bot2 for Thomas Gleixner
2023-10-02 12:00 ` [patch V4 28/30] x86/microcode: Handle "offline" CPUs correctly Thomas Gleixner
2023-10-09 12:29   ` [tip: x86/microcode] " tip-bot2 for Thomas Gleixner
2023-10-20 11:37   ` tip-bot2 for Thomas Gleixner
2023-10-24 13:20   ` tip-bot2 for Thomas Gleixner
2023-10-02 12:00 ` [patch V4 29/30] x86/microcode: Prepare for minimal revision check Thomas Gleixner
2023-10-09 12:29   ` [tip: x86/microcode] " tip-bot2 for Thomas Gleixner
2023-10-02 12:00 ` [patch V4 30/30] x86/microcode/intel: Add a minimum required revision for late-loads Thomas Gleixner
2023-10-09 12:29   ` [tip: x86/microcode] x86/microcode/intel: Add a minimum required revision for late loading tip-bot2 for Ashok Raj
2023-10-20 11:37   ` tip-bot2 for Ashok Raj
2023-10-24 13:20   ` tip-bot2 for Ashok Raj
2023-10-08  8:54 ` [patch V4 00/30] x86/microcode: Cleanup and late loading enhancements Qiuxu Zhuo
2023-10-08 13:08   ` Borislav Petkov
2023-10-09  5:03     ` Zhuo, Qiuxu
2023-10-10  8:00     ` Zhuo, Qiuxu
2023-10-10  8:11       ` Borislav Petkov
