From: Thomas Gleixner <tglx@linutronix.de>
To: speck@linutronix.de
Subject: [patch 06/15] SSB updates V17 6
Date: Wed, 16 May 2018 15:51:38 +0200
Message-ID: <20180516135209.810720423@linutronix.de>
In-Reply-To: <20180516135132.687640705@linutronix.de>

Subject: [patch 06/15] x86/speculation: Handle HT correctly on AMD
From: Thomas Gleixner <tglx@linutronix.de>

The AMD64_LS_CFG MSR is a per core MSR on Family 17H CPUs. That means when
hyperthreading is enabled, the SSBD bit toggle needs to take both threads
of a core into account. Otherwise the following situation can happen:

CPU0		CPU1

disable SSB
		disable SSB
		enable  SSB <- Enables it for the Core, i.e. for CPU0 as well

So after the SSB enable on CPU1, the task on CPU0 runs with SSB enabled
again.
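
For illustration, the pre-patch update path (visible as the removed lines
in the diff below) derives the MSR value solely from the local thread's
TIF flag, which is exactly what lets one sibling clobber the other's
state. A minimal sketch, with a hypothetical function name:

  static void racy_amd_ssbd_update(unsigned long tifn)
  {
  	/*
  	 * Computed only from this thread's TIF_SSBD bit; the write hits
  	 * the per core MSR and silently overrides the sibling's setting.
  	 */
  	u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);

  	wrmsrl(MSR_AMD64_LS_CFG, msr);
  }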

On Intel the SSBD control is per core as well, but the synchronization
logic is implemented behind the per thread SPEC_CTRL MSR. It works like
this:

  CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL

i.e. if one of the threads enables a mitigation then this affects both,
and the mitigation is only disabled in the core when both threads have
disabled it.
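
Expressed as a conceptual sketch (not real hardware or kernel code; the
function name is made up), the effective core-wide control is just the OR
of the per-thread values:

  /* Conceptual model of the hardware's per core aggregation. */
  static u64 core_effective_spec_ctrl(u64 thread0_ctrl, u64 thread1_ctrl)
  {
  	return thread0_ctrl | thread1_ctrl;
  }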

Add the necessary synchronization logic for AMD family 17H. Unfortunately
that requires a spinlock to serialize access to the MSR, but each lock is
only shared between the HT siblings of a core.
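
Distilled, the serialization added below in amd_set_core_ssb_state() is a
count of "SSBD requested" siblings under the shared lock; the MSR is only
written on the 0 -> 1 and 1 -> 0 transitions. A simplified sketch of that
critical section, using the names from the patch:

  raw_spin_lock(&st->shared_state->lock);
  if (enable) {
  	/* First sibling to request SSBD sets the bit for the core. */
  	if (st->shared_state->disable_state++ == 0)
  		wrmsrl(MSR_AMD64_LS_CFG,
  		       x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask);
  } else {
  	/* Last sibling to drop SSBD clears it for the whole core. */
  	if (--st->shared_state->disable_state == 0)
  		wrmsrl(MSR_AMD64_LS_CFG, x86_amd_ls_cfg_base);
  }
  raw_spin_unlock(&st->shared_state->lock);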

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
---
V2: Amended changelog and clarified code comments.
---
 arch/x86/include/asm/spec-ctrl.h |    6 +
 arch/x86/kernel/process.c        |  125 +++++++++++++++++++++++++++++++++++++--
 arch/x86/kernel/smpboot.c        |    5 +
 3 files changed, 130 insertions(+), 6 deletions(-)

--- a/arch/x86/include/asm/spec-ctrl.h
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -33,6 +33,12 @@ static inline u64 ssbd_tif_to_amd_ls_cfg
 	return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
 }
 
+#ifdef CONFIG_SMP
+extern void speculative_store_bypass_ht_init(void);
+#else
+static inline void speculative_store_bypass_ht_init(void) { }
+#endif
+
 extern void speculative_store_bypass_update(void);
 
 #endif
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -279,22 +279,135 @@ static inline void switch_to_bitmap(stru
 	}
 }
 
-static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
+#ifdef CONFIG_SMP
+
+struct ssb_state {
+	struct ssb_state	*shared_state;
+	raw_spinlock_t		lock;
+	unsigned int		disable_state;
+	unsigned long		local_state;
+};
+
+#define LSTATE_SSB	0
+
+static DEFINE_PER_CPU(struct ssb_state, ssb_state);
+
+void speculative_store_bypass_ht_init(void)
+{
+	struct ssb_state *st = this_cpu_ptr(&ssb_state);
+	unsigned int this_cpu = smp_processor_id();
+	unsigned int cpu;
+
+	st->local_state = 0;
+
+	/*
+	 * Shared state setup happens once on the first bringup
+	 * of the CPU. It's not destroyed on CPU hotunplug.
+	 */
+	if (st->shared_state)
+		return;
+
+	raw_spin_lock_init(&st->lock);
+
+	/*
+	 * Go over HT siblings and check whether one of them has set up the
+	 * shared state pointer already.
+	 */
+	for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
+		if (cpu == this_cpu)
+			continue;
+
+		if (!per_cpu(ssb_state, cpu).shared_state)
+			continue;
+
+		/* Link it to the state of the sibling: */
+		st->shared_state = per_cpu(ssb_state, cpu).shared_state;
+		return;
+	}
+
+	/*
+	 * First HT sibling to come up on the core.  Link shared state of
+	 * the first HT sibling to itself. The siblings on the same core
+	 * which come up later will see the shared state pointer and link
+	 * themselves to the state of this CPU.
+	 */
+	st->shared_state = st;
+}
+
+/*
+ * The logic is: the first HT sibling to enable SSBD enables it for both
+ * siblings in the core, and the last sibling to disable it disables it
+ * for the whole core. This is how MSR_SPEC_CTRL works in "hardware":
+ *
+ *  CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
+ */
+static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
 {
-	u64 msr;
+	struct ssb_state *st = this_cpu_ptr(&ssb_state);
+	u64 msr = x86_amd_ls_cfg_base;
 
-	if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
-		msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
+	if (!static_cpu_has(X86_FEATURE_ZEN)) {
+		msr |= ssbd_tif_to_amd_ls_cfg(tifn);
 		wrmsrl(MSR_AMD64_LS_CFG, msr);
+		return;
+	}
+
+	if (tifn & _TIF_SSBD) {
+		/*
+		 * Since this can race with prctl(), block reentry on the
+		 * same CPU.
+		 */
+		if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
+			return;
+
+		msr |= x86_amd_ls_cfg_ssbd_mask;
+
+		raw_spin_lock(&st->shared_state->lock);
+		/* First sibling enables SSBD: */
+		if (!st->shared_state->disable_state)
+			wrmsrl(MSR_AMD64_LS_CFG, msr);
+		st->shared_state->disable_state++;
+		raw_spin_unlock(&st->shared_state->lock);
 	} else {
-		msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
-		wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+		if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
+			return;
+
+		raw_spin_lock(&st->shared_state->lock);
+		st->shared_state->disable_state--;
+		if (!st->shared_state->disable_state)
+			wrmsrl(MSR_AMD64_LS_CFG, msr);
+		raw_spin_unlock(&st->shared_state->lock);
 	}
 }
+#else
+static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
+{
+	u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
+
+	wrmsrl(MSR_AMD64_LS_CFG, msr);
+}
+#endif
+
+static __always_inline void intel_set_ssb_state(unsigned long tifn)
+{
+	u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
+
+	wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+}
+
+static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
+{
+	if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+		amd_set_core_ssb_state(tifn);
+	else
+		intel_set_ssb_state(tifn);
+}
 
 void speculative_store_bypass_update(void)
 {
+	preempt_disable();
 	__speculative_store_bypass_update(current_thread_info()->flags);
+	preempt_enable();
 }
 
 void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -79,6 +79,7 @@
 #include <asm/qspinlock.h>
 #include <asm/intel-family.h>
 #include <asm/cpu_device_id.h>
+#include <asm/spec-ctrl.h>
 
 /* Number of siblings per CPU package */
 int smp_num_siblings = 1;
@@ -244,6 +245,8 @@ static void notrace start_secondary(void
 	 */
 	check_tsc_sync_target();
 
+	speculative_store_bypass_ht_init();
+
 	/*
 	 * Lock vector_lock, set CPU online and bring the vector
 	 * allocator online. Online must be set with vector_lock held
@@ -1292,6 +1295,8 @@ void __init native_smp_prepare_cpus(unsi
 	set_mtrr_aps_delayed_init();
 
 	smp_quirk_init_udelay();
+
+	speculative_store_bypass_ht_init();
 }
 
 void arch_enable_nonboot_cpus_begin(void)
