* [patch 00/15] SSB updates V17 0
@ 2018-05-16 13:51 Thomas Gleixner
  2018-05-16 13:51 ` [patch 01/15] SSB updates V17 1 Thomas Gleixner
                   ` (15 more replies)
  0 siblings, 16 replies; 40+ messages in thread
From: Thomas Gleixner @ 2018-05-16 13:51 UTC (permalink / raw)
  To: speck

This is an update to the previous 'SSB updates V16' series which addresses
various review comments.

Delta patch below. Git bundle comes in a follow-up mail.

Thanks,

	tglx

8<-----------------

diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index ae42e30e7b41..e15c27f5540c 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -140,8 +140,8 @@ static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
 void
 x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
 {
-	u64 hostssbd = ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
-	u64 msr, guest, host = x86_spec_ctrl_base;
+	u64 msrval, guestval, hostval = x86_spec_ctrl_base;
+	struct thread_info *ti = current_thread_info();
 
 	/* Is MSR_SPEC_CTRL implemented ? */
 	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
@@ -150,39 +150,45 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
 		 * modifiable bits in the host base value and or the
 		 * modifiable bits from the guest value.
 		 */
-		guest = host & ~x86_spec_ctrl_mask;
-		guest |= guest_spec_ctrl & x86_spec_ctrl_mask;
+		guestval = hostval & ~x86_spec_ctrl_mask;
+		guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
 
 		/* SSBD controlled in MSR_SPEC_CTRL */
 		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
-			host |= hostssbd;
+			hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
 
-		if (host != guest) {
-			msr = setguest ? guest : host;
-			wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+		if (hostval != guest_spec_ctrl) {
+			msrval = setguest ? guest_spec_ctrl : hostval;
+			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
 		}
 	}
 
 	/*
-	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD update
+	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
 	 * MSR_AMD64_L2_CFG or MSR_VIRT_SPEC_CTRL if supported.
 	 */
 	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
 	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
 		return;
 
-	/* If host has SSBD disabled via command line, force it */
+	/*
+	 * If the host has SSBD mitigation enabled, force it in the host's
+	 * virtual MSR value. If it's not permanently enabled, evaluate
+	 * current's TIF_SSBD thread flag.
+	 */
 	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
-		hostssbd |= SPEC_CTRL_SSBD;
+		hostval = SPEC_CTRL_SSBD;
+	else
+		hostval = ssbd_tif_to_spec_ctrl(ti->flags);
 
 	/* Sanitize the guest value */
-	guest = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
+	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
 
-	if (hostssbd != guest) {
+	if (hostval != guestval) {
 		unsigned long tif;
 
-		tif = setguest ? ssbd_spec_ctrl_to_tif(guest) :
-				 ssbd_spec_ctrl_to_tif(hostssbd);
+		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
+				 ssbd_spec_ctrl_to_tif(hostval);
 
 		speculative_store_bypass_update(tif);
 	}
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index dd37244c587a..577e7f7ae273 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -191,6 +191,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 		setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
 		setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
 		setup_clear_cpu_cap(X86_FEATURE_SSBD);
+		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
 	}
 
 	/*
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 80ef41a2097f..30ca2d1a9231 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -299,12 +299,20 @@ void speculative_store_bypass_ht_init(void)
 	unsigned int cpu;
 
 	st->local_state = 0;
+
+	/*
+	 * Shared state setup happens once on the first bringup
+	 * of the CPU. It's not destroyed on CPU hotunplug.
+	 */
 	if (st->shared_state)
 		return;
 
 	raw_spin_lock_init(&st->lock);
 
-	/* Go over HT siblings: */
+	/*
+	 * Go over HT siblings and check whether one of them has set up the
+	 * shared state pointer already.
+	 */
 	for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
 		if (cpu == this_cpu)
 			continue;
@@ -316,13 +324,22 @@ void speculative_store_bypass_ht_init(void)
 		st->shared_state = per_cpu(ssb_state, cpu).shared_state;
 		return;
 	}
-	/* Link shared state of the first HT sibling to itself. */
+
+	/*
+	 * First HT sibling to come up on the core.  Link shared state of
+	 * the first HT sibling to itself. The siblings on the same core
+	 * which come up later will see the shared state pointer and link
+	 * themselves to the state of this CPU.
+	 */
 	st->shared_state = st;
 }
 
 /*
- * Logic is: first HT sibling enables SSBD for both siblings in the core and
- * last sibling to disable it, disables it for the whole core.
+ * Logic is: First HT sibling enables SSBD for both siblings in the core
+ * and last sibling to disable it, disables it for the whole core. This is
+ * how MSR_SPEC_CTRL works in "hardware":
+ *
+ *  CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
  */
 static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
 {
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index c23d2bb0a8bf..47b1c94e035b 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -214,9 +214,9 @@ struct vcpu_svm {
 
 	u64 spec_ctrl;
 	/*
-	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which
-	 * will be translated into the appropriate bits to perform
-	 * speculative control.
+	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
+	 * translated into the appropriate L2_CFG bits on the host to
+	 * perform speculative control.
 	 */
 	u64 virt_spec_ctrl;
 


* [patch 01/15] SSB updates V17 1
  2018-05-16 13:51 [patch 00/15] SSB updates V17 0 Thomas Gleixner
@ 2018-05-16 13:51 ` Thomas Gleixner
  2018-05-16 13:51 ` [patch 02/15] SSB updates V17 2 Thomas Gleixner
                   ` (14 subsequent siblings)
  15 siblings, 0 replies; 40+ messages in thread
From: Thomas Gleixner @ 2018-05-16 13:51 UTC (permalink / raw)
  To: speck

Subject: [patch 01/15] KVM: SVM: Move spec control call after restore of GS
From: Thomas Gleixner <tglx@linutronix.de>

svm_vcpu_run() invokes x86_spec_ctrl_restore_host() after VMEXIT, but
before the host GS is restored. x86_spec_ctrl_restore_host() uses 'current'
to determine the host SSBD state of the thread. 'current' is GS-based, but
the host GS is not yet restored, and the access causes a triple fault.

Move the call after the host GS restore.
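
For illustration, the resulting VMEXIT path looks roughly like the sketch
below. This is a simplified excerpt in the spirit of the diff that follows,
not the literal svm_vcpu_run() code; the point is that MSR_GS_BASE is
written back before anything dereferences 'current':

	/* ... inline asm that performed VMRUN / #VMEXIT ... */

	/* Eliminate branch target predictions from guest mode */
	vmexit_fill_RSB();

#ifdef CONFIG_X86_64
	/* Restore host GS first; 'current' is usable from here on */
	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
#endif

	/* Now safe: reads current_thread_info() via GS */
	x86_spec_ctrl_restore_host(svm->spec_ctrl);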

Fixes: 885f82bfbc6f ("x86/process: Allow runtime control of Speculative Store Bypass")
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
---
V2: Corrected 'Fixes:' tag
---
 arch/x86/kvm/svm.c |   24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5651,6 +5651,18 @@ static void svm_vcpu_run(struct kvm_vcpu
 #endif
 		);
 
+	/* Eliminate branch target predictions from guest mode */
+	vmexit_fill_RSB();
+
+#ifdef CONFIG_X86_64
+	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
+#else
+	loadsegment(fs, svm->host.fs);
+#ifndef CONFIG_X86_32_LAZY_GS
+	loadsegment(gs, svm->host.gs);
+#endif
+#endif
+
 	/*
 	 * We do not use IBRS in the kernel. If this vCPU has used the
 	 * SPEC_CTRL MSR it may have left it on; save the value and
@@ -5671,18 +5683,6 @@ static void svm_vcpu_run(struct kvm_vcpu
 
 	x86_spec_ctrl_restore_host(svm->spec_ctrl);
 
-	/* Eliminate branch target predictions from guest mode */
-	vmexit_fill_RSB();
-
-#ifdef CONFIG_X86_64
-	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
-#else
-	loadsegment(fs, svm->host.fs);
-#ifndef CONFIG_X86_32_LAZY_GS
-	loadsegment(gs, svm->host.gs);
-#endif
-#endif
-
 	reload_tss(vcpu);
 
 	local_irq_disable();


* [patch 02/15] SSB updates V17 2
  2018-05-16 13:51 [patch 00/15] SSB updates V17 0 Thomas Gleixner
  2018-05-16 13:51 ` [patch 01/15] SSB updates V17 1 Thomas Gleixner
@ 2018-05-16 13:51 ` Thomas Gleixner
  2018-05-16 14:29   ` [MODERATED] " Konrad Rzeszutek Wilk
  2018-05-16 13:51 ` [patch 03/15] SSB updates V17 3 Thomas Gleixner
                   ` (13 subsequent siblings)
  15 siblings, 1 reply; 40+ messages in thread
From: Thomas Gleixner @ 2018-05-16 13:51 UTC (permalink / raw)
  To: speck

Subject: [patch 02/15] x86/speculation: Use synthetic bits for IBRS/IBPB/STIBP
From: Borislav Petkov <bp@suse.de>

Intel and AMD have different CPUID bits for these features, hence use
synthetic bits which get set for the respective vendor in
init_speculation_control(), so that debacles like the one the commit
message of

  c65732e4f721 ("x86/cpu: Restore CPUID_8000_0008_EBX reload")

describes don't happen anymore.

Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Tested-by: Jörg Otte <jrg.otte@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Link: https://lkml.kernel.org/r/20180504161815.GG9257@pd.tnic
---
V2: Amended changelog
---
 arch/x86/include/asm/cpufeatures.h |   10 ++++++----
 arch/x86/kernel/cpu/common.c       |   14 ++++++++++----
 arch/x86/kvm/cpuid.c               |   10 +++++-----
 arch/x86/kvm/svm.c                 |    6 +++---
 arch/x86/kvm/vmx.c                 |    9 ++-------
 5 files changed, 26 insertions(+), 23 deletions(-)

--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -198,7 +198,6 @@
 #define X86_FEATURE_CAT_L2		( 7*32+ 5) /* Cache Allocation Technology L2 */
 #define X86_FEATURE_CDP_L3		( 7*32+ 6) /* Code and Data Prioritization L3 */
 #define X86_FEATURE_INVPCID_SINGLE	( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */
-
 #define X86_FEATURE_HW_PSTATE		( 7*32+ 8) /* AMD HW-PState */
 #define X86_FEATURE_PROC_FEEDBACK	( 7*32+ 9) /* AMD ProcFeedbackInterface */
 #define X86_FEATURE_SME			( 7*32+10) /* AMD Secure Memory Encryption */
@@ -216,6 +215,9 @@
 #define X86_FEATURE_USE_IBRS_FW		( 7*32+22) /* "" Use IBRS during runtime firmware calls */
 #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE	( 7*32+23) /* "" Disable Speculative Store Bypass. */
 #define X86_FEATURE_AMD_SSBD		( 7*32+24)  /* "" AMD SSBD implementation */
+#define X86_FEATURE_IBRS		( 7*32+25) /* Indirect Branch Restricted Speculation */
+#define X86_FEATURE_IBPB		( 7*32+26) /* Indirect Branch Prediction Barrier */
+#define X86_FEATURE_STIBP		( 7*32+27) /* Single Thread Indirect Branch Predictors */
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW		( 8*32+ 0) /* Intel TPR Shadow */
@@ -276,9 +278,9 @@
 #define X86_FEATURE_CLZERO		(13*32+ 0) /* CLZERO instruction */
 #define X86_FEATURE_IRPERF		(13*32+ 1) /* Instructions Retired Count */
 #define X86_FEATURE_XSAVEERPTR		(13*32+ 2) /* Always save/restore FP error pointers */
-#define X86_FEATURE_IBPB		(13*32+12) /* Indirect Branch Prediction Barrier */
-#define X86_FEATURE_IBRS		(13*32+14) /* Indirect Branch Restricted Speculation */
-#define X86_FEATURE_STIBP		(13*32+15) /* Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_AMD_IBPB		(13*32+12) /* "" Indirect Branch Prediction Barrier */
+#define X86_FEATURE_AMD_IBRS		(13*32+14) /* "" Indirect Branch Restricted Speculation */
+#define X86_FEATURE_AMD_STIBP		(13*32+15) /* "" Single Thread Indirect Branch Predictors */
 
 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
 #define X86_FEATURE_DTHERM		(14*32+ 0) /* Digital Thermal Sensor */
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -757,17 +757,23 @@ static void init_speculation_control(str
 	 * and they also have a different bit for STIBP support. Also,
 	 * a hypervisor might have set the individual AMD bits even on
 	 * Intel CPUs, for finer-grained selection of what's available.
-	 *
-	 * We use the AMD bits in 0x8000_0008 EBX as the generic hardware
-	 * features, which are visible in /proc/cpuinfo and used by the
-	 * kernel. So set those accordingly from the Intel bits.
 	 */
 	if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
 		set_cpu_cap(c, X86_FEATURE_IBRS);
 		set_cpu_cap(c, X86_FEATURE_IBPB);
 	}
+
 	if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
 		set_cpu_cap(c, X86_FEATURE_STIBP);
+
+	if (cpu_has(c, X86_FEATURE_AMD_IBRS))
+		set_cpu_cap(c, X86_FEATURE_IBRS);
+
+	if (cpu_has(c, X86_FEATURE_AMD_IBPB))
+		set_cpu_cap(c, X86_FEATURE_IBPB);
+
+	if (cpu_has(c, X86_FEATURE_AMD_STIBP))
+		set_cpu_cap(c, X86_FEATURE_STIBP);
 }
 
 void get_cpu_cap(struct cpuinfo_x86 *c)
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -379,7 +379,7 @@ static inline int __do_cpuid_ent(struct
 
 	/* cpuid 0x80000008.ebx */
 	const u32 kvm_cpuid_8000_0008_ebx_x86_features =
-		F(IBPB) | F(IBRS);
+		F(AMD_IBPB) | F(AMD_IBRS);
 
 	/* cpuid 0xC0000001.edx */
 	const u32 kvm_cpuid_C000_0001_edx_x86_features =
@@ -648,10 +648,10 @@ static inline int __do_cpuid_ent(struct
 		entry->eax = g_phys_as | (virt_as << 8);
 		entry->edx = 0;
 		/* IBRS and IBPB aren't necessarily present in hardware cpuid */
-		if (boot_cpu_has(X86_FEATURE_IBPB))
-			entry->ebx |= F(IBPB);
-		if (boot_cpu_has(X86_FEATURE_IBRS))
-			entry->ebx |= F(IBRS);
+		if (boot_cpu_has(X86_FEATURE_AMD_IBPB))
+			entry->ebx |= F(AMD_IBPB);
+		if (boot_cpu_has(X86_FEATURE_AMD_IBRS))
+			entry->ebx |= F(AMD_IBRS);
 		entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
 		cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
 		break;
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -4108,7 +4108,7 @@ static int svm_get_msr(struct kvm_vcpu *
 		break;
 	case MSR_IA32_SPEC_CTRL:
 		if (!msr_info->host_initiated &&
-		    !guest_cpuid_has(vcpu, X86_FEATURE_IBRS))
+		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
 			return 1;
 
 		msr_info->data = svm->spec_ctrl;
@@ -4203,7 +4203,7 @@ static int svm_set_msr(struct kvm_vcpu *
 		break;
 	case MSR_IA32_SPEC_CTRL:
 		if (!msr->host_initiated &&
-		    !guest_cpuid_has(vcpu, X86_FEATURE_IBRS))
+		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
 			return 1;
 
 		/* The STIBP bit doesn't fault even if it's not advertised */
@@ -4230,7 +4230,7 @@ static int svm_set_msr(struct kvm_vcpu *
 		break;
 	case MSR_IA32_PRED_CMD:
 		if (!msr->host_initiated &&
-		    !guest_cpuid_has(vcpu, X86_FEATURE_IBPB))
+		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB))
 			return 1;
 
 		if (data & ~PRED_CMD_IBPB)
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3523,9 +3523,7 @@ static int vmx_get_msr(struct kvm_vcpu *
 		return kvm_get_msr_common(vcpu, msr_info);
 	case MSR_IA32_SPEC_CTRL:
 		if (!msr_info->host_initiated &&
-		    !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
-		    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
-		    !guest_cpuid_has(vcpu, X86_FEATURE_SSBD))
+		    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
 			return 1;
 
 		msr_info->data = to_vmx(vcpu)->spec_ctrl;
@@ -3643,9 +3641,7 @@ static int vmx_set_msr(struct kvm_vcpu *
 		break;
 	case MSR_IA32_SPEC_CTRL:
 		if (!msr_info->host_initiated &&
-		    !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
-		    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
-		    !guest_cpuid_has(vcpu, X86_FEATURE_SSBD))
+		    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
 			return 1;
 
 		/* The STIBP bit doesn't fault even if it's not advertised */
@@ -3675,7 +3671,6 @@ static int vmx_set_msr(struct kvm_vcpu *
 		break;
 	case MSR_IA32_PRED_CMD:
 		if (!msr_info->host_initiated &&
-		    !guest_cpuid_has(vcpu, X86_FEATURE_IBPB) &&
 		    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
 			return 1;
 


* [patch 03/15] SSB updates V17 3
  2018-05-16 13:51 [patch 00/15] SSB updates V17 0 Thomas Gleixner
  2018-05-16 13:51 ` [patch 01/15] SSB updates V17 1 Thomas Gleixner
  2018-05-16 13:51 ` [patch 02/15] SSB updates V17 2 Thomas Gleixner
@ 2018-05-16 13:51 ` Thomas Gleixner
  2018-05-17  1:06   ` [MODERATED] " Konrad Rzeszutek Wilk
  2018-05-16 13:51 ` [patch 04/15] SSB updates V17 4 Thomas Gleixner
                   ` (12 subsequent siblings)
  15 siblings, 1 reply; 40+ messages in thread
From: Thomas Gleixner @ 2018-05-16 13:51 UTC (permalink / raw)
  To: speck

Subject: [patch 03/15] x86/cpufeatures: Disentangle MSR_SPEC_CTRL enumeration from IBRS
From: Thomas Gleixner <tglx@linutronix.de>

The availability of the SPEC_CTRL MSR is enumerated by a CPUID bit on
Intel and implied by IBRS or STIBP support on AMD. That's just confusing:
if an AMD CPU no longer advertises IBRS because the underlying problem has
been fixed, but still has another valid bit in the SPEC_CTRL MSR, the whole
scheme falls apart.

Add a synthetic feature bit X86_FEATURE_MSR_SPEC_CTRL to denote the
availability on both Intel and AMD.

While at it, replace the boot_cpu_has() checks with static_cpu_has() where
possible. This prevents late microcode loading from exposing SPEC_CTRL, but
late loading is already very limited as it does not reevaluate the
mitigation options and other bits and pieces. Having static_cpu_has() is
the simplest and least fragile solution.
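
As a hedged illustration of the problem (the CPU described is hypothetical,
the code pattern matches the check_bugs() hunk below): a part might expose
only SSBD in SPEC_CTRL without IBRS, in which case gating the MSR access on
IBRS skips a perfectly valid MSR, while gating it on the new synthetic bit
still works:

	/* Before: the MSR access is tied to one particular feature bit */
	if (boot_cpu_has(X86_FEATURE_IBRS))
		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	/* After: tied to "the MSR exists", regardless of which bits it has */
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);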

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
---
 arch/x86/include/asm/cpufeatures.h |    1 +
 arch/x86/kernel/cpu/bugs.c         |   18 +++++++++++-------
 arch/x86/kernel/cpu/common.c       |    9 +++++++--
 arch/x86/kernel/cpu/intel.c        |    1 +
 4 files changed, 20 insertions(+), 9 deletions(-)

--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -206,6 +206,7 @@
 #define X86_FEATURE_RETPOLINE_AMD	( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
 #define X86_FEATURE_INTEL_PPIN		( 7*32+14) /* Intel Processor Inventory Number */
 #define X86_FEATURE_CDP_L2		( 7*32+15) /* Code and Data Prioritization L2 */
+#define X86_FEATURE_MSR_SPEC_CTRL	( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
 
 #define X86_FEATURE_MBA			( 7*32+18) /* Memory Bandwidth Allocation */
 #define X86_FEATURE_RSB_CTXSW		( 7*32+19) /* "" Fill RSB on context switches */
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -64,7 +64,7 @@ void __init check_bugs(void)
 	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
 	 * init code as it is not enumerated and depends on the family.
 	 */
-	if (boot_cpu_has(X86_FEATURE_IBRS))
+	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
 		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
 
 	/* Select the proper spectre mitigation before patching alternatives */
@@ -145,7 +145,7 @@ u64 x86_spec_ctrl_get_default(void)
 {
 	u64 msrval = x86_spec_ctrl_base;
 
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+	if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
 		msrval |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
 	return msrval;
 }
@@ -155,10 +155,12 @@ void x86_spec_ctrl_set_guest(u64 guest_s
 {
 	u64 host = x86_spec_ctrl_base;
 
-	if (!boot_cpu_has(X86_FEATURE_IBRS))
+	/* Is MSR_SPEC_CTRL implemented ? */
+	if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
 		return;
 
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+	/* Intel controls SSB in MSR_SPEC_CTRL */
+	if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
 		host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
 
 	if (host != guest_spec_ctrl)
@@ -170,10 +172,12 @@ void x86_spec_ctrl_restore_host(u64 gues
 {
 	u64 host = x86_spec_ctrl_base;
 
-	if (!boot_cpu_has(X86_FEATURE_IBRS))
+	/* Is MSR_SPEC_CTRL implemented ? */
+	if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
 		return;
 
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+	/* Intel controls SSB in MSR_SPEC_CTRL */
+	if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
 		host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
 
 	if (host != guest_spec_ctrl)
@@ -631,7 +635,7 @@ int arch_prctl_spec_ctrl_get(struct task
 
 void x86_spec_ctrl_setup_ap(void)
 {
-	if (boot_cpu_has(X86_FEATURE_IBRS))
+	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
 		x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);
 
 	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -761,19 +761,24 @@ static void init_speculation_control(str
 	if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
 		set_cpu_cap(c, X86_FEATURE_IBRS);
 		set_cpu_cap(c, X86_FEATURE_IBPB);
+		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
 	}
 
 	if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
 		set_cpu_cap(c, X86_FEATURE_STIBP);
 
-	if (cpu_has(c, X86_FEATURE_AMD_IBRS))
+	if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
 		set_cpu_cap(c, X86_FEATURE_IBRS);
+		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+	}
 
 	if (cpu_has(c, X86_FEATURE_AMD_IBPB))
 		set_cpu_cap(c, X86_FEATURE_IBPB);
 
-	if (cpu_has(c, X86_FEATURE_AMD_STIBP))
+	if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
 		set_cpu_cap(c, X86_FEATURE_STIBP);
+		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+	}
 }
 
 void get_cpu_cap(struct cpuinfo_x86 *c)
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -188,6 +188,7 @@ static void early_init_intel(struct cpui
 		setup_clear_cpu_cap(X86_FEATURE_IBPB);
 		setup_clear_cpu_cap(X86_FEATURE_STIBP);
 		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
+		setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
 		setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
 		setup_clear_cpu_cap(X86_FEATURE_SSBD);
 	}


* [patch 04/15] SSB updates V17 4
  2018-05-16 13:51 [patch 00/15] SSB updates V17 0 Thomas Gleixner
                   ` (2 preceding siblings ...)
  2018-05-16 13:51 ` [patch 03/15] SSB updates V17 3 Thomas Gleixner
@ 2018-05-16 13:51 ` Thomas Gleixner
  2018-05-17  1:14   ` [MODERATED] " Konrad Rzeszutek Wilk
  2018-05-16 13:51 ` [patch 05/15] SSB updates V17 5 Thomas Gleixner
                   ` (11 subsequent siblings)
  15 siblings, 1 reply; 40+ messages in thread
From: Thomas Gleixner @ 2018-05-16 13:51 UTC (permalink / raw)
  To: speck

Subject: [patch 04/15] x86/cpufeatures: Disentangle SSBD enumeration
From: Thomas Gleixner <tglx@linutronix.de>

The SSBD enumeration is, similarly to the other bits, magically shared
between Intel and AMD, though the mechanisms are different.

Make X86_FEATURE_SSBD synthetic and set it depending on the vendor-specific
features or the family-dependent setup.

Change the Intel bit to X86_FEATURE_SPEC_CTRL_SSBD to denote that SSBD is
controlled via MSR_SPEC_CTRL and fix up the usage sites.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
---
V2: Clear X86_FEATURE_SPEC_CTRL_SSBD when buggy microcode is detected
---
 arch/x86/include/asm/cpufeatures.h |    7 +++----
 arch/x86/kernel/cpu/amd.c          |    7 +------
 arch/x86/kernel/cpu/bugs.c         |   10 +++++-----
 arch/x86/kernel/cpu/common.c       |    3 +++
 arch/x86/kernel/cpu/intel.c        |    1 +
 arch/x86/kernel/process.c          |    2 +-
 6 files changed, 14 insertions(+), 16 deletions(-)

--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -207,15 +207,14 @@
 #define X86_FEATURE_INTEL_PPIN		( 7*32+14) /* Intel Processor Inventory Number */
 #define X86_FEATURE_CDP_L2		( 7*32+15) /* Code and Data Prioritization L2 */
 #define X86_FEATURE_MSR_SPEC_CTRL	( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
-
+#define X86_FEATURE_SSBD		( 7*32+17) /* Speculative Store Bypass Disable */
 #define X86_FEATURE_MBA			( 7*32+18) /* Memory Bandwidth Allocation */
 #define X86_FEATURE_RSB_CTXSW		( 7*32+19) /* "" Fill RSB on context switches */
 #define X86_FEATURE_SEV			( 7*32+20) /* AMD Secure Encrypted Virtualization */
-
 #define X86_FEATURE_USE_IBPB		( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
 #define X86_FEATURE_USE_IBRS_FW		( 7*32+22) /* "" Use IBRS during runtime firmware calls */
 #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE	( 7*32+23) /* "" Disable Speculative Store Bypass. */
-#define X86_FEATURE_AMD_SSBD		( 7*32+24)  /* "" AMD SSBD implementation */
+#define X86_FEATURE_LS_CFG_SSBD		( 7*32+24)  /* "" AMD SSBD implementation via LS_CFG MSR */
 #define X86_FEATURE_IBRS		( 7*32+25) /* Indirect Branch Restricted Speculation */
 #define X86_FEATURE_IBPB		( 7*32+26) /* Indirect Branch Prediction Barrier */
 #define X86_FEATURE_STIBP		( 7*32+27) /* Single Thread Indirect Branch Predictors */
@@ -339,7 +338,7 @@
 #define X86_FEATURE_SPEC_CTRL		(18*32+26) /* "" Speculation Control (IBRS + IBPB) */
 #define X86_FEATURE_INTEL_STIBP		(18*32+27) /* "" Single Thread Indirect Branch Predictors */
 #define X86_FEATURE_ARCH_CAPABILITIES	(18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
-#define X86_FEATURE_SSBD		(18*32+31) /* Speculative Store Bypass Disable */
+#define X86_FEATURE_SPEC_CTRL_SSBD	(18*32+31) /* "" Speculative Store Bypass Disable */
 
 /*
  * BUG word(s)
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -570,8 +570,8 @@ static void bsp_init_amd(struct cpuinfo_
 		 * avoid RMW. If that faults, do not enable SSBD.
 		 */
 		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
+			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
 			setup_force_cpu_cap(X86_FEATURE_SSBD);
-			setup_force_cpu_cap(X86_FEATURE_AMD_SSBD);
 			x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
 		}
 	}
@@ -919,11 +919,6 @@ static void init_amd(struct cpuinfo_x86
 	/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
 	if (!cpu_has(c, X86_FEATURE_XENPV))
 		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
-
-	if (boot_cpu_has(X86_FEATURE_AMD_SSBD)) {
-		set_cpu_cap(c, X86_FEATURE_SSBD);
-		set_cpu_cap(c, X86_FEATURE_AMD_SSBD);
-	}
 }
 
 #ifdef CONFIG_X86_32
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -159,8 +159,8 @@ void x86_spec_ctrl_set_guest(u64 guest_s
 	if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
 		return;
 
-	/* Intel controls SSB in MSR_SPEC_CTRL */
-	if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
+	/* SSBD controlled in MSR_SPEC_CTRL */
+	if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
 		host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
 
 	if (host != guest_spec_ctrl)
@@ -176,8 +176,8 @@ void x86_spec_ctrl_restore_host(u64 gues
 	if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
 		return;
 
-	/* Intel controls SSB in MSR_SPEC_CTRL */
-	if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
+	/* SSBD controlled in MSR_SPEC_CTRL */
+	if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
 		host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
 
 	if (host != guest_spec_ctrl)
@@ -189,7 +189,7 @@ static void x86_amd_ssb_disable(void)
 {
 	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
 
-	if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
+	if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
 		wrmsrl(MSR_AMD64_LS_CFG, msrval);
 }
 
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -767,6 +767,9 @@ static void init_speculation_control(str
 	if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
 		set_cpu_cap(c, X86_FEATURE_STIBP);
 
+	if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD))
+		set_cpu_cap(c, X86_FEATURE_SSBD);
+
 	if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
 		set_cpu_cap(c, X86_FEATURE_IBRS);
 		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -191,6 +191,7 @@ static void early_init_intel(struct cpui
 		setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
 		setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
 		setup_clear_cpu_cap(X86_FEATURE_SSBD);
+		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
 	}
 
 	/*
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -283,7 +283,7 @@ static __always_inline void __speculativ
 {
 	u64 msr;
 
-	if (static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+	if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
 		msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
 		wrmsrl(MSR_AMD64_LS_CFG, msr);
 	} else {

^ permalink raw reply	[flat|nested] 40+ messages in thread

* [patch 05/15] SSB updates V17 5
  2018-05-16 13:51 [patch 00/15] SSB updates V17 0 Thomas Gleixner
                   ` (3 preceding siblings ...)
  2018-05-16 13:51 ` [patch 04/15] SSB updates V17 4 Thomas Gleixner
@ 2018-05-16 13:51 ` Thomas Gleixner
  2018-05-17  1:14   ` [MODERATED] " Konrad Rzeszutek Wilk
  2018-05-16 13:51 ` [patch 06/15] SSB updates V17 6 Thomas Gleixner
                   ` (10 subsequent siblings)
  15 siblings, 1 reply; 40+ messages in thread
From: Thomas Gleixner @ 2018-05-16 13:51 UTC (permalink / raw)
  To: speck

Subject: [patch 05/15] x86/cpufeatures: Add FEATURE_ZEN
From: Thomas Gleixner <tglx@linutronix.de>

Add a ZEN feature bit so family-dependent static_cpu_has() optimizations
can be built for ZEN.
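
A minimal usage sketch: the consumer shows up later in this series in
amd_set_core_ssb_state(); the excerpt below is reproduced here only to
illustrate why a synthetic bit beats a runtime family check ('msr' and
'tifn' come from that later function):

	/* Patched in via alternatives, no boot_cpu_data.x86 == 0x17 test */
	if (!static_cpu_has(X86_FEATURE_ZEN)) {
		msr |= ssbd_tif_to_amd_ls_cfg(tifn);
		wrmsrl(MSR_AMD64_LS_CFG, msr);
		return;
	}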

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
---
 arch/x86/include/asm/cpufeatures.h |    1 +
 arch/x86/kernel/cpu/amd.c          |    1 +
 2 files changed, 2 insertions(+)

--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -218,6 +218,7 @@
 #define X86_FEATURE_IBRS		( 7*32+25) /* Indirect Branch Restricted Speculation */
 #define X86_FEATURE_IBPB		( 7*32+26) /* Indirect Branch Prediction Barrier */
 #define X86_FEATURE_STIBP		( 7*32+27) /* Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_ZEN			( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW		( 8*32+ 0) /* Intel TPR Shadow */
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -812,6 +812,7 @@ static void init_amd_bd(struct cpuinfo_x
 
 static void init_amd_zn(struct cpuinfo_x86 *c)
 {
+	set_cpu_cap(c, X86_FEATURE_ZEN);
 	/*
 	 * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
 	 * all up to and including B1.


* [patch 06/15] SSB updates V17 6
  2018-05-16 13:51 [patch 00/15] SSB updates V17 0 Thomas Gleixner
                   ` (4 preceding siblings ...)
  2018-05-16 13:51 ` [patch 05/15] SSB updates V17 5 Thomas Gleixner
@ 2018-05-16 13:51 ` Thomas Gleixner
  2018-05-17  1:28   ` [MODERATED] " Konrad Rzeszutek Wilk
  2018-05-16 13:51 ` [patch 07/15] SSB updates V17 7 Thomas Gleixner
                   ` (9 subsequent siblings)
  15 siblings, 1 reply; 40+ messages in thread
From: Thomas Gleixner @ 2018-05-16 13:51 UTC (permalink / raw)
  To: speck

Subject: [patch 06/15] x86/speculation: Handle HT correctly on AMD
From: Thomas Gleixner <tglx@linutronix.de>

The AMD64_LS_CFG MSR is a per-core MSR on Family 17H CPUs. That means when
hyperthreading is enabled the SSBD bit toggle needs to take both siblings of
the core into account. Otherwise the following situation can happen:

CPU0		CPU1

disable SSB
		disable SSB
		enable  SSB <- Enables it for the Core, i.e. for CPU0 as well

So after the SSB enable on CPU1 the task on CPU0 runs with SSB enabled
again.

On Intel the SSBD control is per core as well, but the synchronization
logic is implemented behind the per thread SPEC_CTRL MSR. It works like
this:

  CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL

i.e. if one of the threads enables a mitigation then this affects both and
the mitigation is only disabled in the core when both threads have disabled
it.

Add the necessary synchronization logic for AMD family 17H. Unfortunately
that requires a spinlock to serialize the access to the MSR, but the locks
are only shared between siblings.
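
Conceptually the serialization boils down to a per-core reference count,
roughly as in the sketch below. This is simplified: the real code in the
patch also handles the non-Zen fast path and blocks reentry on the same
CPU, and 'ssbd_enable', 'base' and 'ssbd_mask' are stand-ins for the TIF
test, x86_amd_ls_cfg_base and x86_amd_ls_cfg_ssbd_mask:

	raw_spin_lock(&st->shared_state->lock);
	if (ssbd_enable) {
		/* First sibling to enable SSBD writes the MSR */
		if (!st->shared_state->disable_state++)
			wrmsrl(MSR_AMD64_LS_CFG, base | ssbd_mask);
	} else {
		/* Last sibling to disable SSBD clears it for the core */
		if (!--st->shared_state->disable_state)
			wrmsrl(MSR_AMD64_LS_CFG, base);
	}
	raw_spin_unlock(&st->shared_state->lock);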

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
---
V2: Amended changelog and clarified code comments.
---
 arch/x86/include/asm/spec-ctrl.h |    6 +
 arch/x86/kernel/process.c        |  125 +++++++++++++++++++++++++++++++++++++--
 arch/x86/kernel/smpboot.c        |    5 +
 3 files changed, 130 insertions(+), 6 deletions(-)

--- a/arch/x86/include/asm/spec-ctrl.h
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -33,6 +33,12 @@ static inline u64 ssbd_tif_to_amd_ls_cfg
 	return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
 }
 
+#ifdef CONFIG_SMP
+extern void speculative_store_bypass_ht_init(void);
+#else
+static inline void speculative_store_bypass_ht_init(void) { }
+#endif
+
 extern void speculative_store_bypass_update(void);
 
 #endif
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -279,22 +279,135 @@ static inline void switch_to_bitmap(stru
 	}
 }
 
-static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
+#ifdef CONFIG_SMP
+
+struct ssb_state {
+	struct ssb_state	*shared_state;
+	raw_spinlock_t		lock;
+	unsigned int		disable_state;
+	unsigned long		local_state;
+};
+
+#define LSTATE_SSB	0
+
+static DEFINE_PER_CPU(struct ssb_state, ssb_state);
+
+void speculative_store_bypass_ht_init(void)
+{
+	struct ssb_state *st = this_cpu_ptr(&ssb_state);
+	unsigned int this_cpu = smp_processor_id();
+	unsigned int cpu;
+
+	st->local_state = 0;
+
+	/*
+	 * Shared state setup happens once on the first bringup
+	 * of the CPU. It's not destroyed on CPU hotunplug.
+	 */
+	if (st->shared_state)
+		return;
+
+	raw_spin_lock_init(&st->lock);
+
+	/*
+	 * Go over HT siblings and check whether one of them has set up the
+	 * shared state pointer already.
+	 */
+	for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
+		if (cpu == this_cpu)
+			continue;
+
+		if (!per_cpu(ssb_state, cpu).shared_state)
+			continue;
+
+		/* Link it to the state of the sibling: */
+		st->shared_state = per_cpu(ssb_state, cpu).shared_state;
+		return;
+	}
+
+	/*
+	 * First HT sibling to come up on the core.  Link shared state of
+	 * the first HT sibling to itself. The siblings on the same core
+	 * which come up later will see the shared state pointer and link
+	 * themselves to the state of this CPU.
+	 */
+	st->shared_state = st;
+}
+
+/*
+ * Logic is: First HT sibling enables SSBD for both siblings in the core
+ * and last sibling to disable it, disables it for the whole core. This is
+ * how MSR_SPEC_CTRL works in "hardware":
+ *
+ *  CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
+ */
+static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
 {
-	u64 msr;
+	struct ssb_state *st = this_cpu_ptr(&ssb_state);
+	u64 msr = x86_amd_ls_cfg_base;
 
-	if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
-		msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
+	if (!static_cpu_has(X86_FEATURE_ZEN)) {
+		msr |= ssbd_tif_to_amd_ls_cfg(tifn);
 		wrmsrl(MSR_AMD64_LS_CFG, msr);
+		return;
+	}
+
+	if (tifn & _TIF_SSBD) {
+		/*
+		 * Since this can race with prctl(), block reentry on the
+		 * same CPU.
+		 */
+		if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
+			return;
+
+		msr |= x86_amd_ls_cfg_ssbd_mask;
+
+		raw_spin_lock(&st->shared_state->lock);
+		/* First sibling enables SSBD: */
+		if (!st->shared_state->disable_state)
+			wrmsrl(MSR_AMD64_LS_CFG, msr);
+		st->shared_state->disable_state++;
+		raw_spin_unlock(&st->shared_state->lock);
 	} else {
-		msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
-		wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+		if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
+			return;
+
+		raw_spin_lock(&st->shared_state->lock);
+		st->shared_state->disable_state--;
+		if (!st->shared_state->disable_state)
+			wrmsrl(MSR_AMD64_LS_CFG, msr);
+		raw_spin_unlock(&st->shared_state->lock);
 	}
 }
+#else
+static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
+{
+	u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
+
+	wrmsrl(MSR_AMD64_LS_CFG, msr);
+}
+#endif
+
+static __always_inline void intel_set_ssb_state(unsigned long tifn)
+{
+	u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
+
+	wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+}
+
+static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
+{
+	if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+		amd_set_core_ssb_state(tifn);
+	else
+		intel_set_ssb_state(tifn);
+}
 
 void speculative_store_bypass_update(void)
 {
+	preempt_disable();
 	__speculative_store_bypass_update(current_thread_info()->flags);
+	preempt_enable();
 }
 
 void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -79,6 +79,7 @@
 #include <asm/qspinlock.h>
 #include <asm/intel-family.h>
 #include <asm/cpu_device_id.h>
+#include <asm/spec-ctrl.h>
 
 /* Number of siblings per CPU package */
 int smp_num_siblings = 1;
@@ -244,6 +245,8 @@ static void notrace start_secondary(void
 	 */
 	check_tsc_sync_target();
 
+	speculative_store_bypass_ht_init();
+
 	/*
 	 * Lock vector_lock, set CPU online and bring the vector
 	 * allocator online. Online must be set with vector_lock held
@@ -1292,6 +1295,8 @@ void __init native_smp_prepare_cpus(unsi
 	set_mtrr_aps_delayed_init();
 
 	smp_quirk_init_udelay();
+
+	speculative_store_bypass_ht_init();
 }
 
 void arch_enable_nonboot_cpus_begin(void)


* [patch 07/15] SSB updates V17 7
  2018-05-16 13:51 [patch 00/15] SSB updates V17 0 Thomas Gleixner
                   ` (5 preceding siblings ...)
  2018-05-16 13:51 ` [patch 06/15] SSB updates V17 6 Thomas Gleixner
@ 2018-05-16 13:51 ` Thomas Gleixner
  2018-05-17  1:29   ` [MODERATED] " Konrad Rzeszutek Wilk
  2018-05-16 13:51 ` [patch 08/15] SSB updates V17 8 Thomas Gleixner
                   ` (8 subsequent siblings)
  15 siblings, 1 reply; 40+ messages in thread
From: Thomas Gleixner @ 2018-05-16 13:51 UTC (permalink / raw)
  To: speck

Subject: [patch 07/15] x86/bugs, KVM: Extend speculation control for VIRT_SPEC_CTRL
From: Thomas Gleixner <tglx@linutronix.de>

AMD is proposing a VIRT_SPEC_CTRL MSR to handle the Speculative Store
Bypass Disable via MSR_AMD64_LS_CFG, so that guests do not have to care
about the bit position of the SSBD bit, which facilitates migration.
Also, the sibling coordination on Family 17H CPUs can only be done on
the host.

Extend x86_spec_ctrl_set_guest() and x86_spec_ctrl_restore_host() with an
extra argument for the VIRT_SPEC_CTRL MSR.

Hand in 0 from VMX and in SVM add a new virt_spec_ctrl member to the CPU
data structure which is going to be used in later patches for the actual
implementation.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
---
 arch/x86/include/asm/spec-ctrl.h |    9 ++++++---
 arch/x86/kernel/cpu/bugs.c       |   20 ++++++++++++++++++--
 arch/x86/kvm/svm.c               |   11 +++++++++--
 arch/x86/kvm/vmx.c               |    4 ++--
 4 files changed, 35 insertions(+), 9 deletions(-)

--- a/arch/x86/include/asm/spec-ctrl.h
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -10,10 +10,13 @@
  * the guest has, while on VMEXIT we restore the host view. This
  * would be easier if SPEC_CTRL were architecturally maskable or
  * shadowable for guests but this is not (currently) the case.
- * Takes the guest view of SPEC_CTRL MSR as a parameter.
+ * Takes the guest view of SPEC_CTRL MSR as a parameter and also
+ * the guest's version of VIRT_SPEC_CTRL, if emulated.
  */
-extern void x86_spec_ctrl_set_guest(u64);
-extern void x86_spec_ctrl_restore_host(u64);
+extern void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl,
+				    u64 guest_virt_spec_ctrl);
+extern void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl,
+				       u64 guest_virt_spec_ctrl);
 
 /* AMD specific Speculative Store Bypass MSR data */
 extern u64 x86_amd_ls_cfg_base;
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -151,7 +151,15 @@ u64 x86_spec_ctrl_get_default(void)
 }
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
 
-void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
+/**
+ * x86_spec_ctrl_set_guest - Set speculation control registers for the guest
+ * @guest_spec_ctrl:		The guest content of MSR_SPEC_CTRL
+ * @guest_virt_spec_ctrl:	The guest controlled bits of MSR_VIRT_SPEC_CTRL
+ *				(may get translated to MSR_AMD64_LS_CFG bits)
+ *
+ * Avoids writing to the MSR if the content/bits are the same
+ */
+void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
 {
 	u64 host = x86_spec_ctrl_base;
 
@@ -168,7 +176,15 @@ void x86_spec_ctrl_set_guest(u64 guest_s
 }
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);
 
-void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
+/**
+ * x86_spec_ctrl_restore_host - Restore host speculation control registers
+ * @guest_spec_ctrl:		The guest content of MSR_SPEC_CTRL
+ * @guest_virt_spec_ctrl:	The guest controlled bits of MSR_VIRT_SPEC_CTRL
+ *				(may get translated to MSR_AMD64_LS_CFG bits)
+ *
+ * Avoids writing to the MSR if the content/bits are the same
+ */
+void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
 {
 	u64 host = x86_spec_ctrl_base;
 
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -213,6 +213,12 @@ struct vcpu_svm {
 	} host;
 
 	u64 spec_ctrl;
+	/*
+	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
+	 * translated into the appropriate L2_CFG bits on the host to
+	 * perform speculative control.
+	 */
+	u64 virt_spec_ctrl;
 
 	u32 *msrpm;
 
@@ -2060,6 +2066,7 @@ static void svm_vcpu_reset(struct kvm_vc
 
 	vcpu->arch.microcode_version = 0x01000065;
 	svm->spec_ctrl = 0;
+	svm->virt_spec_ctrl = 0;
 
 	if (!init_event) {
 		svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
@@ -5557,7 +5564,7 @@ static void svm_vcpu_run(struct kvm_vcpu
 	 * is no need to worry about the conditional branch over the wrmsr
 	 * being speculatively taken.
 	 */
-	x86_spec_ctrl_set_guest(svm->spec_ctrl);
+	x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
 
 	asm volatile (
 		"push %%" _ASM_BP "; \n\t"
@@ -5681,7 +5688,7 @@ static void svm_vcpu_run(struct kvm_vcpu
 	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
 		svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
-	x86_spec_ctrl_restore_host(svm->spec_ctrl);
+	x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
 
 	reload_tss(vcpu);
 
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -9717,7 +9717,7 @@ static void __noclone vmx_vcpu_run(struc
 	 * is no need to worry about the conditional branch over the wrmsr
 	 * being speculatively taken.
 	 */
-	x86_spec_ctrl_set_guest(vmx->spec_ctrl);
+	x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
 
 	vmx->__launched = vmx->loaded_vmcs->launched;
 
@@ -9865,7 +9865,7 @@ static void __noclone vmx_vcpu_run(struc
 	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
 		vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
-	x86_spec_ctrl_restore_host(vmx->spec_ctrl);
+	x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
 
 	/* Eliminate branch target predictions from guest mode */
 	vmexit_fill_RSB();


* [patch 08/15] SSB updates V17 8
  2018-05-16 13:51 [patch 00/15] SSB updates V17 0 Thomas Gleixner
                   ` (6 preceding siblings ...)
  2018-05-16 13:51 ` [patch 07/15] SSB updates V17 7 Thomas Gleixner
@ 2018-05-16 13:51 ` Thomas Gleixner
  2018-05-16 21:13   ` [MODERATED] " Tom Lendacky
  2018-05-16 13:51 ` [patch 09/15] SSB updates V17 9 Thomas Gleixner
                   ` (7 subsequent siblings)
  15 siblings, 1 reply; 40+ messages in thread
From: Thomas Gleixner @ 2018-05-16 13:51 UTC (permalink / raw)
  To: speck

Subject: [patch 08/15] x86/speculation: Add virtualized speculative store bypass disable support
From: Tom Lendacky <thomas.lendacky@amd.com>

Some AMD processors only support a non-architectural means of enabling
speculative store bypass disable (SSBD).  To allow a simplified view of
this to a guest, an architectural definition has been created through a new
CPUID bit, 0x80000008_EBX[25], and a new MSR, 0xc001011f.  With this, a
hypervisor can virtualize the existence of this definition and provide an
architectural method for using SSBD to a guest.

Add the new CPUID feature, the new MSR and update the existing SSBD
support to use this MSR when present.
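
For reference, the guest-visible interface then looks roughly like the
sketch below. This shows the guest side, which is not part of this patch;
the constants match the definitions added below, while the surrounding
code is hypothetical:

	/* Guest: detect the virtualized SSBD control, 0x80000008 EBX[25] */
	if (cpuid_ebx(0x80000008) & BIT(25)) {
		/* Request SSB disable via MSR 0xc001011f */
		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
	}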

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
---
 arch/x86/include/asm/cpufeatures.h |    1 +
 arch/x86/include/asm/msr-index.h   |    2 ++
 arch/x86/kernel/cpu/bugs.c         |    4 +++-
 arch/x86/kernel/process.c          |   13 ++++++++++++-
 4 files changed, 18 insertions(+), 2 deletions(-)

--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -282,6 +282,7 @@
 #define X86_FEATURE_AMD_IBPB		(13*32+12) /* "" Indirect Branch Prediction Barrier */
 #define X86_FEATURE_AMD_IBRS		(13*32+14) /* "" Indirect Branch Restricted Speculation */
 #define X86_FEATURE_AMD_STIBP		(13*32+15) /* "" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_VIRT_SSBD		(13*32+25) /* Virtualized Speculative Store Bypass Disable */
 
 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
 #define X86_FEATURE_DTHERM		(14*32+ 0) /* Digital Thermal Sensor */
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -347,6 +347,8 @@
 #define MSR_AMD64_SEV_ENABLED_BIT	0
 #define MSR_AMD64_SEV_ENABLED		BIT_ULL(MSR_AMD64_SEV_ENABLED_BIT)
 
+#define MSR_AMD64_VIRT_SPEC_CTRL	0xc001011f
+
 /* Fam 17h MSRs */
 #define MSR_F17H_IRPERF			0xc00000e9
 
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -205,7 +205,9 @@ static void x86_amd_ssb_disable(void)
 {
 	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
 
-	if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
+		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
+	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
 		wrmsrl(MSR_AMD64_LS_CFG, msrval);
 }
 
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -388,6 +388,15 @@ static __always_inline void amd_set_core
 }
 #endif
 
+static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
+{
+	/*
+	 * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
+	 * so ssbd_tif_to_spec_ctrl() just works.
+	 */
+	wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
+}
+
 static __always_inline void intel_set_ssb_state(unsigned long tifn)
 {
 	u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
@@ -397,7 +406,9 @@ static __always_inline void intel_set_ss
 
 static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
 {
-	if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+	if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
+		amd_set_ssb_virt_state(tifn);
+	else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
 		amd_set_core_ssb_state(tifn);
 	else
 		intel_set_ssb_state(tifn);


* [patch 09/15] SSB updates V17 9
  2018-05-16 13:51 [patch 00/15] SSB updates V17 0 Thomas Gleixner
                   ` (7 preceding siblings ...)
  2018-05-16 13:51 ` [patch 08/15] SSB updates V17 8 Thomas Gleixner
@ 2018-05-16 13:51 ` Thomas Gleixner
  2018-05-17  1:40   ` [MODERATED] " Konrad Rzeszutek Wilk
  2018-05-16 13:51 ` [patch 10/15] SSB updates V17 10 Thomas Gleixner
                   ` (6 subsequent siblings)
  15 siblings, 1 reply; 40+ messages in thread
From: Thomas Gleixner @ 2018-05-16 13:51 UTC (permalink / raw)
  To: speck

Subject: [patch 09/15] x86/speculation: Rework speculative_store_bypass_update()
From: Thomas Gleixner <tglx@linutronix.de>

The upcoming support for the virtual SPEC_CTRL MSR on AMD needs to reuse
speculative_store_bypass_update() to avoid code duplication. Add an
argument for supplying a thread info (TIF) value and create a wrapper
speculative_store_bypass_update_current() which is used at the existing
call site.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
---
 arch/x86/include/asm/spec-ctrl.h |    7 ++++++-
 arch/x86/kernel/cpu/bugs.c       |    2 +-
 arch/x86/kernel/process.c        |    4 ++--
 3 files changed, 9 insertions(+), 4 deletions(-)

--- a/arch/x86/include/asm/spec-ctrl.h
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -42,6 +42,11 @@ extern void speculative_store_bypass_ht_
 static inline void speculative_store_bypass_ht_init(void) { }
 #endif
 
-extern void speculative_store_bypass_update(void);
+extern void speculative_store_bypass_update(unsigned long tif);
+
+static inline void speculative_store_bypass_update_current(void)
+{
+	speculative_store_bypass_update(current_thread_info()->flags);
+}
 
 #endif
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -598,7 +598,7 @@ static int ssb_prctl_set(struct task_str
 	 * mitigation until it is next scheduled.
 	 */
 	if (task == current && update)
-		speculative_store_bypass_update();
+		speculative_store_bypass_update_current();
 
 	return 0;
 }
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -392,10 +392,10 @@ static __always_inline void __speculativ
 		intel_set_ssb_state(tifn);
 }
 
-void speculative_store_bypass_update(void)
+void speculative_store_bypass_update(unsigned long tif)
 {
 	preempt_disable();
-	__speculative_store_bypass_update(current_thread_info()->flags);
+	__speculative_store_bypass_update(tif);
 	preempt_enable();
 }
 


* [patch 10/15] SSB updates V17 10
  2018-05-16 13:51 [patch 00/15] SSB updates V17 0 Thomas Gleixner
                   ` (8 preceding siblings ...)
  2018-05-16 13:51 ` [patch 09/15] SSB updates V17 9 Thomas Gleixner
@ 2018-05-16 13:51 ` Thomas Gleixner
  2018-05-17  1:43   ` [MODERATED] " Konrad Rzeszutek Wilk
  2018-05-16 13:51 ` [patch 11/15] SSB updates V17 11 Thomas Gleixner
                   ` (5 subsequent siblings)
  15 siblings, 1 reply; 40+ messages in thread
From: Thomas Gleixner @ 2018-05-16 13:51 UTC (permalink / raw)
  To: speck

From: Borislav Petkov <bp@suse.de>
Subject: [patch 10/15] x86/bugs: Unify x86_spec_ctrl_{set_guest,restore_host}

The function bodies are very similar and are going to grow more almost
identical code. Add a bool argument to determine whether SPEC_CTRL is being
set for the guest or restored to the host.

No functional changes.

Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
V2: Hide ssbd_tif_to_spec_ctrl() evaluation
---
 arch/x86/include/asm/spec-ctrl.h |   33 ++++++++++++++++++---
 arch/x86/kernel/cpu/bugs.c       |   60 +++++++++------------------------------
 2 files changed, 44 insertions(+), 49 deletions(-)

--- a/arch/x86/include/asm/spec-ctrl.h
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -13,10 +13,35 @@
  * Takes the guest view of SPEC_CTRL MSR as a parameter and also
  * the guest's version of VIRT_SPEC_CTRL, if emulated.
  */
-extern void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl,
-				    u64 guest_virt_spec_ctrl);
-extern void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl,
-				       u64 guest_virt_spec_ctrl);
+extern void x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool guest);
+
+/**
+ * x86_spec_ctrl_set_guest - Set speculation control registers for the guest
+ * @guest_spec_ctrl:		The guest content of MSR_SPEC_CTRL
+ * @guest_virt_spec_ctrl:	The guest controlled bits of MSR_VIRT_SPEC_CTRL
+ *				(may get translated to MSR_AMD64_LS_CFG bits)
+ *
+ * Avoids writing to the MSR if the content/bits are the same
+ */
+static inline
+void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
+{
+	x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, true);
+}
+
+/**
+ * x86_spec_ctrl_restore_host - Restore host speculation control registers
+ * @guest_spec_ctrl:		The guest content of MSR_SPEC_CTRL
+ * @guest_virt_spec_ctrl:	The guest controlled bits of MSR_VIRT_SPEC_CTRL
+ *				(may get translated to MSR_AMD64_LS_CFG bits)
+ *
+ * Avoids writing to the MSR if the content/bits are the same
+ */
+static inline
+void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
+{
+	x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, false);
+}
 
 /* AMD specific Speculative Store Bypass MSR data */
 extern u64 x86_amd_ls_cfg_base;
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -151,55 +151,25 @@ u64 x86_spec_ctrl_get_default(void)
 }
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
 
-/**
- * x86_spec_ctrl_set_guest - Set speculation control registers for the guest
- * @guest_spec_ctrl:		The guest content of MSR_SPEC_CTRL
- * @guest_virt_spec_ctrl:	The guest controlled bits of MSR_VIRT_SPEC_CTRL
- *				(may get translated to MSR_AMD64_LS_CFG bits)
- *
- * Avoids writing to the MSR if the content/bits are the same
- */
-void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
+void
+x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool guest)
 {
-	u64 host = x86_spec_ctrl_base;
+	struct thread_info *ti = current_thread_info();
+	u64 msr, host = x86_spec_ctrl_base;
 
 	/* Is MSR_SPEC_CTRL implemented ? */
-	if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
-		return;
-
-	/* SSBD controlled in MSR_SPEC_CTRL */
-	if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
-		host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
-
-	if (host != guest_spec_ctrl)
-		wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
-}
-EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);
-
-/**
- * x86_spec_ctrl_restore_host - Restore host speculation control registers
- * @guest_spec_ctrl:		The guest content of MSR_SPEC_CTRL
- * @guest_virt_spec_ctrl:	The guest controlled bits of MSR_VIRT_SPEC_CTRL
- *				(may get translated to MSR_AMD64_LS_CFG bits)
- *
- * Avoids writing to the MSR if the content/bits are the same
- */
-void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
-{
-	u64 host = x86_spec_ctrl_base;
-
-	/* Is MSR_SPEC_CTRL implemented ? */
-	if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
-		return;
-
-	/* SSBD controlled in MSR_SPEC_CTRL */
-	if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
-		host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
-
-	if (host != guest_spec_ctrl)
-		wrmsrl(MSR_IA32_SPEC_CTRL, host);
+	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
+		/* SSBD controlled in MSR_SPEC_CTRL */
+		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
+			host |= ssbd_tif_to_spec_ctrl(ti->flags);
+
+		if (host != guest_spec_ctrl) {
+			msr = guest ? guest_spec_ctrl : host;
+			wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+		}
+	}
 }
-EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
+EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
 
 static void x86_amd_ssb_disable(void)
 {


* [patch 11/15] SSB updates V17 11
  2018-05-16 13:51 [patch 00/15] SSB updates V17 0 Thomas Gleixner
                   ` (9 preceding siblings ...)
  2018-05-16 13:51 ` [patch 10/15] SSB updates V17 10 Thomas Gleixner
@ 2018-05-16 13:51 ` Thomas Gleixner
  2018-05-17  1:45   ` [MODERATED] " Konrad Rzeszutek Wilk
  2018-05-16 13:51 ` [patch 12/15] SSB updates V17 12 Thomas Gleixner
                   ` (4 subsequent siblings)
  15 siblings, 1 reply; 40+ messages in thread
From: Thomas Gleixner @ 2018-05-16 13:51 UTC (permalink / raw)
  To: speck

Subject: [patch 11/15] x86/bugs: Expose x86_spec_ctrl_base directly
From: Thomas Gleixner <tglx@linutronix.de>

x86_spec_ctrl_base is the system-wide default value for MSR_SPEC_CTRL.
x86_spec_ctrl_get_default() returns x86_spec_ctrl_base and was intended to
prevent modification of that variable. But the variable is read-only after
init and already globally visible.

Remove the function and export the variable instead.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
---
V2: Removed the hunk which inverted x86_spec_ctrl_mask
---
 arch/x86/include/asm/nospec-branch.h |   16 +++++-----------
 arch/x86/include/asm/spec-ctrl.h     |    3 ---
 arch/x86/kernel/cpu/bugs.c           |   11 +----------
 3 files changed, 6 insertions(+), 24 deletions(-)

--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -217,16 +217,7 @@ enum spectre_v2_mitigation {
 	SPECTRE_V2_IBRS,
 };
 
-/*
- * The Intel specification for the SPEC_CTRL MSR requires that we
- * preserve any already set reserved bits at boot time (e.g. for
- * future additions that this kernel is not currently aware of).
- * We then set any additional mitigation bits that we want
- * ourselves and always use this as the base for SPEC_CTRL.
- * We also use this when handling guest entry/exit as below.
- */
 extern void x86_spec_ctrl_set(u64);
-extern u64 x86_spec_ctrl_get_default(void);
 
 /* The Speculative Store Bypass disable variants */
 enum ssb_mitigation {
@@ -278,6 +269,9 @@ static inline void indirect_branch_predi
 	alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
 }
 
+/* The Intel SPEC CTRL MSR base value cache */
+extern u64 x86_spec_ctrl_base;
+
 /*
  * With retpoline, we must use IBRS to restrict branch prediction
  * before calling into firmware.
@@ -286,7 +280,7 @@ static inline void indirect_branch_predi
  */
 #define firmware_restrict_branch_speculation_start()			\
 do {									\
-	u64 val = x86_spec_ctrl_get_default() | SPEC_CTRL_IBRS;		\
+	u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS;			\
 									\
 	preempt_disable();						\
 	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
@@ -295,7 +289,7 @@ do {									\
 
 #define firmware_restrict_branch_speculation_end()			\
 do {									\
-	u64 val = x86_spec_ctrl_get_default();				\
+	u64 val = x86_spec_ctrl_base;					\
 									\
 	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
 			      X86_FEATURE_USE_IBRS_FW);			\
--- a/arch/x86/include/asm/spec-ctrl.h
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -47,9 +47,6 @@ void x86_spec_ctrl_restore_host(u64 gues
 extern u64 x86_amd_ls_cfg_base;
 extern u64 x86_amd_ls_cfg_ssbd_mask;
 
-/* The Intel SPEC CTRL MSR base value cache */
-extern u64 x86_spec_ctrl_base;
-
 static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
 {
 	BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -36,6 +36,7 @@ static void __init ssb_select_mitigation
  * writes to SPEC_CTRL contain whatever reserved bits have been set.
  */
 u64 __ro_after_init x86_spec_ctrl_base;
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
 
 /*
  * The vendor and possibly platform specific bits which can be modified in
@@ -141,16 +142,6 @@ void x86_spec_ctrl_set(u64 val)
 }
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_set);
 
-u64 x86_spec_ctrl_get_default(void)
-{
-	u64 msrval = x86_spec_ctrl_base;
-
-	if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
-		msrval |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
-	return msrval;
-}
-EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
-
 void
 x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool guest)
 {

^ permalink raw reply	[flat|nested] 40+ messages in thread

* [patch 12/15] SSB updates V17 12
  2018-05-16 13:51 [patch 00/15] SSB updates V17 0 Thomas Gleixner
                   ` (10 preceding siblings ...)
  2018-05-16 13:51 ` [patch 11/15] SSB updates V17 11 Thomas Gleixner
@ 2018-05-16 13:51 ` Thomas Gleixner
  2018-05-16 13:51 ` [patch 13/15] SSB updates V17 13 Thomas Gleixner
                   ` (3 subsequent siblings)
  15 siblings, 0 replies; 40+ messages in thread
From: Thomas Gleixner @ 2018-05-16 13:51 UTC (permalink / raw)
  To: speck

Subject: [patch 12/15] x86/bugs: Remove x86_spec_ctrl_set()
From: Thomas Gleixner <tglx@linutronix.de>

x86_spec_ctrl_set() is only used in bugs.c and the extra mask checks there
provide no real value as both call sites can just write x86_spec_ctrl_base
to MSR_SPEC_CTRL. x86_spec_ctrl_base is valid and does not need any extra
masking or checking.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
---
 arch/x86/include/asm/nospec-branch.h |    2 --
 arch/x86/kernel/cpu/bugs.c           |   13 ++-----------
 2 files changed, 2 insertions(+), 13 deletions(-)

--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -217,8 +217,6 @@ enum spectre_v2_mitigation {
 	SPECTRE_V2_IBRS,
 };
 
-extern void x86_spec_ctrl_set(u64);
-
 /* The Speculative Store Bypass disable variants */
 enum ssb_mitigation {
 	SPEC_STORE_BYPASS_NONE,
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -133,15 +133,6 @@ static const char *spectre_v2_strings[]
 static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
 	SPECTRE_V2_NONE;
 
-void x86_spec_ctrl_set(u64 val)
-{
-	if (val & x86_spec_ctrl_mask)
-		WARN_ONCE(1, "SPEC_CTRL MSR value 0x%16llx is unknown.\n", val);
-	else
-		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base | val);
-}
-EXPORT_SYMBOL_GPL(x86_spec_ctrl_set);
-
 void
 x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool guest)
 {
@@ -503,7 +494,7 @@ static enum ssb_mitigation __init __ssb_
 		case X86_VENDOR_INTEL:
 			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
 			x86_spec_ctrl_mask &= ~SPEC_CTRL_SSBD;
-			x86_spec_ctrl_set(SPEC_CTRL_SSBD);
+			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
 			break;
 		case X86_VENDOR_AMD:
 			x86_amd_ssb_disable();
@@ -615,7 +606,7 @@ int arch_prctl_spec_ctrl_get(struct task
 void x86_spec_ctrl_setup_ap(void)
 {
 	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
-		x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);
+		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
 
 	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
 		x86_amd_ssb_disable();

^ permalink raw reply	[flat|nested] 40+ messages in thread

* [patch 13/15] SSB updates V17 13
  2018-05-16 13:51 [patch 00/15] SSB updates V17 0 Thomas Gleixner
                   ` (11 preceding siblings ...)
  2018-05-16 13:51 ` [patch 12/15] SSB updates V17 12 Thomas Gleixner
@ 2018-05-16 13:51 ` Thomas Gleixner
  2018-05-17  2:08   ` [MODERATED] " Konrad Rzeszutek Wilk
  2018-05-16 13:51 ` [patch 14/15] SSB updates V17 14 Thomas Gleixner
                   ` (2 subsequent siblings)
  15 siblings, 1 reply; 40+ messages in thread
From: Thomas Gleixner @ 2018-05-16 13:51 UTC (permalink / raw)
  To: speck

Subject: [patch 13/15] x86/bugs: Rework spec_ctrl base and mask logic
From: Thomas Gleixner <tglx@linutronix.de>

x86_spec_ctrl_mask is intended to mask out bits from a MSR_SPEC_CTRL value
which are not to be modified. However the implementation is not really used
and the bitmask is inverted for no real reason. Aside of that it is missing
the STIBP bit if it is supported by the platform, so if the mask would be
used in x86_virt_spec_ctrl() then it would prevent a guest from setting
STIBP.

Add the STIBP bit if supported and use the mask in x86_spec_ctrl_set_guest()
to sanitize the value which is supplied by the guest.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
---
V2: Moved the stray x86_spec_ctrl_base inversion hunk from patch 11/15
    Renamed some variables so it's more clear that they are MSR values
    and not MSR indices.
---
 arch/x86/kernel/cpu/bugs.c |   28 ++++++++++++++++++++--------
 1 file changed, 20 insertions(+), 8 deletions(-)

--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -42,7 +42,7 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
  * The vendor and possibly platform specific bits which can be modified in
  * x86_spec_ctrl_base.
  */
-static u64 __ro_after_init x86_spec_ctrl_mask = ~SPEC_CTRL_IBRS;
+static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
 
 /*
  * AMD specific MSR info for Speculative Store Bypass control.
@@ -68,6 +68,10 @@ void __init check_bugs(void)
 	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
 		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
 
+	/* Allow STIBP in MSR_SPEC_CTRL if supported */
+	if (boot_cpu_has(X86_FEATURE_STIBP))
+		x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
+
 	/* Select the proper spectre mitigation before patching alternatives */
 	spectre_v2_select_mitigation();
 
@@ -134,20 +138,28 @@ static enum spectre_v2_mitigation spectr
 	SPECTRE_V2_NONE;
 
 void
-x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool guest)
+x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
 {
+	u64 msrval, guestval, hostval = x86_spec_ctrl_base;
 	struct thread_info *ti = current_thread_info();
-	u64 msr, host = x86_spec_ctrl_base;
 
 	/* Is MSR_SPEC_CTRL implemented ? */
 	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
+		/*
+		 * Restrict guest_spec_ctrl to supported values. Clear the
+		 * modifiable bits in the host base value and or the
+		 * modifiable bits from the guest value.
+		 */
+		guestval = hostval & ~x86_spec_ctrl_mask;
+		guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
+
 		/* SSBD controlled in MSR_SPEC_CTRL */
 		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
-			host |= ssbd_tif_to_spec_ctrl(ti->flags);
+			hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
 
-		if (host != guest_spec_ctrl) {
-			msr = guest ? guest_spec_ctrl : host;
-			wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+		if (hostval != guest_spec_ctrl) {
+			msrval = setguest ? guest_spec_ctrl : hostval;
+			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
 		}
 	}
 }
@@ -493,7 +505,7 @@ static enum ssb_mitigation __init __ssb_
 		switch (boot_cpu_data.x86_vendor) {
 		case X86_VENDOR_INTEL:
 			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
-			x86_spec_ctrl_mask &= ~SPEC_CTRL_SSBD;
+			x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
 			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
 			break;
 		case X86_VENDOR_AMD:

^ permalink raw reply	[flat|nested] 40+ messages in thread

* [patch 14/15] SSB updates V17 14
  2018-05-16 13:51 [patch 00/15] SSB updates V17 0 Thomas Gleixner
                   ` (12 preceding siblings ...)
  2018-05-16 13:51 ` [patch 13/15] SSB updates V17 13 Thomas Gleixner
@ 2018-05-16 13:51 ` Thomas Gleixner
  2018-05-16 16:34   ` [MODERATED] " Tom Lendacky
  2018-05-16 13:51 ` [patch 15/15] SSB updates V17 15 Thomas Gleixner
  2018-05-16 14:09 ` [patch 00/15] SSB updates V17 0 Thomas Gleixner
  15 siblings, 1 reply; 40+ messages in thread
From: Thomas Gleixner @ 2018-05-16 13:51 UTC (permalink / raw)
  To: speck

Subject: [patch 14/15] x86/speculation, KVM: Implement support for VIRT_SPEC_CTRL/LS_CFG
From: Thomas Gleixner <tglx@linutronix.de>

Add the necessary logic for supporting the emulated VIRT_SPEC_CTRL MSR to
x86_virt_spec_ctrl().  If either X86_FEATURE_LS_CFG_SSBD or
X86_FEATURE_VIRT_SSBD is set, then use the new guest_virt_spec_ctrl
argument to check whether the state must be modified on the host. The
update reuses speculative_store_bypass_update() so the ZEN-specific sibling
coordination can be reused.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
V2: Reworked to adjust to the changes in 10/15 (moved the flag evaluation
    into the conditional sections).
---
 arch/x86/include/asm/spec-ctrl.h |    6 ++++++
 arch/x86/kernel/cpu/bugs.c       |   30 ++++++++++++++++++++++++++++++
 2 files changed, 36 insertions(+)

--- a/arch/x86/include/asm/spec-ctrl.h
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -53,6 +53,12 @@ static inline u64 ssbd_tif_to_spec_ctrl(
 	return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
 }
 
+static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl)
+{
+	BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
+	return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
+}
+
 static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
 {
 	return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -154,6 +154,36 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl,
 			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
 		}
 	}
+
+	/*
+	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
+	 * MSR_AMD64_L2_CFG or MSR_VIRT_SPEC_CTRL if supported.
+	 */
+	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
+	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
+		return;
+
+	/*
+	 * If the host has SSBD mitigation enabled, force it in the host's
+	 * virtual MSR value. If its not permanently enabled, evaluate
+	 * current's TIF_SSBD thread flag.
+	 */
+	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
+		hostsval = SPEC_CTRL_SSBD;
+	else
+		hostval = ssbd_tif_to_spec_ctrl(ti->flags);
+
+	/* Sanitize the guest value */
+	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
+
+	if (hostval != guestval) {
+		unsigned long tif;
+
+		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
+				 ssbd_spec_ctrl_to_tif(hostval);
+
+		speculative_store_bypass_update(tif);
+	}
 }
 EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
 

^ permalink raw reply	[flat|nested] 40+ messages in thread

* [patch 15/15] SSB updates V17 15
  2018-05-16 13:51 [patch 00/15] SSB updates V17 0 Thomas Gleixner
                   ` (13 preceding siblings ...)
  2018-05-16 13:51 ` [patch 14/15] SSB updates V17 14 Thomas Gleixner
@ 2018-05-16 13:51 ` Thomas Gleixner
  2018-05-17  2:18   ` [MODERATED] " Konrad Rzeszutek Wilk
  2018-05-16 14:09 ` [patch 00/15] SSB updates V17 0 Thomas Gleixner
  15 siblings, 1 reply; 40+ messages in thread
From: Thomas Gleixner @ 2018-05-16 13:51 UTC (permalink / raw)
  To: speck

Subject: [patch 15/15] KVM: SVM: Implement VIRT_SPEC_CTRL support for SSBD
From: Tom Lendacky <thomas.lendacky@amd.com>

Expose the new virtualized architectural mechanism, VIRT_SSBD, for using
speculative store bypass disable (SSBD) under SVM.  This will allow guests
to use SSBD on hardware that uses non-architectural mechanisms for enabling
SSBD.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/x86/kernel/cpu/common.c |    3 ++-
 arch/x86/kvm/cpuid.c         |   11 +++++++++--
 arch/x86/kvm/svm.c           |   17 +++++++++++++++++
 3 files changed, 28 insertions(+), 3 deletions(-)

--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -767,7 +767,8 @@ static void init_speculation_control(str
 	if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
 		set_cpu_cap(c, X86_FEATURE_STIBP);
 
-	if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD))
+	if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
+	    cpu_has(c, X86_FEATURE_VIRT_SSBD))
 		set_cpu_cap(c, X86_FEATURE_SSBD);
 
 	if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -379,7 +379,7 @@ static inline int __do_cpuid_ent(struct
 
 	/* cpuid 0x80000008.ebx */
 	const u32 kvm_cpuid_8000_0008_ebx_x86_features =
-		F(AMD_IBPB) | F(AMD_IBRS);
+		F(AMD_IBPB) | F(AMD_IBRS) | F(VIRT_SSBD);
 
 	/* cpuid 0xC0000001.edx */
 	const u32 kvm_cpuid_C000_0001_edx_x86_features =
@@ -647,13 +647,20 @@ static inline int __do_cpuid_ent(struct
 			g_phys_as = phys_as;
 		entry->eax = g_phys_as | (virt_as << 8);
 		entry->edx = 0;
-		/* IBRS and IBPB aren't necessarily present in hardware cpuid */
+		/*
+		 * IBRS, IBPB and VIRT_SSBD aren't necessarily present in
+		 * hardware cpuid
+		 */
 		if (boot_cpu_has(X86_FEATURE_AMD_IBPB))
 			entry->ebx |= F(AMD_IBPB);
 		if (boot_cpu_has(X86_FEATURE_AMD_IBRS))
 			entry->ebx |= F(AMD_IBRS);
+		if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
+			entry->ebx |= F(VIRT_SSBD);
 		entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
 		cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
+		if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+			entry->ebx |= F(VIRT_SSBD);
 		break;
 	}
 	case 0x80000019:
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -4120,6 +4120,13 @@ static int svm_get_msr(struct kvm_vcpu *
 
 		msr_info->data = svm->spec_ctrl;
 		break;
+	case MSR_AMD64_VIRT_SPEC_CTRL:
+		if (!msr_info->host_initiated &&
+		    !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
+			return 1;
+
+		msr_info->data = svm->virt_spec_ctrl;
+		break;
 	case MSR_F15H_IC_CFG: {
 
 		int family, model;
@@ -4251,6 +4258,16 @@ static int svm_set_msr(struct kvm_vcpu *
 			break;
 		set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
 		break;
+	case MSR_AMD64_VIRT_SPEC_CTRL:
+		if (!msr->host_initiated &&
+		    !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
+			return 1;
+
+		if (data & ~SPEC_CTRL_SSBD)
+			return 1;
+
+		svm->virt_spec_ctrl = data;
+		break;
 	case MSR_STAR:
 		svm->vmcb->save.star = data;
 		break;

^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [patch 00/15] SSB updates V17 0
  2018-05-16 13:51 [patch 00/15] SSB updates V17 0 Thomas Gleixner
                   ` (14 preceding siblings ...)
  2018-05-16 13:51 ` [patch 15/15] SSB updates V17 15 Thomas Gleixner
@ 2018-05-16 14:09 ` Thomas Gleixner
  15 siblings, 0 replies; 40+ messages in thread
From: Thomas Gleixner @ 2018-05-16 14:09 UTC (permalink / raw)
  To: speck

[-- Attachment #1: Type: text/plain, Size: 247 bytes --]

On Wed, 16 May 2018, speck for Thomas Gleixner wrote:

> This is an update to the previous 'SSB updates V16' series which addresses
> various review comments.
> 
> Delta patch below. Git bundle comes in follow up mail.

Attached.

Thanks,
 
 	tglx

[-- Attachment #2: Type: application/octet-stream, Size: 77913 bytes --]

^ permalink raw reply	[flat|nested] 40+ messages in thread

* [MODERATED] Re: [patch 02/15] SSB updates V17 2
  2018-05-16 13:51 ` [patch 02/15] SSB updates V17 2 Thomas Gleixner
@ 2018-05-16 14:29   ` Konrad Rzeszutek Wilk
  0 siblings, 0 replies; 40+ messages in thread
From: Konrad Rzeszutek Wilk @ 2018-05-16 14:29 UTC (permalink / raw)
  To: speck

On Wed, May 16, 2018 at 03:51:34PM +0200, speck for Thomas Gleixner wrote:
> Subject: [patch 02/15] x86/speculation: Use synthetic bits for IBRS/IBPB/STIBP
> From: Borislav Petkov <bp@suse.de>
> 
> Intel and AMD have different CPUID bits hence for those use synthetic bits
> which get set on the respective vendor's in init_speculation_control(). So
> that debacles like what he commit message of

s/he/the/
> 
>   c65732e4f721 ("x86/cpu: Restore CPUID_8000_0008_EBX reload")
> 
> talks about don't happen anymore.

^ permalink raw reply	[flat|nested] 40+ messages in thread

* [MODERATED] Re: [patch 14/15] SSB updates V17 14
  2018-05-16 13:51 ` [patch 14/15] SSB updates V17 14 Thomas Gleixner
@ 2018-05-16 16:34   ` Tom Lendacky
  2018-05-16 21:26     ` Thomas Gleixner
  0 siblings, 1 reply; 40+ messages in thread
From: Tom Lendacky @ 2018-05-16 16:34 UTC (permalink / raw)
  To: speck

[-- Attachment #1: Type: text/plain, Size: 1303 bytes --]

On 05/16/2018 08:51 AM, speck for Thomas Gleixner wrote:

<snip>

> --- a/arch/x86/kernel/cpu/bugs.c
> +++ b/arch/x86/kernel/cpu/bugs.c
> @@ -154,6 +154,36 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl,
>  			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
>  		}
>  	}
> +
> +	/*
> +	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
> +	 * MSR_AMD64_L2_CFG or MSR_VIRT_SPEC_CTRL if supported.
> +	 */
> +	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
> +	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
> +		return;
> +
> +	/*
> +	 * If the host has SSBD mitigation enabled, force it in the host's
> +	 * virtual MSR value. If its not permanently enabled, evaluate
> +	 * current's TIF_SSBD thread flag.
> +	 */
> +	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
> +		hostsval = SPEC_CTRL_SSBD;

hostsval => hostval

Thanks,
Tom

> +	else
> +		hostval = ssbd_tif_to_spec_ctrl(ti->flags);
> +
> +	/* Sanitize the guest value */
> +	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
> +
> +	if (hostval != guestval) {
> +		unsigned long tif;
> +
> +		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
> +				 ssbd_spec_ctrl_to_tif(hostval);
> +
> +		speculative_store_bypass_update(tif);
> +	}
>  }
>  EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
>  
> 


^ permalink raw reply	[flat|nested] 40+ messages in thread

* [MODERATED] Re: [patch 08/15] SSB updates V17 8
  2018-05-16 13:51 ` [patch 08/15] SSB updates V17 8 Thomas Gleixner
@ 2018-05-16 21:13   ` Tom Lendacky
  2018-05-17  2:56     ` Konrad Rzeszutek Wilk
  0 siblings, 1 reply; 40+ messages in thread
From: Tom Lendacky @ 2018-05-16 21:13 UTC (permalink / raw)
  To: speck

[-- Attachment #1: Type: text/plain, Size: 3866 bytes --]

On 5/16/2018 8:51 AM, speck for Thomas Gleixner wrote:
> Subject: [patch 08/15] x86/speculation: Add virtualized speculative store bypass disable support
> From: Tom Lendacky <thomas.lendacky@amd.com>
> 
> Some AMD processors only support a non-architectural means of enabling
> speculative store bypass disable (SSBD).  To allow a simplified view of
> this to a guest, an architectural definition has been created through a new
> CPUID bit, 0x80000008_EBX[25], and a new MSR, 0xc001011f.  With this, a
> hypervisor can virtualize the existence of this definition and provide an
> architectural method for using SSBD to a guest.
> 
> Add the new CPUID feature, the new MSR and update the existing SSBD
> support to use this MSR when present.

Paolo (I'm assuming you're on this list),

Do you know if anyone is working on Qemu / Libvirt patches to support the
new AMD VIRT_SSBD feature (new CPU definitions, etc.)?

Thanks,
Tom

> 
> Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
> Reviewed-by: Borislav Petkov <bp@suse.de>
> ---
>  arch/x86/include/asm/cpufeatures.h |    1 +
>  arch/x86/include/asm/msr-index.h   |    2 ++
>  arch/x86/kernel/cpu/bugs.c         |    4 +++-
>  arch/x86/kernel/process.c          |   13 ++++++++++++-
>  4 files changed, 18 insertions(+), 2 deletions(-)
> 
> --- a/arch/x86/include/asm/cpufeatures.h
> +++ b/arch/x86/include/asm/cpufeatures.h
> @@ -282,6 +282,7 @@
>  #define X86_FEATURE_AMD_IBPB		(13*32+12) /* "" Indirect Branch Prediction Barrier */
>  #define X86_FEATURE_AMD_IBRS		(13*32+14) /* "" Indirect Branch Restricted Speculation */
>  #define X86_FEATURE_AMD_STIBP		(13*32+15) /* "" Single Thread Indirect Branch Predictors */
> +#define X86_FEATURE_VIRT_SSBD		(13*32+25) /* Virtualized Speculative Store Bypass Disable */
>  
>  /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
>  #define X86_FEATURE_DTHERM		(14*32+ 0) /* Digital Thermal Sensor */
> --- a/arch/x86/include/asm/msr-index.h
> +++ b/arch/x86/include/asm/msr-index.h
> @@ -347,6 +347,8 @@
>  #define MSR_AMD64_SEV_ENABLED_BIT	0
>  #define MSR_AMD64_SEV_ENABLED		BIT_ULL(MSR_AMD64_SEV_ENABLED_BIT)
>  
> +#define MSR_AMD64_VIRT_SPEC_CTRL	0xc001011f
> +
>  /* Fam 17h MSRs */
>  #define MSR_F17H_IRPERF			0xc00000e9
>  
> --- a/arch/x86/kernel/cpu/bugs.c
> +++ b/arch/x86/kernel/cpu/bugs.c
> @@ -205,7 +205,9 @@ static void x86_amd_ssb_disable(void)
>  {
>  	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
>  
> -	if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
> +	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
> +		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
> +	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
>  		wrmsrl(MSR_AMD64_LS_CFG, msrval);
>  }
>  
> --- a/arch/x86/kernel/process.c
> +++ b/arch/x86/kernel/process.c
> @@ -388,6 +388,15 @@ static __always_inline void amd_set_core
>  }
>  #endif
>  
> +static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
> +{
> +	/*
> +	 * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
> +	 * so ssbd_tif_to_spec_ctrl() just works.
> +	 */
> +	wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
> +}
> +
>  static __always_inline void intel_set_ssb_state(unsigned long tifn)
>  {
>  	u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
> @@ -397,7 +406,9 @@ static __always_inline void intel_set_ss
>  
>  static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
>  {
> -	if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
> +	if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
> +		amd_set_ssb_virt_state(tifn);
> +	else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
>  		amd_set_core_ssb_state(tifn);
>  	else
>  		intel_set_ssb_state(tifn);
> 


^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [patch 14/15] SSB updates V17 14
  2018-05-16 16:34   ` [MODERATED] " Tom Lendacky
@ 2018-05-16 21:26     ` Thomas Gleixner
  0 siblings, 0 replies; 40+ messages in thread
From: Thomas Gleixner @ 2018-05-16 21:26 UTC (permalink / raw)
  To: speck

On Wed, 16 May 2018, speck for Tom Lendacky wrote:
> On 05/16/2018 08:51 AM, speck for Thomas Gleixner wrote:
> 
> <snip>
> 
> > --- a/arch/x86/kernel/cpu/bugs.c
> > +++ b/arch/x86/kernel/cpu/bugs.c
> > @@ -154,6 +154,36 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl,
> >  			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
> >  		}
> >  	}
> > +
> > +	/*
> > +	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
> > +	 * MSR_AMD64_L2_CFG or MSR_VIRT_SPEC_CTRL if supported.
> > +	 */
> > +	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
> > +	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
> > +		return;
> > +
> > +	/*
> > +	 * If the host has SSBD mitigation enabled, force it in the host's
> > +	 * virtual MSR value. If its not permanently enabled, evaluate
> > +	 * current's TIF_SSBD thread flag.
> > +	 */
> > +	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
> > +		hostsval = SPEC_CTRL_SSBD;
> 
> hostsval => hostval

Bah, fixing it twice on test machines and then forgetting about it. That's
what you get for juggling a dozen balls at once.

Thanks,

	tglx

^ permalink raw reply	[flat|nested] 40+ messages in thread

* [MODERATED] Re: [patch 03/15] SSB updates V17 3
  2018-05-16 13:51 ` [patch 03/15] SSB updates V17 3 Thomas Gleixner
@ 2018-05-17  1:06   ` Konrad Rzeszutek Wilk
  0 siblings, 0 replies; 40+ messages in thread
From: Konrad Rzeszutek Wilk @ 2018-05-17  1:06 UTC (permalink / raw)
  To: speck

On Wed, May 16, 2018 at 03:51:35PM +0200, speck for Thomas Gleixner wrote:
> Subject: [patch 03/15] x86/cpufeatures: Disentangle MSR_SPEC_CTRL enumeration from IBRS
> From: Thomas Gleixner <tglx@linutronix.de>
> 
> The availability of the SPEC_CTRL MSR is enumerated by a CPUID bit on
> Intel and implied by IBRS or STIBP support on AMD. That's just confusing
> and in case an AMD CPU has IBRS not supported because the underlying
> problem has been fixed but has another bit valid in the SPEC_CTRL MSR,
> the thing falls apart.
> 
> Add a synthetic feature bit X86_FEATURE_MSR_SPEC_CTRL to denote the
> availability on both Intel and AMD.
> 
> While at it replace the boot_cpu_has() checks with static_cpu_has() where
> possible. This prevents late microcode loading from exposing SPEC_CTRL, but
> late loading is already very limited as it does not reevaluate the
> mitigation options and other bits and pieces. Having static_cpu_has() is
> the simplest and least fragile solution.
> 
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
> Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

^ permalink raw reply	[flat|nested] 40+ messages in thread

* [MODERATED] Re: [patch 04/15] SSB updates V17 4
  2018-05-16 13:51 ` [patch 04/15] SSB updates V17 4 Thomas Gleixner
@ 2018-05-17  1:14   ` Konrad Rzeszutek Wilk
  0 siblings, 0 replies; 40+ messages in thread
From: Konrad Rzeszutek Wilk @ 2018-05-17  1:14 UTC (permalink / raw)
  To: speck

On Wed, May 16, 2018 at 03:51:36PM +0200, speck for Thomas Gleixner wrote:
> Subject: [patch 04/15] x86/cpufeatures: Disentangle SSBD enumeration
> From: Thomas Gleixner <tglx@linutronix.de>
> 
> The SSBD enumeration is, similarly to the other bits, magically shared
> between Intel and AMD, though the mechanisms are different.
> 
> Make X86_FEATURE_SSBD synthetic and set it depending on the vendor specific
> features or family dependent setup.
> 
> Change the Intel bit to X86_FEATURE_SPEC_CTRL_SSBD to denote that SSBD is
> controlled via MSR_SPEC_CTRL and fix up the usage sites.
> 
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
> Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

Thank you!

^ permalink raw reply	[flat|nested] 40+ messages in thread

* [MODERATED] Re: [patch 05/15] SSB updates V17 5
  2018-05-16 13:51 ` [patch 05/15] SSB updates V17 5 Thomas Gleixner
@ 2018-05-17  1:14   ` Konrad Rzeszutek Wilk
  0 siblings, 0 replies; 40+ messages in thread
From: Konrad Rzeszutek Wilk @ 2018-05-17  1:14 UTC (permalink / raw)
  To: speck

On Wed, May 16, 2018 at 03:51:37PM +0200, speck for Thomas Gleixner wrote:
> Subject: [patch 05/15] x86/cpufeatures: Add FEATURE_ZEN
> From: Thomas Gleixner <tglx@linutronix.de>
> 
> Add a ZEN feature bit so family-dependent static_cpu_has() optimizations
> can be built for ZEN.
> 
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
> Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

^ permalink raw reply	[flat|nested] 40+ messages in thread

* [MODERATED] Re: [patch 06/15] SSB updates V17 6
  2018-05-16 13:51 ` [patch 06/15] SSB updates V17 6 Thomas Gleixner
@ 2018-05-17  1:28   ` Konrad Rzeszutek Wilk
  0 siblings, 0 replies; 40+ messages in thread
From: Konrad Rzeszutek Wilk @ 2018-05-17  1:28 UTC (permalink / raw)
  To: speck

On Wed, May 16, 2018 at 03:51:38PM +0200, speck for Thomas Gleixner wrote:
> Subject: [patch 06/15] x86/speculation: Handle HT correctly on AMD
> From: Thomas Gleixner <tglx@linutronix.de>
> 
> The AMD64_LS_CFG MSR is a per core MSR on Family 17H CPUs. That means when
> hyperthreading is enabled the SSBD bit toggle needs to take both cores into
> account. Otherwise the following situation can happen:
> 
> CPU0		CPU1
> 
> disable SSB
> 		disable SSB
> 		enable  SSB <- Enables it for the Core, i.e. for CPU0 as well
> 
> So after the SSB enable on CPU1 the task on CPU0 runs with SSB enabled
> again.
> 
> On Intel the SSBD control is per core as well, but the synchronization
> logic is implemented behind the per thread SPEC_CTRL MSR. It works like
> this:
> 
>   CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
> 
> i.e. if one of the threads enables a mitigation then this affects both and
> the mitigation is only disabled in the core when both threads disabled it.
> 
> Add the necessary synchronization logic for AMD family 17H. Unfortunately
> that requires a spinlock to serialize the access to the MSR, but the locks
> are only shared between siblings.
> 
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
> Reviewed-by: Borislav Petkov <bp@suse.de>
> ---
> V2: Amended changelog and clarified code comments.
> ---
>  arch/x86/include/asm/spec-ctrl.h |    6 +
>  arch/x86/kernel/process.c        |  125 +++++++++++++++++++++++++++++++++++++--
>  arch/x86/kernel/smpboot.c        |    5 +
>  3 files changed, 130 insertions(+), 6 deletions(-)
> 
> --- a/arch/x86/include/asm/spec-ctrl.h
> +++ b/arch/x86/include/asm/spec-ctrl.h
> @@ -33,6 +33,12 @@ static inline u64 ssbd_tif_to_amd_ls_cfg
>  	return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
>  }
>  
> +#ifdef CONFIG_SMP
> +extern void speculative_store_bypass_ht_init(void);
> +#else
> +static inline void speculative_store_bypass_ht_init(void) { }
> +#endif
> +
>  extern void speculative_store_bypass_update(void);
>  
>  #endif
> --- a/arch/x86/kernel/process.c
> +++ b/arch/x86/kernel/process.c
> @@ -279,22 +279,135 @@ static inline void switch_to_bitmap(stru
>  	}
>  }
>  
> -static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
> +#ifdef CONFIG_SMP
> +
> +struct ssb_state {
> +	struct ssb_state	*shared_state;
> +	raw_spinlock_t		lock;
> +	unsigned int		disable_state;
> +	unsigned long		local_state;
> +};
> +
> +#define LSTATE_SSB	0
                     ^^^
I don't know why, but this tab is throwing me off.

Feel free to ignore it, but in case you want to change it would
it be better if it was a space?

Either way:

Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

Thank you!
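
The sibling coordination the quoted changelog describes boils down to a
per-core refcount protected by the shared lock: the MSR is only written
when the first sibling enables SSBD or the last one drops it. A minimal
sketch of that scheme, reusing the fields of the quoted ssb_state and a
hypothetical helper name core_set_ssbd (this is not the code from the
posted patch):

/*
 * Sketch only: st->shared_state points at the ssb_state of the first
 * sibling of the core, so both threads take the same lock and see the
 * same disable_state refcount.
 */
static void core_set_ssbd(struct ssb_state *st, bool enable)
{
        u64 msrval = x86_amd_ls_cfg_base;

        raw_spin_lock(&st->shared_state->lock);
        if (enable) {
                /* First sibling enabling SSBD writes the MSR for the core */
                if (!st->shared_state->disable_state++)
                        wrmsrl(MSR_AMD64_LS_CFG,
                               msrval | x86_amd_ls_cfg_ssbd_mask);
        } else {
                /* Last sibling dropping SSBD clears the bit again */
                if (!--st->shared_state->disable_state)
                        wrmsrl(MSR_AMD64_LS_CFG, msrval);
        }
        raw_spin_unlock(&st->shared_state->lock);
}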

^ permalink raw reply	[flat|nested] 40+ messages in thread

* [MODERATED] Re: [patch 07/15] SSB updates V17 7
  2018-05-16 13:51 ` [patch 07/15] SSB updates V17 7 Thomas Gleixner
@ 2018-05-17  1:29   ` Konrad Rzeszutek Wilk
  0 siblings, 0 replies; 40+ messages in thread
From: Konrad Rzeszutek Wilk @ 2018-05-17  1:29 UTC (permalink / raw)
  To: speck

On Wed, May 16, 2018 at 03:51:39PM +0200, speck for Thomas Gleixner wrote:
> Subject: [patch 07/15] x86/bugs, KVM: Extend speculation control for VIRT_SPEC_CTRL
> From: Thomas Gleixner <tglx@linutronix.de>
> 
> AMD is proposing a VIRT_SPEC_CTRL MSR to handle the Speculative Store
> Bypass Disable via MSR_AMD64_LS_CFG so that guests do not have to care
> about the bit position of the SSBD bit and thus facilitate migration.
> Also, the sibling coordination on Family 17H CPUs can only be done on
> the host.
> 
> Extend x86_spec_ctrl_set_guest() and x86_spec_ctrl_restore_host() with an
> extra argument for the VIRT_SPEC_CTRL MSR.
> 
> Hand in 0 from VMX and in SVM add a new virt_spec_ctrl member to the CPU
> data structure which is going to be used in later patches for the actual
> implementation.
> 
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

Thank you!
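
To make the quoted change concrete: both helpers now carry the
VIRT_SPEC_CTRL content as a second argument, SVM passes its new
per-vCPU virt_spec_ctrl field and VMX simply passes 0. A rough sketch
with purely illustrative callers (the *_run_sketch helpers are
hypothetical, not the actual KVM hunks):

/* Extended prototypes as described in the changelog */
extern void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl,
                                    u64 guest_virt_spec_ctrl);
extern void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl,
                                       u64 guest_virt_spec_ctrl);

/* SVM hands in the emulated VIRT_SPEC_CTRL value around guest entry */
static void svm_run_sketch(struct vcpu_svm *svm)
{
        x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
        /* ... guest runs ... */
        x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
}

/* VMX has no VIRT_SPEC_CTRL state, so it hands in 0 */
static void vmx_run_sketch(struct vcpu_vmx *vmx)
{
        x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
        /* ... guest runs ... */
        x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
}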

^ permalink raw reply	[flat|nested] 40+ messages in thread

* [MODERATED] Re: [patch 09/15] SSB updates V17 9
  2018-05-16 13:51 ` [patch 09/15] SSB updates V17 9 Thomas Gleixner
@ 2018-05-17  1:40   ` Konrad Rzeszutek Wilk
  0 siblings, 0 replies; 40+ messages in thread
From: Konrad Rzeszutek Wilk @ 2018-05-17  1:40 UTC (permalink / raw)
  To: speck

On Wed, May 16, 2018 at 03:51:41PM +0200, speck for Thomas Gleixner wrote:
> Subject: [patch 09/15] x86/speculation: Rework speculative_store_bypass_update()
> From: Thomas Gleixner <tglx@linutronix.de>
> 
> The upcoming support for the virtual SPEC_CTRL MSR on AMD needs to reuse
> speculative_store_bypass_update() to avoid code duplication. Add an
> argument for supplying a thread info (TIF) value and create a wrapper
> speculative_store_bypass_update_current() which is used at the existing
> call site.
> 
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
> Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
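
The rework described above amounts to passing the TIF value explicitly
and keeping the existing call site behind a thin wrapper. A minimal
sketch (not the exact hunks from the patch):

/*
 * Takes an explicit thread-info flags value so the upcoming
 * VIRT_SPEC_CTRL code can pass a guest-derived value instead of
 * current's flags.
 */
void speculative_store_bypass_update(unsigned long tif);

/* Wrapper used at the existing call site, as named in the changelog */
static inline void speculative_store_bypass_update_current(void)
{
        speculative_store_bypass_update(current_thread_info()->flags);
}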

^ permalink raw reply	[flat|nested] 40+ messages in thread

* [MODERATED] Re: [patch 10/15] SSB updates V17 10
  2018-05-16 13:51 ` [patch 10/15] SSB updates V17 10 Thomas Gleixner
@ 2018-05-17  1:43   ` Konrad Rzeszutek Wilk
  0 siblings, 0 replies; 40+ messages in thread
From: Konrad Rzeszutek Wilk @ 2018-05-17  1:43 UTC (permalink / raw)
  To: speck

On Wed, May 16, 2018 at 03:51:42PM +0200, speck for Thomas Gleixner wrote:
> From: Borislav Petkov <bp@suse.de>
> Subject: [patch 10/15] x86/bugs: Unify  x86_spec_ctrl_{set_guest,restore_host}
> 
> Function bodies are very similar and are going to grow more almost
> identical code. Add a bool arg to determine whether SPEC_CTRL is being set
> for the guest or restored to the host.
> 
> No functional changes.
> 
> Signed-off-by: Borislav Petkov <bp@suse.de>
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
> ---
> V2: Hide ssbd_tif_to_spec_ctrl() evaluation
> ---
>  arch/x86/include/asm/spec-ctrl.h |   33 ++++++++++++++++++---
>  arch/x86/kernel/cpu/bugs.c       |   60 +++++++++------------------------------
>  2 files changed, 44 insertions(+), 49 deletions(-)
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

Thank you!

^ permalink raw reply	[flat|nested] 40+ messages in thread

* [MODERATED] Re: [patch 11/15] SSB updates V17 11
  2018-05-16 13:51 ` [patch 11/15] SSB updates V17 11 Thomas Gleixner
@ 2018-05-17  1:45   ` Konrad Rzeszutek Wilk
  0 siblings, 0 replies; 40+ messages in thread
From: Konrad Rzeszutek Wilk @ 2018-05-17  1:45 UTC (permalink / raw)
  To: speck

On Wed, May 16, 2018 at 03:51:43PM +0200, speck for Thomas Gleixner wrote:
> Subject: [patch 11/15] x86/bugs: Expose x86_spec_ctrl_base directly
> From: Thomas Gleixner <tglx@linutronix.de>
> 
> x86_spec_ctrl_base is the system wide default value for MSR_SPEC_CTRL.

s/MSR_SPEC_CTRL/SPEC_CTRL MSR/?

> x86_spec_ctrl_get_default() returns x86_spec_ctrl_base and was intended to
> prevent modification to that variable. Though the variable is read only
> after init and globally visible already.
> 
> Remove the function and export the variable instead.
> 
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
> Reviewed-by: Borislav Petkov <bp@suse.de>
> ---
> V2: Removed the hunk which inverted x86_spec_ctrl_mask
> ---
>  arch/x86/include/asm/nospec-branch.h |   16 +++++-----------
>  arch/x86/include/asm/spec-ctrl.h     |    3 ---
>  arch/x86/kernel/cpu/bugs.c           |   11 +----------
>  3 files changed, 6 insertions(+), 24 deletions(-)


Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

Thank you!

^ permalink raw reply	[flat|nested] 40+ messages in thread

* [MODERATED] Re: [patch 13/15] SSB updates V17 13
  2018-05-16 13:51 ` [patch 13/15] SSB updates V17 13 Thomas Gleixner
@ 2018-05-17  2:08   ` Konrad Rzeszutek Wilk
  2018-05-17  8:45     ` Thomas Gleixner
  0 siblings, 1 reply; 40+ messages in thread
From: Konrad Rzeszutek Wilk @ 2018-05-17  2:08 UTC (permalink / raw)
  To: speck

On Wed, May 16, 2018 at 03:51:45PM +0200, speck for Thomas Gleixner wrote:
> Subject: [patch 13/15] x86/bugs: Rework spec_ctrl base and mask logic
> From: Thomas Gleixner <tglx@linutronix.de>
> 
> > x86_spec_ctrl_mask is intended to mask out bits from a MSR_SPEC_CTRL value
> which are not to be modified. However the implementation is not really used
> and the bitmask is inverted for no real reason. Aside of that it is missing

It is inverted to make the check easier. That is, the code (removed in the prior
patch) had:

	if (val & x86_spec_ctrl_mask)
		complain..

Perhaps: s/for no real reason/to make the check easier - but removed in
"x86/bugs: Remove x86_spec_ctrl_set()"/ ?

> the STIBP bit if it is supported by the platform, so if the mask would be
> used in x86_virt_spec_ctrl() then it would prevent a guest from setting
> STIBP.
> 
> Add the STIBP bit if supported and use the mask in x86_spec_ctrl_set_guest()

s/x86_spec_ctrl_set_guest/x86_virt_spec_ctrl/

> to sanitize the value which is supplied by the guest.
> 
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
> Reviewed-by: Borislav Petkov <bp@suse.de>
> ---
> V2: Moved the stray x86_spec_ctrl_base inversion hunk from patch 11/15
>     Renamed some variables so it's more clear that they are MSR values
>     and not MSR indices.
> ---
>  arch/x86/kernel/cpu/bugs.c |   28 ++++++++++++++++++++--------
>  1 file changed, 20 insertions(+), 8 deletions(-)
> 
> --- a/arch/x86/kernel/cpu/bugs.c
> +++ b/arch/x86/kernel/cpu/bugs.c
> @@ -42,7 +42,7 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
>   * The vendor and possibly platform specific bits which can be modified in
>   * x86_spec_ctrl_base.
>   */
> -static u64 __ro_after_init x86_spec_ctrl_mask = ~SPEC_CTRL_IBRS;
> +static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
>  
>  /*
>   * AMD specific MSR info for Speculative Store Bypass control.
> @@ -68,6 +68,10 @@ void __init check_bugs(void)
>  	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
>  		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
>  
> +	/* Allow STIBP in MSR_SPEC_CTRL if supported */
> +	if (boot_cpu_has(X86_FEATURE_STIBP))
> +		x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
> +
>  	/* Select the proper spectre mitigation before patching alternatives */
>  	spectre_v2_select_mitigation();
>  
> @@ -134,20 +138,28 @@ static enum spectre_v2_mitigation spectr
>  	SPECTRE_V2_NONE;
>  
>  void
> -x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool guest)
> +x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)

Would it make sense to roll in the s/guest/setguest/ in the
"x86/bugs: Unify x86_spec_ctrl_{set_guest,restore_host}" patch?

>  {
> +	u64 msrval, guestval, hostval = x86_spec_ctrl_base;
>  	struct thread_info *ti = current_thread_info();
> -	u64 msr, host = x86_spec_ctrl_base;
>  
>  	/* Is MSR_SPEC_CTRL implemented ? */
>  	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
> +		/*
> +		 * Restrict guest_spec_ctrl to supported values. Clear the
> +		 * modifiable bits in the host base value and or the
> +		 * modifiable bits from the guest value.
> +		 */
> +		guestval = hostval & ~x86_spec_ctrl_mask;
> +		guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;

How come the guestval is not used at all in this patch?

> +
>  		/* SSBD controlled in MSR_SPEC_CTRL */
>  		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
> -			host |= ssbd_tif_to_spec_ctrl(ti->flags);
> +			hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
>  
> -		if (host != guest_spec_ctrl) {
> -			msr = guest ? guest_spec_ctrl : host;
> -			wrmsrl(MSR_IA32_SPEC_CTRL, msr);
> +		if (hostval != guest_spec_ctrl) {
> +			msrval = setguest ? guest_spec_ctrl : hostval;
> +			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
>  		}
>  	}
>  }
> @@ -493,7 +505,7 @@ static enum ssb_mitigation __init __ssb_
>  		switch (boot_cpu_data.x86_vendor) {
>  		case X86_VENDOR_INTEL:
>  			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
> -			x86_spec_ctrl_mask &= ~SPEC_CTRL_SSBD;
> +			x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
>  			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
>  			break;
>  		case X86_VENDOR_AMD:
> 

^ permalink raw reply	[flat|nested] 40+ messages in thread

* [MODERATED] Re: [patch 15/15] SSB updates V17 15
  2018-05-16 13:51 ` [patch 15/15] SSB updates V17 15 Thomas Gleixner
@ 2018-05-17  2:18   ` Konrad Rzeszutek Wilk
  2018-05-17 12:42     ` Paolo Bonzini
  0 siblings, 1 reply; 40+ messages in thread
From: Konrad Rzeszutek Wilk @ 2018-05-17  2:18 UTC (permalink / raw)
  To: speck

> @@ -4251,6 +4258,16 @@ static int svm_set_msr(struct kvm_vcpu *
>  			break;
>  		set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
>  		break;
> +	case MSR_AMD64_VIRT_SPEC_CTRL:
> +		if (!msr->host_initiated &&
> +		    !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
> +			return 1;
> +
> +		if (data & ~SPEC_CTRL_SSBD)
> +			return 1;
> +
> +		svm->virt_spec_ctrl = data;

You need to save virt_spec_ctrl content for migration purposes.

It was introduced in "x86/bugs, KVM: Extend speculation control for VIRT_SPEC_CTRL"
but not actually used so this patch is probably the best place to define it.

That is, add this MSR to 'msrs_to_save' or perhaps 'emulated_msrs'?

And naturally test if migration does work :-)
> +		break;
>  	case MSR_STAR:
>  		svm->vmcb->save.star = data;
>  		break;
> 

^ permalink raw reply	[flat|nested] 40+ messages in thread

* [MODERATED] Re: [patch 08/15] SSB updates V17 8
  2018-05-16 21:13   ` [MODERATED] " Tom Lendacky
@ 2018-05-17  2:56     ` Konrad Rzeszutek Wilk
  2018-05-17 16:13       ` Tom Lendacky
  0 siblings, 1 reply; 40+ messages in thread
From: Konrad Rzeszutek Wilk @ 2018-05-17  2:56 UTC (permalink / raw)
  To: speck

On Wed, May 16, 2018 at 04:13:57PM -0500, speck for Tom Lendacky wrote:
> On 5/16/2018 8:51 AM, speck for Thomas Gleixner wrote:
> > Subject: [patch 08/15] x86/speculation: Add virtualized speculative store bypass disable support
> > From: Tom Lendacky <thomas.lendacky@amd.com>
> > 
> > Some AMD processors only support a non-architectural means of enabling
> > speculative store bypass disable (SSBD).  To allow a simplified view of
> > this to a guest, an architectural definition has been created through a new
> > CPUID bit, 0x80000008_EBX[25], and a new MSR, 0xc001011f.  With this, a
> > hypervisor can virtualize the existence of this definition and provide an
> > architectural method for using SSBD to a guest.
> > 
> > Add the new CPUID feature, the new MSR and update the existing SSBD
> > support to use this MSR when present.
> 
> Paolo (I'm assuming you're on this list),
> 
> Do you know if anyone is working on Qemu / Libvirt patches to support the
> new AMD VIRT_SSBD feature (new CPU definitions, etc.)?

But surely you have a patch that you wrote for testing this?

Did you have in mind these two patches (inline)? Not tested, still compiling.

If you can test them, I can sync up with Daniel P. Berrangé who is going
to post the Intel SSBD ones for libvirt+qemu on Monday.


From ec2d1fd2814d33467eb110cb86cd2b5ecf224089 Mon Sep 17 00:00:00 2001
From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Date: Wed, 16 May 2018 21:37:31 -0400
Subject: [PATCH v1 1/2] i386: define the AMD 'virt-ssbd' CPUID feature bit
 (CVE-2018-3639)

AMD Zen exposes the Intel equivalent of Speculative Store Bypass Disable
via the 0x80000008_EBX[25] CPUID feature bit.

This needs to be exposed to guest OSes to allow them to protect
against CVE-2018-3639.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
---
 target/i386/cpu.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 06d8eee611..9d76cb4b1b 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -542,7 +542,7 @@ static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
             "ibpb", NULL, NULL, NULL,
             NULL, NULL, NULL, NULL,
             NULL, NULL, NULL, NULL,
-            NULL, NULL, NULL, NULL,
+            "virt-ssbd", NULL, NULL, NULL,
             NULL, NULL, NULL, NULL,
         },
         .cpuid_eax = 0x80000008,
-- 
2.13.4


and



From 9a64a463b2479fdac914a33d8bda75393c00c145 Mon Sep 17 00:00:00 2001
From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Date: Wed, 16 May 2018 22:27:11 -0400
Subject: [PATCH 2/2] i386: Define the Virt SSBD MSR and handling of it.

"Some AMD processors only support a non-architectural means of enabling
speculative store bypass disable (SSBD).  To allow a simplified view of
this to a guest, an architectural definition has been created through a new
CPUID bit, 0x80000008_EBX[25], and a new MSR, 0xc001011f.  With this, a
hypervisor can virtualize the existence of this definition and provide an
architectural method for using SSBD to a guest.

Add the new CPUID feature, the new MSR and update the existing SSBD
support to use this MSR when present." (from x86/speculation: Add virtualized
speculative store bypass disable support in Linux).

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
---
 target/i386/cpu.h     |  2 ++
 target/i386/kvm.c     | 16 ++++++++++++++--
 target/i386/machine.c | 20 ++++++++++++++++++++
 3 files changed, 36 insertions(+), 2 deletions(-)

diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index b9fe2efafe..c73fdd18c7 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -351,6 +351,7 @@ typedef enum X86Seg {
 #define MSR_IA32_FEATURE_CONTROL        0x0000003a
 #define MSR_TSC_ADJUST                  0x0000003b
 #define MSR_IA32_SPEC_CTRL              0x48
+#define MSR_VIRT_SSBD                   0xc001011f
 #define MSR_IA32_TSCDEADLINE            0x6e0
 
 #define FEATURE_CONTROL_LOCKED                    (1<<0)
@@ -1150,6 +1151,7 @@ typedef struct CPUX86State {
     uint32_t pkru;
 
     uint64_t spec_ctrl;
+    uint64_t virt_ssbd;
 
     /* End of state preserved by INIT (dummy marker).  */
     struct {} end_init_save;
diff --git a/target/i386/kvm.c b/target/i386/kvm.c
index d6666a4b19..0c656a91a4 100644
--- a/target/i386/kvm.c
+++ b/target/i386/kvm.c
@@ -93,6 +93,7 @@ static bool has_msr_hv_frequencies;
 static bool has_msr_hv_reenlightenment;
 static bool has_msr_xss;
 static bool has_msr_spec_ctrl;
+static bool has_msr_virt_ssbd;
 static bool has_msr_smi_count;
 
 static uint32_t has_architectural_pmu_version;
@@ -1233,6 +1234,9 @@ static int kvm_get_supported_msrs(KVMState *s)
                 case MSR_IA32_SPEC_CTRL:
                     has_msr_spec_ctrl = true;
                     break;
+                case MSR_VIRT_SSBD:
+                    has_msr_virt_ssbd = true;
+                    break;
                 }
             }
         }
@@ -1721,6 +1725,10 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
     if (has_msr_spec_ctrl) {
         kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, env->spec_ctrl);
     }
+    if (has_msr_virt_ssbd) {
+        kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, env->virt_ssbd);
+    }
+
 #ifdef TARGET_X86_64
     if (lm_capable_kernel) {
         kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar);
@@ -2100,8 +2108,9 @@ static int kvm_get_msrs(X86CPU *cpu)
     if (has_msr_spec_ctrl) {
         kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, 0);
     }
-
-
+    if (has_msr_virt_ssbd) {
+        kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, 0);
+    }
     if (!env->tsc_valid) {
         kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0);
         env->tsc_valid = !runstate_is_running();
@@ -2481,6 +2490,9 @@ static int kvm_get_msrs(X86CPU *cpu)
         case MSR_IA32_SPEC_CTRL:
             env->spec_ctrl = msrs[i].data;
             break;
+        case MSR_VIRT_SSBD:
+            env->virt_ssbd = msrs[i].data;
+            break;
         case MSR_IA32_RTIT_CTL:
             env->msr_rtit_ctrl = msrs[i].data;
             break;
diff --git a/target/i386/machine.c b/target/i386/machine.c
index fd99c0bbb4..4d98d367c1 100644
--- a/target/i386/machine.c
+++ b/target/i386/machine.c
@@ -916,6 +916,25 @@ static const VMStateDescription vmstate_msr_intel_pt = {
     }
 };
 
+static bool virt_ssbd_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+
+    return env->virt_ssbd != 0;
+}
+
+static const VMStateDescription vmstate_msr_virt_ssbd = {
+    .name = "cpu/virt_ssbd",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = virt_ssbd_needed,
+    .fields = (VMStateField[]){
+        VMSTATE_UINT64(env.virt_ssbd, X86CPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
 VMStateDescription vmstate_x86_cpu = {
     .name = "cpu",
     .version_id = 12,
@@ -1039,6 +1058,7 @@ VMStateDescription vmstate_x86_cpu = {
         &vmstate_spec_ctrl,
         &vmstate_mcg_ext_ctl,
         &vmstate_msr_intel_pt,
+        &vmstate_msr_virt_ssbd,
         NULL
     }
 };
-- 
2.13.4

^ permalink raw reply related	[flat|nested] 40+ messages in thread

* Re: [patch 13/15] SSB updates V17 13
  2018-05-17  2:08   ` [MODERATED] " Konrad Rzeszutek Wilk
@ 2018-05-17  8:45     ` Thomas Gleixner
  0 siblings, 0 replies; 40+ messages in thread
From: Thomas Gleixner @ 2018-05-17  8:45 UTC (permalink / raw)
  To: speck

On Wed, 16 May 2018, speck for Konrad Rzeszutek Wilk wrote:
> On Wed, May 16, 2018 at 03:51:45PM +0200, speck for Thomas Gleixner wrote:
> > Subject: [patch 13/15] x86/bugs: Rework spec_ctrl base and mask logic
> > From: Thomas Gleixner <tglx@linutronix.de>
> > 
> > x86_spec_ctrl_mask is intended to mask out bits from a MSR_SPEC_CTRL value
> > which are not to be modified. However the implementation is not really used
> > and the bitmask is inverted for no real reason. Aside of that it is missing
> 
> It is inverted to make the check easier. That is the code (but removed in the prior
> patch) had:
> 
> 	if (val & x86_spec_ctrl_mask)
> 		complain..
> 
> Perhaps: s/for no real reason/to make the check easier - but removed in
> "x86/bugs: Remove x86_spec_ctrl_set()"/ ?

Fixed.

> > the STIBP bit if it is supported by the platform, so if the mask would be
> > used in x86_virt_spec_ctrl() then it would prevent a guest from setting
> > STIBP.
> > 
> > Add the STIBP bit if supported and use the mask in x86_spec_ctrl_set_guest()
> 
> s/x86_spec_ctrl_set_guest/x86_virt_spec_ctrl/

Fixed.

> >  void
> > -x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool guest)
> > +x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
> 
> Would it make sense to roll in the s/guest/setguest/ in the
> "x86/bugs: Unify x86_spec_ctrl_{set_guest,restore_host}" patch?

Done.

> >  {
> > +	u64 msrval, guestval, hostval = x86_spec_ctrl_base;
> >  	struct thread_info *ti = current_thread_info();
> > -	u64 msr, host = x86_spec_ctrl_base;
> >  
> >  	/* Is MSR_SPEC_CTRL implemented ? */
> >  	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
> > +		/*
> > +		 * Restrict guest_spec_ctrl to supported values. Clear the
> > +		 * modifiable bits in the host base value and or the
> > +		 * modifiable bits from the guest value.
> > +		 */
> > +		guestval = hostval & ~x86_spec_ctrl_mask;
> > +		guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
> 
> How come the guestval is not used at all in this patch?

Because I'm a moron. Delta fix below.

Thanks,

	tglx
8<-------------------------

--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -157,8 +157,8 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl,
 		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
 			hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
 
-		if (hostval != guest_spec_ctrl) {
-			msrval = setguest ? guest_spec_ctrl : hostval;
+		if (hostval != guestval) {
+			msrval = setguest ? guestval : hostval;
 			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
 		}
 	}

^ permalink raw reply	[flat|nested] 40+ messages in thread

* [MODERATED] Re: [patch 15/15] SSB updates V17 15
  2018-05-17  2:18   ` [MODERATED] " Konrad Rzeszutek Wilk
@ 2018-05-17 12:42     ` Paolo Bonzini
  2018-05-17 15:09       ` Thomas Gleixner
  0 siblings, 1 reply; 40+ messages in thread
From: Paolo Bonzini @ 2018-05-17 12:42 UTC (permalink / raw)
  To: speck

[-- Attachment #1: Type: text/plain, Size: 4649 bytes --]

On 17/05/2018 04:18, speck for Konrad Rzeszutek Wilk wrote:
>> @@ -4251,6 +4258,16 @@ static int svm_set_msr(struct kvm_vcpu *
>>  			break;
>>  		set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
>>  		break;
>> +	case MSR_AMD64_VIRT_SPEC_CTRL:
>> +		if (!msr->host_initiated &&
>> +		    !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
>> +			return 1;
>> +
>> +		if (data & ~SPEC_CTRL_SSBD)
>> +			return 1;
>> +
>> +		svm->virt_spec_ctrl = data;
> 
> You need to save virt_spec_ctrl content for migration purposes.
> 
> It was introduced in "x86/bugs, KVM: Extend speculation control for VIRT_SPEC_CTRL"
> but not actually used so this patch is probably the best place to define it.
> 
> That is add this MSR in the 'msrs_to_save' or perhaps 'emulated_msrs'?

It's emulated_msrs since the MSR doesn't exist on bare metal.  But it also doesn't
exist on Intel, so:

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 949c977bc4c9..8a5d0fdb1d9a 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -924,7 +924,7 @@ struct kvm_x86_ops {
 	int (*hardware_setup)(void);               /* __init */
 	void (*hardware_unsetup)(void);            /* __exit */
 	bool (*cpu_has_accelerated_tpr)(void);
-	bool (*cpu_has_high_real_mode_segbase)(void);
+	bool (*has_emulated_msr)(int index);
 	void (*cpuid_update)(struct kvm_vcpu *vcpu);
 
 	struct kvm *(*vm_alloc)(void);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b58787daf9f8..5a2724c80ca6 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5782,7 +5782,7 @@ static bool svm_cpu_has_accelerated_tpr(void)
 	return false;
 }
 
-static bool svm_has_high_real_mode_segbase(void)
+static bool svm_has_emulated_msr(int index)
 {
 	return true;
 }
@@ -7008,7 +7008,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.hardware_enable = svm_hardware_enable,
 	.hardware_disable = svm_hardware_disable,
 	.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
-	.cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase,
+	.has_emulated_msr = svm_has_emulated_msr,
 
 	.vcpu_create = svm_create_vcpu,
 	.vcpu_free = svm_free_vcpu,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index aafcc9881e88..ec1da2f44a75 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -9495,9 +9495,21 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
 }
 STACK_FRAME_NON_STANDARD(vmx_handle_external_intr);
 
-static bool vmx_has_high_real_mode_segbase(void)
+static bool vmx_has_emulated_msr(int index)
 {
-	return enable_unrestricted_guest || emulate_invalid_guest_state;
+	switch (index) {
+	case MSR_IA32_SMBASE:
+		/*
+		 * We cannot do SMM unless we can run the guest in big
+		 * real mode.
+		 */
+		return enable_unrestricted_guest || emulate_invalid_guest_state;
+	case MSR_AMD64_VIRT_SPEC_CTRL:
+		/* This is AMD only.  */
+		return false;
+	default:
+		return true;
+	}
 }
 
 static bool vmx_mpx_supported(void)
@@ -12622,7 +12634,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.hardware_enable = hardware_enable,
 	.hardware_disable = hardware_disable,
 	.cpu_has_accelerated_tpr = report_flexpriority,
-	.cpu_has_high_real_mode_segbase = vmx_has_high_real_mode_segbase,
+	.has_emulated_msr = vmx_has_emulated_msr,
 
 	.vm_init = vmx_vm_init,
 	.vm_alloc = vmx_vm_alloc,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b2ff74b12ec4..6a9c640e7c4f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1058,6 +1058,7 @@ static u32 emulated_msrs[] = {
 	MSR_SMI_COUNT,
 	MSR_PLATFORM_INFO,
 	MSR_MISC_FEATURES_ENABLES,
+	MSR_AMD64_VIRT_SPEC_CTRL,
 };
 
 static unsigned num_emulated_msrs;
@@ -2894,7 +2895,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 		 * fringe case that is not enabled except via specific settings
 		 * of the module parameters.
 		 */
-		r = kvm_x86_ops->cpu_has_high_real_mode_segbase();
+		r = kvm_x86_ops->has_emulated_msr(MSR_IA32_SMBASE);
 		break;
 	case KVM_CAP_VAPIC:
 		r = !kvm_x86_ops->cpu_has_accelerated_tpr();
@@ -4594,14 +4595,8 @@ static void kvm_init_msr_list(void)
 	num_msrs_to_save = j;
 
 	for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) {
-		switch (emulated_msrs[i]) {
-		case MSR_IA32_SMBASE:
-			if (!kvm_x86_ops->cpu_has_high_real_mode_segbase())
-				continue;
-			break;
-		default:
-			break;
-		}
+		if (!kvm_x86_ops->has_emulated_msr(emulated_msrs[i]))
+			continue;
 
 		if (j < i)
 			emulated_msrs[j] = emulated_msrs[i];



^ permalink raw reply related	[flat|nested] 40+ messages in thread
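
A minimal, self-contained sketch of the pattern the patch above introduces
(a per-backend has_emulated_msr() style predicate plus in-place compaction
of the emulated MSR list), using illustrative names and values rather than
the real KVM symbols:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative MSR indices, not the kernel's definitions. */
#define TOY_MSR_SMBASE          0x9e
#define TOY_MSR_MISC            0x140
#define TOY_MSR_VIRT_SPEC_CTRL  0xc001011f

static uint32_t emulated[] = {
	TOY_MSR_SMBASE, TOY_MSR_MISC, TOY_MSR_VIRT_SPEC_CTRL,
};
static unsigned int num_emulated = 3;

/*
 * Stand-in for the vendor callback: a VMX-like backend reports the
 * AMD-only virtual SPEC_CTRL MSR as not emulated.
 */
static bool backend_has_emulated_msr(uint32_t index)
{
	return index != TOY_MSR_VIRT_SPEC_CTRL;
}

static void init_msr_list(void)
{
	unsigned int i, j;

	for (i = j = 0; i < num_emulated; i++) {
		/* Drop entries the backend cannot emulate ... */
		if (!backend_has_emulated_msr(emulated[i]))
			continue;
		/* ... and keep the rest packed at the front. */
		if (j < i)
			emulated[j] = emulated[i];
		j++;
	}
	num_emulated = j;
}

int main(void)
{
	init_msr_list();
	for (unsigned int i = 0; i < num_emulated; i++)
		printf("0x%x\n", (unsigned int)emulated[i]);
	return 0;
}

The same predicate can also back the capability check visible in the
kvm_vm_ioctl_check_extension() hunk above, which simply asks whether the
SMBASE MSR can be emulated.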

* Re: [patch 15/15] SSB updates V17 15
  2018-05-17 12:42     ` Paolo Bonzini
@ 2018-05-17 15:09       ` Thomas Gleixner
  0 siblings, 0 replies; 40+ messages in thread
From: Thomas Gleixner @ 2018-05-17 15:09 UTC (permalink / raw)
  To: speck

On Thu, 17 May 2018, speck for Paolo Bonzini wrote:

> On 17/05/2018 04:18, speck for Konrad Rzeszutek Wilk wrote:
> >> @@ -4251,6 +4258,16 @@ static int svm_set_msr(struct kvm_vcpu *
> >>  			break;
> >>  		set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
> >>  		break;
> >> +	case MSR_AMD64_VIRT_SPEC_CTRL:
> >> +		if (!msr->host_initiated &&
> >> +		    !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
> >> +			return 1;
> >> +
> >> +		if (data & ~SPEC_CTRL_SSBD)
> >> +			return 1;
> >> +
> >> +		svm->virt_spec_ctrl = data;
> > 
> > You need to save virt_spec_ctrl content for migration purposes.
> > 
> > It was introduced in "x86/bugs, KVM: Extend speculation control for VIRT_SPEC_CTRL"
> > but not actually used so this patch is probably the best place to define it.
> > 
> > That is add this MSR in the 'msrs_to_save' or perhaps 'emulated_msrs'?
> 
> It's emulated_msrs since the MSR doesn't exist on bare metal.  But it also doesn't
> exist on Intel, so:

Thanks Paolo!

I've folded it into 15/15.

Thanks,

	tglx

^ permalink raw reply	[flat|nested] 40+ messages in thread

* [MODERATED] Re: [patch 08/15] SSB updates V17 8
  2018-05-17  2:56     ` Konrad Rzeszutek Wilk
@ 2018-05-17 16:13       ` Tom Lendacky
  2018-05-17 16:17         ` Paolo Bonzini
  2018-05-17 16:18         ` Tom Lendacky
  0 siblings, 2 replies; 40+ messages in thread
From: Tom Lendacky @ 2018-05-17 16:13 UTC (permalink / raw)
  To: speck

[-- Attachment #1: Type: text/plain, Size: 8447 bytes --]

On 05/16/2018 09:56 PM, speck for Konrad Rzeszutek Wilk wrote:
> On Wed, May 16, 2018 at 04:13:57PM -0500, speck for Tom Lendacky wrote:
>> On 5/16/2018 8:51 AM, speck for Thomas Gleixner wrote:
>>> Subject: [patch 08/15] x86/speculation: Add virtualized speculative store bypass disable support
>>> From: Tom Lendacky <thomas.lendacky@amd.com>
>>>
>>> Some AMD processors only support a non-architectural means of enabling
>>> speculative store bypass disable (SSBD).  To allow a simplified view of
>>> this to a guest, an architectural definition has been created through a new
>>> CPUID bit, 0x80000008_EBX[25], and a new MSR, 0xc001011f.  With this, a
>>> hypervisor can virtualize the existence of this definition and provide an
>>> architectural method for using SSBD to a guest.
>>>
>>> Add the new CPUID feature, the new MSR and update the existing SSBD
>>> support to use this MSR when present.
>>
>> Paolo (I'm assuming you're on this list),
>>
>> Do you know if anyone is working on Qemu / Libvirt patches to support the
>> new AMD VIRT_SSBD feature (new CPU definitions, etc.)?
> 
> But surely you have a patch that you wrote for testing this?

Yes, but I want to build upon anything that may already be in progress.
Also, I'm not a qemu expert so I wanted to be sure I had everything.
For example, I can already see that the KVM support needs to add
MSR_AMD64_VIRT_SPEC_CTRL to msrs_to_save in arch/x86/kvm/x86.c (I'll
send a follow-up patch to the list to add that).

> 
> Did you have in mind these two patches (inline), not tested, still compiling.
> 
> If you can test them, I can sync up with Daniel P. Berrangé who is going
> to post the Intel SSBD ones for libvirt+qemu on Monday.

Yes, I'll review and test.  I have some initial comments below.

I'll provide a patch back on top of these patches with any other updates.

Thanks,
Tom

> 
> 
>>From ec2d1fd2814d33467eb110cb86cd2b5ecf224089 Mon Sep 17 00:00:00 2001
> From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
> Date: Wed, 16 May 2018 21:37:31 -0400
> Subject: [PATCH v1 1/2] i386: define the AMD 'virt-ssbd' CPUID feature bit
>  (CVE-2018-3639)
> 
> AMD Zen exposes the Intel equivalent to Speculative Store Bypass Disable
> via the 0x80000008_EBX[25] CPUID feature bit.
> 
> This needs to be exposed to the guest OS to allow it to protect
> against CVE-2018-3639.
> 
> Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
> ---
>  target/i386/cpu.c | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
> diff --git a/target/i386/cpu.c b/target/i386/cpu.c
> index 06d8eee611..9d76cb4b1b 100644
> --- a/target/i386/cpu.c
> +++ b/target/i386/cpu.c
> @@ -542,7 +542,7 @@ static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
>              "ibpb", NULL, NULL, NULL,
>              NULL, NULL, NULL, NULL,
>              NULL, NULL, NULL, NULL,
> -            NULL, NULL, NULL, NULL,
> +            "virt-ssbd", NULL, NULL, NULL,

The virt-ssbd bit is bit 25, so this should be:

  NULL, "virt-ssbd", NULL, NULL,

>              NULL, NULL, NULL, NULL,
>          },
>          .cpuid_eax = 0x80000008,
> -- 
> 2.13.4
> 
> 
> and
> 
> 
> 
> >From 9a64a463b2479fdac914a33d8bda75393c00c145 Mon Sep 17 00:00:00 2001
> From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
> Date: Wed, 16 May 2018 22:27:11 -0400
> Subject: [PATCH 2/2] i386: Define the Virt SSBD MSR and handling of it.
> 
> "Some AMD processors only support a non-architectural means of enabling
> speculative store bypass disable (SSBD).  To allow a simplified view of
> this to a guest, an architectural definition has been created through a new
> CPUID bit, 0x80000008_EBX[25], and a new MSR, 0xc001011f.  With this, a
> hypervisor can virtualize the existence of this definition and provide an
> architectural method for using SSBD to a guest.
> 
> Add the new CPUID feature, the new MSR and update the existing SSBD
> support to use this MSR when present." (from x86/speculation: Add virtualized
> speculative store bypass disable support in Linux).
> 
> Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
> ---
>  target/i386/cpu.h     |  2 ++
>  target/i386/kvm.c     | 16 ++++++++++++++--
>  target/i386/machine.c | 20 ++++++++++++++++++++
>  3 files changed, 36 insertions(+), 2 deletions(-)
> 
> diff --git a/target/i386/cpu.h b/target/i386/cpu.h
> index b9fe2efafe..c73fdd18c7 100644
> --- a/target/i386/cpu.h
> +++ b/target/i386/cpu.h
> @@ -351,6 +351,7 @@ typedef enum X86Seg {
>  #define MSR_IA32_FEATURE_CONTROL        0x0000003a
>  #define MSR_TSC_ADJUST                  0x0000003b
>  #define MSR_IA32_SPEC_CTRL              0x48
> +#define MSR_VIRT_SSBD                   0xc001011f

I don't know if you want to match the kernel naming, but this is named
MSR_AMD64_VIRT_SPEC_CTRL in the kernel.

>  #define MSR_IA32_TSCDEADLINE            0x6e0
>  
>  #define FEATURE_CONTROL_LOCKED                    (1<<0)
> @@ -1150,6 +1151,7 @@ typedef struct CPUX86State {
>      uint32_t pkru;
>  
>      uint64_t spec_ctrl;
> +    uint64_t virt_ssbd;

Probably best to call this virt_spec_ctrl.

>  
>      /* End of state preserved by INIT (dummy marker).  */
>      struct {} end_init_save;
> diff --git a/target/i386/kvm.c b/target/i386/kvm.c
> index d6666a4b19..0c656a91a4 100644
> --- a/target/i386/kvm.c
> +++ b/target/i386/kvm.c
> @@ -93,6 +93,7 @@ static bool has_msr_hv_frequencies;
>  static bool has_msr_hv_reenlightenment;
>  static bool has_msr_xss;
>  static bool has_msr_spec_ctrl;
> +static bool has_msr_virt_ssbd;
>  static bool has_msr_smi_count;
>  
>  static uint32_t has_architectural_pmu_version;
> @@ -1233,6 +1234,9 @@ static int kvm_get_supported_msrs(KVMState *s)
>                  case MSR_IA32_SPEC_CTRL:
>                      has_msr_spec_ctrl = true;
>                      break;
> +                case MSR_VIRT_SSBD:
> +                    has_msr_virt_ssbd = true;
> +                    break;
>                  }
>              }
>          }
> @@ -1721,6 +1725,10 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
>      if (has_msr_spec_ctrl) {
>          kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, env->spec_ctrl);
>      }
> +    if (has_msr_virt_ssbd) {
> +        kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, env->virt_ssbd);
> +    }
> +
>  #ifdef TARGET_X86_64
>      if (lm_capable_kernel) {
>          kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar);
> @@ -2100,8 +2108,9 @@ static int kvm_get_msrs(X86CPU *cpu)
>      if (has_msr_spec_ctrl) {
>          kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, 0);
>      }
> -
> -
> +    if (has_msr_virt_ssbd) {
> +        kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, 0);
> +    }
>      if (!env->tsc_valid) {
>          kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0);
>          env->tsc_valid = !runstate_is_running();
> @@ -2481,6 +2490,9 @@ static int kvm_get_msrs(X86CPU *cpu)
>          case MSR_IA32_SPEC_CTRL:
>              env->spec_ctrl = msrs[i].data;
>              break;
> +        case MSR_VIRT_SSBD:
> +            env->virt_ssbd = msrs[i].data;
> +            break;
>          case MSR_IA32_RTIT_CTL:
>              env->msr_rtit_ctrl = msrs[i].data;
>              break;
> diff --git a/target/i386/machine.c b/target/i386/machine.c
> index fd99c0bbb4..4d98d367c1 100644
> --- a/target/i386/machine.c
> +++ b/target/i386/machine.c
> @@ -916,6 +916,25 @@ static const VMStateDescription vmstate_msr_intel_pt = {
>      }
>  };
>  
> +static bool virt_ssbd_needed(void *opaque)
> +{
> +    X86CPU *cpu = opaque;
> +    CPUX86State *env = &cpu->env;
> +
> +    return env->virt_ssbd != 0;
> +}
> +
> +static const VMStateDescription vmstate_msr_virt_ssbd = {
> +    .name = "cpu/virt_ssbd",
> +    .version_id = 1,
> +    .minimum_version_id = 1,
> +    .needed = virt_ssbd_needed,
> +    .fields = (VMStateField[]){
> +        VMSTATE_UINT64(env.virt_ssbd, X86CPU),
> +        VMSTATE_END_OF_LIST()
> +    }
> +};
> +
>  VMStateDescription vmstate_x86_cpu = {
>      .name = "cpu",
>      .version_id = 12,
> @@ -1039,6 +1058,7 @@ VMStateDescription vmstate_x86_cpu = {
>          &vmstate_spec_ctrl,
>          &vmstate_mcg_ext_ctl,
>          &vmstate_msr_intel_pt,
> +        &vmstate_msr_virt_ssbd,
>          NULL
>      }
>  };
> -- 
> 2.13.4
> 


^ permalink raw reply	[flat|nested] 40+ messages in thread
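
To make the interface described in the quoted commit message concrete,
here is a small user-space detection sketch. It assumes a GCC-style
<cpuid.h>; the constants mirror the values quoted above, and the ring-0
wrmsr step is shown only as a comment since the snippet itself cannot
perform it:

#include <cpuid.h>
#include <stdio.h>

#define VIRT_SSBD_BIT       (1u << 25)   /* CPUID 0x80000008 EBX[25] */
#define MSR_VIRT_SPEC_CTRL  0xc001011f   /* virtualized SSBD control MSR */
#define SPEC_CTRL_SSBD      (1u << 2)    /* SSBD enable bit */

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* __get_cpuid() returns 0 if the requested leaf is unsupported. */
	if (!__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx)) {
		printf("CPUID leaf 0x80000008 not available\n");
		return 1;
	}

	if (ebx & VIRT_SSBD_BIT) {
		printf("VIRT_SSBD advertised by the hypervisor\n");
		/*
		 * A guest kernel would then enable the mitigation from
		 * ring 0, roughly:
		 *	wrmsrl(MSR_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
		 * Only the SSBD bit is accepted; writes with other bits
		 * set are rejected, as the svm_set_msr() hunk above shows.
		 */
	} else {
		printf("VIRT_SSBD not advertised\n");
	}
	return 0;
}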

* [MODERATED] Re: [patch 08/15] SSB updates V17 8
  2018-05-17 16:13       ` Tom Lendacky
@ 2018-05-17 16:17         ` Paolo Bonzini
  2018-05-17 16:23           ` Konrad Rzeszutek Wilk
  2018-05-17 21:25           ` Tom Lendacky
  2018-05-17 16:18         ` Tom Lendacky
  1 sibling, 2 replies; 40+ messages in thread
From: Paolo Bonzini @ 2018-05-17 16:17 UTC (permalink / raw)
  To: speck

[-- Attachment #1: Type: text/plain, Size: 1095 bytes --]

On 17/05/2018 18:13, speck for Tom Lendacky wrote:
> >>> Paolo (I'm assuming you're on this list),
>>>
>>> Do you know if anyone is working on Qemu / Libvirt patches to support the
>>> new AMD VIRT_SSBD feature (new CPU definitions, etc.)?
>> But surely you have a patch that you wrote for testing this?
> Yes, but I want to build upon anything that may already be in progress.
> Also, I'm not a qemu expert so I wanted to be sure I had everything.
> For example, I can already see that the KVM support needs to add
> MSR_AMD64_VIRT_SPEC_CTRL to msrs_to_save in arch/x86/kvm/x86.c (I'll
> send a follow-up patch to the list to add that).
> 
>> Did you have in mind these two patches (inline), not tested, still compiling.
>>
>> If you can test them, I can sync up with Daniel P. Berrangé who is going
>> to post the Intel SSBD ones for libvirt+qemu on Monday.
> Yes, I'll review and test.  I have some initial comments below.

I got (Konrad's?) patches for QEMU via Dan, and they seem to be okay.

> I'll provide a patch back on top of these patches with any other updates.



^ permalink raw reply	[flat|nested] 40+ messages in thread

* [MODERATED] Re: [patch 08/15] SSB updates V17 8
  2018-05-17 16:13       ` Tom Lendacky
  2018-05-17 16:17         ` Paolo Bonzini
@ 2018-05-17 16:18         ` Tom Lendacky
  1 sibling, 0 replies; 40+ messages in thread
From: Tom Lendacky @ 2018-05-17 16:18 UTC (permalink / raw)
  To: speck

[-- Attachment #1: Type: text/plain, Size: 8815 bytes --]



On 05/17/2018 11:13 AM, speck for Tom Lendacky wrote:
> On 05/16/2018 09:56 PM, speck for Konrad Rzeszutek Wilk wrote:
>> On Wed, May 16, 2018 at 04:13:57PM -0500, speck for Tom Lendacky wrote:
>>> On 5/16/2018 8:51 AM, speck for Thomas Gleixner wrote:
>>>> Subject: [patch 08/15] x86/speculation: Add virtualized speculative store bypass disable support
>>>> From: Tom Lendacky <thomas.lendacky@amd.com>
>>>>
>>>> Some AMD processors only support a non-architectural means of enabling
>>>> speculative store bypass disable (SSBD).  To allow a simplified view of
>>>> this to a guest, an architectural definition has been created through a new
>>>> CPUID bit, 0x80000008_EBX[25], and a new MSR, 0xc001011f.  With this, a
>>>> hypervisor can virtualize the existence of this definition and provide an
>>>> architectural method for using SSBD to a guest.
>>>>
>>>> Add the new CPUID feature, the new MSR and update the existing SSBD
>>>> support to use this MSR when present.
>>>
>>> Paolo (I'm assuming you're on this list),
>>>
>>> Do you know if anyone is working on Qemu / Libvirt patches to support the
>>> new AMD VIRT_SSBD feature (new CPU definitions, etc.)?
>>
>> But surely you have a patch that you wrote for testing this?
> 
> Yes, but I want to build upon anything that may already be in progress.
> Also, I'm not a qemu expert so I wanted to be sure I had everything.
> For example, I can already see that the KVM support needs to add
> MSR_AMD64_VIRT_SPEC_CTRL to msrs_to_save in arch/x86/kvm/x86.c (I'll
> send a follow-up patch to the list to add that).

I see Paolo has already done that.  Thanks Paolo!

Tom

> 
>>
>> Did you have in mind these two patches (inline), not tested, still compiling.
>>
>> If you can test them, I can sync up with Daniel P. Berrangé who is going
>> to post the Intel SSBD ones for libvirt+qemu on Monday.
> 
> Yes, I'll review and test.  I have some initial comments below.
> 
> I'll provide a patch back on top of these patches with any other updates.
> 
> Thanks,
> Tom
> 
>>
>>
>> >From ec2d1fd2814d33467eb110cb86cd2b5ecf224089 Mon Sep 17 00:00:00 2001
>> From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
>> Date: Wed, 16 May 2018 21:37:31 -0400
>> Subject: [PATCH v1 1/2] i386: define the AMD 'virt-ssbd' CPUID feature bit
>>  (CVE-2018-3639)
>>
>> AMD Zen exposes the Intel equivalent to Speculative Store Bypass Disable
>> via the 0x80000008_EBX[25] CPUID feature bit.
>>
>> This needs to be exposed to the guest OS to allow it to protect
>> against CVE-2018-3639.
>>
>> Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
>> ---
>>  target/i386/cpu.c | 2 +-
>>  1 file changed, 1 insertion(+), 1 deletion(-)
>>
>> diff --git a/target/i386/cpu.c b/target/i386/cpu.c
>> index 06d8eee611..9d76cb4b1b 100644
>> --- a/target/i386/cpu.c
>> +++ b/target/i386/cpu.c
>> @@ -542,7 +542,7 @@ static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
>>              "ibpb", NULL, NULL, NULL,
>>              NULL, NULL, NULL, NULL,
>>              NULL, NULL, NULL, NULL,
>> -            NULL, NULL, NULL, NULL,
>> +            "virt-ssbd", NULL, NULL, NULL,
> 
> The virt-ssbd bit is bit 25, so this should be:
> 
>   NULL, "virt-ssbd", NULL, NULL,
> 
>>              NULL, NULL, NULL, NULL,
>>          },
>>          .cpuid_eax = 0x80000008,
>> -- 
>> 2.13.4
>>
>>
>> and
>>
>>
>>
>> >From 9a64a463b2479fdac914a33d8bda75393c00c145 Mon Sep 17 00:00:00 2001
>> From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
>> Date: Wed, 16 May 2018 22:27:11 -0400
>> Subject: [PATCH 2/2] i386: Define the Virt SSBD MSR and handling of it.
>>
>> "Some AMD processors only support a non-architectural means of enabling
>> speculative store bypass disable (SSBD).  To allow a simplified view of
>> this to a guest, an architectural definition has been created through a new
>> CPUID bit, 0x80000008_EBX[25], and a new MSR, 0xc001011f.  With this, a
>> hypervisor can virtualize the existence of this definition and provide an
>> architectural method for using SSBD to a guest.
>>
>> Add the new CPUID feature, the new MSR and update the existing SSBD
>> support to use this MSR when present." (from x86/speculation: Add virtualized
>> speculative store bypass disable support in Linux).
>>
>> Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
>> ---
>>  target/i386/cpu.h     |  2 ++
>>  target/i386/kvm.c     | 16 ++++++++++++++--
>>  target/i386/machine.c | 20 ++++++++++++++++++++
>>  3 files changed, 36 insertions(+), 2 deletions(-)
>>
>> diff --git a/target/i386/cpu.h b/target/i386/cpu.h
>> index b9fe2efafe..c73fdd18c7 100644
>> --- a/target/i386/cpu.h
>> +++ b/target/i386/cpu.h
>> @@ -351,6 +351,7 @@ typedef enum X86Seg {
>>  #define MSR_IA32_FEATURE_CONTROL        0x0000003a
>>  #define MSR_TSC_ADJUST                  0x0000003b
>>  #define MSR_IA32_SPEC_CTRL              0x48
>> +#define MSR_VIRT_SSBD                   0xc001011f
> 
> I don't know if you want to match the kernel naming, but this is named
> MSR_AMD64_VIRT_SPEC_CTRL in the kernel.
> 
>>  #define MSR_IA32_TSCDEADLINE            0x6e0
>>  
>>  #define FEATURE_CONTROL_LOCKED                    (1<<0)
>> @@ -1150,6 +1151,7 @@ typedef struct CPUX86State {
>>      uint32_t pkru;
>>  
>>      uint64_t spec_ctrl;
>> +    uint64_t virt_ssbd;
> 
> Probably best to call this virt_spec_ctrl.
> 
>>  
>>      /* End of state preserved by INIT (dummy marker).  */
>>      struct {} end_init_save;
>> diff --git a/target/i386/kvm.c b/target/i386/kvm.c
>> index d6666a4b19..0c656a91a4 100644
>> --- a/target/i386/kvm.c
>> +++ b/target/i386/kvm.c
>> @@ -93,6 +93,7 @@ static bool has_msr_hv_frequencies;
>>  static bool has_msr_hv_reenlightenment;
>>  static bool has_msr_xss;
>>  static bool has_msr_spec_ctrl;
>> +static bool has_msr_virt_ssbd;
>>  static bool has_msr_smi_count;
>>  
>>  static uint32_t has_architectural_pmu_version;
>> @@ -1233,6 +1234,9 @@ static int kvm_get_supported_msrs(KVMState *s)
>>                  case MSR_IA32_SPEC_CTRL:
>>                      has_msr_spec_ctrl = true;
>>                      break;
>> +                case MSR_VIRT_SSBD:
>> +                    has_msr_virt_ssbd = true;
>> +                    break;
>>                  }
>>              }
>>          }
>> @@ -1721,6 +1725,10 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
>>      if (has_msr_spec_ctrl) {
>>          kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, env->spec_ctrl);
>>      }
>> +    if (has_msr_virt_ssbd) {
>> +        kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, env->virt_ssbd);
>> +    }
>> +
>>  #ifdef TARGET_X86_64
>>      if (lm_capable_kernel) {
>>          kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar);
>> @@ -2100,8 +2108,9 @@ static int kvm_get_msrs(X86CPU *cpu)
>>      if (has_msr_spec_ctrl) {
>>          kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, 0);
>>      }
>> -
>> -
>> +    if (has_msr_virt_ssbd) {
>> +        kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, 0);
>> +    }
>>      if (!env->tsc_valid) {
>>          kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0);
>>          env->tsc_valid = !runstate_is_running();
>> @@ -2481,6 +2490,9 @@ static int kvm_get_msrs(X86CPU *cpu)
>>          case MSR_IA32_SPEC_CTRL:
>>              env->spec_ctrl = msrs[i].data;
>>              break;
>> +        case MSR_VIRT_SSBD:
>> +            env->virt_ssbd = msrs[i].data;
>> +            break;
>>          case MSR_IA32_RTIT_CTL:
>>              env->msr_rtit_ctrl = msrs[i].data;
>>              break;
>> diff --git a/target/i386/machine.c b/target/i386/machine.c
>> index fd99c0bbb4..4d98d367c1 100644
>> --- a/target/i386/machine.c
>> +++ b/target/i386/machine.c
>> @@ -916,6 +916,25 @@ static const VMStateDescription vmstate_msr_intel_pt = {
>>      }
>>  };
>>  
>> +static bool virt_ssbd_needed(void *opaque)
>> +{
>> +    X86CPU *cpu = opaque;
>> +    CPUX86State *env = &cpu->env;
>> +
>> +    return env->virt_ssbd != 0;
>> +}
>> +
>> +static const VMStateDescription vmstate_msr_virt_ssbd = {
>> +    .name = "cpu/virt_ssbd",
>> +    .version_id = 1,
>> +    .minimum_version_id = 1,
>> +    .needed = virt_ssbd_needed,
>> +    .fields = (VMStateField[]){
>> +        VMSTATE_UINT64(env.virt_ssbd, X86CPU),
>> +        VMSTATE_END_OF_LIST()
>> +    }
>> +};
>> +
>>  VMStateDescription vmstate_x86_cpu = {
>>      .name = "cpu",
>>      .version_id = 12,
>> @@ -1039,6 +1058,7 @@ VMStateDescription vmstate_x86_cpu = {
>>          &vmstate_spec_ctrl,
>>          &vmstate_mcg_ext_ctl,
>>          &vmstate_msr_intel_pt,
>> +        &vmstate_msr_virt_ssbd,
>>          NULL
>>      }
>>  };
>> -- 
>> 2.13.4
>>
> 


^ permalink raw reply	[flat|nested] 40+ messages in thread

* [MODERATED] Re: [patch 08/15] SSB updates V17 8
  2018-05-17 16:17         ` Paolo Bonzini
@ 2018-05-17 16:23           ` Konrad Rzeszutek Wilk
  2018-05-17 21:25           ` Tom Lendacky
  1 sibling, 0 replies; 40+ messages in thread
From: Konrad Rzeszutek Wilk @ 2018-05-17 16:23 UTC (permalink / raw)
  To: speck

On Thu, May 17, 2018 at 06:17:53PM +0200, speck for Paolo Bonzini wrote:
> On 17/05/2018 18:13, speck for Tom Lendacky wrote:
> >>> Paolo (I'm assuming you're on this list),
> >>>
> >>> Do you know if anyone is working on Qemu / Libvirt patches to support the
> >>> new AMD VIRT_SSBD feature (new CPU definitions, etc.)?
> >> But surely you have a patch that you wrote for testing this?
> > Yes, but I want to build upon anything that may already be in progress.
> > Also, I'm not a qemu expert so I wanted to be sure I had everything.
> > For example, I can already see that the KVM support needs to add
> > MSR_AMD64_VIRT_SPEC_CTRL to msrs_to_save in arch/x86/kvm/x86.c (I'll
> > send a follow-up patch to the list to add that).
> > 
> >> Did you have in mind these two patches (inline), not tested, still compiling.
> >>
> >> If you can test them, I can sync up with Daniel P. Berrangé who is going
> >> to post the Intel SSBD ones for libvirt+qemu on Monday.
> > Yes, I'll review and test.  I have some initial comments below.
> 
> I got (Konrad's?) patches for QEMU via Dan, and they seem to be okay.

Did my SoB/authorship get lost?

I will fix them up and send them to Dan, John, and you
per Tom's comments.

> 
> > I'll provide a patch back on top of these patches with any other updates.
> 
> 

^ permalink raw reply	[flat|nested] 40+ messages in thread

* [MODERATED] Re: [patch 08/15] SSB updates V17 8
  2018-05-17 16:17         ` Paolo Bonzini
  2018-05-17 16:23           ` Konrad Rzeszutek Wilk
@ 2018-05-17 21:25           ` Tom Lendacky
  1 sibling, 0 replies; 40+ messages in thread
From: Tom Lendacky @ 2018-05-17 21:25 UTC (permalink / raw)
  To: speck

[-- Attachment #1: Type: text/plain, Size: 13864 bytes --]

On 05/17/2018 11:17 AM, speck for Paolo Bonzini wrote:
> On 17/05/2018 18:13, speck for Tom Lendacky wrote:
>>>> Paolo (I'm assuming you're on this list),
>>>>
>>>> Do you know if anyone is working on Qemu / Libvirt patches to support the
>>>> new AMD VIRT_SSBD feature (new CPU definitions, etc.)?
>>> But surely you have a patch that you wrote for testing this?
>> Yes, but I want to build upon anything that may already be in progress.
>> Also, I'm not a qemu expert so I wanted to be sure I had everything.
>> For example, I can already see that the KVM support needs to add
>> MSR_AMD64_VIRT_SPEC_CTRL to msrs_to_save in arch/x86/kvm/x86.c (I'll
>> send a follow-up patch to the list to add that).
>>
>>> Did you have in mind these two patches (inline), not tested, still compiling.
>>>
>>> If you can test them, I can sync up with Daniel P. Berrangé who is going
>>> to post the Intel SSBD ones for libvirt+qemu on Monday.
>> Yes, I'll review and test.  I have some initial comments below.
> 
> I got (Konrad's?) patches for QEMU via Dan, and they seem to be okay.

Three Qemu patches below.  First patch is to fix the bit position of
VIRT_SSBD (although I think Konrad may have already made that change).
Also two patches for updating the CPU models for SSBD, including a
pre-req patch to add IBPB to the fam15h models (I'm not completely sure
these are needed, but wanted to supply them just in case).

Thanks,
Tom

i386: Fix the virtual SSBD bit position definition
From: Tom Lendacky <thomas.lendacky@amd.com>

The virtual SSBD CPUID bit position is bit 25, not 24.  Update the
FEAT_8000_0008_EBX definition.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
---
 target/i386/cpu.c |    2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 80202ab..a1a9df7 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -836,7 +836,7 @@ static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
             "ibpb", NULL, NULL, NULL,
             NULL, NULL, NULL, NULL,
             NULL, NULL, NULL, NULL,
-            "virt-ssbd", NULL, NULL, NULL,
+            NULL, "virt-ssbd", NULL, NULL,
             NULL, NULL, NULL, NULL,
         },
         .cpuid_eax = 0x80000008,
---

and

i386: Add AMD family 0x15 IBPB CPU models
From: Tom Lendacky <thomas.lendacky@amd.com>

Create -IBPB versions of existing family 0x15 CPU models with the addition
of CPUID_8000_0008_EBX_IBPB.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
---
 target/i386/cpu.c |   66 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 66 insertions(+)

diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index a1a9df7..e56df4c 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -2326,6 +2326,39 @@ static X86CPUDefinition builtin_x86_defs[] = {
         .model_id = "AMD Opteron 62xx class CPU",
     },
     {
+        .name = "Opteron_G4-IBPB",
+        .level = 0xd,
+        .vendor = CPUID_VENDOR_AMD,
+        .family = 21,
+        .model = 1,
+        .stepping = 2,
+        .features[FEAT_1_EDX] =
+            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+            CPUID_DE | CPUID_FP87,
+        .features[FEAT_1_ECX] =
+            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+            CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
+            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
+            CPUID_EXT_SSE3,
+        /* Missing: CPUID_EXT2_RDTSCP */
+        .features[FEAT_8000_0001_EDX] =
+            CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
+            CPUID_EXT2_SYSCALL,
+        .features[FEAT_8000_0001_ECX] =
+            CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
+            CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
+            CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
+            CPUID_EXT3_LAHF_LM,
+        .features[FEAT_8000_0008_EBX] =
+            CPUID_8000_0008_EBX_IBPB,
+        /* no xsaveopt! */
+        .xlevel = 0x8000001A,
+        .model_id = "AMD Opteron 62xx class CPU (with IBPB)",
+    },
+    {
         .name = "Opteron_G5",
         .level = 0xd,
         .vendor = CPUID_VENDOR_AMD,
@@ -2357,6 +2390,39 @@ static X86CPUDefinition builtin_x86_defs[] = {
         .model_id = "AMD Opteron 63xx class CPU",
     },
     {
+        .name = "Opteron_G5-IBPB",
+        .level = 0xd,
+        .vendor = CPUID_VENDOR_AMD,
+        .family = 21,
+        .model = 2,
+        .stepping = 0,
+        .features[FEAT_1_EDX] =
+            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+            CPUID_DE | CPUID_FP87,
+        .features[FEAT_1_ECX] =
+            CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
+            CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
+            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
+            CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
+        /* Missing: CPUID_EXT2_RDTSCP */
+        .features[FEAT_8000_0001_EDX] =
+            CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
+            CPUID_EXT2_SYSCALL,
+        .features[FEAT_8000_0001_ECX] =
+            CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
+            CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
+            CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
+            CPUID_EXT3_LAHF_LM,
+        .features[FEAT_8000_0008_EBX] =
+            CPUID_8000_0008_EBX_IBPB,
+        /* no xsaveopt! */
+        .xlevel = 0x8000001A,
+        .model_id = "AMD Opteron 63xx class CPU (with IBPB)",
+    },
+    {
         .name = "EPYC",
         .level = 0xd,
         .vendor = CPUID_VENDOR_AMD,
---

and

i386: Add virtual SSBD CPU definitions for AMD processors
From: Tom Lendacky <thomas.lendacky@amd.com>

Add the virtual SSBD CPUID feature to AMD processor definitions that
will support this - namely the family 0x15 and 0x17 models (family
0x16 would also be valid, but there are no defined models of that
family).

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
---
 target/i386/cpu.c |  113 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 target/i386/cpu.h |    3 +
 2 files changed, 115 insertions(+), 1 deletion(-)

diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index e56df4c..b43d731 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -2359,6 +2359,39 @@ static X86CPUDefinition builtin_x86_defs[] = {
         .model_id = "AMD Opteron 62xx class CPU (with IBPB)",
     },
     {
+        .name = "Opteron_G4-vSSBD",
+        .level = 0xd,
+        .vendor = CPUID_VENDOR_AMD,
+        .family = 21,
+        .model = 1,
+        .stepping = 2,
+        .features[FEAT_1_EDX] =
+            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+            CPUID_DE | CPUID_FP87,
+        .features[FEAT_1_ECX] =
+            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+            CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
+            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
+            CPUID_EXT_SSE3,
+        /* Missing: CPUID_EXT2_RDTSCP */
+        .features[FEAT_8000_0001_EDX] =
+            CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
+            CPUID_EXT2_SYSCALL,
+        .features[FEAT_8000_0001_ECX] =
+            CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
+            CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
+            CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
+            CPUID_EXT3_LAHF_LM,
+        .features[FEAT_8000_0008_EBX] =
+            CPUID_8000_0008_EBX_IBPB | CPUID_8000_0008_EBX_VIRT_SSBD,
+        /* no xsaveopt! */
+        .xlevel = 0x8000001A,
+        .model_id = "AMD Opteron 62xx class CPU (with IBPB and vSSBD)",
+    },
+    {
         .name = "Opteron_G5",
         .level = 0xd,
         .vendor = CPUID_VENDOR_AMD,
@@ -2423,6 +2456,39 @@ static X86CPUDefinition builtin_x86_defs[] = {
         .model_id = "AMD Opteron 63xx class CPU (with IBPB)",
     },
     {
+        .name = "Opteron_G5-vSSBD",
+        .level = 0xd,
+        .vendor = CPUID_VENDOR_AMD,
+        .family = 21,
+        .model = 2,
+        .stepping = 0,
+        .features[FEAT_1_EDX] =
+            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+            CPUID_DE | CPUID_FP87,
+        .features[FEAT_1_ECX] =
+            CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
+            CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
+            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
+            CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
+        /* Missing: CPUID_EXT2_RDTSCP */
+        .features[FEAT_8000_0001_EDX] =
+            CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
+            CPUID_EXT2_SYSCALL,
+        .features[FEAT_8000_0001_ECX] =
+            CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
+            CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
+            CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
+            CPUID_EXT3_LAHF_LM,
+        .features[FEAT_8000_0008_EBX] =
+            CPUID_8000_0008_EBX_IBPB | CPUID_8000_0008_EBX_VIRT_SSBD,
+        /* no xsaveopt! */
+        .xlevel = 0x8000001A,
+        .model_id = "AMD Opteron 63xx class CPU (with IBPB and vSSBD)",
+    },
+    {
         .name = "EPYC",
         .level = 0xd,
         .vendor = CPUID_VENDOR_AMD,
@@ -2514,6 +2580,53 @@ static X86CPUDefinition builtin_x86_defs[] = {
         .model_id = "AMD EPYC Processor (with IBPB)",
         .cache_info = &epyc_cache_info,
     },
+    {
+        .name = "EPYC-vSSBD",
+        .level = 0xd,
+        .vendor = CPUID_VENDOR_AMD,
+        .family = 23,
+        .model = 1,
+        .stepping = 2,
+        .features[FEAT_1_EDX] =
+            CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
+            CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
+            CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
+            CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
+            CPUID_VME | CPUID_FP87,
+        .features[FEAT_1_ECX] =
+            CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
+            CPUID_EXT_XSAVE | CPUID_EXT_AES |  CPUID_EXT_POPCNT |
+            CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
+            CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
+            CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
+        .features[FEAT_8000_0001_EDX] =
+            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
+            CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
+            CPUID_EXT2_SYSCALL,
+        .features[FEAT_8000_0001_ECX] =
+            CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
+            CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
+            CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
+        .features[FEAT_8000_0008_EBX] =
+            CPUID_8000_0008_EBX_IBPB | CPUID_8000_0008_EBX_VIRT_SSBD,
+        .features[FEAT_7_0_EBX] =
+            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
+            CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
+            CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
+            CPUID_7_0_EBX_SHA_NI,
+        /* Missing: XSAVES (not supported by some Linux versions,
+         * including v4.1 to v4.12).
+         * KVM doesn't yet expose any XSAVES state save component.
+         */
+        .features[FEAT_XSAVE] =
+            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
+            CPUID_XSAVE_XGETBV1,
+        .features[FEAT_6_EAX] =
+            CPUID_6_EAX_ARAT,
+        .xlevel = 0x8000000A,
+        .model_id = "AMD EPYC Processor (with IBPB and vSSBD)",
+        .cache_info = &epyc_cache_info,
+    },
 };
 
 typedef struct PropValue {
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 02825fa..45b7a33 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -689,7 +689,8 @@ typedef uint32_t FeatureWordArray[FEATURE_WORDS];
 
 #define KVM_HINTS_DEDICATED (1U << 0)
 
-#define CPUID_8000_0008_EBX_IBPB    (1U << 12) /* Indirect Branch Prediction Barrier */
+#define CPUID_8000_0008_EBX_IBPB      (1U << 12) /* Indirect Branch Prediction Barrier */
+#define CPUID_8000_0008_EBX_VIRT_SSBD (1U << 25) /* Virtual Speculative Store Bypass Disable */
 
 #define CPUID_XSAVE_XSAVEOPT   (1U << 0)
 #define CPUID_XSAVE_XSAVEC     (1U << 1)

> 
>> I'll provide a patch back on top of these patches with any other updates.
> 
> 


^ permalink raw reply related	[flat|nested] 40+ messages in thread
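
A rough, toy-sized illustration (not QEMU internals) of what the model
definitions above give a guest: the FEAT_8000_0008_EBX word of a model is
what the guest later reads back in EBX of CPUID leaf 0x80000008, so a model
carrying both bits advertises IBPB and virt-ssbd together. The struct below
is a stand-in; only the bit definitions match the patches:

#include <stdint.h>
#include <stdio.h>

#define CPUID_8000_0008_EBX_IBPB      (1U << 12)
#define CPUID_8000_0008_EBX_VIRT_SSBD (1U << 25)

/* Toy stand-in for an X86CPUDefinition with a single feature word. */
struct toy_model {
	const char *name;
	uint32_t feat_8000_0008_ebx;
};

static const struct toy_model epyc_vssbd = {
	.name               = "EPYC-vSSBD",
	.feat_8000_0008_ebx = CPUID_8000_0008_EBX_IBPB |
			      CPUID_8000_0008_EBX_VIRT_SSBD,
};

int main(void)
{
	/* The value the guest would see in CPUID 0x80000008 EBX. */
	uint32_t ebx = epyc_vssbd.feat_8000_0008_ebx;

	printf("%s: ibpb=%d virt-ssbd=%d\n", epyc_vssbd.name,
	       !!(ebx & CPUID_8000_0008_EBX_IBPB),
	       !!(ebx & CPUID_8000_0008_EBX_VIRT_SSBD));
	return 0;
}

With the virt-ssbd feature name from the earlier 1/2 patch in place, the
same end result should presumably also be reachable with something like
"-cpu EPYC-IBPB,+virt-ssbd" instead of a dedicated model.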

end of thread, other threads:[~2018-05-17 21:25 UTC | newest]

Thread overview: 40+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-05-16 13:51 [patch 00/15] SSB updates V17 0 Thomas Gleixner
2018-05-16 13:51 ` [patch 01/15] SSB updates V17 1 Thomas Gleixner
2018-05-16 13:51 ` [patch 02/15] SSB updates V17 2 Thomas Gleixner
2018-05-16 14:29   ` [MODERATED] " Konrad Rzeszutek Wilk
2018-05-16 13:51 ` [patch 03/15] SSB updates V17 3 Thomas Gleixner
2018-05-17  1:06   ` [MODERATED] " Konrad Rzeszutek Wilk
2018-05-16 13:51 ` [patch 04/15] SSB updates V17 4 Thomas Gleixner
2018-05-17  1:14   ` [MODERATED] " Konrad Rzeszutek Wilk
2018-05-16 13:51 ` [patch 05/15] SSB updates V17 5 Thomas Gleixner
2018-05-17  1:14   ` [MODERATED] " Konrad Rzeszutek Wilk
2018-05-16 13:51 ` [patch 06/15] SSB updates V17 6 Thomas Gleixner
2018-05-17  1:28   ` [MODERATED] " Konrad Rzeszutek Wilk
2018-05-16 13:51 ` [patch 07/15] SSB updates V17 7 Thomas Gleixner
2018-05-17  1:29   ` [MODERATED] " Konrad Rzeszutek Wilk
2018-05-16 13:51 ` [patch 08/15] SSB updates V17 8 Thomas Gleixner
2018-05-16 21:13   ` [MODERATED] " Tom Lendacky
2018-05-17  2:56     ` Konrad Rzeszutek Wilk
2018-05-17 16:13       ` Tom Lendacky
2018-05-17 16:17         ` Paolo Bonzini
2018-05-17 16:23           ` Konrad Rzeszutek Wilk
2018-05-17 21:25           ` Tom Lendacky
2018-05-17 16:18         ` Tom Lendacky
2018-05-16 13:51 ` [patch 09/15] SSB updates V17 9 Thomas Gleixner
2018-05-17  1:40   ` [MODERATED] " Konrad Rzeszutek Wilk
2018-05-16 13:51 ` [patch 10/15] SSB updates V17 10 Thomas Gleixner
2018-05-17  1:43   ` [MODERATED] " Konrad Rzeszutek Wilk
2018-05-16 13:51 ` [patch 11/15] SSB updates V17 11 Thomas Gleixner
2018-05-17  1:45   ` [MODERATED] " Konrad Rzeszutek Wilk
2018-05-16 13:51 ` [patch 12/15] SSB updates V17 12 Thomas Gleixner
2018-05-16 13:51 ` [patch 13/15] SSB updates V17 13 Thomas Gleixner
2018-05-17  2:08   ` [MODERATED] " Konrad Rzeszutek Wilk
2018-05-17  8:45     ` Thomas Gleixner
2018-05-16 13:51 ` [patch 14/15] SSB updates V17 14 Thomas Gleixner
2018-05-16 16:34   ` [MODERATED] " Tom Lendacky
2018-05-16 21:26     ` Thomas Gleixner
2018-05-16 13:51 ` [patch 15/15] SSB updates V17 15 Thomas Gleixner
2018-05-17  2:18   ` [MODERATED] " Konrad Rzeszutek Wilk
2018-05-17 12:42     ` Paolo Bonzini
2018-05-17 15:09       ` Thomas Gleixner
2018-05-16 14:09 ` [patch 00/15] SSB updates V17 0 Thomas Gleixner
