* [PATCH v2 0/8] KVM: x86: expose CVE-2017-5715 ("Spectre variant 2") mitigations to guest
@ 2018-01-09 12:03 Paolo Bonzini
  2018-01-09 12:03 ` [PATCH 1/8] KVM: x86: add SPEC_CTRL and IBPB_SUPPORT accessors Paolo Bonzini
                   ` (8 more replies)
  0 siblings, 9 replies; 46+ messages in thread
From: Paolo Bonzini @ 2018-01-09 12:03 UTC (permalink / raw)
  To: linux-kernel, kvm
  Cc: rkrcmar, liran.alon, jmattson, aliguori, thomas.lendacky, dwmw, bp, x86

This series allows guests to use the MSR_IA32_SPEC_CTRL and
MSR_IA32_PRED_CMD model specific registers that were added as mitigations
for CVE-2017-5715.
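
As a reminder, bit 0 of SPEC_CTRL is IBRS, and writing bit 0 of PRED_CMD
triggers an IBPB (indirect branch prediction barrier); a guest kernel
would use them roughly like this (a sketch only, not code from this
series):

	wrmsrl(MSR_IA32_SPEC_CTRL, SPEC_CTRL_FEATURE_ENABLE_IBRS);  /* kernel entry */
	wrmsrl(MSR_IA32_SPEC_CTRL, SPEC_CTRL_FEATURE_DISABLE_IBRS); /* back to user */
	wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);                   /* context switch */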

These are only the KVM-specific parts of the fix.  It does *not* yet
include any protection for reading host memory from the guest, because
that would be done in the same way as the rest of Linux.  So there is no
IBRS *usage* here, no retpolines, no stuffing of the return stack buffer.
(KVM already includes a fix to clear all registers on vmexit, which is
enough to block Google Project Zero's PoC exploit).

However, I am including the changes to use IBPB (indirect branch
prediction barrier) if available.  The barrier is issued only when a
physical CPU switches to a different VCPU, so the performance impact
is small.

The patches are a bit hackish because the relevant cpufeatures have
not been included yet, and because I wanted to make the patches easier
to backport to distro kernels if desired, but I would still like to
have them in 4.16.

Please review.  The interdiff from v1 is at the end of this cover letter.

Thanks,

Paolo

Paolo Bonzini (6):
  KVM: x86: add SPEC_CTRL and IBPB_SUPPORT accessors
  x86/msr: add definitions for indirect branch predictor MSRs
  kvm: vmx: pass MSR_IA32_SPEC_CTRL and MSR_IA32_PRED_CMD down to the guest
  KVM: SVM: fix comment
  kvm: svm: pass MSR_IA32_SPEC_CTRL and MSR_IA32_PRED_CMD down to guest
  KVM: x86: add SPEC_CTRL and IBPB_SUPPORT to MSR and CPUID lists

Tim Chen (1):
  kvm: vmx: Set IBPB when running a different VCPU

Tom Lendacky (1):
  x86/svm: Set IBPB when running a different VCPU

 arch/x86/include/asm/msr-index.h |  9 ++++-
 arch/x86/kvm/cpuid.c             | 27 ++++++++++---
 arch/x86/kvm/cpuid.h             | 22 +++++++++++
 arch/x86/kvm/svm.c               | 83 +++++++++++++++++++++++++++++++++++++++-
 arch/x86/kvm/vmx.c               | 59 ++++++++++++++++++++++++++++
 arch/x86/kvm/x86.c               |  1 +
 6 files changed, 193 insertions(+), 8 deletions(-)


diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index ec08f1d8d39b..828a03425571 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -39,11 +39,6 @@
 
 /* Intel MSRs. Some also available on other CPUs */
 
-#define MSR_IA32_SPEC_CTRL		0x00000048
-
-#define MSR_IA32_PRED_CMD		0x00000049
-#define FEATURE_SET_IBPB		(1UL << 0)
-
 #define MSR_PPIN_CTL			0x0000004e
 #define MSR_PPIN			0x0000004f
 
@@ -469,8 +464,15 @@
 #define MSR_SMI_COUNT			0x00000034
 #define MSR_IA32_FEATURE_CONTROL        0x0000003a
 #define MSR_IA32_TSC_ADJUST             0x0000003b
+
+#define MSR_IA32_SPEC_CTRL		0x00000048
+#define SPEC_CTRL_FEATURE_DISABLE_IBRS	(0 << 0)
+#define SPEC_CTRL_FEATURE_ENABLE_IBRS	(1 << 0)
+
+#define MSR_IA32_PRED_CMD		0x00000049
+#define PRED_CMD_IBPB			(1UL << 0)
+
 #define MSR_IA32_BNDCFGS		0x00000d90
-
 #define MSR_IA32_BNDCFGS_RSVD		0x00000ffc
 
 #define MSR_IA32_XSS			0x00000da0
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index dd744d896cec..c249d5f599e4 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1738,7 +1738,7 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
 	 * svm_vcpu_load; block speculative execution.
 	 */
 	if (have_ibpb_support)
-		wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
+		wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
 }
 
 static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
@@ -1776,7 +1776,7 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	if (sd->current_vmcb != svm->vmcb) {
 		sd->current_vmcb = svm->vmcb;
 		if (have_ibpb_support)
-			wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
+			wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
 	}
 
 	avic_vcpu_load(vcpu, cpu);
@@ -3648,6 +3648,10 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		msr_info->data = svm->nested.vm_cr_msr;
 		break;
 	case MSR_IA32_SPEC_CTRL:
+		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL) ||
+		    (!msr_info->host_initiated &&
+		     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)))
+			return 1;
 		msr_info->data = svm->spec_ctrl;
 		break;
 	case MSR_IA32_UCODE_REV:
@@ -3806,6 +3810,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 		vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
 		break;
 	case MSR_IA32_SPEC_CTRL:
+		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL) ||
+		    (!msr->host_initiated &&
+		     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)))
+			return 1;
 		svm->spec_ctrl = data;
 		break;
 	case MSR_IA32_APICBASE:
@@ -4996,6 +5004,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 
 	local_irq_enable();
 
+	/*
+	 * MSR_IA32_SPEC_CTRL is restored after the last indirect branch
+	 * before vmentry.
+	 */
 	if (have_spec_ctrl && svm->spec_ctrl != 0)
 		wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
 
@@ -5077,6 +5089,12 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 		if (svm->spec_ctrl != 0)
 			wrmsrl(MSR_IA32_SPEC_CTRL, 0);
 	}
+	/*
+	 * Speculative execution past the above wrmsrl might encounter
+	 * an indirect branch and use guest-controlled contents of the
+	 * indirect branch predictor; block it.
+	 */
+	asm("lfence");
 
 #ifdef CONFIG_X86_64
 	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index bf127c570675..ef2681fa568a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2376,7 +2376,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
 		vmcs_load(vmx->loaded_vmcs->vmcs);
 		if (have_spec_ctrl)
-			wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
+			wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
 	}
 
 	if (!already_loaded) {
@@ -3347,6 +3347,7 @@ static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu,
  */
 static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct shared_msr_entry *msr;
 
 	switch (msr_info->index) {
@@ -3358,8 +3359,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		msr_info->data = vmcs_readl(GUEST_GS_BASE);
 		break;
 	case MSR_KERNEL_GS_BASE:
-		vmx_load_host_state(to_vmx(vcpu));
-		msr_info->data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
+		vmx_load_host_state(vmx);
+		msr_info->data = vmx->msr_guest_kernel_gs_base;
 		break;
 #endif
 	case MSR_EFER:
@@ -3368,7 +3369,11 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		msr_info->data = guest_read_tsc(vcpu);
 		break;
 	case MSR_IA32_SPEC_CTRL:
-		msr_info->data = to_vmx(vcpu)->spec_ctrl;
+		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL) ||
+		    (!msr_info->host_initiated &&
+		     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)))
+			return 1;
+		msr_info->data = to_vmx(vcpu)->spec_ctrl;
 		break;
 	case MSR_IA32_SYSENTER_CS:
 		msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
@@ -3388,13 +3393,13 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		break;
 	case MSR_IA32_MCG_EXT_CTL:
 		if (!msr_info->host_initiated &&
-		    !(to_vmx(vcpu)->msr_ia32_feature_control &
+		    !(vmx->msr_ia32_feature_control &
 		      FEATURE_CONTROL_LMCE))
 			return 1;
 		msr_info->data = vcpu->arch.mcg_ext_ctl;
 		break;
 	case MSR_IA32_FEATURE_CONTROL:
-		msr_info->data = to_vmx(vcpu)->msr_ia32_feature_control;
+		msr_info->data = vmx->msr_ia32_feature_control;
 		break;
 	case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
 		if (!nested_vmx_allowed(vcpu))
@@ -3443,7 +3448,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			return 1;
 		/* Otherwise falls through */
 	default:
-		msr = find_msr_entry(to_vmx(vcpu), msr_info->index);
+		msr = find_msr_entry(vmx, msr_info->index);
 		if (msr) {
 			msr_info->data = msr->data;
 			break;
@@ -3510,7 +3515,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		kvm_write_tsc(vcpu, msr_info);
 		break;
 	case MSR_IA32_SPEC_CTRL:
-		to_vmx(vcpu)->spec_ctrl = msr_info->data;
+		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL) ||
+		    (!msr_info->host_initiated &&
+		     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)))
+			return 1;
+		to_vmx(vcpu)->spec_ctrl = data;
 		break;
 	case MSR_IA32_CR_PAT:
 		if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
@@ -4046,7 +4046,7 @@ static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
 	 * vmx_vcpu_load; block speculative execution.
 	 */
 	if (have_spec_ctrl)
-		wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
+		wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
 }
 
 static void vmx_nested_free_vmcs02(struct vcpu_vmx *vmx)
@@ -9629,13 +9638,17 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 
 	pt_guest_enter(vmx);
 
-	if (have_spec_ctrl && vmx->spec_ctrl != 0)
-		wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
-
 	atomic_switch_perf_msrs(vmx);
 
 	vmx_arm_hv_timer(vcpu);
 
+	/*
+	 * MSR_IA32_SPEC_CTRL is restored after the last indirect branch
+	 * before vmentry.
+	 */
+	if (have_spec_ctrl && vmx->spec_ctrl != 0)
+		wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+
 	vmx->__launched = vmx->loaded_vmcs->launched;
 	asm(
 		/* Store host registers */
@@ -9744,9 +9757,15 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 
 	if (have_spec_ctrl) {
 		rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
-		if (vmx->spec_ctrl)
+		if (vmx->spec_ctrl != 0)
 			wrmsrl(MSR_IA32_SPEC_CTRL, 0);
 	}
+	/*
+	 * Speculative execution past the above wrmsrl might encounter
+	 * an indirect branch and use guest-controlled contents of the
+	 * indirect branch predictor; block it.
+	 */
+	asm("lfence");
 
 	/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
 	if (vmx->host_debugctlmsr)
-- 
1.8.3.1


* [PATCH 1/8] KVM: x86: add SPEC_CTRL and IBPB_SUPPORT accessors
  2018-01-09 12:03 [PATCH v2 0/8] KVM: x86: expose CVE-2017-5715 ("Spectre variant 2") mitigations to guest Paolo Bonzini
@ 2018-01-09 12:03 ` Paolo Bonzini
  2018-01-15  9:42   ` David Hildenbrand
  2018-01-09 12:03 ` [PATCH 2/8] x86/msr: add definitions for indirect branch predictor MSRs Paolo Bonzini
                   ` (7 subsequent siblings)
  8 siblings, 1 reply; 46+ messages in thread
From: Paolo Bonzini @ 2018-01-09 12:03 UTC (permalink / raw)
  To: linux-kernel, kvm
  Cc: rkrcmar, liran.alon, jmattson, aliguori, thomas.lendacky, dwmw, bp, x86

As an interim measure until SPEC_CTRL is supported by upstream
Linux in cpufeatures, add a function that lets vmx.c and svm.c
know whether to save/restore MSR_IA32_SPEC_CTRL.
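
The callers added later in the series just cache the result once at
hardware setup time (sketch):

	have_spec_ctrl = cpu_has_spec_ctrl();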

Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/cpuid.c |  3 ---
 arch/x86/kvm/cpuid.h | 22 ++++++++++++++++++++++
 2 files changed, 22 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 8e9a07c557f1..767af697c20c 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -67,9 +67,6 @@ u64 kvm_supported_xcr0(void)
 
 #define F(x) bit(X86_FEATURE_##x)
 
-/* These are scattered features in cpufeatures.h. */
-#define KVM_CPUID_BIT_AVX512_4VNNIW     2
-#define KVM_CPUID_BIT_AVX512_4FMAPS     3
 #define KF(x) bit(KVM_CPUID_BIT_##x)
 
 int kvm_update_cpuid(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index c2cea6651279..8d04ccf177ce 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -155,6 +155,28 @@ static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
 	return x86_stepping(best->eax);
 }
 
+/* These are scattered features in cpufeatures.h. */
+#define KVM_CPUID_BIT_AVX512_4VNNIW	2
+#define KVM_CPUID_BIT_AVX512_4FMAPS	3
+#define KVM_CPUID_BIT_SPEC_CTRL		26
+#define KVM_CPUID_BIT_STIBP		27
+
+/* CPUID[eax=0x80000008].ebx */
+#define KVM_CPUID_BIT_IBPB_SUPPORT	12
+
+static inline bool cpu_has_spec_ctrl(void)
+{
+	u32 eax, ebx, ecx, edx;
+	cpuid_count(7, 0, &eax, &ebx, &ecx, &edx);
+
+	return edx & bit(KVM_CPUID_BIT_SPEC_CTRL);
+}
+
+static inline bool cpu_has_ibpb_support(void)
+{
+	return cpuid_ebx(0x80000008) & bit(KVM_CPUID_BIT_IBPB_SUPPORT);
+}
+
 static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
-- 
1.8.3.1


* [PATCH 2/8] x86/msr: add definitions for indirect branch predictor MSRs
  2018-01-09 12:03 [PATCH v2 0/8] KVM: x86: expose CVE-2017-5715 ("Spectre variant 2") mitigations to guest Paolo Bonzini
  2018-01-09 12:03 ` [PATCH 1/8] KVM: x86: add SPEC_CTRL and IBPB_SUPPORT accessors Paolo Bonzini
@ 2018-01-09 12:03 ` Paolo Bonzini
  2018-01-09 12:03 ` [PATCH 3/8] kvm: vmx: pass MSR_IA32_SPEC_CTRL and MSR_IA32_PRED_CMD down to the guest Paolo Bonzini
                   ` (6 subsequent siblings)
  8 siblings, 0 replies; 46+ messages in thread
From: Paolo Bonzini @ 2018-01-09 12:03 UTC (permalink / raw)
  To: linux-kernel, kvm
  Cc: rkrcmar, liran.alon, jmattson, aliguori, thomas.lendacky, dwmw, bp, x86

These MSRs are available if the CPU feature SPEC_CTRL
(CPUID(EAX=7,ECX=0).EDX[26]) is present.  The PRED_CMD MSR
is also available if the CPU feature IBPB_SUPPORT
(CPUID(EAX=0x80000008).EBX[12]) is present.

KVM will soon start using PRED_CMD and will make SPEC_CTRL
available to guests.
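
For completeness, the same bits can be probed from user space with the
compiler's <cpuid.h> helpers (a standalone sketch; max-leaf checks are
omitted, and the kernel side uses cpuid_count()/cpuid_ebx() instead):

	#include <stdbool.h>
	#include <stdio.h>
	#include <cpuid.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/* CPUID(EAX=7,ECX=0).EDX[26]: SPEC_CTRL */
		__cpuid_count(7, 0, eax, ebx, ecx, edx);
		bool spec_ctrl = edx & (1u << 26);

		/* CPUID(EAX=0x80000008).EBX[12]: IBPB_SUPPORT */
		__cpuid(0x80000008, eax, ebx, ecx, edx);
		bool ibpb = ebx & (1u << 12);

		printf("SPEC_CTRL %savailable, IBPB_SUPPORT %savailable\n",
		       spec_ctrl ? "" : "not ", ibpb ? "" : "not ");
		return 0;
	}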

Reviewed-by: Liran Alon <liran.alon@oracle.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/include/asm/msr-index.h | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 03ffde6217d0..828a03425571 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -464,8 +464,15 @@
 #define MSR_SMI_COUNT			0x00000034
 #define MSR_IA32_FEATURE_CONTROL        0x0000003a
 #define MSR_IA32_TSC_ADJUST             0x0000003b
-#define MSR_IA32_BNDCFGS		0x00000d90
 
+#define MSR_IA32_SPEC_CTRL		0x00000048
+#define SPEC_CTRL_FEATURE_DISABLE_IBRS	(0 << 0)
+#define SPEC_CTRL_FEATURE_ENABLE_IBRS	(1 << 0)
+
+#define MSR_IA32_PRED_CMD		0x00000049
+#define PRED_CMD_IBPB			(1UL << 0)
+
+#define MSR_IA32_BNDCFGS		0x00000d90
 #define MSR_IA32_BNDCFGS_RSVD		0x00000ffc
 
 #define MSR_IA32_XSS			0x00000da0
-- 
1.8.3.1


* [PATCH 3/8] kvm: vmx: pass MSR_IA32_SPEC_CTRL and MSR_IA32_PRED_CMD down to the guest
  2018-01-09 12:03 [PATCH v2 0/8] KVM: x86: expose CVE-2017-5715 ("Spectre variant 2") mitigations to guest Paolo Bonzini
  2018-01-09 12:03 ` [PATCH 1/8] KVM: x86: add SPEC_CTRL and IBPB_SUPPORT accessors Paolo Bonzini
  2018-01-09 12:03 ` [PATCH 2/8] x86/msr: add definitions for indirect branch predictor MSRs Paolo Bonzini
@ 2018-01-09 12:03 ` Paolo Bonzini
  2018-01-13 10:16   ` Longpeng (Mike)
       [not found]   ` <1515839272.22302.520.camel@amazon.co.uk>
  2018-01-09 12:03 ` [PATCH 4/8] kvm: vmx: Set IBPB when running a different VCPU Paolo Bonzini
                   ` (5 subsequent siblings)
  8 siblings, 2 replies; 46+ messages in thread
From: Paolo Bonzini @ 2018-01-09 12:03 UTC (permalink / raw)
  To: linux-kernel, kvm
  Cc: rkrcmar, liran.alon, jmattson, aliguori, thomas.lendacky, dwmw, bp, x86

Direct access to MSR_IA32_SPEC_CTRL and MSR_IA32_PRED_CMD is important
for performance.  Allow load/store of MSR_IA32_SPEC_CTRL, restore guest
IBRS on VM entry and set it to 0 on VM exit (because Linux does not use
it yet).
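
Condensed, the sequence that the hunks below implement is:

	/* restore guest IBRS after the last indirect branch before vmentry */
	if (have_spec_ctrl && vmx->spec_ctrl != 0)
		wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);

	/* ... vmlaunch/vmresume ... */

	if (have_spec_ctrl) {
		rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);	/* save guest value */
		if (vmx->spec_ctrl != 0)
			wrmsrl(MSR_IA32_SPEC_CTRL, 0);		/* host runs with IBRS=0 */
	}
	asm("lfence");	/* block speculation past the conditional wrmsrl */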

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/vmx.c | 42 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 42 insertions(+)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 669f5f74857d..ef603692aa98 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -120,6 +120,8 @@
 module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
 #endif
 
+static bool __read_mostly have_spec_ctrl;
+
 #define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
 #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST (X86_CR0_WP | X86_CR0_NE)
 #define KVM_VM_CR0_ALWAYS_ON						\
@@ -609,6 +611,8 @@ struct vcpu_vmx {
 	u64 		      msr_host_kernel_gs_base;
 	u64 		      msr_guest_kernel_gs_base;
 #endif
+	u64		      spec_ctrl;
+
 	u32 vm_entry_controls_shadow;
 	u32 vm_exit_controls_shadow;
 	u32 secondary_exec_control;
@@ -3361,6 +3365,9 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_IA32_TSC:
 		msr_info->data = guest_read_tsc(vcpu);
 		break;
+	case MSR_IA32_SPEC_CTRL:
+		msr_info->data = to_vmx(vcpu)->spec_ctrl;
+		break;
 	case MSR_IA32_SYSENTER_CS:
 		msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
 		break;
@@ -3500,6 +3507,9 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_IA32_TSC:
 		kvm_write_tsc(vcpu, msr_info);
 		break;
+	case MSR_IA32_SPEC_CTRL:
+		to_vmx(vcpu)->spec_ctrl = data;
+		break;
 	case MSR_IA32_CR_PAT:
 		if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
 			if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
@@ -7062,6 +7072,17 @@ static __init int hardware_setup(void)
 		goto out;
 	}
 
+	/*
+	 * FIXME: this is only needed until SPEC_CTRL is supported
+	 * by upstream Linux in cpufeatures, then it can be replaced
+	 * with static_cpu_has.
+	 */
+	have_spec_ctrl = cpu_has_spec_ctrl();
+	if (have_spec_ctrl)
+		pr_info("kvm: SPEC_CTRL available\n");
+	else
+		pr_info("kvm: SPEC_CTRL not available\n");
+
 	if (boot_cpu_has(X86_FEATURE_NX))
 		kvm_enable_efer_bits(EFER_NX);
 
@@ -7131,6 +7152,8 @@ static __init int hardware_setup(void)
 	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
 	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
 	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
+	vmx_disable_intercept_for_msr(MSR_IA32_SPEC_CTRL, false);
+	vmx_disable_intercept_for_msr(MSR_IA32_PRED_CMD, false);
 
 	memcpy(vmx_msr_bitmap_legacy_x2apic_apicv,
 			vmx_msr_bitmap_legacy, PAGE_SIZE);
@@ -9601,6 +9624,13 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 
 	vmx_arm_hv_timer(vcpu);
 
+	/*
+	 * MSR_IA32_SPEC_CTRL is restored after the last indirect branch
+	 * before vmentry.
+	 */
+	if (have_spec_ctrl && vmx->spec_ctrl != 0)
+		wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+
 	vmx->__launched = vmx->loaded_vmcs->launched;
 	asm(
 		/* Store host registers */
@@ -9707,6 +9737,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 #endif
 	      );
 
+	if (have_spec_ctrl) {
+		rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+		if (vmx->spec_ctrl != 0)
+			wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+	}
+	/*
+	 * Speculative execution past the above wrmsrl might encounter
+	 * an indirect branch and use guest-controlled contents of the
+	 * indirect branch predictor; block it.
+	 */
+	asm("lfence");
+
 	/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
 	if (vmx->host_debugctlmsr)
 		update_debugctlmsr(vmx->host_debugctlmsr);
-- 
1.8.3.1


* [PATCH 4/8] kvm: vmx: Set IBPB when running a different VCPU
  2018-01-09 12:03 [PATCH v2 0/8] KVM: x86: expose CVE-2017-5715 ("Spectre variant 2") mitigations to guest Paolo Bonzini
                   ` (2 preceding siblings ...)
  2018-01-09 12:03 ` [PATCH 3/8] kvm: vmx: pass MSR_IA32_SPEC_CTRL and MSR_IA32_PRED_CMD down to the guest Paolo Bonzini
@ 2018-01-09 12:03 ` Paolo Bonzini
  2018-01-12  1:49   ` Wanpeng Li
  2018-01-09 12:03 ` [PATCH 5/8] KVM: SVM: fix comment Paolo Bonzini
                   ` (4 subsequent siblings)
  8 siblings, 1 reply; 46+ messages in thread
From: Paolo Bonzini @ 2018-01-09 12:03 UTC (permalink / raw)
  To: linux-kernel, kvm
  Cc: rkrcmar, liran.alon, jmattson, aliguori, thomas.lendacky, dwmw,
	bp, x86, Tim Chen

From: Tim Chen <tim.c.chen@linux.intel.com>

Ensure an IBPB (Indirect branch prediction barrier) before every VCPU
switch.
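
The two switch points covered below are:

	vmx_vcpu_load():     a different VMCS becomes resident on this pCPU -> IBPB
	free_loaded_vmcs():  the VMCS address may be recycled later         -> IBPB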

Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
Reviewed-by: Liran Alon <liran.alon@oracle.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/vmx.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ef603692aa98..49b4a2d61603 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2375,6 +2375,8 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
 		per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
 		vmcs_load(vmx->loaded_vmcs->vmcs);
+		if (have_spec_ctrl)
+			wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
 	}
 
 	if (!already_loaded) {
@@ -4029,6 +4031,13 @@ static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
 	free_vmcs(loaded_vmcs->vmcs);
 	loaded_vmcs->vmcs = NULL;
 	WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
+
+	/*
+	 * The VMCS could be recycled, causing a false negative in
+	 * vmx_vcpu_load; block speculative execution.
+	 */
+	if (have_spec_ctrl)
+		wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
 }
 
 static void vmx_nested_free_vmcs02(struct vcpu_vmx *vmx)
-- 
1.8.3.1


* [PATCH 5/8] KVM: SVM: fix comment
  2018-01-09 12:03 [PATCH v2 0/8] KVM: x86: expose CVE-2017-5715 ("Spectre variant 2") mitigations to guest Paolo Bonzini
                   ` (3 preceding siblings ...)
  2018-01-09 12:03 ` [PATCH 4/8] kvm: vmx: Set IBPB when running a different VCPU Paolo Bonzini
@ 2018-01-09 12:03 ` Paolo Bonzini
  2018-01-15  9:53   ` David Hildenbrand
  2018-01-09 12:03 ` [PATCH 6/8] kvm: svm: pass MSR_IA32_SPEC_CTRL and MSR_IA32_PRED_CMD down to guest Paolo Bonzini
                   ` (3 subsequent siblings)
  8 siblings, 1 reply; 46+ messages in thread
From: Paolo Bonzini @ 2018-01-09 12:03 UTC (permalink / raw)
  To: linux-kernel, kvm
  Cc: rkrcmar, liran.alon, jmattson, aliguori, thomas.lendacky, dwmw, bp, x86

If always==true, then read/write bits are cleared from
the MSR permission bitmap, thus passing-through the MSR.
Fix the comment to match reality.
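
In other words (illustrative comment only):

	/*
	 * .always == true   -> read/write intercept bits cleared
	 *                      -> guest RDMSR/WRMSR pass straight through
	 * .always == false  -> intercept bits left set until enabled later
	 *                      -> guest accesses cause a #VMEXIT
	 */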

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/svm.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 55dde3896898..31ace8d7774a 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -236,7 +236,7 @@ struct amd_svm_iommu_ir {
 
 static const struct svm_direct_access_msrs {
 	u32 index;   /* Index of the MSR */
-	bool always; /* True if intercept is always on */
+	bool always; /* True if intercept is always off */
 } direct_access_msrs[] = {
 	{ .index = MSR_STAR,				.always = true  },
 	{ .index = MSR_IA32_SYSENTER_CS,		.always = true  },
-- 
1.8.3.1


* [PATCH 6/8] kvm: svm: pass MSR_IA32_SPEC_CTRL and MSR_IA32_PRED_CMD down to guest
  2018-01-09 12:03 [PATCH v2 0/8] KVM: x86: expose CVE-2017-5715 ("Spectre variant 2") mitigations to guest Paolo Bonzini
                   ` (4 preceding siblings ...)
  2018-01-09 12:03 ` [PATCH 5/8] KVM: SVM: fix comment Paolo Bonzini
@ 2018-01-09 12:03 ` Paolo Bonzini
  2018-01-09 14:22   ` Konrad Rzeszutek Wilk
  2018-01-10 20:13   ` Tom Lendacky
  2018-01-09 12:03 ` [PATCH 7/8] x86/svm: Set IBPB when running a different VCPU Paolo Bonzini
                   ` (2 subsequent siblings)
  8 siblings, 2 replies; 46+ messages in thread
From: Paolo Bonzini @ 2018-01-09 12:03 UTC (permalink / raw)
  To: linux-kernel, kvm
  Cc: rkrcmar, liran.alon, jmattson, aliguori, thomas.lendacky, dwmw, bp, x86

Direct access to MSR_IA32_SPEC_CTRL and MSR_IA32_PRED_CMD is important
for performance.  Allow load/store of MSR_IA32_SPEC_CTRL, restore guest
IBRS on VM entry and set it to 0 on VM exit (because Linux does not use
it yet).

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/svm.c | 42 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 42 insertions(+)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 31ace8d7774a..934a21e02e03 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -183,6 +183,8 @@ struct vcpu_svm {
 		u64 gs_base;
 	} host;
 
+	u64 spec_ctrl;
+
 	u32 *msrpm;
 
 	ulong nmi_iret_rip;
@@ -248,6 +250,8 @@ struct amd_svm_iommu_ir {
 	{ .index = MSR_CSTAR,				.always = true  },
 	{ .index = MSR_SYSCALL_MASK,			.always = true  },
 #endif
+	{ .index = MSR_IA32_SPEC_CTRL,			.always = true  },
+	{ .index = MSR_IA32_PRED_CMD,			.always = true  },
 	{ .index = MSR_IA32_LASTBRANCHFROMIP,		.always = false },
 	{ .index = MSR_IA32_LASTBRANCHTOIP,		.always = false },
 	{ .index = MSR_IA32_LASTINTFROMIP,		.always = false },
@@ -283,6 +287,8 @@ struct amd_svm_iommu_ir {
 /* enable/disable Virtual GIF */
 static int vgif = true;
 module_param(vgif, int, 0444);
+ 
+static bool __read_mostly have_spec_ctrl;
 
 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa);
@@ -1135,6 +1141,17 @@ static __init int svm_hardware_setup(void)
 			pr_info("Virtual GIF supported\n");
 	}
 
+	/*
+	 * FIXME: this is only needed until SPEC_CTRL is supported
+	 * by upstream Linux in cpufeatures, then it can be replaced
+	 * with static_cpu_has.
+	 */
+	have_spec_ctrl = cpu_has_spec_ctrl();
+	if (have_spec_ctrl)
+		pr_info("kvm: SPEC_CTRL available\n");
+	else
+		pr_info("kvm: SPEC_CTRL not available\n");
+
 	return 0;
 
 err:
@@ -3599,6 +3616,9 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_VM_CR:
 		msr_info->data = svm->nested.vm_cr_msr;
 		break;
+	case MSR_IA32_SPEC_CTRL:
+		msr_info->data = svm->spec_ctrl;
+		break;
 	case MSR_IA32_UCODE_REV:
 		msr_info->data = 0x01000065;
 		break;
@@ -3754,6 +3774,9 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 	case MSR_VM_IGNNE:
 		vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
 		break;
+	case MSR_IA32_SPEC_CTRL:
+		svm->spec_ctrl = data;
+		break;
 	case MSR_IA32_APICBASE:
 		if (kvm_vcpu_apicv_active(vcpu))
 			avic_update_vapic_bar(to_svm(vcpu), data);
@@ -4942,6 +4965,13 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 
 	local_irq_enable();
 
+	/*
+	 * MSR_IA32_SPEC_CTRL is restored after the last indirect branch
+	 * before vmentry.
+	 */
+	if (have_spec_ctrl && svm->spec_ctrl != 0)
+		wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+
 	asm volatile (
 		"push %%" _ASM_BP "; \n\t"
 		"mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
@@ -5015,6 +5045,18 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 #endif
 		);
 
+	if (have_spec_ctrl) {
+		rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+		if (svm->spec_ctrl != 0)
+			wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+	}
+	/*
+	 * Speculative execution past the above wrmsrl might encounter
+	 * an indirect branch and use guest-controlled contents of the
+	 * indirect branch predictor; block it.
+	 */
+	asm("lfence");
+
 #ifdef CONFIG_X86_64
 	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
 #else
-- 
1.8.3.1


* [PATCH 7/8] x86/svm: Set IBPB when running a different VCPU
  2018-01-09 12:03 [PATCH v2 0/8] KVM: x86: expose CVE-2017-5715 ("Spectre variant 2") mitigations to guest Paolo Bonzini
                   ` (5 preceding siblings ...)
  2018-01-09 12:03 ` [PATCH 6/8] kvm: svm: pass MSR_IA32_SPEC_CTRL and MSR_IA32_PRED_CMD down to guest Paolo Bonzini
@ 2018-01-09 12:03 ` Paolo Bonzini
  2018-01-09 14:23   ` Konrad Rzeszutek Wilk
  2018-01-09 12:03 ` [PATCH 8/8] KVM: x86: add SPEC_CTRL and IBPB_SUPPORT to MSR and CPUID lists Paolo Bonzini
  2018-01-09 12:03 ` [PATCH 9/8] KVM: x86: limit MSR_IA32_SPEC_CTRL access based on CPUID availability Paolo Bonzini
  8 siblings, 1 reply; 46+ messages in thread
From: Paolo Bonzini @ 2018-01-09 12:03 UTC (permalink / raw)
  To: linux-kernel, kvm
  Cc: rkrcmar, liran.alon, jmattson, aliguori, thomas.lendacky, dwmw, bp, x86

From: Tom Lendacky <thomas.lendacky@amd.com>

Set IBPB (Indirect Branch Prediction Barrier) when the current CPU is
going to run a VCPU different from what was previously run.  Nested
virtualization uses the same VMCB for the second level guest, but the
L1 hypervisor should be using IBRS to protect itself.
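
Where the barrier does and does not fire (condensed from the hunks below):

	svm_vcpu_load():      sd->current_vmcb != svm->vmcb       -> IBPB
	svm_free_vcpu():      VMCB freed, address may be recycled -> IBPB
	nested VMRUN/#VMEXIT: same VMCB -> no IBPB, L1 protects itself with IBRS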

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/svm.c | 31 +++++++++++++++++++++++++++++++
 1 file changed, 31 insertions(+)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 934a21e02e03..97126c2bd663 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -289,6 +289,7 @@ struct amd_svm_iommu_ir {
 module_param(vgif, int, 0444);
  
 static bool __read_mostly have_spec_ctrl;
+static bool __read_mostly have_ibpb_support;
 
 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa);
@@ -540,6 +541,7 @@ struct svm_cpu_data {
 	struct kvm_ldttss_desc *tss_desc;
 
 	struct page *save_area;
+	struct vmcb *current_vmcb;
 };
 
 static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
@@ -1151,6 +1153,11 @@ static __init int svm_hardware_setup(void)
 		pr_info("kvm: SPEC_CTRL available\n");
 	else
 		pr_info("kvm: SPEC_CTRL not available\n");
+	have_ibpb_support = have_spec_ctrl || cpu_has_ibpb_support();
+	if (have_ibpb_support)
+		pr_info("kvm: IBPB_SUPPORT available\n");
+	else
+		pr_info("kvm: IBPB_SUPPORT not available\n");
 
 	return 0;
 
@@ -1725,11 +1732,19 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
 	__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
 	kvm_vcpu_uninit(vcpu);
 	kmem_cache_free(kvm_vcpu_cache, svm);
+
+	/*
+	 * The VMCB could be recycled, causing a false negative in
+	 * svm_vcpu_load; block speculative execution.
+	 */
+	if (have_ibpb_support)
+		wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
 }
 
 static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
+	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
 	int i;
 
 	if (unlikely(cpu != vcpu->cpu)) {
@@ -1758,6 +1773,12 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	if (static_cpu_has(X86_FEATURE_RDTSCP))
 		wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
 
+	if (sd->current_vmcb != svm->vmcb) {
+		sd->current_vmcb = svm->vmcb;
+		if (have_ibpb_support)
+			wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
+	}
+
 	avic_vcpu_load(vcpu, cpu);
 }
 
@@ -2798,6 +2819,11 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
 	if (!nested_vmcb)
 		return 1;
 
+	/*
+	 * No need for IBPB here, the L1 hypervisor should be running with
+	 * IBRS=1 and inserts one already when switching L2 VMs.
+	 */
+
 	/* Exit Guest-Mode */
 	leave_guest_mode(&svm->vcpu);
 	svm->nested.vmcb = 0;
@@ -3061,6 +3087,11 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
 	if (!nested_vmcb)
 		return false;
 
+	/*
+	 * No need for IBPB here, since the nested VM is less privileged.  The
+	 * L1 hypervisor inserts one already when switching L2 VMs.
+	 */
+
 	if (!nested_vmcb_checks(nested_vmcb)) {
 		nested_vmcb->control.exit_code    = SVM_EXIT_ERR;
 		nested_vmcb->control.exit_code_hi = 0;
-- 
1.8.3.1


* [PATCH 8/8] KVM: x86: add SPEC_CTRL and IBPB_SUPPORT to MSR and CPUID lists
  2018-01-09 12:03 [PATCH v2 0/8] KVM: x86: expose CVE-2017-5715 ("Spectre variant 2") mitigations to guest Paolo Bonzini
                   ` (6 preceding siblings ...)
  2018-01-09 12:03 ` [PATCH 7/8] x86/svm: Set IBPB when running a different VCPU Paolo Bonzini
@ 2018-01-09 12:03 ` Paolo Bonzini
  2018-01-13  1:25   ` Eric Wheeler
  2018-01-09 12:03 ` [PATCH 9/8] KVM: x86: limit MSR_IA32_SPEC_CTRL access based on CPUID availability Paolo Bonzini
  8 siblings, 1 reply; 46+ messages in thread
From: Paolo Bonzini @ 2018-01-09 12:03 UTC (permalink / raw)
  To: linux-kernel, kvm
  Cc: rkrcmar, liran.alon, jmattson, aliguori, thomas.lendacky, dwmw, bp, x86

Expose them to userspace, now that guests can use them.
I am not adding cpufeatures here to avoid having a kernel
that shows spec_ctrl in /proc/cpuinfo and actually has no
support whatsoever for IBRS/IBPB.  Keep the ugly special-casing
for now, and clean it up once the generic arch/x86/ code
learns about them.
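
Userspace then sees the MSR through KVM_GET_MSR_INDEX_LIST and the CPUID
bits through KVM_GET_SUPPORTED_CPUID.  A minimal detection sketch (error
handling omitted, and the 256-entry buffer is an arbitrary choice):

	#include <fcntl.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int main(void)
	{
		int kvm = open("/dev/kvm", O_RDWR);
		struct kvm_msr_list *list =
			calloc(1, sizeof(*list) + 256 * sizeof(__u32));

		list->nmsrs = 256;
		ioctl(kvm, KVM_GET_MSR_INDEX_LIST, list);
		/* on success, list->indices[] now includes MSR_IA32_SPEC_CTRL */
		return 0;
	}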

Reviewed-by: Liran Alon <liran.alon@oracle.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/cpuid.c | 24 +++++++++++++++++++++---
 arch/x86/kvm/x86.c   |  1 +
 2 files changed, 22 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 767af697c20c..5f43a5940275 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -397,7 +397,12 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 
 	/* cpuid 7.0.edx*/
 	const u32 kvm_cpuid_7_0_edx_x86_features =
-		KF(AVX512_4VNNIW) | KF(AVX512_4FMAPS);
+		KF(AVX512_4VNNIW) | KF(AVX512_4FMAPS) |
+		KF(SPEC_CTRL) | KF(STIBP);
+
+	/* cpuid 0x80000008.ebx */
+	const u32 kvm_cpuid_80000008_ebx_x86_features =
+		KF(IBPB_SUPPORT);
 
 	/* all calls to cpuid_count() should be made on the same cpu */
 	get_cpu();
@@ -483,7 +488,14 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 			if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
 				entry->ecx &= ~F(PKU);
 			entry->edx &= kvm_cpuid_7_0_edx_x86_features;
-			entry->edx &= get_scattered_cpuid_leaf(7, 0, CPUID_EDX);
+			/*
+			 * FIXME: the special casing of SPEC_CTRL and STIBP
+			 * can be removed once they become regular
+			 * cpufeatures.
+			 */
+			entry->edx &= (
+				get_scattered_cpuid_leaf(7, 0, CPUID_EDX) |
+				KF(SPEC_CTRL) | KF(STIBP));
 		} else {
 			entry->ebx = 0;
 			entry->ecx = 0;
@@ -651,7 +663,13 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 		if (!g_phys_as)
 			g_phys_as = phys_as;
 		entry->eax = g_phys_as | (virt_as << 8);
-		entry->ebx = entry->edx = 0;
+		/*
+		 * FIXME: mask against cpufeatures, with
+		 * get_scattered_cpuid_leaf(0x80000008, 0, CPUID_EBX),
+		 * once IBPB_SUPPORT becomes a regular cpufeature.
+		 */
+		entry->ebx &= kvm_cpuid_80000008_ebx_x86_features;
+		entry->edx = 0;
 		break;
 	}
 	case 0x80000019:
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index daa1918031df..4abb37d9f4d8 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1032,6 +1032,7 @@ unsigned int kvm_get_pt_addr_cnt(void)
 	MSR_IA32_RTIT_ADDR1_A, MSR_IA32_RTIT_ADDR1_B,
 	MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B,
 	MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B,
+	MSR_IA32_SPEC_CTRL,
 };
 
 static unsigned num_msrs_to_save;
-- 
1.8.3.1


* [PATCH 9/8] KVM: x86: limit MSR_IA32_SPEC_CTRL access based on CPUID availability
  2018-01-09 12:03 [PATCH v2 0/8] KVM: x86: expose CVE-2017-5715 ("Spectre variant 2") mitigations to guest Paolo Bonzini
                   ` (7 preceding siblings ...)
  2018-01-09 12:03 ` [PATCH 8/8] KVM: x86: add SPEC_CTRL and IBPB_SUPPORT to MSR and CPUID lists Paolo Bonzini
@ 2018-01-09 12:03 ` Paolo Bonzini
  2018-01-16  0:55   ` Eric Wheeler
  2018-01-30 13:21   ` [9/8] " Mihai Carabas
  8 siblings, 2 replies; 46+ messages in thread
From: Paolo Bonzini @ 2018-01-09 12:03 UTC (permalink / raw)
  To: linux-kernel, kvm
  Cc: rkrcmar, liran.alon, jmattson, aliguori, thomas.lendacky, dwmw, bp, x86

MSR_IA32_SPEC_CTRL is not available unless CPUID(EAX=7,ECX=0).EDX[26]
is 1.  Check that against host CPUID or guest CPUID, respectively for
host-initiated and guest-initiated accesses.
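
The resulting policy, spelled out (comment only):

	/*
	 * host CPUID lacks SPEC_CTRL           -> reject: KVM cannot save/restore it
	 * guest access, guest CPUID lacks bit  -> reject: #GP injected into the guest
	 * host-initiated (ioctl) access        -> allowed whenever the host has it
	 */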

Suggested-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
	This is for after X86_FEATURE_SPEC_CTRL is added to Linux, but
	I still wanted to ack Jim's improvement.

 arch/x86/kvm/svm.c | 8 ++++++++
 arch/x86/kvm/vmx.c | 8 ++++++++
 2 files changed, 16 insertions(+)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 97126c2bd663..3a646580d7c5 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3648,6 +3648,10 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		msr_info->data = svm->nested.vm_cr_msr;
 		break;
 	case MSR_IA32_SPEC_CTRL:
+		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL) ||
+		    (!msr_info->host_initiated &&
+		     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)))
+			return 1;
 		msr_info->data = svm->spec_ctrl;
 		break;
 	case MSR_IA32_UCODE_REV:
@@ -3806,6 +3810,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 		vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
 		break;
 	case MSR_IA32_SPEC_CTRL:
+		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL) ||
+		    (!msr->host_initiated &&
+		     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)))
+			return 1;
 		svm->spec_ctrl = data;
 		break;
 	case MSR_IA32_APICBASE:
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 49b4a2d61603..42bc7ee293e4 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3368,6 +3368,10 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		msr_info->data = guest_read_tsc(vcpu);
 		break;
 	case MSR_IA32_SPEC_CTRL:
+		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL) ||
+		    (!msr_info->host_initiated &&
+		     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)))
+			return 1;
 		msr_info->data = to_vmx(vcpu)->spec_ctrl;
 		break;
 	case MSR_IA32_SYSENTER_CS:
@@ -3510,6 +3514,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		kvm_write_tsc(vcpu, msr_info);
 		break;
 	case MSR_IA32_SPEC_CTRL:
+		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL) ||
+		    (!msr_info->host_initiated &&
+		     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)))
+			return 1;
 		to_vmx(vcpu)->spec_ctrl = data;
 		break;
 	case MSR_IA32_CR_PAT:
-- 
1.8.3.1


* Re: [PATCH 6/8] kvm: svm: pass MSR_IA32_SPEC_CTRL and MSR_IA32_PRED_CMD down to guest
  2018-01-09 12:03 ` [PATCH 6/8] kvm: svm: pass MSR_IA32_SPEC_CTRL and MSR_IA32_PRED_CMD down to guest Paolo Bonzini
@ 2018-01-09 14:22   ` Konrad Rzeszutek Wilk
  2018-01-09 16:05     ` Paolo Bonzini
  2018-01-09 16:08     ` Paolo Bonzini
  2018-01-10 20:13   ` Tom Lendacky
  1 sibling, 2 replies; 46+ messages in thread
From: Konrad Rzeszutek Wilk @ 2018-01-09 14:22 UTC (permalink / raw)
  To: Paolo Bonzini
  Cc: linux-kernel, kvm, rkrcmar, liran.alon, jmattson, aliguori,
	thomas.lendacky, dwmw, bp, x86

On Tue, Jan 09, 2018 at 01:03:08PM +0100, Paolo Bonzini wrote:
> Direct access to MSR_IA32_SPEC_CTRL and MSR_IA32_PRED_CMD is important
> for performance.  Allow load/store of MSR_IA32_SPEC_CTRL, restore guest
> IBRS on VM entry and set it to 0 on VM exit (because Linux does not use
> it yet).
> 
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  arch/x86/kvm/svm.c | 42 ++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 42 insertions(+)
> 
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index 31ace8d7774a..934a21e02e03 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -183,6 +183,8 @@ struct vcpu_svm {
>  		u64 gs_base;
>  	} host;
>  
> +	u64 spec_ctrl;
> +
>  	u32 *msrpm;
>  
>  	ulong nmi_iret_rip;
> @@ -248,6 +250,8 @@ struct amd_svm_iommu_ir {
>  	{ .index = MSR_CSTAR,				.always = true  },
>  	{ .index = MSR_SYSCALL_MASK,			.always = true  },
>  #endif
> +	{ .index = MSR_IA32_SPEC_CTRL,			.always = true  },
> +	{ .index = MSR_IA32_PRED_CMD,			.always = true  },
>  	{ .index = MSR_IA32_LASTBRANCHFROMIP,		.always = false },
>  	{ .index = MSR_IA32_LASTBRANCHTOIP,		.always = false },
>  	{ .index = MSR_IA32_LASTINTFROMIP,		.always = false },
> @@ -283,6 +287,8 @@ struct amd_svm_iommu_ir {
>  /* enable/disable Virtual GIF */
>  static int vgif = true;
>  module_param(vgif, int, 0444);
> + 
> +static bool __read_mostly have_spec_ctrl;
>  
>  static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
>  static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa);
> @@ -1135,6 +1141,17 @@ static __init int svm_hardware_setup(void)
>  			pr_info("Virtual GIF supported\n");
>  	}
>  
> +	/*
> +	 * FIXME: this is only needed until SPEC_CTRL is supported
> +	 * by upstream Linux in cpufeatures, then it can be replaced
> +	 * with static_cpu_has.
> +	 */
> +	have_spec_ctrl = cpu_has_spec_ctrl();
> +	if (have_spec_ctrl)
> +		pr_info("kvm: SPEC_CTRL available\n");
> +	else
> +		pr_info("kvm: SPEC_CTRL not available\n");

Perhaps just

	pr_info("kvm: SPEC_CTRL %s available\n", have_spec_ctrl ? "" : "not");

> +
>  	return 0;
>  
>  err:
> @@ -3599,6 +3616,9 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  	case MSR_VM_CR:
>  		msr_info->data = svm->nested.vm_cr_msr;
>  		break;
> +	case MSR_IA32_SPEC_CTRL:
> +		msr_info->data = svm->spec_ctrl;
> +		break;
>  	case MSR_IA32_UCODE_REV:
>  		msr_info->data = 0x01000065;
>  		break;
> @@ -3754,6 +3774,9 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
>  	case MSR_VM_IGNNE:
>  		vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
>  		break;
> +	case MSR_IA32_SPEC_CTRL:
> +		svm->spec_ctrl = data;
> +		break;
>  	case MSR_IA32_APICBASE:
>  		if (kvm_vcpu_apicv_active(vcpu))
>  			avic_update_vapic_bar(to_svm(vcpu), data);
> @@ -4942,6 +4965,13 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
>  
>  	local_irq_enable();
>  
> +	/*
> +	 * MSR_IA32_SPEC_CTRL is restored after the last indirect branch
> +	 * before vmentry.
> +	 */
> +	if (have_spec_ctrl && svm->spec_ctrl != 0)
> +		wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
> +
>  	asm volatile (
>  		"push %%" _ASM_BP "; \n\t"
>  		"mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
> @@ -5015,6 +5045,18 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
>  #endif
>  		);
>  
> +	if (have_spec_ctrl) {
> +		rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
> +		if (svm->spec_ctrl != 0)

Perhaps just :

	if (svm->spec_ctrl) ?

And above too?
> +			wrmsrl(MSR_IA32_SPEC_CTRL, 0);
> +	}
> +	/*
> +	 * Speculative execution past the above wrmsrl might encounter
> +	 * an indirect branch and use guest-controlled contents of the
> +	 * indirect branch predictor; block it.
> +	 */
> +	asm("lfence");

Don't you want this to be part of the if () .. else part?

Meaning:

	if (have_spec_ctrl && svm->spec_ctrl)
		wrmsrl(MSR_IA32_SPEC_CTRL, 0);
	else
		asm("lfence");

But .. I am missing something - AMD doesn't expose 0x48. They expose only 0x49.

That is, only the IBPB is needed on AMD? (I haven't actually seen any official
docs from AMD).

> +
>  #ifdef CONFIG_X86_64
>  	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
>  #else
> -- 
> 1.8.3.1
> 
> 


* Re: [PATCH 7/8] x86/svm: Set IBPB when running a different VCPU
  2018-01-09 12:03 ` [PATCH 7/8] x86/svm: Set IBPB when running a different VCPU Paolo Bonzini
@ 2018-01-09 14:23   ` Konrad Rzeszutek Wilk
  0 siblings, 0 replies; 46+ messages in thread
From: Konrad Rzeszutek Wilk @ 2018-01-09 14:23 UTC (permalink / raw)
  To: Paolo Bonzini
  Cc: linux-kernel, kvm, rkrcmar, liran.alon, jmattson, aliguori,
	thomas.lendacky, dwmw, bp, x86

On Tue, Jan 09, 2018 at 01:03:09PM +0100, Paolo Bonzini wrote:
> From: Tom Lendacky <thomas.lendacky@amd.com>
> 
> Set IBPB (Indirect Branch Prediction Barrier) when the current CPU is
> going to run a VCPU different from what was previously run.  Nested
> virtualization uses the same VMCB for the second level guest, but the
> L1 hypervisor should be using IBRS to protect itself.
> 
> Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  arch/x86/kvm/svm.c | 31 +++++++++++++++++++++++++++++++
>  1 file changed, 31 insertions(+)
> 
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index 934a21e02e03..97126c2bd663 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -289,6 +289,7 @@ struct amd_svm_iommu_ir {
>  module_param(vgif, int, 0444);
>   
>  static bool __read_mostly have_spec_ctrl;
> +static bool __read_mostly have_ibpb_support;
>  
>  static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
>  static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa);
> @@ -540,6 +541,7 @@ struct svm_cpu_data {
>  	struct kvm_ldttss_desc *tss_desc;
>  
>  	struct page *save_area;
> +	struct vmcb *current_vmcb;
>  };
>  
>  static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
> @@ -1151,6 +1153,11 @@ static __init int svm_hardware_setup(void)
>  		pr_info("kvm: SPEC_CTRL available\n");
>  	else
>  		pr_info("kvm: SPEC_CTRL not available\n");
> +	have_ibpb_support = have_spec_ctrl || cpu_has_ibpb_support();
> +	if (have_ibpb_support)
> +		pr_info("kvm: IBPB_SUPPORT available\n");
> +	else
> +		pr_info("kvm: IBPB_SUPPORT not available\n");

pr_info("kvm: IBPB_SUPPORT %s available" .. you know :-)


* Re: [PATCH 6/8] kvm: svm: pass MSR_IA32_SPEC_CTRL and MSR_IA32_PRED_CMD down to guest
  2018-01-09 14:22   ` Konrad Rzeszutek Wilk
@ 2018-01-09 16:05     ` Paolo Bonzini
  2018-01-09 16:08     ` Paolo Bonzini
  1 sibling, 0 replies; 46+ messages in thread
From: Paolo Bonzini @ 2018-01-09 16:05 UTC (permalink / raw)
  To: Konrad Rzeszutek Wilk
  Cc: linux-kernel, kvm, rkrcmar, liran.alon, jmattson, aliguori,
	thomas.lendacky, dwmw, bp, x86

On 09/01/2018 15:22, Konrad Rzeszutek Wilk wrote:
>> +	have_spec_ctrl = cpu_has_spec_ctrl();
>> +	if (have_spec_ctrl)
>> +		pr_info("kvm: SPEC_CTRL available\n");
>> +	else
>> +		pr_info("kvm: SPEC_CTRL not available\n");
> Perhaps just
> 
> 	pr_info("kvm: SPEC_CTRL %s available\n", have_spec_ctrl ? "" : "not");
> 

I don't expect any of these FIXMEs to be ever committed. :)

Paolo


* Re: [PATCH 6/8] kvm: svm: pass MSR_IA32_SPEC_CTRL and MSR_IA32_PRED_CMD down to guest
  2018-01-09 14:22   ` Konrad Rzeszutek Wilk
  2018-01-09 16:05     ` Paolo Bonzini
@ 2018-01-09 16:08     ` Paolo Bonzini
  2018-01-11 10:45       ` Wanpeng Li
  1 sibling, 1 reply; 46+ messages in thread
From: Paolo Bonzini @ 2018-01-09 16:08 UTC (permalink / raw)
  To: Konrad Rzeszutek Wilk
  Cc: linux-kernel, kvm, rkrcmar, liran.alon, jmattson, aliguori,
	thomas.lendacky, dwmw, bp, x86

Oops, I missed these.

On 09/01/2018 15:22, Konrad Rzeszutek Wilk wrote:
>> +	if (have_spec_ctrl) {
>> +		rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
>> +		if (svm->spec_ctrl != 0)
> Perhaps just :
> 
> 	if (svm->spec_ctrl) ?
> 
> And above too?

These will become != SPEC_CTRL_ENABLE_IBRS or something like that.

>> +			wrmsrl(MSR_IA32_SPEC_CTRL, 0);
>> +	}
>> +	/*
>> +	 * Speculative execution past the above wrmsrl might encounter
>> +	 * an indirect branch and use guest-controlled contents of the
>> +	 * indirect branch predictor; block it.
>> +	 */
>> +	asm("lfence");
> Don't you want this to be part of the if () .. else part?

Not right now, because the processor could speculate that have_spec_ctrl
== 0 and skip the wrmsrl.  After it becomes a static_cpu_has, it could
move inside, but only if the kernel is compiled with static keys enabled.
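
That is (sketch):

	if (have_spec_ctrl) {			/* may be predicted false... */
		rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
		if (vmx->spec_ctrl != 0)
			wrmsrl(MSR_IA32_SPEC_CTRL, 0);
	}
	asm("lfence");				/* ...so the fence stays unconditional */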

> Meaning:
> 
> 	if (have_spec_ctrl && svm->spec_ctrl)
> 		wrmsrl(MSR_IA32_SPEC_CTRL, 0);
> 	else
> 		asm("lfence");
> 
> But .. I am missing something - AMD doesn't expose 0x48. They expose only 0x49.
> 
> That is, only the IBPB is needed on AMD? (I haven't actually seen any official
> docs from AMD).

AMD is not exposing 0x48 yet, but they plan to based on my information
from a few weeks ago.

Paolo


* Re: [PATCH 6/8] kvm: svm: pass MSR_IA32_SPEC_CTRL and MSR_IA32_PRED_CMD down to guest
  2018-01-09 12:03 ` [PATCH 6/8] kvm: svm: pass MSR_IA32_SPEC_CTRL and MSR_IA32_PRED_CMD down to guest Paolo Bonzini
  2018-01-09 14:22   ` Konrad Rzeszutek Wilk
@ 2018-01-10 20:13   ` Tom Lendacky
  2018-01-11 10:33     ` Paolo Bonzini
  1 sibling, 1 reply; 46+ messages in thread
From: Tom Lendacky @ 2018-01-10 20:13 UTC (permalink / raw)
  To: Paolo Bonzini, linux-kernel, kvm
  Cc: rkrcmar, liran.alon, jmattson, aliguori, dwmw, bp, x86

On 1/9/2018 6:03 AM, Paolo Bonzini wrote:
> Direct access to MSR_IA32_SPEC_CTRL and MSR_IA32_PRED_CMD is important
> for performance.  Allow load/store of MSR_IA32_SPEC_CTRL, restore guest
> IBRS on VM entry and set it to 0 on VM exit (because Linux does not use
> it yet).
> 
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  arch/x86/kvm/svm.c | 42 ++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 42 insertions(+)
> 
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index 31ace8d7774a..934a21e02e03 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c

...

> @@ -5015,6 +5045,18 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
>  #endif
>  		);
>  
> +	if (have_spec_ctrl) {
> +		rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
> +		if (svm->spec_ctrl != 0)
> +			wrmsrl(MSR_IA32_SPEC_CTRL, 0);
> +	}
> +	/*
> +	 * Speculative execution past the above wrmsrl might encounter
> +	 * an indirect branch and use guest-controlled contents of the
> +	 * indirect branch predictor; block it.
> +	 */
> +	asm("lfence");

This will end up needing to be an alternative macro based on the
LFENCE_RDTSC or MFENCE_RDTSC features [1].  You'll probably just want to
use the speculation barrier macro that ends up being defined to control
the speculation here.

Thanks,
Tom

[1] https://marc.info/?l=linux-kernel&m=151545930207815&w=2

> +
>  #ifdef CONFIG_X86_64
>  	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
>  #else
> 


* Re: [PATCH 6/8] kvm: svm: pass MSR_IA32_SPEC_CTRL and MSR_IA32_PRED_CMD down to guest
  2018-01-10 20:13   ` Tom Lendacky
@ 2018-01-11 10:33     ` Paolo Bonzini
  0 siblings, 0 replies; 46+ messages in thread
From: Paolo Bonzini @ 2018-01-11 10:33 UTC (permalink / raw)
  To: Tom Lendacky, linux-kernel, kvm
  Cc: rkrcmar, liran.alon, jmattson, aliguori, dwmw, bp, x86

On 10/01/2018 21:13, Tom Lendacky wrote:
> On 1/9/2018 6:03 AM, Paolo Bonzini wrote:
>> Direct access to MSR_IA32_SPEC_CTRL and MSR_IA32_PRED_CMD is important
>> for performance.  Allow load/store of MSR_IA32_SPEC_CTRL, restore guest
>> IBRS on VM entry and set it to 0 on VM exit (because Linux does not use
>> it yet).
>>
>> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
>> ---
>>  arch/x86/kvm/svm.c | 42 ++++++++++++++++++++++++++++++++++++++++++
>>  1 file changed, 42 insertions(+)
>>
>> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
>> index 31ace8d7774a..934a21e02e03 100644
>> --- a/arch/x86/kvm/svm.c
>> +++ b/arch/x86/kvm/svm.c
> 
> ...
> 
>> @@ -5015,6 +5045,18 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
>>  #endif
>>  		);
>>  
>> +	if (have_spec_ctrl) {
>> +		rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
>> +		if (svm->spec_ctrl != 0)
>> +			wrmsrl(MSR_IA32_SPEC_CTRL, 0);
>> +	}
>> +	/*
>> +	 * Speculative execution past the above wrmsrl might encounter
>> +	 * an indirect branch and use guest-controlled contents of the
>> +	 * indirect branch predictor; block it.
>> +	 */
>> +	asm("lfence");
> 
> This will end up needing to be an alternative macro based on the
> LFENCE_RDTSC or MFENCE_RDTSC features [1].  You'll probably just want to
> use the speculation barrier macro that ends up being defined to control
> the speculation here.

Yes.  Though, is there any processor that has spec_ctrl (currently none
on AMD) but needs a full fence?

Paolo

> Thanks,
> Tom
> 
> [1] https://marc.info/?l=linux-kernel&m=151545930207815&w=2
> 
>> +
>>  #ifdef CONFIG_X86_64
>>  	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
>>  #else
>>


* Re: [PATCH 6/8] kvm: svm: pass MSR_IA32_SPEC_CTRL and MSR_IA32_PRED_CMD down to guest
  2018-01-09 16:08     ` Paolo Bonzini
@ 2018-01-11 10:45       ` Wanpeng Li
  0 siblings, 0 replies; 46+ messages in thread
From: Wanpeng Li @ 2018-01-11 10:45 UTC (permalink / raw)
  To: Paolo Bonzini
  Cc: Konrad Rzeszutek Wilk, linux-kernel, kvm, Radim Krcmar,
	Liran Alon, Jim Mattson, Anthony Liguori, thomas.lendacky, dwmw,
	Borislav Petkov, the arch/x86 maintainers

2018-01-10 0:08 GMT+08:00 Paolo Bonzini <pbonzini@redhat.com>:
> Oops, I missed these.
>
> On 09/01/2018 15:22, Konrad Rzeszutek Wilk wrote:
>>> +    if (have_spec_ctrl) {
>>> +            rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
>>> +            if (svm->spec_ctrl != 0)
>> Perhaps just :
>>
>>       if (svm->spec_ctrl) ?
>>
>> And above too?
>
> These will become != SPEC_CTRL_ENABLE_IBRS or something like that.
>
>>> +                    wrmsrl(MSR_IA32_SPEC_CTRL, 0);
>>> +    }
>>> +    /*
>>> +     * Speculative execution past the above wrmsrl might encounter
>>> +     * an indirect branch and use guest-controlled contents of the
>>> +     * indirect branch predictor; block it.
>>> +     */
>>> +    asm("lfence");
>> Don't you want this to be part of the if () .. else part?
>
> Not right now, because the processor could speculate that have_spec_ctrl
> == 0 and skip the wrmsrl.  After it becomes a static_cpu_has, it could
> move inside, but only if the kernel is compiled with static keys enabled.
>
>> Meaning:
>>
>>       if (have_spec_ctrl && svm->spec_ctrl)
>>               wrmsrl(MSR_IA32_SPEC_CTRL, 0);
>>       else
>>               asm("lfence");
>>
>> But .. I am missing something - AMD doesn't expose 0x48. They expose only 0x49.
>>
>> That is, only the IBPB is needed on AMD? (I haven't actually seen any official
>> docs from AMD).
>
> AMD is not exposing 0x48 yet, but they plan to based on my information
> from a few weeks ago.

Haha, interesting, they officially announced that there is no issue for
variant 2. http://www.amd.com/en/corporate/speculative-execution

Regards,
Wanpeng Li


* Re: [PATCH 4/8] kvm: vmx: Set IBPB when running a different VCPU
  2018-01-09 12:03 ` [PATCH 4/8] kvm: vmx: Set IBPB when running a different VCPU Paolo Bonzini
@ 2018-01-12  1:49   ` Wanpeng Li
  2018-01-12 17:03     ` Jim Mattson
  0 siblings, 1 reply; 46+ messages in thread
From: Wanpeng Li @ 2018-01-12  1:49 UTC (permalink / raw)
  To: Paolo Bonzini
  Cc: linux-kernel, kvm, Radim Krcmar, Liran Alon, Jim Mattson,
	Anthony Liguori, thomas.lendacky, dwmw, Borislav Petkov,
	the arch/x86 maintainers, Tim Chen

2018-01-09 20:03 GMT+08:00 Paolo Bonzini <pbonzini@redhat.com>:
>
>         if (!already_loaded) {
> @@ -4029,6 +4031,13 @@ static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
>         free_vmcs(loaded_vmcs->vmcs);
>         loaded_vmcs->vmcs = NULL;
>         WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
> +
> +       /*
> +        * The VMCS could be recycled, causing a false negative in
> +        * vmx_vcpu_load; block speculative execution.
> +        */
> +       if (have_spec_ctrl)
> +               wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
>  }

Intel guys told us the recycle is about the address of vmcs, not the
content. Could you explain more why it matters?

Regards,
Wanpeng Li


* Re: [PATCH 4/8] kvm: vmx: Set IBPB when running a different VCPU
  2018-01-12  1:49   ` Wanpeng Li
@ 2018-01-12 17:03     ` Jim Mattson
  2018-01-13  9:29       ` Woodhouse, David
  0 siblings, 1 reply; 46+ messages in thread
From: Jim Mattson @ 2018-01-12 17:03 UTC (permalink / raw)
  To: Wanpeng Li
  Cc: Paolo Bonzini, LKML, kvm, Radim Krcmar, Liran Alon,
	Anthony Liguori, Tom Lendacky, dwmw, Borislav Petkov,
	the arch/x86 maintainers, Tim Chen

The point behind the IBPB in vmx_vcpu_load is to prevent one VCPU from
steering the speculative execution of the next. If the VMCS address is
recycled, vmx_vcpu_load doesn't realize that the VCPUs are different,
and so it won't issue the IBPB.
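
A sketch of the false negative being closed:

	/*
	 * 1. VCPU A's VMCS at address X is freed (free_loaded_vmcs)
	 * 2. VCPU B's new VMCS happens to be allocated at the same address X
	 * 3. vmx_vcpu_load(B): per_cpu(current_vmcs, cpu) == X, so the
	 *    "different VMCS" check misses the VCPU switch -> no IBPB
	 *
	 * hence the unconditional barrier in free_loaded_vmcs()
	 */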

On Thu, Jan 11, 2018 at 5:49 PM, Wanpeng Li <kernellwp@gmail.com> wrote:
> 2018-01-09 20:03 GMT+08:00 Paolo Bonzini <pbonzini@redhat.com>:
>>
>>         if (!already_loaded) {
>> @@ -4029,6 +4031,13 @@ static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
>>         free_vmcs(loaded_vmcs->vmcs);
>>         loaded_vmcs->vmcs = NULL;
>>         WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
>> +
>> +       /*
>> +        * The VMCS could be recycled, causing a false negative in
>> +        * vmx_vcpu_load; block speculative execution.
>> +        */
>> +       if (have_spec_ctrl)
>> +               wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
>>  }
>
> The Intel guys told us the recycling is about the address of the VMCS,
> not its contents. Could you explain more about why it matters?
>
> Regards,
> Wanpeng Li

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [PATCH 8/8] KVM: x86: add SPEC_CTRL and IBPB_SUPPORT to MSR and CPUID lists
  2018-01-09 12:03 ` [PATCH 8/8] KVM: x86: add SPEC_CTRL and IBPB_SUPPORT to MSR and CPUID lists Paolo Bonzini
@ 2018-01-13  1:25   ` Eric Wheeler
  2018-01-13  8:00     ` Paolo Bonzini
  0 siblings, 1 reply; 46+ messages in thread
From: Eric Wheeler @ 2018-01-13  1:25 UTC (permalink / raw)
  To: Paolo Bonzini
  Cc: linux-kernel, kvm, rkrcmar, liran.alon, jmattson, aliguori,
	thomas.lendacky, dwmw, bp, x86

On Tue, 9 Jan 2018, Paolo Bonzini wrote:

> Expose them to userspace, now that guests can use them.
> I am not adding cpufeatures here to avoid having a kernel
> that shows spec_ctrl in /proc/cpuinfo and actually has no
> support whatsoever for IBRS/IBPB.  Keep the ugly special-casing
> for now, and clean it up once the generic arch/x86/ code
> learns about them.
> 
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index daa1918031df..4abb37d9f4d8 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -1032,6 +1032,7 @@ unsigned int kvm_get_pt_addr_cnt(void)
>  	MSR_IA32_RTIT_ADDR1_A, MSR_IA32_RTIT_ADDR1_B,
>  	MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B,
>  	MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B,
> +	MSR_IA32_SPEC_CTRL,
>  };

Hi Paolo,

Thank you for posting this!

I am trying to merge this into 4.14, which does not have
kvm_get_pt_addr_cnt. The rest of the patch applies, but this hunk gets
rejected. Is this a necessary part of the commit?

patching file arch/x86/kvm/cpuid.c
Hunk #1 succeeded at 389 (offset -8 lines).
Hunk #2 succeeded at 479 (offset -9 lines).
Hunk #3 succeeded at 636 (offset -27 lines).
patching file arch/x86/kvm/x86.c
Hunk #1 FAILED at 1032.
1 out of 1 hunk FAILED -- saving rejects to file arch/x86/kvm/x86.c.rej

]# cat arch/x86/kvm/x86.c.rej
--- arch/x86/kvm/x86.c
+++ arch/x86/kvm/x86.c
@@ -1032,6 +1032,7 @@
 	MSR_IA32_RTIT_ADDR1_A, MSR_IA32_RTIT_ADDR1_B,
 	MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B,
 	MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B,
+	MSR_IA32_SPEC_CTRL,
 };
 
 static unsigned num_msrs_to_save;


Thank you for your help!

--
Eric Wheeler


>  
>  static unsigned num_msrs_to_save;
> -- 
> 1.8.3.1
> 
> 
> 

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [PATCH 8/8] KVM: x86: add SPEC_CTRL and IBPB_SUPPORT to MSR and CPUID lists
  2018-01-13  1:25   ` Eric Wheeler
@ 2018-01-13  8:00     ` Paolo Bonzini
  2018-01-16  0:40       ` Eric Wheeler
  0 siblings, 1 reply; 46+ messages in thread
From: Paolo Bonzini @ 2018-01-13  8:00 UTC (permalink / raw)
  To: Eric Wheeler
  Cc: linux-kernel, kvm, rkrcmar, liran.alon, jmattson, aliguori,
	thomas.lendacky, dwmw, bp, x86

Just add the new MSR at the end of the array.

Paolo

----- Eric Wheeler <kvm@lists.ewheeler.net> ha scritto:
> On Tue, 9 Jan 2018, Paolo Bonzini wrote:
> 
> > Expose them to userspace, now that guests can use them.
> > I am not adding cpufeatures here to avoid having a kernel
> > that shows spec_ctrl in /proc/cpuinfo and actually has no
> > support whatsoever for IBRS/IBPB.  Keep the ugly special-casing
> > for now, and clean it up once the generic arch/x86/ code
> > learns about them.
> > 
> > diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> > index daa1918031df..4abb37d9f4d8 100644
> > --- a/arch/x86/kvm/x86.c
> > +++ b/arch/x86/kvm/x86.c
> > @@ -1032,6 +1032,7 @@ unsigned int kvm_get_pt_addr_cnt(void)
> >  	MSR_IA32_RTIT_ADDR1_A, MSR_IA32_RTIT_ADDR1_B,
> >  	MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B,
> >  	MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B,
> > +	MSR_IA32_SPEC_CTRL,
> >  };
> 
> Hi Paolo,
> 
> Thank you for posting this!
> 
> I am trying to merge this into 4.14, which does not have
> kvm_get_pt_addr_cnt. The rest of the patch applies, but this hunk gets
> rejected. Is this a necessary part of the commit?
> 
> patching file arch/x86/kvm/cpuid.c
> Hunk #1 succeeded at 389 (offset -8 lines).
> Hunk #2 succeeded at 479 (offset -9 lines).
> Hunk #3 succeeded at 636 (offset -27 lines).
> patching file arch/x86/kvm/x86.c
> Hunk #1 FAILED at 1032.
> 1 out of 1 hunk FAILED -- saving rejects to file arch/x86/kvm/x86.c.rej
> 
> ]# cat arch/x86/kvm/x86.c.rej
> --- arch/x86/kvm/x86.c
> +++ arch/x86/kvm/x86.c
> @@ -1032,6 +1032,7 @@
>  	MSR_IA32_RTIT_ADDR1_A, MSR_IA32_RTIT_ADDR1_B,
>  	MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B,
>  	MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B,
> +	MSR_IA32_SPEC_CTRL,
>  };
>  
>  static unsigned num_msrs_to_save;
> 
> 
> Thank you for your help!
> 
> --
> Eric Wheeler
> 
> 
> >  
> >  static unsigned num_msrs_to_save;
> > -- 
> > 1.8.3.1
> > 
> > 
> > 

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [PATCH 4/8] kvm: vmx: Set IBPB when running a different VCPU
  2018-01-12 17:03     ` Jim Mattson
@ 2018-01-13  9:29       ` Woodhouse, David
  2018-01-15  9:21         ` Paolo Bonzini
  0 siblings, 1 reply; 46+ messages in thread
From: Woodhouse, David @ 2018-01-13  9:29 UTC (permalink / raw)
  To: Jim Mattson, Wanpeng Li
  Cc: Paolo Bonzini, LKML, kvm, Radim Krcmar, Liran Alon,
	Anthony Liguori, Tom Lendacky, Borislav Petkov,
	the arch/x86 maintainers, Tim Chen


On Fri, 2018-01-12 at 09:03 -0800, Jim Mattson wrote:
> The point behind the IBPB in vmx_vcpu_load is to prevent one VCPU from
> steering the speculative execution of the next. If the VMCS address is
> recycled, vmx_vcpu_load doesn't realize that the VCPUs are different,
> and so it won't issue the IBPB.

I don't understand the sequence of events that could lead to this.

If the VMCS is freed, surely per_cpu(current_vmcs, cpu) has to be
cleared? If the VMCS is freed while it's still *active* on a CPU,
that's a bug, surely? And if that CPU is later offlined and clears the
VMCS, it's going to scribble on freed (and potentially re-used) memory.

So vmx_vcpu_load() *will* realise that it's different, won't it?

>> +       if (have_spec_ctrl)
>> +               wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);

Also, I think the same condition applies to the conditional branches
over the IBPB-frobbing, as it does to setting IBRS. You can eschew the
'else lfence' only if you put in a comment showing that you've proved
it's safe. Many of the other bits like this are being done with
alternatives, which avoids that concern completely.

But really, I don't like this series much. Don't say "let's do this
until upstream supports...". Just fix it up properly, and add the
generic X86_FEATURE_IBPB bit and use it. We have *too* many separate
tiny patch sets, and we need to be getting our act together and putting
it all in one.



^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [PATCH 3/8] kvm: vmx: pass MSR_IA32_SPEC_CTRL and MSR_IA32_PRED_CMD down to the guest
  2018-01-09 12:03 ` [PATCH 3/8] kvm: vmx: pass MSR_IA32_SPEC_CTRL and MSR_IA32_PRED_CMD down to the guest Paolo Bonzini
@ 2018-01-13 10:16   ` Longpeng (Mike)
  2018-01-15  9:23     ` Paolo Bonzini
       [not found]   ` <1515839272.22302.520.camel@amazon.co.uk>
  1 sibling, 1 reply; 46+ messages in thread
From: Longpeng (Mike) @ 2018-01-13 10:16 UTC (permalink / raw)
  To: Paolo Bonzini
  Cc: linux-kernel, kvm, rkrcmar, liran.alon, jmattson, aliguori,
	thomas.lendacky, dwmw, bp, x86, Gonglei



On 2018/1/9 20:03, Paolo Bonzini wrote:

> Direct access to MSR_IA32_SPEC_CTRL and MSR_IA32_PRED_CMD is important
> for performance.  Allow load/store of MSR_IA32_SPEC_CTRL, restore guest
> IBRS on VM entry and set it to 0 on VM exit (because Linux does not use
> it yet).
> 
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  arch/x86/kvm/vmx.c | 42 ++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 42 insertions(+)
> 
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 669f5f74857d..ef603692aa98 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -120,6 +120,8 @@
>  module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
>  #endif
>  
> +static bool __read_mostly have_spec_ctrl;
> +
>  #define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
>  #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST (X86_CR0_WP | X86_CR0_NE)
>  #define KVM_VM_CR0_ALWAYS_ON						\
> @@ -609,6 +611,8 @@ struct vcpu_vmx {
>  	u64 		      msr_host_kernel_gs_base;
>  	u64 		      msr_guest_kernel_gs_base;
>  #endif
> +	u64		      spec_ctrl;
> +
>  	u32 vm_entry_controls_shadow;
>  	u32 vm_exit_controls_shadow;
>  	u32 secondary_exec_control;
> @@ -3361,6 +3365,9 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  	case MSR_IA32_TSC:
>  		msr_info->data = guest_read_tsc(vcpu);
>  		break;
> +	case MSR_IA32_SPEC_CTRL:
> +		msr_info->data = to_vmx(vcpu)->spec_ctrl;
> +		break;
>  	case MSR_IA32_SYSENTER_CS:
>  		msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
>  		break;
> @@ -3500,6 +3507,9 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  	case MSR_IA32_TSC:
>  		kvm_write_tsc(vcpu, msr_info);
>  		break;
> +	case MSR_IA32_SPEC_CTRL:
> +		to_vmx(vcpu)->spec_ctrl = data;
> +		break;
>  	case MSR_IA32_CR_PAT:
>  		if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
>  			if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
> @@ -7062,6 +7072,17 @@ static __init int hardware_setup(void)
>  		goto out;
>  	}
>  
> +	/*
> +	 * FIXME: this is only needed until SPEC_CTRL is supported
> +	 * by upstream Linux in cpufeatures, then it can be replaced
> +	 * with static_cpu_has.
> +	 */
> +	have_spec_ctrl = cpu_has_spec_ctrl();
> +	if (have_spec_ctrl)
> +		pr_info("kvm: SPEC_CTRL available\n");
> +	else
> +		pr_info("kvm: SPEC_CTRL not available\n");
> +

In this approach, must we reload these modules if we update the microcode later?

>  	if (boot_cpu_has(X86_FEATURE_NX))
>  		kvm_enable_efer_bits(EFER_NX);
>  
> @@ -7131,6 +7152,8 @@ static __init int hardware_setup(void)
>  	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
>  	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
>  	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
> +	vmx_disable_intercept_for_msr(MSR_IA32_SPEC_CTRL, false);
> +	vmx_disable_intercept_for_msr(MSR_IA32_PRED_CMD, false);
>  
>  	memcpy(vmx_msr_bitmap_legacy_x2apic_apicv,
>  			vmx_msr_bitmap_legacy, PAGE_SIZE);
> @@ -9601,6 +9624,13 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
>  
>  	vmx_arm_hv_timer(vcpu);
>  
> +	/*
> +	 * MSR_IA32_SPEC_CTRL is restored after the last indirect branch
> +	 * before vmentry.
> +	 */
> +	if (have_spec_ctrl && vmx->spec_ctrl != 0)
> +		wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
> +
>  	vmx->__launched = vmx->loaded_vmcs->launched;
>  	asm(
>  		/* Store host registers */
> @@ -9707,6 +9737,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
>  #endif
>  	      );
>  
> +	if (have_spec_ctrl) {
> +		rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
> +		if (vmx->spec_ctrl != 0)
> +			wrmsrl(MSR_IA32_SPEC_CTRL, 0);
> +	}
> +	/*
> +	 * Speculative execution past the above wrmsrl might encounter
> +	 * an indirect branch and use guest-controlled contents of the
> +	 * indirect branch predictor; block it.
> +	 */
> +	asm("lfence");
> +
>  	/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
>  	if (vmx->host_debugctlmsr)
>  		update_debugctlmsr(vmx->host_debugctlmsr);


-- 
Regards,
Longpeng(Mike)

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [PATCH 4/8] kvm: vmx: Set IBPB when running a different VCPU
  2018-01-13  9:29       ` Woodhouse, David
@ 2018-01-15  9:21         ` Paolo Bonzini
  0 siblings, 0 replies; 46+ messages in thread
From: Paolo Bonzini @ 2018-01-15  9:21 UTC (permalink / raw)
  To: Woodhouse, David, jmattson, kernellwp
  Cc: kvm, linux-kernel, tim.c.chen, Liguori, Anthony, x86, liran.alon,
	bp, thomas.lendacky, rkrcmar

On 13/01/2018 10:29, Woodhouse, David wrote:
> On Fri, 2018-01-12 at 09:03 -0800, Jim Mattson wrote:
>> The point behind the IBPB in vmx_vcpu_load is to prevent one VCPU from
>> steering the speculative execution of the next. If the VMCS address is
>> recycled, vmx_vcpu_load doesn't realize that the VCPUs are different,
>> and so it won't issue the IBPB.
> 
> I don't understand the sequence of events that could lead to this.
> 
> If the VMCS is freed, surely per_cpu(current_vmcs, cpu) has to be
> cleared? If the VMCS is freed while it's still *active* on a CPU,
> that's a bug, surely? And if that CPU is later offlined and clears the
> VMCS, it's going to scribble on freed (and potentially re-used) memory.

You're right, svm.c needs it but vmx.c does not (because AMD has no
vmclear equivalent).
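
For contrast, a sketch of the SVM side (struct and field names assumed from
the SVM patch in this series, not quoted verbatim):

	/*
	 * With no VMCLEAR equivalent, svm.c tracks the last-run VMCB
	 * per CPU by hand, so a freed and reallocated VMCB page can
	 * alias the cached pointer and make a different VCPU look
	 * "already loaded"; the free path therefore needs its own
	 * IBPB, while vmx.c can rely on the vmclear bookkeeping.
	 */
	if (sd->current_vmcb != svm->vmcb) {
		sd->current_vmcb = svm->vmcb;
		if (have_spec_ctrl)
			wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
	}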

>>> +       if (have_spec_ctrl)
>>> +               wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
> 
> Also, I think the same condition applies to the conditional branches
> over the IBPB-frobbing, as it does to setting IBRS. You can eschew the
> 'else lfence' only if you put in a comment showing that you've proved
> it's safe. Many of the other bits like this are being done with
> alternatives, which avoids that concern completely.
> 
> But really, I don't like this series much. Don't say "let's do this
> until upstream supports...". Just fix it up properly, and add the
> generic X86_FEATURE_IBPB bit and use it. We have *too* many separate
> tiny patch sets, and we need to be getting our act together and putting
> it all in one.

I agree, this series is not meant to be committed.  I only posted to get
early reviews (and it was very effective for that purpose).

Paolo

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [PATCH 3/8] kvm: vmx: pass MSR_IA32_SPEC_CTRL and MSR_IA32_PRED_CMD down to the guest
       [not found]   ` <1515839272.22302.520.camel@amazon.co.uk>
@ 2018-01-15  9:23     ` Paolo Bonzini
  0 siblings, 0 replies; 46+ messages in thread
From: Paolo Bonzini @ 2018-01-15  9:23 UTC (permalink / raw)
  To: Woodhouse, David, KVM list

On 13/01/2018 11:27, Woodhouse, David wrote:
>> +        * FIXME: this is only needed until SPEC_CTRL is supported
>> +        * by upstream Linux in cpufeatures, then it can be replaced
>> +        * with static_cpu_has.
> And I already shouted at Peter yesterday about static_cpu_has(). I
> refer you to my previous rant on that :)

I saw that rant and I agree---but your rant was about lfence; here I'm
not proposing to remove any lfences once KVM uses static_cpu_has:

+	/*
+	 * Speculative execution past the above wrmsrl might encounter
+	 * an indirect branch and use guest-controlled contents of the
+	 * indirect branch predictor; block it.
+	 */
+	asm("lfence");

No mention of static_cpu_has here. :)

Paolo

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [PATCH 3/8] kvm: vmx: pass MSR_IA32_SPEC_CTRL and MSR_IA32_PRED_CMD down to the guest
  2018-01-13 10:16   ` Longpeng (Mike)
@ 2018-01-15  9:23     ` Paolo Bonzini
  2018-01-15  9:34       ` Thomas Gleixner
  0 siblings, 1 reply; 46+ messages in thread
From: Paolo Bonzini @ 2018-01-15  9:23 UTC (permalink / raw)
  To: Longpeng (Mike)
  Cc: linux-kernel, kvm, rkrcmar, liran.alon, jmattson, aliguori,
	thomas.lendacky, dwmw, bp, x86, Gonglei

On 13/01/2018 11:16, Longpeng (Mike) wrote:
>> +	/*
>> +	 * FIXME: this is only needed until SPEC_CTRL is supported
>> +	 * by upstream Linux in cpufeatures, then it can be replaced
>> +	 * with static_cpu_has.
>> +	 */
>> +	have_spec_ctrl = cpu_has_spec_ctrl();
>> +	if (have_spec_ctrl)
>> +		pr_info("kvm: SPEC_CTRL available\n");
>> +	else
>> +		pr_info("kvm: SPEC_CTRL not available\n");
>> +
> 
> In this approach, must we reload these modules if we update the microcode later?

I strongly suggest using early microcode update anyway.

Paolo

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [PATCH 3/8] kvm: vmx: pass MSR_IA32_SPEC_CTRL and MSR_IA32_PRED_CMD down to the guest
  2018-01-15  9:23     ` Paolo Bonzini
@ 2018-01-15  9:34       ` Thomas Gleixner
  0 siblings, 0 replies; 46+ messages in thread
From: Thomas Gleixner @ 2018-01-15  9:34 UTC (permalink / raw)
  To: Paolo Bonzini
  Cc: Longpeng (Mike),
	linux-kernel, kvm, rkrcmar, liran.alon, jmattson, aliguori,
	thomas.lendacky, dwmw, bp, x86, Gonglei

On Mon, 15 Jan 2018, Paolo Bonzini wrote:

> On 13/01/2018 11:16, Longpeng (Mike) wrote:
> >> +	/*
> >> +	 * FIXME: this is only needed until SPEC_CTRL is supported
> >> +	 * by upstream Linux in cpufeatures, then it can be replaced
> >> +	 * with static_cpu_has.
> >> +	 */
> >> +	have_spec_ctrl = cpu_has_spec_ctrl();
> >> +	if (have_spec_ctrl)
> >> +		pr_info("kvm: SPEC_CTRL available\n");
> >> +	else
> >> +		pr_info("kvm: SPEC_CTRL not available\n");
> >> +
> > 
> > In this approach, must we reload these modules if we update the microcode later?
> 
> I strongly suggest using early microcode update anyway.

We had the discussion already and we are not going to support late
microcode loading. It's just not worth the trouble.

Also please do not commit any of this before we have sorted out the bare
metal IBRS/IBPB support. We really don't want to have two variants of that
in tree.

Thanks,

	tglx

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [PATCH 1/8] KVM: x86: add SPEC_CTRL and IBPB_SUPPORT accessors
  2018-01-09 12:03 ` [PATCH 1/8] KVM: x86: add SPEC_CTRL and IBPB_SUPPORT accessors Paolo Bonzini
@ 2018-01-15  9:42   ` David Hildenbrand
  0 siblings, 0 replies; 46+ messages in thread
From: David Hildenbrand @ 2018-01-15  9:42 UTC (permalink / raw)
  To: Paolo Bonzini, linux-kernel, kvm
  Cc: rkrcmar, liran.alon, jmattson, aliguori, thomas.lendacky, dwmw, bp, x86

On 09.01.2018 13:03, Paolo Bonzini wrote:
> As an interim measure until SPEC_CTRL is supported by upstream
> Linux in cpufeatures, add a function that lets vmx.c and svm.c
> know whether to save/restore MSR_IA32_SPEC_CTRL.
> 
> Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  arch/x86/kvm/cpuid.c |  3 ---
>  arch/x86/kvm/cpuid.h | 22 ++++++++++++++++++++++
>  2 files changed, 22 insertions(+), 3 deletions(-)
> 
> diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
> index 8e9a07c557f1..767af697c20c 100644
> --- a/arch/x86/kvm/cpuid.c
> +++ b/arch/x86/kvm/cpuid.c
> @@ -67,9 +67,6 @@ u64 kvm_supported_xcr0(void)
>  
>  #define F(x) bit(X86_FEATURE_##x)
>  
> -/* These are scattered features in cpufeatures.h. */
> -#define KVM_CPUID_BIT_AVX512_4VNNIW     2
> -#define KVM_CPUID_BIT_AVX512_4FMAPS     3
>  #define KF(x) bit(KVM_CPUID_BIT_##x)
>  
>  int kvm_update_cpuid(struct kvm_vcpu *vcpu)
> diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
> index c2cea6651279..8d04ccf177ce 100644
> --- a/arch/x86/kvm/cpuid.h
> +++ b/arch/x86/kvm/cpuid.h
> @@ -155,6 +155,28 @@ static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
>  	return x86_stepping(best->eax);
>  }
>  
> +/* These are scattered features in cpufeatures.h. */
> +#define KVM_CPUID_BIT_AVX512_4VNNIW	2
> +#define KVM_CPUID_BIT_AVX512_4FMAPS	3
> +#define KVM_CPUID_BIT_SPEC_CTRL		26
> +#define KVM_CPUID_BIT_STIBP		27
> +
> +/* CPUID[eax=0x80000008].ebx */
> +#define KVM_CPUID_BIT_IBPB_SUPPORT	12
> +
> +static inline bool cpu_has_spec_ctrl(void)
> +{
> +	u32 eax, ebx, ecx, edx;
> +	cpuid_count(7, 0, &eax, &ebx, &ecx, &edx);
> +
> +	return edx & bit(KVM_CPUID_BIT_SPEC_CTRL);
> +}
> +
> +static inline bool cpu_has_ibpb_support(void)
> +{
> +	return cpuid_ebx(0x80000008) & bit(KVM_CPUID_BIT_IBPB_SUPPORT);
> +}
> +
>  static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
>  {
>  	return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
> 

Reviewed-by: David Hildenbrand <david@redhat.com>

-- 

Thanks,

David / dhildenb

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [PATCH 5/8] KVM: SVM: fix comment
  2018-01-09 12:03 ` [PATCH 5/8] KVM: SVM: fix comment Paolo Bonzini
@ 2018-01-15  9:53   ` David Hildenbrand
  0 siblings, 0 replies; 46+ messages in thread
From: David Hildenbrand @ 2018-01-15  9:53 UTC (permalink / raw)
  To: Paolo Bonzini, linux-kernel, kvm
  Cc: rkrcmar, liran.alon, jmattson, aliguori, thomas.lendacky, dwmw, bp, x86

On 09.01.2018 13:03, Paolo Bonzini wrote:
> If always==true, then read/write bits are cleared from
> the MSR permission bitmap, thus passing-through the MSR.
> Fix the comment to match reality.
> 
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  arch/x86/kvm/svm.c | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index 55dde3896898..31ace8d7774a 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -236,7 +236,7 @@ struct amd_svm_iommu_ir {
>  
>  static const struct svm_direct_access_msrs {
>  	u32 index;   /* Index of the MSR */
> -	bool always; /* True if intercept is always on */
> +	bool always; /* True if intercept is always off */
>  } direct_access_msrs[] = {
>  	{ .index = MSR_STAR,				.always = true  },
>  	{ .index = MSR_IA32_SYSENTER_CS,		.always = true  },
> 

The function set_msr_interception() is really named confusingly.

If we pass in a "1", we clear the bit, resulting in no interception.

So 1==no intercept, 0==intercept. This function would be better named
"disable_msr_interception" or something like that.

Reviewed-by: David Hildenbrand <david@redhat.com>

-- 

Thanks,

David / dhildenb

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [PATCH 8/8] KVM: x86: add SPEC_CTRL and IBPB_SUPPORT to MSR and CPUID lists
  2018-01-13  8:00     ` Paolo Bonzini
@ 2018-01-16  0:40       ` Eric Wheeler
  2018-01-16  7:39         ` R: " Paolo Bonzini
  0 siblings, 1 reply; 46+ messages in thread
From: Eric Wheeler @ 2018-01-16  0:40 UTC (permalink / raw)
  To: Paolo Bonzini
  Cc: linux-kernel, kvm, rkrcmar, liran.alon, jmattson, aliguori,
	thomas.lendacky, dwmw, bp, x86

On Sat, 13 Jan 2018, Paolo Bonzini wrote:

> Just add the new MSR at the end of the array.

I'm assuming you meant emulated_msrs[], correct?  


--
Eric Wheeler



> 
> Paolo
> 
> ----- Eric Wheeler <kvm@lists.ewheeler.net> ha scritto:
> > On Tue, 9 Jan 2018, Paolo Bonzini wrote:
> > 
> > > Expose them to userspace, now that guests can use them.
> > > I am not adding cpufeatures here to avoid having a kernel
> > > that shows spec_ctrl in /proc/cpuinfo and actually has no
> > > support whatsoever for IBRS/IBPB.  Keep the ugly special-casing
> > > for now, and clean it up once the generic arch/x86/ code
> > > learns about them.
> > > 
> > > diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> > > index daa1918031df..4abb37d9f4d8 100644
> > > --- a/arch/x86/kvm/x86.c
> > > +++ b/arch/x86/kvm/x86.c
> > > @@ -1032,6 +1032,7 @@ unsigned int kvm_get_pt_addr_cnt(void)
> > >  	MSR_IA32_RTIT_ADDR1_A, MSR_IA32_RTIT_ADDR1_B,
> > >  	MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B,
> > >  	MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B,
> > > +	MSR_IA32_SPEC_CTRL,
> > >  };
> > 
> > Hi Paolo,
> > 
> > Thank you for posting this!
> > 
> > I am trying to merge this into 4.14, which does not have
> > kvm_get_pt_addr_cnt. The rest of the patch applies, but this hunk gets
> > rejected. Is this a necessary part of the commit?
> > 
> > patching file arch/x86/kvm/cpuid.c
> > Hunk #1 succeeded at 389 (offset -8 lines).
> > Hunk #2 succeeded at 479 (offset -9 lines).
> > Hunk #3 succeeded at 636 (offset -27 lines).
> > patching file arch/x86/kvm/x86.c
> > Hunk #1 FAILED at 1032.
> > 1 out of 1 hunk FAILED -- saving rejects to file arch/x86/kvm/x86.c.rej
> > 
> > ]# cat arch/x86/kvm/x86.c.rej
> > --- arch/x86/kvm/x86.c
> > +++ arch/x86/kvm/x86.c
> > @@ -1032,6 +1032,7 @@
> >  	MSR_IA32_RTIT_ADDR1_A, MSR_IA32_RTIT_ADDR1_B,
> >  	MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B,
> >  	MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B,
> > +	MSR_IA32_SPEC_CTRL,
> >  };
> >  
> >  static unsigned num_msrs_to_save;
> > 
> > 
> > Thank you for your help!
> > 
> > --
> > Eric Wheeler
> > 
> > 
> > >  
> > >  static unsigned num_msrs_to_save;
> > > -- 
> > > 1.8.3.1
> > > 
> > > 
> > > 
> 
> 

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [PATCH 9/8] KVM: x86: limit MSR_IA32_SPEC_CTRL access based on CPUID availability
  2018-01-09 12:03 ` [PATCH 9/8] KVM: x86: limit MSR_IA32_SPEC_CTRL access based on CPUID availability Paolo Bonzini
@ 2018-01-16  0:55   ` Eric Wheeler
  2018-01-16 12:59     ` Paolo Bonzini
  2018-01-30 13:21   ` [9/8] " Mihai Carabas
  1 sibling, 1 reply; 46+ messages in thread
From: Eric Wheeler @ 2018-01-16  0:55 UTC (permalink / raw)
  To: Paolo Bonzini
  Cc: linux-kernel, kvm, rkrcmar, liran.alon, jmattson, aliguori,
	thomas.lendacky, dwmw, bp, x86

On Tue, 9 Jan 2018, Paolo Bonzini wrote:
> MSR_IA32_SPEC_CTRL is not available unless CPU[7,0].EDX[26] is 1.
> Check that against host CPUID or guest CPUID, respectively for
> host-initiated and guest-initiated accesses.

Hi Radim, Paolo:

In porting this patch series to v4.14, I'm getting this BUILD_BUG_ON:

In file included from arch/x86/kvm/vmx.c:21:0:
In function 'x86_feature_cpuid',
    inlined from 'guest_cpuid_get_register' at arch/x86/kvm/cpuid.h:72:25,
    inlined from 'vmx_get_msr' at arch/x86/kvm/cpuid.h:101:6:
arch/x86/kvm/cpuid.h:64:232: error: call to '__compiletime_assert_64'
declared with attribute error: BUILD_BUG_ON failed:
reverse_cpuid[x86_leaf].function == 0
  BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
In function 'x86_feature_cpuid',
    inlined from 'guest_cpuid_get_register' at arch/x86/kvm/cpuid.h:72:25,
    inlined from 'vmx_set_msr' at arch/x86/kvm/cpuid.h:101:6:
arch/x86/kvm/cpuid.h:64:232: error: call to '__compiletime_assert_64'
declared with attribute error: BUILD_BUG_ON failed:
reverse_cpuid[x86_leaf].function == 0
  BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);

I think this is caused by the following call stack for 
X86_FEATURE_SPEC_CTRL, but if not please correct me here:

arch/x86/kvm/vmx.c:
vmx_get_msr/vmx_set_msr()
	guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)
		guest_cpuid_get_register(vcpu, x86_feature); 
			x86_feature_cpuid(x86_feature);
				x86_feature_cpuid(unsigned x86_feature)
					BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);


It looks like I need to add something to reverse_cpuid[] but I'm not sure 
what.  

Do you know what needs to be added here?
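
(For context: each reverse_cpuid[] entry maps a cpufeatures word to the
CPUID function/index/register it mirrors, so the missing piece would be an
entry for CPUID.(EAX=7H,ECX=0):EDX. A hypothetical sketch follows; the word
name CPUID_7_EDX is made up, since 4.14's cpufeatures has no such word,
which is exactly why the BUILD_BUG_ON fires:)

	[CPUID_7_EDX] = { 7, 0, CPUID_EDX },	/* hypothetical; no such word in 4.14 */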

-Eric

--
Eric Wheeler



> 
> Suggested-by: Jim Mattson <jmattson@google.com>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
> 	This is for after X86_FEATURE_SPEC_CTRL is added to Linux, but
> 	I still wanted to ack Jim's improvement.
> 
>  arch/x86/kvm/svm.c | 8 ++++++++
>  arch/x86/kvm/vmx.c | 8 ++++++++
>  2 files changed, 16 insertions(+)
> 
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index 97126c2bd663..3a646580d7c5 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -3648,6 +3648,10 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  		msr_info->data = svm->nested.vm_cr_msr;
>  		break;
>  	case MSR_IA32_SPEC_CTRL:
> +		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL) ||
> +	    	    (!msr_info->host_initiated &&
> +       		     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)))
> +			return 1;
>  		msr_info->data = svm->spec_ctrl;
>  		break;
>  	case MSR_IA32_UCODE_REV:
> @@ -3806,6 +3810,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
>  		vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
>  		break;
>  	case MSR_IA32_SPEC_CTRL:
> +		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL) ||
> +	    	    (!msr_info->host_initiated &&
> +       		     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)))
> +			return 1;
>  		svm->spec_ctrl = data;
>  		break;
>  	case MSR_IA32_APICBASE:
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 49b4a2d61603..42bc7ee293e4 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -3368,6 +3368,10 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  		msr_info->data = guest_read_tsc(vcpu);
>  		break;
>  	case MSR_IA32_SPEC_CTRL:
> +		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL) ||
> +		    (!msr_info->host_initiated &&
> +		     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)))
> +			return 1;
>  		msr_info->data = to_vmx(vcpu)->spec_ctrl;
>  		break;
>  	case MSR_IA32_SYSENTER_CS:
> @@ -3510,6 +3514,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  		kvm_write_tsc(vcpu, msr_info);
>  		break;
>  	case MSR_IA32_SPEC_CTRL:
> +		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL) ||
> +		    (!msr_info->host_initiated &&
> +		     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)))
> +			return 1;
>  		to_vmx(vcpu)->spec_ctrl = data;
>  		break;
>  	case MSR_IA32_CR_PAT:
> -- 
> 1.8.3.1
> 
> 

^ permalink raw reply	[flat|nested] 46+ messages in thread

* R: Re: [PATCH 8/8] KVM: x86: add SPEC_CTRL and IBPB_SUPPORT to MSR and CPUID lists
  2018-01-16  0:40       ` Eric Wheeler
@ 2018-01-16  7:39         ` Paolo Bonzini
  0 siblings, 0 replies; 46+ messages in thread
From: Paolo Bonzini @ 2018-01-16  7:39 UTC (permalink / raw)
  To: Eric Wheeler
  Cc: linux-kernel, kvm, rkrcmar, liran.alon, jmattson, aliguori,
	thomas.lendacky, dwmw, bp, x86


----- Eric Wheeler <kvm@lists.ewheeler.net> ha scritto:
> On Sat, 13 Jan 2018, Paolo Bonzini wrote:
> 
> > Just add the new MSR at the end of the array.
> 
> I'm assuming you meant emulated_msrs[], correct?  

No, msrs_to_save. It's just above emulated_msrs.

Paolo
> 
> 
> --
> Eric Wheeler
> 
> 
> 
> > 
> > Paolo
> > 
> > ----- Eric Wheeler <kvm@lists.ewheeler.net> ha scritto:
> > > On Tue, 9 Jan 2018, Paolo Bonzini wrote:
> > > 
> > > > Expose them to userspace, now that guests can use them.
> > > > I am not adding cpufeatures here to avoid having a kernel
> > > > that shows spec_ctrl in /proc/cpuinfo and actually has no
> > > > support whatsoever for IBRS/IBPB.  Keep the ugly special-casing
> > > > for now, and clean it up once the generic arch/x86/ code
> > > > learns about them.
> > > > 
> > > > diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> > > > index daa1918031df..4abb37d9f4d8 100644
> > > > --- a/arch/x86/kvm/x86.c
> > > > +++ b/arch/x86/kvm/x86.c
> > > > @@ -1032,6 +1032,7 @@ unsigned int kvm_get_pt_addr_cnt(void)
> > > >  	MSR_IA32_RTIT_ADDR1_A, MSR_IA32_RTIT_ADDR1_B,
> > > >  	MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B,
> > > >  	MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B,
> > > > +	MSR_IA32_SPEC_CTRL,
> > > >  };
> > > 
> > > Hi Paolo,
> > > 
> > > Thank you for posting this!
> > > 
> > > I am trying to merge this into 4.14, which does not have
> > > kvm_get_pt_addr_cnt. The rest of the patch applies, but this hunk gets
> > > rejected. Is this a necessary part of the commit?
> > > 
> > > patching file arch/x86/kvm/cpuid.c
> > > Hunk #1 succeeded at 389 (offset -8 lines).
> > > Hunk #2 succeeded at 479 (offset -9 lines).
> > > Hunk #3 succeeded at 636 (offset -27 lines).
> > > patching file arch/x86/kvm/x86.c
> > > Hunk #1 FAILED at 1032.
> > > 1 out of 1 hunk FAILED -- saving rejects to file arch/x86/kvm/x86.c.rej
> > > 
> > > ]# cat arch/x86/kvm/x86.c.rej
> > > --- arch/x86/kvm/x86.c
> > > +++ arch/x86/kvm/x86.c
> > > @@ -1032,6 +1032,7 @@
> > >  	MSR_IA32_RTIT_ADDR1_A, MSR_IA32_RTIT_ADDR1_B,
> > >  	MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B,
> > >  	MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B,
> > > +	MSR_IA32_SPEC_CTRL,
> > >  };
> > >  
> > >  static unsigned num_msrs_to_save;
> > > 
> > > 
> > > Thank you for your help!
> > > 
> > > --
> > > Eric Wheeler
> > > 
> > > 
> > > >  
> > > >  static unsigned num_msrs_to_save;
> > > > -- 
> > > > 1.8.3.1
> > > > 
> > > > 
> > > > 
> > 
> > 

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [PATCH 9/8] KVM: x86: limit MSR_IA32_SPEC_CTRL access based on CPUID availability
  2018-01-16  0:55   ` Eric Wheeler
@ 2018-01-16 12:59     ` Paolo Bonzini
  0 siblings, 0 replies; 46+ messages in thread
From: Paolo Bonzini @ 2018-01-16 12:59 UTC (permalink / raw)
  To: Eric Wheeler
  Cc: linux-kernel, kvm, rkrcmar, liran.alon, jmattson, aliguori,
	thomas.lendacky, dwmw, bp, x86

On 16/01/2018 01:55, Eric Wheeler wrote:
> On Tue, 9 Jan 2018, Paolo Bonzini wrote:
>> MSR_IA32_SPEC_CTRL is not available unless CPU[7,0].EDX[26] is 1.
>> Check that against host CPUID or guest CPUID, respectively for
>> host-initiated and guest-initiated accesses.
> 
> Hi Radim, Paolo:
> 
> In porting this patch series to v4.14

Just don't apply this patch.

Paolo

> In file included from arch/x86/kvm/vmx.c:21:0:
> In function 'x86_feature_cpuid',
>     inlined from 'guest_cpuid_get_register' at arch/x86/kvm/cpuid.h:72:25,
>     inlined from 'vmx_get_msr' at arch/x86/kvm/cpuid.h:101:6:
> arch/x86/kvm/cpuid.h:64:232: error: call to '__compiletime_assert_64'
> declared with attribute error: BUILD_BUG_ON failed:
> reverse_cpuid[x86_leaf].function == 0
>   BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
> In function 'x86_feature_cpuid',
>     inlined from 'guest_cpuid_get_register' at arch/x86/kvm/cpuid.h:72:25,
>     inlined from 'vmx_set_msr' at arch/x86/kvm/cpuid.h:101:6:
> arch/x86/kvm/cpuid.h:64:232: error: call to '__compiletime_assert_64'
> declared with attribute error: BUILD_BUG_ON failed:
> reverse_cpuid[x86_leaf].function == 0
>   BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
> 
> I think this is caused by the following call stack for 
> X86_FEATURE_SPEC_CTRL, but if not please correct me here:
> 
> arch/x86/kvm/vmx.c:
> vmx_get_msr/vmx_set_msr()
> 	guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)
> 		guest_cpuid_get_register(vcpu, x86_feature); 
> 			x86_feature_cpuid(x86_feature);
> 				x86_feature_cpuid(unsigned x86_feature)
> 					BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
> 
> 
> It looks like I need to add something to reverse_cpuid[] but I'm not sure 
> what.  
> 
> Do you know what needs to be added here?
> 
> -Eric
> 
> --
> Eric Wheeler
> 
> 
> 
>>
>> Suggested-by: Jim Mattson <jmattson@google.com>
>> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
>> ---
>> 	This is for after X86_FEATURE_SPEC_CTRL is added to Linux, but
>> 	I still wanted to ack Jim's improvement.
>>
>>  arch/x86/kvm/svm.c | 8 ++++++++
>>  arch/x86/kvm/vmx.c | 8 ++++++++
>>  2 files changed, 16 insertions(+)
>>
>> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
>> index 97126c2bd663..3a646580d7c5 100644
>> --- a/arch/x86/kvm/svm.c
>> +++ b/arch/x86/kvm/svm.c
>> @@ -3648,6 +3648,10 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>>  		msr_info->data = svm->nested.vm_cr_msr;
>>  		break;
>>  	case MSR_IA32_SPEC_CTRL:
>> +		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL) ||
>> +	    	    (!msr_info->host_initiated &&
>> +       		     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)))
>> +			return 1;
>>  		msr_info->data = svm->spec_ctrl;
>>  		break;
>>  	case MSR_IA32_UCODE_REV:
>> @@ -3806,6 +3810,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
>>  		vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
>>  		break;
>>  	case MSR_IA32_SPEC_CTRL:
>> +		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL) ||
>> +	    	    (!msr_info->host_initiated &&
>> +       		     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)))
>> +			return 1;
>>  		svm->spec_ctrl = data;
>>  		break;
>>  	case MSR_IA32_APICBASE:
>> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
>> index 49b4a2d61603..42bc7ee293e4 100644
>> --- a/arch/x86/kvm/vmx.c
>> +++ b/arch/x86/kvm/vmx.c
>> @@ -3368,6 +3368,10 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>>  		msr_info->data = guest_read_tsc(vcpu);
>>  		break;
>>  	case MSR_IA32_SPEC_CTRL:
>> +		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL) ||
>> +		    (!msr_info->host_initiated &&
>> +		     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)))
>> +			return 1;
>>  		msr_info->data = to_vmx(vcpu)->spec_ctrl;
>>  		break;
>>  	case MSR_IA32_SYSENTER_CS:
>> @@ -3510,6 +3514,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>>  		kvm_write_tsc(vcpu, msr_info);
>>  		break;
>>  	case MSR_IA32_SPEC_CTRL:
>> +		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL) ||
>> +		    (!msr_info->host_initiated &&
>> +		     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)))
>> +			return 1;
>>  		to_vmx(vcpu)->spec_ctrl = data;
>>  		break;
>>  	case MSR_IA32_CR_PAT:
>> -- 
>> 1.8.3.1
>>
>>

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [9/8] KVM: x86: limit MSR_IA32_SPEC_CTRL access based on CPUID availability
  2018-01-09 12:03 ` [PATCH 9/8] KVM: x86: limit MSR_IA32_SPEC_CTRL access based on CPUID availability Paolo Bonzini
  2018-01-16  0:55   ` Eric Wheeler
@ 2018-01-30 13:21   ` Mihai Carabas
  2018-01-30 16:33     ` Jim Mattson
  1 sibling, 1 reply; 46+ messages in thread
From: Mihai Carabas @ 2018-01-30 13:21 UTC (permalink / raw)
  To: Paolo Bonzini, linux-kernel, kvm
  Cc: rkrcmar, liran.alon, jmattson, aliguori, thomas.lendacky, dwmw,
	bp, x86, Konrad Rzeszutek Wilk

Hello Paolo,

I've backported this patch onto 4.1, after adding the per-vcpu MSR
bitmap. Also enabled the SPEC_CTRL MSR intercept if QEMU instructed us
to do so [1].

Reviewed-by: Mihai Carabas <mihai.carabas@oracle.com>

[1]
+++ b/arch/x86/kvm/vmx.c
@@ -8391,6 +8391,16 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
        vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_PRED_CMD, MSR_TYPE_R | MSR_TYPE_W);
        vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_BNDCFGS, MSR_TYPE_R | MSR_TYPE_W);

+       /*
+        * If the physical CPU or the vCPU of this VM doesn't
+        * support SPEC_CTRL feature, catch each access to it.
+        */
+       if (!static_cpu_has(X86_FEATURE_SPEC_CTRL) ||
+            !guest_cpuid_has_spec_ctrl(&vmx->vcpu))
+               vmx_enable_intercept_for_msr(
+                       msr_bitmap,
+                       MSR_IA32_SPEC_CTRL,
+                       MSR_TYPE_R | MSR_TYPE_W);

        /*
         * If PML is turned on, failure on enabling PML just results in failure


On 09.01.2018 14:03, Paolo Bonzini wrote:
> MSR_IA32_SPEC_CTRL is not available unless CPU[7,0].EDX[26] is 1.
> Check that against host CPUID or guest CPUID, respectively for
> host-initiated and guest-initiated accesses.
> 
> Suggested-by: Jim Mattson <jmattson@google.com>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
> 	This is for after X86_FEATURE_SPEC_CTRL is added to Linux, but
> 	I still wanted to ack Jim's improvement.
> 
>   arch/x86/kvm/svm.c | 8 ++++++++
>   arch/x86/kvm/vmx.c | 8 ++++++++
>   2 files changed, 16 insertions(+)
> 
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index 97126c2bd663..3a646580d7c5 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -3648,6 +3648,10 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>   		msr_info->data = svm->nested.vm_cr_msr;
>   		break;
>   	case MSR_IA32_SPEC_CTRL:
> +		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL) ||
> +	    	    (!msr_info->host_initiated &&
> +       		     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)))
> +			return 1;
>   		msr_info->data = svm->spec_ctrl;
>   		break;
>   	case MSR_IA32_UCODE_REV:
> @@ -3806,6 +3810,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
>   		vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
>   		break;
>   	case MSR_IA32_SPEC_CTRL:
> +		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL) ||
> +	    	    (!msr_info->host_initiated &&
> +       		     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)))
> +			return 1;
>   		svm->spec_ctrl = data;
>   		break;
>   	case MSR_IA32_APICBASE:
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 49b4a2d61603..42bc7ee293e4 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -3368,6 +3368,10 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>   		msr_info->data = guest_read_tsc(vcpu);
>   		break;
>   	case MSR_IA32_SPEC_CTRL:
> +		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL) ||
> +		    (!msr_info->host_initiated &&
> +		     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)))
> +			return 1;
>   		msr_info->data = to_vmx(vcpu)->spec_ctrl;
>   		break;
>   	case MSR_IA32_SYSENTER_CS:
> @@ -3510,6 +3514,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>   		kvm_write_tsc(vcpu, msr_info);
>   		break;
>   	case MSR_IA32_SPEC_CTRL:
> +		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL) ||
> +		    (!msr_info->host_initiated &&
> +		     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)))
> +			return 1;
>   		to_vmx(vcpu)->spec_ctrl = data;
>   		break;
>   	case MSR_IA32_CR_PAT:
> 

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [9/8] KVM: x86: limit MSR_IA32_SPEC_CTRL access based on CPUID availability
  2018-01-30 13:21   ` [9/8] " Mihai Carabas
@ 2018-01-30 16:33     ` Jim Mattson
  2018-01-30 16:43       ` Mihai Carabas
  0 siblings, 1 reply; 46+ messages in thread
From: Jim Mattson @ 2018-01-30 16:33 UTC (permalink / raw)
  To: Mihai Carabas
  Cc: Paolo Bonzini, LKML, kvm list, Radim Krčmář,
	Liran Alon, Anthony Liguori, Tom Lendacky, David Woodhouse,
	Borislav Petkov, the arch/x86 maintainers, Konrad Rzeszutek Wilk

All MSR intercepts are enabled by default, so I don't think this patch
does anything at all, unless I'm missing some context.

On Tue, Jan 30, 2018 at 5:21 AM, Mihai Carabas <mihai.carabas@oracle.com> wrote:
> Hello Paolo,
>
> I've backported this patch onto 4.1, after adding the per-vcpu MSR bitmap.
> Also enabled the SPEC_CTRL MSR intercept if QEMU instructed us to do so [1].
>
> Reviewed-by: Mihai Carabas <mihai.carabas@oracle.com>
>
> [1]
> +++ b/arch/x86/kvm/vmx.c
> @@ -8391,6 +8391,16 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
>         vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_PRED_CMD, MSR_TYPE_R | MSR_TYPE_W);
>         vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_BNDCFGS, MSR_TYPE_R | MSR_TYPE_W);
>
> +       /*
> +        * If the physical CPU or the vCPU of this VM doesn't
> +        * support SPEC_CTRL feature, catch each access to it.
> +        */
> +       if (!static_cpu_has(X86_FEATURE_SPEC_CTRL) ||
> +            !guest_cpuid_has_spec_ctrl(&vmx->vcpu))
> +               vmx_enable_intercept_for_msr(
> +                       msr_bitmap,
> +                       MSR_IA32_SPEC_CTRL,
> +                       MSR_TYPE_R | MSR_TYPE_W);
>
>         /*
>          * If PML is turned on, failure on enabling PML just results in failure
>
>
>
> On 09.01.2018 14:03, Paolo Bonzini wrote:
>>
>> MSR_IA32_SPEC_CTRL is not available unless CPU[7,0].EDX[26] is 1.
>> Check that against host CPUID or guest CPUID, respectively for
>> host-initiated and guest-initiated accesses.
>>
>> Suggested-by: Jim Mattson <jmattson@google.com>
>> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
>> ---
>>         This is for after X86_FEATURE_SPEC_CTRL is added to Linux, but
>>         I still wanted to ack Jim's improvement.
>>
>>   arch/x86/kvm/svm.c | 8 ++++++++
>>   arch/x86/kvm/vmx.c | 8 ++++++++
>>   2 files changed, 16 insertions(+)
>>
>> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
>> index 97126c2bd663..3a646580d7c5 100644
>> --- a/arch/x86/kvm/svm.c
>> +++ b/arch/x86/kvm/svm.c
>> @@ -3648,6 +3648,10 @@ static int svm_get_msr(struct kvm_vcpu *vcpu,
>> struct msr_data *msr_info)
>>                 msr_info->data = svm->nested.vm_cr_msr;
>>                 break;
>>         case MSR_IA32_SPEC_CTRL:
>> +               if (!static_cpu_has(X86_FEATURE_SPEC_CTRL) ||
>> +                   (!msr_info->host_initiated &&
>> +                            !guest_cpuid_has(vcpu,
>> X86_FEATURE_SPEC_CTRL)))
>> +                       return 1;
>>                 msr_info->data = svm->spec_ctrl;
>>                 break;
>>         case MSR_IA32_UCODE_REV:
>> @@ -3806,6 +3810,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu,
>> struct msr_data *msr)
>>                 vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data
>> 0x%llx\n", ecx, data);
>>                 break;
>>         case MSR_IA32_SPEC_CTRL:
>> +               if (!static_cpu_has(X86_FEATURE_SPEC_CTRL) ||
>> +                   (!msr_info->host_initiated &&
>> +                            !guest_cpuid_has(vcpu,
>> X86_FEATURE_SPEC_CTRL)))
>> +                       return 1;
>>                 svm->spec_ctrl = data;
>>                 break;
>>         case MSR_IA32_APICBASE:
>> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
>> index 49b4a2d61603..42bc7ee293e4 100644
>> --- a/arch/x86/kvm/vmx.c
>> +++ b/arch/x86/kvm/vmx.c
>> @@ -3368,6 +3368,10 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu,
>> struct msr_data *msr_info)
>>                 msr_info->data = guest_read_tsc(vcpu);
>>                 break;
>>         case MSR_IA32_SPEC_CTRL:
>> +               if (!static_cpu_has(X86_FEATURE_SPEC_CTRL) ||
>> +                   (!msr_info->host_initiated &&
>> +                    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)))
>> +                       return 1;
>>                 msr_info->data = to_vmx(vcpu)->spec_ctrl;
>>                 break;
>>         case MSR_IA32_SYSENTER_CS:
>> @@ -3510,6 +3514,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu,
>> struct msr_data *msr_info)
>>                 kvm_write_tsc(vcpu, msr_info);
>>                 break;
>>         case MSR_IA32_SPEC_CTRL:
>> +               if (!static_cpu_has(X86_FEATURE_SPEC_CTRL) ||
>> +                   (!msr_info->host_initiated &&
>> +                    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)))
>> +                       return 1;
>>                 to_vmx(vcpu)->spec_ctrl = data;
>>                 break;
>>         case MSR_IA32_CR_PAT:
>>
>

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [9/8] KVM: x86: limit MSR_IA32_SPEC_CTRL access based on CPUID availability
  2018-01-30 16:33     ` Jim Mattson
@ 2018-01-30 16:43       ` Mihai Carabas
  2018-01-30 16:57         ` Jim Mattson
  0 siblings, 1 reply; 46+ messages in thread
From: Mihai Carabas @ 2018-01-30 16:43 UTC (permalink / raw)
  To: Jim Mattson
  Cc: Paolo Bonzini, LKML, kvm list, Radim Krčmář,
	Liran Alon, Anthony Liguori, Tom Lendacky, David Woodhouse,
	Borislav Petkov, the arch/x86 maintainers, Konrad Rzeszutek Wilk

On 30.01.2018 18:33, Jim Mattson wrote:
> All MSR intercepts are enabled by default, so I don't think this patch
> does anything at all, unless I'm missing some context.
> 

Currently upstream, the intercepts for some MSRs are disabled:
https://github.com/torvalds/linux/blob/master/arch/x86/kvm/vmx.c#L6838

In particular, for this patch the MSR_IA32_SPEC_CTRL intercept is
disabled in 3/8: https://patchwork.kernel.org/patch/10151889/


> On Tue, Jan 30, 2018 at 5:21 AM, Mihai Carabas <mihai.carabas@oracle.com> wrote:
>> Hello Paolo,
>>
>> I've backported this patch onto 4.1, after adding the per-vcpu MSR bitmap.
>> Also enabled the SPEC_CTRL MSR intercept if QEMU instructed us to do so [1].
>>
>> Reviewed-by: Mihai Carabas <mihai.carabas@oracle.com>
>>
>> [1]
>> +++ b/arch/x86/kvm/vmx.c
>> @@ -8391,6 +8391,16 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
>>         vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_PRED_CMD, MSR_TYPE_R | MSR_TYPE_W);
>>         vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_BNDCFGS, MSR_TYPE_R | MSR_TYPE_W);
>>
>> +       /*
>> +        * If the physical CPU or the vCPU of this VM doesn't
>> +        * support SPEC_CTRL feature, catch each access to it.
>> +        */
>> +       if (!static_cpu_has(X86_FEATURE_SPEC_CTRL) ||
>> +            !guest_cpuid_has_spec_ctrl(&vmx->vcpu))
>> +               vmx_enable_intercept_for_msr(
>> +                       msr_bitmap,
>> +                       MSR_IA32_SPEC_CTRL,
>> +                       MSR_TYPE_R | MSR_TYPE_W);
>>
>>         /*
>>          * If PML is turned on, failure on enabling PML just results in failure
>>
>>
>>
>> On 09.01.2018 14:03, Paolo Bonzini wrote:
>>>
>>> MSR_IA32_SPEC_CTRL is not available unless CPU[7,0].EDX[26] is 1.
>>> Check that against host CPUID or guest CPUID, respectively for
>>> host-initiated and guest-initiated accesses.
>>>
>>> Suggested-by: Jim Mattson <jmattson@google.com>
>>> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
>>> ---
>>>          This is for after X86_FEATURE_SPEC_CTRL is added to Linux, but
>>>          I still wanted to ack Jim's improvement.
>>>
>>>    arch/x86/kvm/svm.c | 8 ++++++++
>>>    arch/x86/kvm/vmx.c | 8 ++++++++
>>>    2 files changed, 16 insertions(+)
>>>
>>> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
>>> index 97126c2bd663..3a646580d7c5 100644
>>> --- a/arch/x86/kvm/svm.c
>>> +++ b/arch/x86/kvm/svm.c
>>> @@ -3648,6 +3648,10 @@ static int svm_get_msr(struct kvm_vcpu *vcpu,
>>> struct msr_data *msr_info)
>>>                  msr_info->data = svm->nested.vm_cr_msr;
>>>                  break;
>>>          case MSR_IA32_SPEC_CTRL:
>>> +               if (!static_cpu_has(X86_FEATURE_SPEC_CTRL) ||
>>> +                   (!msr_info->host_initiated &&
>>> +                            !guest_cpuid_has(vcpu,
>>> X86_FEATURE_SPEC_CTRL)))
>>> +                       return 1;
>>>                  msr_info->data = svm->spec_ctrl;
>>>                  break;
>>>          case MSR_IA32_UCODE_REV:
>>> @@ -3806,6 +3810,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu,
>>> struct msr_data *msr)
>>>                  vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data
>>> 0x%llx\n", ecx, data);
>>>                  break;
>>>          case MSR_IA32_SPEC_CTRL:
>>> +               if (!static_cpu_has(X86_FEATURE_SPEC_CTRL) ||
>>> +                   (!msr_info->host_initiated &&
>>> +                            !guest_cpuid_has(vcpu,
>>> X86_FEATURE_SPEC_CTRL)))
>>> +                       return 1;
>>>                  svm->spec_ctrl = data;
>>>                  break;
>>>          case MSR_IA32_APICBASE:
>>> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
>>> index 49b4a2d61603..42bc7ee293e4 100644
>>> --- a/arch/x86/kvm/vmx.c
>>> +++ b/arch/x86/kvm/vmx.c
>>> @@ -3368,6 +3368,10 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu,
>>> struct msr_data *msr_info)
>>>                  msr_info->data = guest_read_tsc(vcpu);
>>>                  break;
>>>          case MSR_IA32_SPEC_CTRL:
>>> +               if (!static_cpu_has(X86_FEATURE_SPEC_CTRL) ||
>>> +                   (!msr_info->host_initiated &&
>>> +                    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)))
>>> +                       return 1;
>>>                  msr_info->data = to_vmx(vcpu)->spec_ctrl;
>>>                  break;
>>>          case MSR_IA32_SYSENTER_CS:
>>> @@ -3510,6 +3514,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu,
>>> struct msr_data *msr_info)
>>>                  kvm_write_tsc(vcpu, msr_info);
>>>                  break;
>>>          case MSR_IA32_SPEC_CTRL:
>>> +               if (!static_cpu_has(X86_FEATURE_SPEC_CTRL) ||
>>> +                   (!msr_info->host_initiated &&
>>> +                    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)))
>>> +                       return 1;
>>>                  to_vmx(vcpu)->spec_ctrl = data;
>>>                  break;
>>>          case MSR_IA32_CR_PAT:
>>>
>>

^ permalink raw reply	[flat|nested] 46+ messages in thread

* Re: [9/8] KVM: x86: limit MSR_IA32_SPEC_CTRL access based on CPUID availability
  2018-01-30 16:43       ` Mihai Carabas
@ 2018-01-30 16:57         ` Jim Mattson
  2018-01-30 17:14           ` David Woodhouse
  0 siblings, 1 reply; 46+ messages in thread
From: Jim Mattson @ 2018-01-30 16:57 UTC (permalink / raw)
  To: Mihai Carabas
  Cc: Paolo Bonzini, LKML, kvm list, Radim Krčmář,
	Liran Alon, Anthony Liguori, Tom Lendacky, David Woodhouse,
	Borislav Petkov, the arch/x86 maintainers, Konrad Rzeszutek Wilk

It's really hard to tell which patches are being proposed for which
repositories, but assuming that everything else is correct, I don't
think your condition is adequate. What if the physical CPU and the
virtual CPU both have CPUID.(EAX=7H,ECX=0):EDX[26], but only the
physical CPU has CPUID.(EAX=7H,ECX=0):EDX[27]? If the guest has write
access to MSR_IA32_SPEC_CTRL, it can set MSR_IA32_SPEC_CTRL[1]
(STIBP), even though setting that bit in the guest should raise #GP.
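
A sketch of the stricter wrmsr check this implies; guest_has_stibp() is a
hypothetical helper standing in for a CPUID.(EAX=7H,ECX=0):EDX[27] test,
since no X86_FEATURE_STIBP word exists yet:

	u64 valid_bits = 0;

	if (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
		valid_bits |= SPEC_CTRL_FEATURE_ENABLE_IBRS;	/* bit 0: IBRS */
	if (guest_has_stibp(vcpu))		/* hypothetical helper */
		valid_bits |= BIT_ULL(1);	/* bit 1: STIBP */
	if (data & ~valid_bits)
		return 1;	/* reserved bit for this vCPU model: raise #GP */
	to_vmx(vcpu)->spec_ctrl = data;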

On Tue, Jan 30, 2018 at 8:43 AM, Mihai Carabas <mihai.carabas@oracle.com> wrote:
> On 30.01.2018 18:33, Jim Mattson wrote:
>>
>> All MSR intercepts are enabled by default, so I don't think this patch
>> does anything at all, unless I'm missing some context.
>>
>
> Currently upstream, the intercepts for some MSRs are disabled:
> https://github.com/torvalds/linux/blob/master/arch/x86/kvm/vmx.c#L6838
>
> In particular, for this patch the MSR_IA32_SPEC_CTRL intercept is
> disabled in 3/8: https://patchwork.kernel.org/patch/10151889/
>
>
>
>> On Tue, Jan 30, 2018 at 5:21 AM, Mihai Carabas <mihai.carabas@oracle.com>
>> wrote:
>>>
>>> Hello Paolo,
>>>
>>> I've backported this patch onto 4.1, after adding the per-vcpu MSR bitmap.
>>> Also enabled the SPEC_CTRL MSR intercept if QEMU instructed us to do so [1].
>>>
>>> Reviewed-by: Mihai Carabas <mihai.carabas@oracle.com>
>>>
>>> [1]
>>> +++ b/arch/x86/kvm/vmx.c
>>> @@ -8391,6 +8391,16 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
>>>         vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_PRED_CMD, MSR_TYPE_R | MSR_TYPE_W);
>>>         vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_BNDCFGS, MSR_TYPE_R | MSR_TYPE_W);
>>>
>>> +       /*
>>> +        * If the physical CPU or the vCPU of this VM doesn't
>>> +        * support SPEC_CTRL feature, catch each access to it.
>>> +        */
>>> +       if (!static_cpu_has(X86_FEATURE_SPEC_CTRL) ||
>>> +            !guest_cpuid_has_spec_ctrl(&vmx->vcpu))
>>> +               vmx_enable_intercept_for_msr(
>>> +                       msr_bitmap,
>>> +                       MSR_IA32_SPEC_CTRL,
>>> +                       MSR_TYPE_R | MSR_TYPE_W);
>>>
>>>         /*
>>>          * If PML is turned on, failure on enabling PML just results in failure
>>>
>>>
>>>
>>> On 09.01.2018 14:03, Paolo Bonzini wrote:
>>>>
>>>>
>>>> MSR_IA32_SPEC_CTRL is not available unless CPU[7,0].EDX[26] is 1.
>>>> Check that against host CPUID or guest CPUID, respectively for
>>>> host-initiated and guest-initiated accesses.
>>>>
>>>> Suggested-by: Jim Mattson <jmattson@google.com>
>>>> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
>>>> ---
>>>>          This is for after X86_FEATURE_SPEC_CTRL is added to Linux, but
>>>>          I still wanted to ack Jim's improvement.
>>>>
>>>>    arch/x86/kvm/svm.c | 8 ++++++++
>>>>    arch/x86/kvm/vmx.c | 8 ++++++++
>>>>    2 files changed, 16 insertions(+)
>>>>
>>>> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
>>>> index 97126c2bd663..3a646580d7c5 100644
>>>> --- a/arch/x86/kvm/svm.c
>>>> +++ b/arch/x86/kvm/svm.c
>>>> @@ -3648,6 +3648,10 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>>>>                  msr_info->data = svm->nested.vm_cr_msr;
>>>>                  break;
>>>>          case MSR_IA32_SPEC_CTRL:
>>>> +               if (!static_cpu_has(X86_FEATURE_SPEC_CTRL) ||
>>>> +                   (!msr_info->host_initiated &&
>>>> +                    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)))
>>>> +                       return 1;
>>>>                  msr_info->data = svm->spec_ctrl;
>>>>                  break;
>>>>          case MSR_IA32_UCODE_REV:
>>>> @@ -3806,6 +3810,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
>>>>                  vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
>>>>                  break;
>>>>          case MSR_IA32_SPEC_CTRL:
>>>> +               if (!static_cpu_has(X86_FEATURE_SPEC_CTRL) ||
>>>> +                   (!msr->host_initiated &&
>>>> +                    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)))
>>>> +                       return 1;
>>>>                  svm->spec_ctrl = data;
>>>>                  break;
>>>>          case MSR_IA32_APICBASE:
>>>> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
>>>> index 49b4a2d61603..42bc7ee293e4 100644
>>>> --- a/arch/x86/kvm/vmx.c
>>>> +++ b/arch/x86/kvm/vmx.c
>>>> @@ -3368,6 +3368,10 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>>>>                  msr_info->data = guest_read_tsc(vcpu);
>>>>                  break;
>>>>          case MSR_IA32_SPEC_CTRL:
>>>> +               if (!static_cpu_has(X86_FEATURE_SPEC_CTRL) ||
>>>> +                   (!msr_info->host_initiated &&
>>>> +                    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)))
>>>> +                       return 1;
>>>>                  msr_info->data = to_vmx(vcpu)->spec_ctrl;
>>>>                  break;
>>>>          case MSR_IA32_SYSENTER_CS:
>>>> @@ -3510,6 +3514,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>>>>                  kvm_write_tsc(vcpu, msr_info);
>>>>                  break;
>>>>          case MSR_IA32_SPEC_CTRL:
>>>> +               if (!static_cpu_has(X86_FEATURE_SPEC_CTRL) ||
>>>> +                   (!msr_info->host_initiated &&
>>>> +                    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)))
>>>> +                       return 1;
>>>>                  to_vmx(vcpu)->spec_ctrl = data;
>>>>                  break;
>>>>          case MSR_IA32_CR_PAT:
>>>>
>>>
>


* Re: [9/8] KVM: x86: limit MSR_IA32_SPEC_CTRL access based on CPUID availability
  2018-01-30 16:57         ` Jim Mattson
@ 2018-01-30 17:14           ` David Woodhouse
  2018-01-30 17:38             ` Jim Mattson
  2018-01-30 17:45             ` Thomas Gleixner
  0 siblings, 2 replies; 46+ messages in thread
From: David Woodhouse @ 2018-01-30 17:14 UTC (permalink / raw)
  To: Jim Mattson, Mihai Carabas
  Cc: Paolo Bonzini, LKML, kvm list, Radim Krčmář,
	Liran Alon, Anthony Liguori, Tom Lendacky, Borislav Petkov,
	the arch/x86 maintainers, Konrad Rzeszutek Wilk

On Tue, 2018-01-30 at 08:57 -0800, Jim Mattson wrote:
> It's really hard to tell which patches are being proposed for which
> repositories, but assuming that everything else is correct, I don't
> think your condition is adequate. What if the physical CPU and the
> virtual CPU both have CPUID.(EAX=7H,ECX=0):EDX[26], but only the
> physical CPU has CPUID.(EAX=7H,ECX=0):EDX[27]? If the guest has write
> access to MSR_IA32_SPEC_CTRL, it can set MSR_IA32_SPEC_CTRL[1]
> (STIBP), even though setting that bit in the guest should raise #GP.

Everything we're talking about here is for tip/x86/pti. Which I note
has just updated to be 4.15-based, although I thought it was going to
stay on 4.14 for now. So I've updated my tree at
http://git.infradead.org/linux-retpoline.git/shortlog/refs/heads/ibpb
accordingly.

You can always write to the STIBP bit without a #GP even when it's not
advertised/available.

There's a possibility that we'll want to always trap and *prevent*
that, instead of passing through — because doing so will also have an
effect on the HT siblings. But as discussed, I wanted to get the basics
working before exploring the complex IBRS/STIBP interactions. This much
should be OK to start with.
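
As a sketch of that trap-and-prevent alternative (purely illustrative;
guest_cpuid_has_stibp() is a hypothetical helper and the STIBP bit is
written out by hand), the intercept would stay enabled and the write
handler would filter the value rather than pass the MSR through:

	static u64 spec_ctrl_mask_stibp(struct kvm_vcpu *vcpu, u64 data)
	{
		/*
		 * The guest wasn't given STIBP: drop the bit so the
		 * write cannot affect the HT sibling.
		 */
		if (!guest_cpuid_has_stibp(vcpu))
			data &= ~(1ULL << 1);	/* SPEC_CTRL.STIBP */
		return data;
	}

The MSR_IA32_SPEC_CTRL case in vmx_set_msr() would then store
spec_ctrl_mask_stibp(vcpu, data) instead of data.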


* Re: [9/8] KVM: x86: limit MSR_IA32_SPEC_CTRL access based on CPUID availability
  2018-01-30 17:14           ` David Woodhouse
@ 2018-01-30 17:38             ` Jim Mattson
  2018-01-30 17:45             ` Thomas Gleixner
  1 sibling, 0 replies; 46+ messages in thread
From: Jim Mattson @ 2018-01-30 17:38 UTC (permalink / raw)
  To: David Woodhouse
  Cc: Mihai Carabas, Paolo Bonzini, LKML, kvm list,
	Radim Krčmář,
	Liran Alon, Anthony Liguori, Tom Lendacky, Borislav Petkov,
	the arch/x86 maintainers, Konrad Rzeszutek Wilk

On Tue, Jan 30, 2018 at 9:14 AM, David Woodhouse <dwmw2@infradead.org> wrote:
> On Tue, 2018-01-30 at 08:57 -0800, Jim Mattson wrote:
>> It's really hard to tell which patches are being proposed for which
>> repositories, but assuming that everything else is correct, I don't
>> think your condition is adequate. What if the physical CPU and the
>> virtual CPU both have CPUID.(EAX=7H,ECX=0):EDX[26], but only the
>> physical CPU has CPUID.(EAX=7H,ECX=0):EDX[27]? If the guest has write
>> access to MSR_IA32_SPEC_CTRL, it can set MSR_IA32_SPEC_CTRL[1]
>> (STIBP), even though setting that bit in the guest should raise #GP.
>
> Everything we're talking about here is for tip/x86/pti. Which I note
> has just updated to be 4.15-based, although I thought it was going to
> stay on 4.14 for now. So I've updated my tree at
> http://git.infradead.org/linux-retpoline.git/shortlog/refs/heads/ibpb
> accordingly.
>
> You can always write to the STIBP bit without a #GP even when it's not
> advertised/available.

Oops. Yes, you're right. It's writing the IBRS bit when only STIBP is
available that results in a #GP.
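
The resulting rule for which SPEC_CTRL bits a guest may set, as
established in this exchange, sketches out roughly as follows (reusing
the constants from this series; the STIBP bit is written out by hand):

	static u64 spec_ctrl_valid_bits(struct kvm_vcpu *vcpu)
	{
		/* STIBP writes are a no-op, not a #GP, where unsupported. */
		u64 bits = 1ULL << 1;				/* STIBP */

		if (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
			bits |= SPEC_CTRL_FEATURE_ENABLE_IBRS;	/* IBRS */
		return bits;
	}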

> There's a possibility that we'll want to always trap and *prevent*
> that, instead of passing through — because doing so will also have an
> effect on the HT siblings. But as discussed, I wanted to get the basics
> working before exploring the complex IBRS/STIBP interactions. This much
> should be OK to start with.


* Re: [9/8] KVM: x86: limit MSR_IA32_SPEC_CTRL access based on CPUID availability
  2018-01-30 17:14           ` David Woodhouse
  2018-01-30 17:38             ` Jim Mattson
@ 2018-01-30 17:45             ` Thomas Gleixner
  2018-01-30 23:11               ` Paolo Bonzini
  1 sibling, 1 reply; 46+ messages in thread
From: Thomas Gleixner @ 2018-01-30 17:45 UTC (permalink / raw)
  To: David Woodhouse
  Cc: Jim Mattson, Mihai Carabas, Paolo Bonzini, LKML, kvm list,
	Radim Krčmář,
	Liran Alon, Anthony Liguori, Tom Lendacky, Borislav Petkov,
	the arch/x86 maintainers, Konrad Rzeszutek Wilk

On Tue, 30 Jan 2018, David Woodhouse wrote:

> On Tue, 2018-01-30 at 08:57 -0800, Jim Mattson wrote:
> > It's really hard to tell which patches are being proposed for which
> > repositories, but assuming that everything else is correct, I don't
> > think your condition is adequate. What if the physical CPU and the
> > virtual CPU both have CPUID.(EAX=7H,ECX=0):EDX[26], but only the
> > physical CPU has CPUID.(EAX=7H,ECX=0):EDX[27]? If the guest has write
> > access to MSR_IA32_SPEC_CTRL, it can set MSR_IA32_SPEC_CTRL[1]
> > (STIBP), even though setting that bit in the guest should raise #GP.
> 
> Everything we're talking about here is for tip/x86/pti. Which I note
> has just updated to be 4.15-based, although I thought it was going to
> stay on 4.14 for now. So I've updated my tree at
> http://git.infradead.org/linux-retpoline.git/shortlog/refs/heads/ibpb
> accordingly.

Yes, we tried to stay on a 4.14 base but this started to create nasty merge
conflicts for no value. Merging in v4.15 turned out to resolve those issues
while still serving as the feed branch for Greg's stable work. For the time
being we try to make stable backporting, at least for 4.14/15, as painless as
possible.

Thanks,

	tglx


* Re: [9/8] KVM: x86: limit MSR_IA32_SPEC_CTRL access based on CPUID availability
  2018-01-30 17:45             ` Thomas Gleixner
@ 2018-01-30 23:11               ` Paolo Bonzini
  2018-01-30 23:47                 ` David Woodhouse
  2018-02-05 11:10                 ` Ingo Molnar
  0 siblings, 2 replies; 46+ messages in thread
From: Paolo Bonzini @ 2018-01-30 23:11 UTC (permalink / raw)
  To: Thomas Gleixner, David Woodhouse
  Cc: Jim Mattson, Mihai Carabas, LKML, kvm list,
	Radim Krčmář,
	Liran Alon, Anthony Liguori, Tom Lendacky, Borislav Petkov,
	the arch/x86 maintainers, Konrad Rzeszutek Wilk

On 30/01/2018 12:45, Thomas Gleixner wrote:
> On Tue, 30 Jan 2018, David Woodhouse wrote:
> 
>> On Tue, 2018-01-30 at 08:57 -0800, Jim Mattson wrote:
>>> It's really hard to tell which patches are being proposed for which
>>> repositories, but assuming that everything else is correct, I don't
>>> think your condition is adequate. What if the physical CPU and the
>>> virtual CPU both have CPUID.(EAX=7H,ECX=0):EDX[26], but only the
>>> physical CPU has CPUID.(EAX=7H,ECX=0):EDX[27]? If the guest has write
>>> access to MSR_IA32_SPEC_CTRL, it can set MSR_IA32_SPEC_CTRL[1]
>>> (STIBP), even though setting that bit in the guest should raise #GP.
>>
>> Everything we're talking about here is for tip/x86/pti. Which I note
>> has just updated to be 4.15-based, although I thought it was going to
>> stay on 4.14 for now. So I've updated my tree at
>> http://git.infradead.org/linux-retpoline.git/shortlog/refs/heads/ibpb
>> accordingly.
> 
> Yes, we tried to stay on a 4.14 base but this started to create nasty merge
> conflicts for no value. Merging in v4.15 turned out to resolve those issues
> while still serving as the feed branch for Greg's stable work. For the time
> being we try to make stable backporting, at least for 4.14/15, as painless as
> possible.

Great, then the "per-VCPU MSR bitmaps" branch
(git://git.kernel.org/pub/scm/virt/kvm/kvm.git refs/heads/msr-bitmaps)
that I created last weekend can be pulled directly into tip/x86/pti.

It cherry-picks directly to 4.14, so it will be fine for Greg too.

Paolo


* Re: [9/8] KVM: x86: limit MSR_IA32_SPEC_CTRL access based on CPUID availability
  2018-01-30 23:11               ` Paolo Bonzini
@ 2018-01-30 23:47                 ` David Woodhouse
  2018-01-31  1:06                   ` Paolo Bonzini
  2018-02-05 11:10                 ` Ingo Molnar
  1 sibling, 1 reply; 46+ messages in thread
From: David Woodhouse @ 2018-01-30 23:47 UTC (permalink / raw)
  To: Paolo Bonzini, Thomas Gleixner
  Cc: Jim Mattson, Mihai Carabas, LKML, kvm list,
	Radim Krčmář,
	Liran Alon, Anthony Liguori, Tom Lendacky, Borislav Petkov,
	the arch/x86 maintainers, Konrad Rzeszutek Wilk

On Tue, 2018-01-30 at 18:11 -0500, Paolo Bonzini wrote:

> Great, then the "per-VCPU MSR bitmaps" branch
> (git://git.kernel.org/pub/scm/virt/kvm/kvm.git refs/heads/msr-bitmaps)
> that I created last weekend can be pulled directly into tip/x86/pti.
> 
> It cherry-picks directly to 4.14, so it will be fine for Greg too.

Right, I've switched to pulling that branch into my tree at
http://git.infradead.org/linux-retpoline.git/shortlog/refs/heads/ibpb

I seem to recall there was some heckling at it, and I expected it to
change? :)


* Re: [9/8] KVM: x86: limit MSR_IA32_SPEC_CTRL access based on CPUID availability
  2018-01-30 23:47                 ` David Woodhouse
@ 2018-01-31  1:06                   ` Paolo Bonzini
  0 siblings, 0 replies; 46+ messages in thread
From: Paolo Bonzini @ 2018-01-31  1:06 UTC (permalink / raw)
  To: David Woodhouse, Thomas Gleixner
  Cc: Jim Mattson, Mihai Carabas, LKML, kvm list,
	Radim Krčmář,
	Liran Alon, Anthony Liguori, Tom Lendacky, Borislav Petkov,
	the arch/x86 maintainers, Konrad Rzeszutek Wilk

On 30/01/2018 18:47, David Woodhouse wrote:
> On Tue, 2018-01-30 at 18:11 -0500, Paolo Bonzini wrote:
> 
>> Great, then the "per-VCPU MSR bitmaps" branch
>> (git://git.kernel.org/pub/scm/virt/kvm/kvm.git refs/heads/msr-bitmaps)
>> that I created last weekend can be pulled directly into tip/x86/pti.
>>
>> It cherry-picks directly to 4.14, so it will be fine for Greg too.
> 
> Right, I've switched to pulling that branch into my tree at
> http://git.infradead.org/linux-retpoline.git/shortlog/refs/heads/ibpb
> 
> I seem to recall there was some heckling at it, and I expected it to
> change? :)

No, apart from adding a "!" in v1->v2 it's all fixed.  SHA1 ids won't
change.

Paolo


* Re: [9/8] KVM: x86: limit MSR_IA32_SPEC_CTRL access based on CPUID availability
  2018-01-30 23:11               ` Paolo Bonzini
  2018-01-30 23:47                 ` David Woodhouse
@ 2018-02-05 11:10                 ` Ingo Molnar
  2018-02-05 11:15                   ` David Woodhouse
  1 sibling, 1 reply; 46+ messages in thread
From: Ingo Molnar @ 2018-02-05 11:10 UTC (permalink / raw)
  To: Paolo Bonzini
  Cc: Thomas Gleixner, David Woodhouse, Jim Mattson, Mihai Carabas,
	LKML, kvm list, Radim Krčmář,
	Liran Alon, Anthony Liguori, Tom Lendacky, Borislav Petkov,
	the arch/x86 maintainers, Konrad Rzeszutek Wilk


* Paolo Bonzini <pbonzini@redhat.com> wrote:

> On 30/01/2018 12:45, Thomas Gleixner wrote:
> > On Tue, 30 Jan 2018, David Woodhouse wrote:
> > 
> >> On Tue, 2018-01-30 at 08:57 -0800, Jim Mattson wrote:
> >>> It's really hard to tell which patches are being proposed for which
> >>> repositories, but assuming that everything else is correct, I don't
> >>> think your condition is adequate. What if the physical CPU and the
> >>> virtual CPU both have CPUID.(EAX=7H,ECX=0):EDX[26], but only the
> >>> physical CPU has CPUID.(EAX=7H,ECX=0):EDX[27]? If the guest has write
> >>> access to MSR_IA32_SPEC_CTRL, it can set MSR_IA32_SPEC_CTRL[1]
> >>> (STIBP), even though setting that bit in the guest should raise #GP.
> >>
> >> Everything we're talking about here is for tip/x86/pti. Which I note
> >> has just updated to be 4.15-based, although I thought it was going to
> >> stay on 4.14 for now. So I've updated my tree at
> >> http://git.infradead.org/linux-retpoline.git/shortlog/refs/heads/ibpb
> >> accordingly.
> > 
> > Yes, we tried to stay on a 4.14 base but this started to create nasty merge
> > conflicts for no value. Merging in v4.15 turned out to resolve those issues
> > while still serving as the feed branch for Greg's stable work. For the time
> > being we try to make stable backporting, at least for 4.14/15, as painless as
> > possible.
> 
> Great, then the "per-VCPU MSR bitmaps" branch
> (git://git.kernel.org/pub/scm/virt/kvm/kvm.git refs/heads/msr-bitmaps)
> that I created last weekend can be pulled directly into tip/x86/pti.

Can this branch still be rebased, to fix the SoB chain of:

  de3a0021a606 ("KVM: nVMX: Eliminate vmcs02 pool")

?

I'm not sure what workflow resulted in this commit, but it is missing your SoB:

  commit de3a0021a60635de96aa92713c1a31a96747d72c
  Author:     Jim Mattson <jmattson@google.com>
  AuthorDate: Mon Nov 27 17:22:25 2017 -0600
  Commit:     Paolo Bonzini <pbonzini@redhat.com>
  CommitDate: Sat Jan 27 09:43:03 2018 +0100

    KVM: nVMX: Eliminate vmcs02 pool
    
    The potential performance advantages of a vmcs02 pool have never been
    realized. To simplify the code, eliminate the pool. Instead, a single
    vmcs02 is allocated per VCPU when the VCPU enters VMX operation.
    
    Cc: stable@vger.kernel.org       # prereq for Spectre mitigation
    Signed-off-by: Jim Mattson <jmattson@google.com>
    Signed-off-by: Mark Kanda <mark.kanda@oracle.com>
    Reviewed-by: Ameya More <ameya.more@oracle.com>
    Reviewed-by: David Hildenbrand <david@redhat.com>
    Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
    Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>

You probably rebased Radim's tree?

If this tree can still be rebased I'd like to re-pull it into tip:x86/pti, as
those bits are not yet upstream.

Thanks,

	Ingo


* Re: [9/8] KVM: x86: limit MSR_IA32_SPEC_CTRL access based on CPUID availability
  2018-02-05 11:10                 ` Ingo Molnar
@ 2018-02-05 11:15                   ` David Woodhouse
  2018-02-05 12:10                     ` Ingo Molnar
  0 siblings, 1 reply; 46+ messages in thread
From: David Woodhouse @ 2018-02-05 11:15 UTC (permalink / raw)
  To: Ingo Molnar, Paolo Bonzini
  Cc: Thomas Gleixner, Jim Mattson, Mihai Carabas, LKML, kvm list,
	Radim Krčmář,
	Liran Alon, Anthony Liguori, Tom Lendacky, Borislav Petkov,
	the arch/x86 maintainers, Konrad Rzeszutek Wilk

On Mon, 2018-02-05 at 12:10 +0100, Ingo Molnar wrote:
> 
> Can this branch still be rebased, to fix the SoB chain of:
> 
>   de3a0021a606 ("KVM: nVMX: Eliminate vmcs02 pool")
> 
> ?

It can't. Linus pulled it already.


* Re: [9/8] KVM: x86: limit MSR_IA32_SPEC_CTRL access based on CPUID availability
  2018-02-05 11:15                   ` David Woodhouse
@ 2018-02-05 12:10                     ` Ingo Molnar
  0 siblings, 0 replies; 46+ messages in thread
From: Ingo Molnar @ 2018-02-05 12:10 UTC (permalink / raw)
  To: David Woodhouse
  Cc: Paolo Bonzini, Thomas Gleixner, Jim Mattson, Mihai Carabas, LKML,
	kvm list, Radim Krčmář,
	Liran Alon, Anthony Liguori, Tom Lendacky, Borislav Petkov,
	the arch/x86 maintainers, Konrad Rzeszutek Wilk


* David Woodhouse <dwmw2@infradead.org> wrote:

> On Mon, 2018-02-05 at 12:10 +0100, Ingo Molnar wrote:
> > 
> > Can this branch still be rebased, to fix the SoB chain of:
> > 
> >   de3a0021a606 ("KVM: nVMX: Eliminate vmcs02 pool")
> > 
> > ?
> 
> It can't. Linus pulled it already.

Ah, indeed, freshly so in 35277995e179 - never mind then!

Thanks,

	Ingo


end of thread

Thread overview: 46+ messages
2018-01-09 12:03 [PATCH v2 0/8] KVM: x86: expose CVE-2017-5715 ("Spectre variant 2") mitigations to guest Paolo Bonzini
2018-01-09 12:03 ` [PATCH 1/8] KVM: x86: add SPEC_CTRL and IBPB_SUPPORT accessors Paolo Bonzini
2018-01-15  9:42   ` David Hildenbrand
2018-01-09 12:03 ` [PATCH 2/8] x86/msr: add definitions for indirect branch predictor MSRs Paolo Bonzini
2018-01-09 12:03 ` [PATCH 3/8] kvm: vmx: pass MSR_IA32_SPEC_CTRL and MSR_IA32_PRED_CMD down to the guest Paolo Bonzini
2018-01-13 10:16   ` Longpeng (Mike)
2018-01-15  9:23     ` Paolo Bonzini
2018-01-15  9:34       ` Thomas Gleixner
     [not found]   ` <1515839272.22302.520.camel@amazon.co.uk>
2018-01-15  9:23     ` Paolo Bonzini
2018-01-09 12:03 ` [PATCH 4/8] kvm: vmx: Set IBPB when running a different VCPU Paolo Bonzini
2018-01-12  1:49   ` Wanpeng Li
2018-01-12 17:03     ` Jim Mattson
2018-01-13  9:29       ` Woodhouse, David
2018-01-15  9:21         ` Paolo Bonzini
2018-01-09 12:03 ` [PATCH 5/8] KVM: SVM: fix comment Paolo Bonzini
2018-01-15  9:53   ` David Hildenbrand
2018-01-09 12:03 ` [PATCH 6/8] kvm: svm: pass MSR_IA32_SPEC_CTRL and MSR_IA32_PRED_CMD down to guest Paolo Bonzini
2018-01-09 14:22   ` Konrad Rzeszutek Wilk
2018-01-09 16:05     ` Paolo Bonzini
2018-01-09 16:08     ` Paolo Bonzini
2018-01-11 10:45       ` Wanpeng Li
2018-01-10 20:13   ` Tom Lendacky
2018-01-11 10:33     ` Paolo Bonzini
2018-01-09 12:03 ` [PATCH 7/8] x86/svm: Set IBPB when running a different VCPU Paolo Bonzini
2018-01-09 14:23   ` Konrad Rzeszutek Wilk
2018-01-09 12:03 ` [PATCH 8/8] KVM: x86: add SPEC_CTRL and IBPB_SUPPORT to MSR and CPUID lists Paolo Bonzini
2018-01-13  1:25   ` Eric Wheeler
2018-01-13  8:00     ` Paolo Bonzini
2018-01-16  0:40       ` Eric Wheeler
2018-01-16  7:39         ` R: " Paolo Bonzini
2018-01-09 12:03 ` [PATCH 9/8] KVM: x86: limit MSR_IA32_SPEC_CTRL access based on CPUID availability Paolo Bonzini
2018-01-16  0:55   ` Eric Wheeler
2018-01-16 12:59     ` Paolo Bonzini
2018-01-30 13:21   ` [9/8] " Mihai Carabas
2018-01-30 16:33     ` Jim Mattson
2018-01-30 16:43       ` Mihai Carabas
2018-01-30 16:57         ` Jim Mattson
2018-01-30 17:14           ` David Woodhouse
2018-01-30 17:38             ` Jim Mattson
2018-01-30 17:45             ` Thomas Gleixner
2018-01-30 23:11               ` Paolo Bonzini
2018-01-30 23:47                 ` David Woodhouse
2018-01-31  1:06                   ` Paolo Bonzini
2018-02-05 11:10                 ` Ingo Molnar
2018-02-05 11:15                   ` David Woodhouse
2018-02-05 12:10                     ` Ingo Molnar
