[5/5] x86/feature: Detect the x86 feature Indirect Branch Prediction Barrier

Message ID 1515720739-43819-6-git-send-email-ashok.raj@intel.com
State New, archived
Series
  • Add support for IBRS & IBPB KVM support.

Commit Message

Raj, Ashok Jan. 12, 2018, 1:32 a.m. UTC
cpuid ax=0x7 returns rdx bit 26 to indicate the presence of both
IA32_SPEC_CTRL (MSR 0x48) and IA32_PRED_CMD (MSR 0x49).

BIT0: Indirect Branch Prediction Barrier

When this MSR is written with IBPB=1 it ensures that earlier code's behavior
doesn't control later indirect branch predictions.

Note this MSR is write-only and does not carry any state. It's a barrier,
so the code should perform a wrmsr whenever the barrier is needed.
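
For example, a caller that needs the barrier would issue a single checked
MSR write (minimal sketch, using only the definitions added by this patch):

	if (boot_cpu_has(X86_FEATURE_PRED_CMD))
		native_wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);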

Signed-off-by: Ashok Raj <ashok.raj@intel.com>
---
 arch/x86/include/asm/cpufeatures.h |  1 +
 arch/x86/include/asm/msr-index.h   |  3 +++
 arch/x86/kernel/cpu/spec_ctrl.c    |  7 +++++++
 arch/x86/kvm/svm.c                 | 16 ++++++++++++++++
 arch/x86/kvm/vmx.c                 | 10 ++++++++++
 5 files changed, 37 insertions(+)

Comments

Peter Zijlstra Jan. 12, 2018, 10:08 a.m. UTC | #1
On Thu, Jan 11, 2018 at 05:32:19PM -0800, Ashok Raj wrote:
> @@ -1711,11 +1715,18 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
>  	__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
>  	kvm_vcpu_uninit(vcpu);
>  	kmem_cache_free(kvm_vcpu_cache, svm);
> +    /* 
> +     * The VMCB could be recycled, causing a false negative in svm_vcpu_load;
> +     * block speculative execution.
> +     */
> +	if (boot_cpu_has(X86_FEATURE_PRED_CMD))
> +        native_wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
>  }

> @@ -3837,6 +3839,12 @@ static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
>  	free_vmcs(loaded_vmcs->vmcs);
>  	loaded_vmcs->vmcs = NULL;
>  	WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
> +    /*
> +     * The VMCS could be recycled, causing a false negative in vmx_vcpu_load
> +     * block speculative execution.
> +     */
> +	if (boot_cpu_has(X86_FEATURE_SPEC_CTRL))
> +        native_wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
>  }

Whitespace damage.

Also, why not introduce a helper like:

static inline void flush_ibpb(void)
{
	if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
		native_write_msr(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
}

?
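
With such a helper the call sites in the patch would shrink to a bare call,
e.g. (sketch, reusing the comment from svm_free_vcpu()):

	/*
	 * The VMCB could be recycled, causing a false negative in
	 * svm_vcpu_load; block speculative execution.
	 */
	flush_ibpb();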
Borislav Petkov Jan. 12, 2018, 12:32 p.m. UTC | #2
On Thu, Jan 11, 2018 at 05:32:19PM -0800, Ashok Raj wrote:
> cpuid ax=0x7, return rdx bit 26 to indicate presence of both
> IA32_SPEC_CTRL(MSR 0x48) and IA32_PRED_CMD(MSR 0x49)

So why do we need two X86_FEATURE flags then?
Woodhouse, David Jan. 12, 2018, 12:39 p.m. UTC | #3
On Fri, 2018-01-12 at 13:32 +0100, Borislav Petkov wrote:
> On Thu, Jan 11, 2018 at 05:32:19PM -0800, Ashok Raj wrote:
> > cpuid ax=0x7, return rdx bit 26 to indicate presence of both
> > IA32_SPEC_CTRL(MSR 0x48) and IA32_PRED_CMD(MSR 0x49)
> 
> So why do we need two X86_FEATURE flags then?

AMD has only the latter and enumerates them differently.
Lendacky, Thomas Jan. 12, 2018, 3:21 p.m. UTC | #4
On 1/12/2018 6:39 AM, Woodhouse, David wrote:
> On Fri, 2018-01-12 at 13:32 +0100, Borislav Petkov wrote:
>> On Thu, Jan 11, 2018 at 05:32:19PM -0800, Ashok Raj wrote:
>>> cpuid ax=0x7, return rdx bit 26 to indicate presence of both
>>> IA32_SPEC_CTRL(MSR 0x48) and IA32_PRED_CMD(MSR 0x49)
>>
>> So why do we need two X86_FEATURE flags then?
> 
> AMD has only the latter and enumerates them differently.

Correct.  Both 0x48 and 0x49 are tied to the same cpuid bit.  AMD has
a separate cpuid bit for 0x49 (IBPB) alone.

Thanks,
Tom

>
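
(For reference, the Intel enumeration described above can be probed
directly; a minimal sketch, with an illustrative function name:)

	static bool cpu_has_spec_ctrl_msrs(void)
	{
		unsigned int eax, ebx, ecx, edx;

		if (cpuid_eax(0) < 7)
			return false;

		/* CPUID.(EAX=7,ECX=0): EDX[26] covers MSR 0x48 and MSR 0x49 */
		cpuid_count(7, 0, &eax, &ebx, &ecx, &edx);
		return edx & BIT(26);
	}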
Lendacky, Thomas Jan. 12, 2018, 3:31 p.m. UTC | #5
On 1/11/2018 7:32 PM, Ashok Raj wrote:
> cpuid ax=0x7, return rdx bit 26 to indicate presence of both
> IA32_SPEC_CTRL(MSR 0x48) and IA32_PRED_CMD(MSR 0x49)
> 
> BIT0: Indirect Branch Prediction Barrier
> 
> When this MSR is written with IBPB=1 it ensures that earlier code's behavior
> doesn't control later indirect branch predictions.
> 
> Note this MSR is only writable and does not carry any state. Its a barrier
> so the code should perform a wrmsr when the barrier is needed.
> 
> Signed-off-by: Ashok Raj <ashok.raj@intel.com>
> ---
>  arch/x86/include/asm/cpufeatures.h |  1 +
>  arch/x86/include/asm/msr-index.h   |  3 +++
>  arch/x86/kernel/cpu/spec_ctrl.c    |  7 +++++++
>  arch/x86/kvm/svm.c                 | 16 ++++++++++++++++
>  arch/x86/kvm/vmx.c                 | 10 ++++++++++
>  5 files changed, 37 insertions(+)
> 
> diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
> index 624b58e..52f37fc 100644
> --- a/arch/x86/include/asm/cpufeatures.h
> +++ b/arch/x86/include/asm/cpufeatures.h
> @@ -213,6 +213,7 @@
>  #define X86_FEATURE_MBA			( 7*32+18) /* Memory Bandwidth Allocation */
>  #define X86_FEATURE_SPEC_CTRL		( 7*32+19) /* Speculation Control */
>  #define X86_FEATURE_SPEC_CTRL_IBRS	( 7*32+20) /* Speculation Control, use IBRS */
> +#define X86_FEATURE_PRED_CMD	( 7*32+21) /* Indirect Branch Prediction Barrier */
>  
>  /* Virtualization flags: Linux defined, word 8 */
>  #define X86_FEATURE_TPR_SHADOW		( 8*32+ 0) /* Intel TPR Shadow */
> diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
> index 3e1cb18..1888e19 100644
> --- a/arch/x86/include/asm/msr-index.h
> +++ b/arch/x86/include/asm/msr-index.h
> @@ -46,6 +46,9 @@
>  #define SPEC_CTRL_DISABLE_IBRS		(0 << 0)
>  #define SPEC_CTRL_ENABLE_IBRS		(1 << 0)
>  
> +#define MSR_IA32_PRED_CMD		0x00000049
> +#define FEATURE_SET_IBPB		(1<<0)
> +
>  #define MSR_IA32_PERFCTR0		0x000000c1
>  #define MSR_IA32_PERFCTR1		0x000000c2
>  #define MSR_FSB_FREQ			0x000000cd
> diff --git a/arch/x86/kernel/cpu/spec_ctrl.c b/arch/x86/kernel/cpu/spec_ctrl.c
> index 02fc630..6cfec19 100644
> --- a/arch/x86/kernel/cpu/spec_ctrl.c
> +++ b/arch/x86/kernel/cpu/spec_ctrl.c
> @@ -15,6 +15,13 @@ void spec_ctrl_scan_feature(struct cpuinfo_x86 *c)
>  			if (!c->cpu_index)
>  				static_branch_enable(&spec_ctrl_dynamic_ibrs);
>  		}
> +		/*
> +		 * For Intel CPU's this MSR is shared the same cpuid
> +		 * enumeration. When MSR_IA32_SPEC_CTRL is present
> +		 * MSR_IA32_SPEC_CTRL is also available
> +		 * TBD: AMD might have a separate enumeration for each.

AMD will follow the specification that if cpuid ax=0x7, return rdx[26]
is set, it will indicate both MSR registers and features are supported.

But AMD also has a separate bit for IBPB (X86_FEATURE_PRED_CMD) alone.
As all of the IBRS/IBPB stuff happens, that patch will follow.

> +		 */
> +		set_cpu_cap(c, X86_FEATURE_PRED_CMD);
>  	}
>  }
>  
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index 7c14471a..36924c9 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -251,6 +251,7 @@ static const struct svm_direct_access_msrs {
>  	{ .index = MSR_SYSCALL_MASK,			.always = true  },
>  #endif
>  	{ .index = MSR_IA32_SPEC_CTRL,          .always = true  },
> +	{ .index = MSR_IA32_PRED_CMD,           .always = false },

This should be .always = true

>  	{ .index = MSR_IA32_LASTBRANCHFROMIP,		.always = false },
>  	{ .index = MSR_IA32_LASTBRANCHTOIP,		.always = false },
>  	{ .index = MSR_IA32_LASTINTFROMIP,		.always = false },
> @@ -531,6 +532,7 @@ struct svm_cpu_data {
>  	struct kvm_ldttss_desc *tss_desc;
>  
>  	struct page *save_area;
> +	struct vmcb *current_vmcb;
>  };
>  
>  static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
> @@ -923,6 +925,8 @@ static void svm_vcpu_init_msrpm(u32 *msrpm)
>  
>  	if (boot_cpu_has(X86_FEATURE_SPEC_CTRL))
>  		set_msr_interception(msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
> +	if (boot_cpu_has(X86_FEATURE_PRED_CMD))
> +		set_msr_interception(msrpm, MSR_IA32_PRED_CMD, 1, 1);

Similar to the comment about SPEC_CTRL, this should be removed as it will
be covered by the loop.

>  }
>  
>  static void add_msr_offset(u32 offset)
> @@ -1711,11 +1715,18 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
>  	__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
>  	kvm_vcpu_uninit(vcpu);
>  	kmem_cache_free(kvm_vcpu_cache, svm);
> +    /* 
> +     * The VMCB could be recycled, causing a false negative in svm_vcpu_load;
> +     * block speculative execution.
> +     */
> +	if (boot_cpu_has(X86_FEATURE_PRED_CMD))
> +        native_wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
>  }
>  
>  static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>  {
>  	struct vcpu_svm *svm = to_svm(vcpu);
> +	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
>  	int i;
>  
>  	if (unlikely(cpu != vcpu->cpu)) {
> @@ -1744,6 +1755,11 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>  	if (static_cpu_has(X86_FEATURE_RDTSCP))
>  		wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
>  
> +	if (sd->current_vmcb != svm->vmcb) {
> +		sd->current_vmcb = svm->vmcb;
> +		if (boot_cpu_has(X86_FEATURE_PRED_CMD))
> +			native_wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
> +	}
>  	avic_vcpu_load(vcpu, cpu);
>  }
>  
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 1913896..caeb9ff 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -2280,6 +2280,8 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>  	if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
>  		per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
>  		vmcs_load(vmx->loaded_vmcs->vmcs);
> +		if (boot_cpu_has(X86_FEATURE_SPEC_CTRL))

This should probably use X86_FEATURE_PRED_CMD.

> +			native_wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
>  	}
>  
>  	if (!already_loaded) {
> @@ -3837,6 +3839,12 @@ static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
>  	free_vmcs(loaded_vmcs->vmcs);
>  	loaded_vmcs->vmcs = NULL;
>  	WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
> +    /*
> +     * The VMCS could be recycled, causing a false negative in vmx_vcpu_load
> +     * block speculative execution.
> +     */
> +	if (boot_cpu_has(X86_FEATURE_SPEC_CTRL))

Again, X86_FEATURE_PRED_CMD.

> +        native_wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
>  }
>  
>  static void free_kvm_area(void)
> @@ -6804,6 +6812,8 @@ static __init int hardware_setup(void)
>  	 */
>  	if (boot_cpu_has(X86_FEATURE_SPEC_CTRL))
>  		vmx_disable_intercept_for_msr(MSR_IA32_SPEC_CTRL, false);
> +	if (boot_cpu_has(X86_FEATURE_PRED_CMD))
> +		vmx_disable_intercept_for_msr(MSR_IA32_PRED_CMD, false);
>  
>  	vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
>  	vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
>
Woodhouse, David Jan. 12, 2018, 3:36 p.m. UTC | #6
On Fri, 2018-01-12 at 09:31 -0600, Tom Lendacky wrote:
> 
> AMD will follow the specification that if cpuid ax=0x7, return rdx[26]
> is set, it will indicate both MSR registers and features are supported.
> 
> But AMD also has a separate bit for IBPB (X86_FEATURE_PRED_CMD) alone.
> As all of the IBRS/IBPB stuff happens, that patch will follow.

Please let's roll it into the patch set. I don't want Intel posting
deliberately AMD-ignoring patches. Sort it out, guys.
Lendacky, Thomas Jan. 12, 2018, 5:06 p.m. UTC | #7
On 1/12/2018 9:36 AM, Woodhouse, David wrote:
> On Fri, 2018-01-12 at 09:31 -0600, Tom Lendacky wrote:
>>
>> AMD will follow the specification that if cpuid ax=0x7, return rdx[26]
>> is set, it will indicate both MSR registers and features are supported.
>>
>> But AMD also has a separate bit for IBPB (X86_FEATURE_PRED_CMD) alone.
>> As all of the IBRS/IBPB stuff happens, that patch will follow.
> 
> Please let's roll it into the patch set. I don't want Intel posting
> deliberately AMD-ignoring patches. Sort it out, guys.
> 

Based on the current patches, here is what it should be for the
standalone IBPB support:

x86/cpu: Detect standalone IBPB support

From: Tom Lendacky <thomas.lendacky@amd.com>

Add support to detect standalone IBPB feature support.  This feature is
indicated as follows:

  CPUID EAX=0x80000008, ECX=0x00 return EBX[12] indicates support for
  IBPB

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
---
 arch/x86/include/asm/cpufeatures.h |    1 +
 arch/x86/kernel/cpu/spec_ctrl.c    |    9 +++++----
 2 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 52f37fc..33f0215 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -273,6 +273,7 @@
 #define X86_FEATURE_CLZERO		(13*32+ 0) /* CLZERO instruction */
 #define X86_FEATURE_IRPERF		(13*32+ 1) /* Instructions Retired Count */
 #define X86_FEATURE_XSAVEERPTR		(13*32+ 2) /* Always save/restore FP error pointers */
+#define X86_FEATURE_IBPB		(13*32+12) /* Indirect Branch Prediction Barrier */
 
 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
 #define X86_FEATURE_DTHERM		(14*32+ 0) /* Digital Thermal Sensor */
diff --git a/arch/x86/kernel/cpu/spec_ctrl.c b/arch/x86/kernel/cpu/spec_ctrl.c
index 6cfec19..1aadd73 100644
--- a/arch/x86/kernel/cpu/spec_ctrl.c
+++ b/arch/x86/kernel/cpu/spec_ctrl.c
@@ -16,12 +16,13 @@ void spec_ctrl_scan_feature(struct cpuinfo_x86 *c)
 				static_branch_enable(&spec_ctrl_dynamic_ibrs);
 		}
 		/*
-		 * For Intel CPU's this MSR is shared the same cpuid
-		 * enumeration. When MSR_IA32_SPEC_CTRL is present
-		 * MSR_IA32_SPEC_CTRL is also available
-		 * TBD: AMD might have a separate enumeration for each.
+		 * The PRED_CMD MSR is shared with the cpuid enumeration
+		 * for SPEC_CTRL.  When MSR_IA32_SPEC_CTRL is present,
+		 * then MSR_IA32_PRED_CMD is, too.
 		 */
 		set_cpu_cap(c, X86_FEATURE_PRED_CMD);
+	} else if (boot_cpu_has(X86_FEATURE_IBPB)) {
+		set_cpu_cap(c, X86_FEATURE_PRED_CMD);
 	}
 }
 

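Since word 13 of cpufeatures.h is populated from CPUID leaf 0x80000008 EBX
by the common CPUID scan, the new #define is picked up automatically; the
same bit can also be probed directly (minimal sketch, illustrative
function name):

	static bool cpu_has_standalone_ibpb(void)
	{
		/* CPUID EAX=0x80000008: EBX[12] advertises IBPB on its own */
		if (cpuid_eax(0x80000000) < 0x80000008)
			return false;

		return cpuid_ebx(0x80000008) & BIT(12);
	}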


Patch

diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 624b58e..52f37fc 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -213,6 +213,7 @@ 
 #define X86_FEATURE_MBA			( 7*32+18) /* Memory Bandwidth Allocation */
 #define X86_FEATURE_SPEC_CTRL		( 7*32+19) /* Speculation Control */
 #define X86_FEATURE_SPEC_CTRL_IBRS	( 7*32+20) /* Speculation Control, use IBRS */
+#define X86_FEATURE_PRED_CMD	( 7*32+21) /* Indirect Branch Prediction Barrier */
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW		( 8*32+ 0) /* Intel TPR Shadow */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 3e1cb18..1888e19 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -46,6 +46,9 @@ 
 #define SPEC_CTRL_DISABLE_IBRS		(0 << 0)
 #define SPEC_CTRL_ENABLE_IBRS		(1 << 0)
 
+#define MSR_IA32_PRED_CMD		0x00000049
+#define FEATURE_SET_IBPB		(1<<0)
+
 #define MSR_IA32_PERFCTR0		0x000000c1
 #define MSR_IA32_PERFCTR1		0x000000c2
 #define MSR_FSB_FREQ			0x000000cd
diff --git a/arch/x86/kernel/cpu/spec_ctrl.c b/arch/x86/kernel/cpu/spec_ctrl.c
index 02fc630..6cfec19 100644
--- a/arch/x86/kernel/cpu/spec_ctrl.c
+++ b/arch/x86/kernel/cpu/spec_ctrl.c
@@ -15,6 +15,13 @@  void spec_ctrl_scan_feature(struct cpuinfo_x86 *c)
 			if (!c->cpu_index)
 				static_branch_enable(&spec_ctrl_dynamic_ibrs);
 		}
+		/*
+		 * For Intel CPU's this MSR is shared the same cpuid
+		 * enumeration. When MSR_IA32_SPEC_CTRL is present
+		 * MSR_IA32_SPEC_CTRL is also available
+		 * TBD: AMD might have a separate enumeration for each.
+		 */
+		set_cpu_cap(c, X86_FEATURE_PRED_CMD);
 	}
 }
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 7c14471a..36924c9 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -251,6 +251,7 @@  static const struct svm_direct_access_msrs {
 	{ .index = MSR_SYSCALL_MASK,			.always = true  },
 #endif
 	{ .index = MSR_IA32_SPEC_CTRL,          .always = true  },
+	{ .index = MSR_IA32_PRED_CMD,           .always = false },
 	{ .index = MSR_IA32_LASTBRANCHFROMIP,		.always = false },
 	{ .index = MSR_IA32_LASTBRANCHTOIP,		.always = false },
 	{ .index = MSR_IA32_LASTINTFROMIP,		.always = false },
@@ -531,6 +532,7 @@  struct svm_cpu_data {
 	struct kvm_ldttss_desc *tss_desc;
 
 	struct page *save_area;
+	struct vmcb *current_vmcb;
 };
 
 static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
@@ -923,6 +925,8 @@  static void svm_vcpu_init_msrpm(u32 *msrpm)
 
 	if (boot_cpu_has(X86_FEATURE_SPEC_CTRL))
 		set_msr_interception(msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
+	if (boot_cpu_has(X86_FEATURE_PRED_CMD))
+		set_msr_interception(msrpm, MSR_IA32_PRED_CMD, 1, 1);
 }
 
 static void add_msr_offset(u32 offset)
@@ -1711,11 +1715,18 @@  static void svm_free_vcpu(struct kvm_vcpu *vcpu)
 	__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
 	kvm_vcpu_uninit(vcpu);
 	kmem_cache_free(kvm_vcpu_cache, svm);
+    /* 
+     * The VMCB could be recycled, causing a false negative in svm_vcpu_load;
+     * block speculative execution.
+     */
+	if (boot_cpu_has(X86_FEATURE_PRED_CMD))
+        native_wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
 }
 
 static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
+	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
 	int i;
 
 	if (unlikely(cpu != vcpu->cpu)) {
@@ -1744,6 +1755,11 @@  static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	if (static_cpu_has(X86_FEATURE_RDTSCP))
 		wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
 
+	if (sd->current_vmcb != svm->vmcb) {
+		sd->current_vmcb = svm->vmcb;
+		if (boot_cpu_has(X86_FEATURE_PRED_CMD))
+			native_wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
+	}
 	avic_vcpu_load(vcpu, cpu);
 }
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 1913896..caeb9ff 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2280,6 +2280,8 @@  static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
 		per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
 		vmcs_load(vmx->loaded_vmcs->vmcs);
+		if (boot_cpu_has(X86_FEATURE_SPEC_CTRL))
+			native_wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
 	}
 
 	if (!already_loaded) {
@@ -3837,6 +3839,12 @@  static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
 	free_vmcs(loaded_vmcs->vmcs);
 	loaded_vmcs->vmcs = NULL;
 	WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
+    /*
+     * The VMCS could be recycled, causing a false negative in vmx_vcpu_load
+     * block speculative execution.
+     */
+	if (boot_cpu_has(X86_FEATURE_SPEC_CTRL))
+        native_wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
 }
 
 static void free_kvm_area(void)
@@ -6804,6 +6812,8 @@  static __init int hardware_setup(void)
 	 */
 	if (boot_cpu_has(X86_FEATURE_SPEC_CTRL))
 		vmx_disable_intercept_for_msr(MSR_IA32_SPEC_CTRL, false);
+	if (boot_cpu_has(X86_FEATURE_PRED_CMD))
+		vmx_disable_intercept_for_msr(MSR_IA32_PRED_CMD, false);
 
 	vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
 	vmx_disable_intercept_for_msr(MSR_GS_BASE, false);