* [PATCH] kvm: x86: Sink cpuid update into vendor-specific set_cr4 functions
@ 2020-10-29 17:06 Jim Mattson
From: Jim Mattson @ 2020-10-29 17:06 UTC (permalink / raw)
  To: kvm, Paolo Bonzini
  Cc: Jim Mattson, Abhiroop Dabral, Ricardo Koller, Peter Shier,
	Haozhong Zhang, Dexuan Cui, Huaitong Han

On emulated VM-entry and VM-exit, update the CPUID bits that reflect
CR4.OSXSAVE and CR4.PKE.

This fixes a bug where the CPUID bits could continue to reflect L2 CR4
values after emulated VM-exit to L1. It also fixes a related bug where
the CPUID bits could continue to reflect L1 CR4 values after emulated
VM-entry to L2. The latter bug is mainly relevant to SVM, wherein
CPUID is not a required intercept. However, it could also be relevant
to VMX, because the code to conditionally update these CPUID bits
assumes that the guest CPUID and the guest CR4 are always in sync.
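
For reference, the bits in question are CPUID.01H:ECX[OSXSAVE], which
must mirror the current guest CR4.OSXSAVE, and CPUID.07H:ECX[OSPKE],
which must mirror the current guest CR4.PKE. A condensed, illustrative
sketch of the relevant part of kvm_update_cpuid_runtime() in
arch/x86/kvm/cpuid.c (not the verbatim kernel code; the unrelated
MWAIT/APIC/XSAVE-size updates are omitted):

	struct kvm_cpuid_entry2 *best;

	/* CPUID.01H:ECX[OSXSAVE] follows the current guest CR4.OSXSAVE. */
	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	if (best && boot_cpu_has(X86_FEATURE_XSAVE))
		cpuid_entry_change(best, X86_FEATURE_OSXSAVE,
				   kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE));

	/* CPUID.07H:ECX[OSPKE] follows the current guest CR4.PKE. */
	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	if (best && boot_cpu_has(X86_FEATURE_PKU))
		cpuid_entry_change(best, X86_FEATURE_OSPKE,
				   kvm_read_cr4_bits(vcpu, X86_CR4_PKE));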

Fixes: 8eb3f87d903168 ("KVM: nVMX: fix guest CR4 loading when emulating L2 to L1 exit")
Fixes: 2acf923e38fb6a ("KVM: VMX: Enable XSAVE/XRSTOR for guest")
Fixes: b9baba86148904 ("KVM, pkeys: expose CPUID/CR4 to guest")
Reported-by: Abhiroop Dabral <adabral@paloaltonetworks.com>
Signed-off-by: Jim Mattson <jmattson@google.com>
Reviewed-by: Ricardo Koller <ricarkol@google.com>
Reviewed-by: Peter Shier <pshier@google.com>
Cc: Haozhong Zhang <haozhong.zhang@intel.com>
Cc: Dexuan Cui <dexuan.cui@intel.com>
Cc: Huaitong Han <huaitong.han@intel.com>
---
 arch/x86/kvm/cpuid.c   | 1 +
 arch/x86/kvm/svm/svm.c | 4 ++++
 arch/x86/kvm/vmx/vmx.c | 5 +++++
 arch/x86/kvm/x86.c     | 8 --------
 4 files changed, 10 insertions(+), 8 deletions(-)
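
Note on where the refresh now lives: the nested VM-entry/VM-exit paths
load CR4 by calling vmx_set_cr4()/svm_set_cr4() directly rather than
going through kvm_set_cr4(), which is why a refresh that lives only in
kvm_set_cr4() can be skipped across those transitions. A rough,
simplified view of the call flow (illustrative only):

	/*
	 *   MOV to CR4 / KVM_SET_SREGS
	 *       -> kvm_set_cr4()
	 *              -> kvm_x86_ops.set_cr4()      (vmx_set_cr4/svm_set_cr4)
	 *              -> kvm_update_cpuid_runtime() (old location of refresh)
	 *
	 *   emulated VM-entry / VM-exit
	 *       -> vmx_set_cr4() / svm_set_cr4()     (kvm_set_cr4() bypassed,
	 *                                             old refresh skipped)
	 *
	 * With this patch the OSXSAVE/PKE check sits at the tail of the
	 * vendor set_cr4() implementations, so every CR4 load refreshes
	 * the CPUID bits.
	 */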

diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 06a278b3701d..661732be33f5 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -139,6 +139,7 @@ void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
 					   MSR_IA32_MISC_ENABLE_MWAIT);
 	}
 }
+EXPORT_SYMBOL_GPL(kvm_update_cpuid_runtime);
 
 static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 {
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 2f32fd09e259..78163e345e84 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1699,6 +1699,10 @@ int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	cr4 |= host_cr4_mce;
 	to_svm(vcpu)->vmcb->save.cr4 = cr4;
 	vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
+
+	if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
+		kvm_update_cpuid_runtime(vcpu);
+
 	return 0;
 }
 
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index d14c94d0aff1..bd2cb47f113b 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -3095,6 +3095,7 @@ static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long pgd,
 
 int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
+	unsigned long old_cr4 = vcpu->arch.cr4;
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	/*
 	 * Pass through host's Machine Check Enable value to hw_cr4, which
@@ -3166,6 +3167,10 @@ int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 
 	vmcs_writel(CR4_READ_SHADOW, cr4);
 	vmcs_writel(GUEST_CR4, hw_cr4);
+
+	if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
+		kvm_update_cpuid_runtime(vcpu);
+
 	return 0;
 }
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 397f599b20e5..e95c333724c2 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1014,9 +1014,6 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	    (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
 		kvm_mmu_reset_context(vcpu);
 
-	if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
-		kvm_update_cpuid_runtime(vcpu);
-
 	return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_set_cr4);
@@ -9522,7 +9519,6 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 {
 	struct msr_data apic_base_msr;
 	int mmu_reset_needed = 0;
-	int cpuid_update_needed = 0;
 	int pending_vec, max_bits, idx;
 	struct desc_ptr dt;
 	int ret = -EINVAL;
@@ -9557,11 +9553,7 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 	vcpu->arch.cr0 = sregs->cr0;
 
 	mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
-	cpuid_update_needed |= ((kvm_read_cr4(vcpu) ^ sregs->cr4) &
-				(X86_CR4_OSXSAVE | X86_CR4_PKE));
 	kvm_x86_ops.set_cr4(vcpu, sregs->cr4);
-	if (cpuid_update_needed)
-		kvm_update_cpuid_runtime(vcpu);
 
 	idx = srcu_read_lock(&vcpu->kvm->srcu);
 	if (is_pae_paging(vcpu)) {
-- 
2.29.1.341.ge80a0c044ae-goog


* Re: [PATCH] kvm: x86: Sink cpuid update into vendor-specific set_cr4 functions
From: Paolo Bonzini @ 2020-10-31 13:54 UTC (permalink / raw)
  To: Jim Mattson, kvm
  Cc: Abhiroop Dabral, Ricardo Koller, Peter Shier, Haozhong Zhang,
	Dexuan Cui, Huaitong Han

On 29/10/20 18:06, Jim Mattson wrote:
> On emulated VM-entry and VM-exit, update the CPUID bits that reflect
> CR4.OSXSAVE and CR4.PKE.
> 
> This fixes a bug where the CPUID bits could continue to reflect L2 CR4
> values after emulated VM-exit to L1. It also fixes a related bug where
> the CPUID bits could continue to reflect L1 CR4 values after emulated
> VM-entry to L2. The latter bug is mainly relevant to SVM, wherein
> CPUID is not a required intercept. However, it could also be relevant
> to VMX, because the code to conditionally update these CPUID bits
> assumes that the guest CPUID and the guest CR4 are always in sync.
> [...]

Queued, thanks.

Paolo

