* [PATCH] KVM: x86: cleanup CR3 reserved bits checks
From: Paolo Bonzini @ 2021-02-02 17:02 UTC
  To: linux-kernel, kvm; +Cc: seanjc

If not in long mode, the low bits of CR3 are reserved but not enforced to
be zero, so remove those checks.  If in long mode, however, the MBZ bits
extend down to the highest physical address bit of the guest, excluding
the encryption bit.

Make the checks consistent with the above, and match them between
nested_vmcb_checks and KVM_SET_SREGS.
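
For context, the long-mode mask used below (vcpu->arch.cr3_lm_rsvd_bits)
covers the guest physical address bits above MAXPHYADDR, minus the
memory encryption bit.  A minimal sketch of how such a mask can be
derived (the function name is made up for illustration; rsvd_bits(),
cpuid_maxphyaddr() and kvm_find_cpuid_entry() are assumed to be the
existing helpers):

	/*
	 * Illustrative only, not part of this patch: derive the CR3
	 * MBZ mask for a guest in long mode.
	 */
	static u64 example_cr3_lm_rsvd_bits(struct kvm_vcpu *vcpu)
	{
		struct kvm_cpuid_entry2 *best;
		/* Bits from the guest's MAXPHYADDR up to bit 63 must be zero. */
		u64 mask = rsvd_bits(cpuid_maxphyaddr(vcpu), 63);

		/*
		 * The memory encryption bit (C-bit), reported in
		 * CPUID 0x8000001F EBX[5:0], is not reserved.
		 */
		best = kvm_find_cpuid_entry(vcpu, 0x8000001F, 0);
		if (best)
			mask &= ~(1ULL << (best->ebx & 0x3f));

		return mask;
	}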

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/svm/nested.c | 12 ++----------
 arch/x86/kvm/svm/svm.h    |  3 ---
 arch/x86/kvm/x86.c        |  2 ++
 3 files changed, 4 insertions(+), 13 deletions(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index eecb548bdda6..9ee542ea3f56 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -244,18 +244,10 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
 
 	vmcb12_lma = (vmcb12->save.efer & EFER_LME) && (vmcb12->save.cr0 & X86_CR0_PG);
 
-	if (!vmcb12_lma) {
-		if (vmcb12->save.cr4 & X86_CR4_PAE) {
-			if (vmcb12->save.cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
-				return false;
-		} else {
-			if (vmcb12->save.cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
-				return false;
-		}
-	} else {
+	if (vmcb12_lma) {
 		if (!(vmcb12->save.cr4 & X86_CR4_PAE) ||
 		    !(vmcb12->save.cr0 & X86_CR0_PE) ||
-		    (vmcb12->save.cr3 & MSR_CR3_LONG_MBZ_MASK))
+		    (vmcb12->save.cr3 & svm->vcpu.arch.cr3_lm_rsvd_bits))
 			return false;
 	}
 	if (!kvm_is_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 0fe874ae5498..6e7d070f8b86 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -403,9 +403,6 @@ static inline bool gif_set(struct vcpu_svm *svm)
 }
 
 /* svm.c */
-#define MSR_CR3_LEGACY_RESERVED_MASK		0xfe7U
-#define MSR_CR3_LEGACY_PAE_RESERVED_MASK	0x7U
-#define MSR_CR3_LONG_MBZ_MASK			0xfff0000000000000U
 #define MSR_INVALID				0xffffffffU
 
 extern int sev;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b748bf0d6d33..97674204bf44 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9660,6 +9660,8 @@ static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 		 */
 		if (!(sregs->cr4 & X86_CR4_PAE) || !(sregs->efer & EFER_LMA))
 			return false;
+		if (sregs->cr3 & vcpu->arch.cr3_lm_rsvd_bits)
+			return false;
 	} else {
 		/*
 		 * Not in 64-bit mode: EFER.LMA is clear and the code
-- 
2.26.2
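
As a usage note (not part of the patch itself): once the KVM_SET_SREGS
check is in place, setting a CR3 bit above the guest's MAXPHYADDR while
in long mode should be rejected with -EINVAL.  A rough userspace sketch,
assuming vcpu_fd is a vCPU file descriptor from KVM_CREATE_VCPU and that
bit 60 lies above the guest's MAXPHYADDR:

	#include <linux/kvm.h>
	#include <sys/ioctl.h>
	#include <stdio.h>

	static int try_bogus_cr3(int vcpu_fd)
	{
		struct kvm_sregs sregs;

		if (ioctl(vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
			return -1;

		/* Set a CR3 bit that no guest physical address can use. */
		sregs.cr3 |= 1ULL << 60;

		/*
		 * If the vCPU is in long mode (EFER.LMA set), this is
		 * expected to fail with errno == EINVAL.
		 */
		if (ioctl(vcpu_fd, KVM_SET_SREGS, &sregs) < 0) {
			perror("KVM_SET_SREGS");
			return -1;
		}
		return 0;
	}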



* Re: [PATCH] KVM: x86: cleanup CR3 reserved bits checks
From: Sean Christopherson @ 2021-02-02 18:34 UTC
  To: Paolo Bonzini; +Cc: linux-kernel, kvm

On Tue, Feb 02, 2021, Paolo Bonzini wrote:
> If not in long mode, the low bits of CR3 are reserved but not enforced to
> be zero, so remove those checks.  If in long mode, however, the MBZ bits
> extend down to the highest physical address bit of the guest, excluding
> the encryption bit.
> 
> Make the checks consistent with the above, and match them between
> nested_vmcb_checks and KVM_SET_SREGS.
> 

Fixes + Cc:stable@?

> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

Reviewed-by: Sean Christopherson <seanjc@google.com> 

> ---
>  arch/x86/kvm/svm/nested.c | 12 ++----------
>  arch/x86/kvm/svm/svm.h    |  3 ---
>  arch/x86/kvm/x86.c        |  2 ++
>  3 files changed, 4 insertions(+), 13 deletions(-)
> 
> diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
> index eecb548bdda6..9ee542ea3f56 100644
> --- a/arch/x86/kvm/svm/nested.c
> +++ b/arch/x86/kvm/svm/nested.c
> @@ -244,18 +244,10 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
>  
>  	vmcb12_lma = (vmcb12->save.efer & EFER_LME) && (vmcb12->save.cr0 & X86_CR0_PG);
>  
> -	if (!vmcb12_lma) {
> -		if (vmcb12->save.cr4 & X86_CR4_PAE) {
> -			if (vmcb12->save.cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
> -				return false;
> -		} else {
> -			if (vmcb12->save.cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
> -				return false;
> -		}
> -	} else {
> +	if (vmcb12_lma) {
>  		if (!(vmcb12->save.cr4 & X86_CR4_PAE) ||
>  		    !(vmcb12->save.cr0 & X86_CR0_PE) ||
> -		    (vmcb12->save.cr3 & MSR_CR3_LONG_MBZ_MASK))
> +		    (vmcb12->save.cr3 & svm->vcpu.arch.cr3_lm_rsvd_bits))

Gah, I was too slow as usual.  I have a series to clean up GPA validity checks,
this one included.  I'll base that series on this patch, if I get it sent before
this hits kvm/queue...

>  			return false;
>  	}
>  	if (!kvm_is_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
> diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
> index 0fe874ae5498..6e7d070f8b86 100644
> --- a/arch/x86/kvm/svm/svm.h
> +++ b/arch/x86/kvm/svm/svm.h
> @@ -403,9 +403,6 @@ static inline bool gif_set(struct vcpu_svm *svm)
>  }
>  
>  /* svm.c */
> -#define MSR_CR3_LEGACY_RESERVED_MASK		0xfe7U
> -#define MSR_CR3_LEGACY_PAE_RESERVED_MASK	0x7U
> -#define MSR_CR3_LONG_MBZ_MASK			0xfff0000000000000U
>  #define MSR_INVALID				0xffffffffU
>  
>  extern int sev;
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index b748bf0d6d33..97674204bf44 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -9660,6 +9660,8 @@ static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
>  		 */
>  		if (!(sregs->cr4 & X86_CR4_PAE) || !(sregs->efer & EFER_LMA))
>  			return false;
> +		if (sregs->cr3 & vcpu->arch.cr3_lm_rsvd_bits)
> +			return false;
>  	} else {
>  		/*
>  		 * Not in 64-bit mode: EFER.LMA is clear and the code
> -- 
> 2.26.2
> 


* Re: [PATCH] KVM: x86: cleanup CR3 reserved bits checks
From: Paolo Bonzini @ 2021-02-03  8:28 UTC
  To: Sean Christopherson; +Cc: linux-kernel, kvm

On 02/02/21 19:34, Sean Christopherson wrote:
> On Tue, Feb 02, 2021, Paolo Bonzini wrote:
>> If not in long mode, the low bits of CR3 are reserved but not enforced to
>> be zero, so remove those checks.  If in long mode, however, the MBZ bits
>> extend down to the highest physical address bit of the guest, excluding
>> the encryption bit.
>>
>> Make the checks consistent with the above, and match them between
>> nested_vmcb_checks and KVM_SET_SREGS.
>>
> Fixes + Cc:stable@?

Difficult to say what it fixes; the missing check has been there forever for KVM_SET_SREGS.

For the nSVM part I'll go with

Fixes: 761e41693465 ("KVM: nSVM: Check that MBZ bits in CR3 and CR4 are not set on vmrun of nested guests")

Paolo

>> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> Reviewed-by: Sean Christopherson <seanjc@google.com>
> 



