kvm.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH 0/3] KVM: nSVM: Add checks for CR3 and CR4 reserved bits to svm_set_nested_state() and test CR3 non-MBZ reserved bits
@ 2020-08-29  0:48 Krish Sadhukhan
  2020-08-29  0:48 ` [PATCH 1/3] KVM: nSVM: CR3 MBZ bits are only 63:52 Krish Sadhukhan
                   ` (3 more replies)
  0 siblings, 4 replies; 7+ messages in thread
From: Krish Sadhukhan @ 2020-08-29  0:48 UTC (permalink / raw)
  To: kvm; +Cc: pbonzini, jmattson

Patch# 1: Fixes the MBZ mask for CR3
Patch# 2: Adds checks for the reserved bits for CR3 and CR4 to
	  svm_set_nested_state() since these bits need to be checked before
	  VMRUN of the nested guest on the destination.
Patch# 3: Adds a test for the non-MBZ reserved bits in CR3 in long mode.


[PATCH 1/3] KVM: nSVM: CR3 MBZ bits are only 63:52
[PATCH 2/3] KVM: nSVM: Add check for CR3 and CR4 reserved bits to
[PATCH 3/3] nSVM: Test non-MBZ reserved bits in CR3 in long mode

 arch/x86/kvm/svm/nested.c | 51 +++++++++++++++++++++++++++--------------------
 arch/x86/kvm/svm/svm.h    |  2 +-
 2 files changed, 30 insertions(+), 23 deletions(-)

Krish Sadhukhan (2):
      KVM: nSVM: CR3 MBZ bits are only 63:52
      KVM: nSVM: Add check for CR3 and CR4 reserved bits to svm_set_nested_state()

 x86/svm.h       |  3 ++-
 x86/svm_tests.c | 54 +++++++++++++++++++++++++++++++++++++++++++++++-------
 2 files changed, 49 insertions(+), 8 deletions(-)

Krish Sadhukhan (1):
      KVM: nSVM: Test non-MBZ reserved bits in CR3 in long mode


^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PATCH 1/3] KVM: nSVM: CR3 MBZ bits are only 63:52
  2020-08-29  0:48 [PATCH 0/3] KVM: nSVM: Add checks for CR3 and CR4 reserved bits to svm_set_nested_state() and test CR3 non-MBZ reserved bits Krish Sadhukhan
@ 2020-08-29  0:48 ` Krish Sadhukhan
  2020-08-29  0:48 ` [PATCH 2/3] KVM: nSVM: Add check for CR3 and CR4 reserved bits to svm_set_nested_state() Krish Sadhukhan
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 7+ messages in thread
From: Krish Sadhukhan @ 2020-08-29  0:48 UTC (permalink / raw)
  To: kvm; +Cc: pbonzini, jmattson

Commit 761e4169346553c180bbd4a383aedd72f905bc9a created a wrong mask for the
CR3 MBZ bits. According to APM vol 2, only the upper 12 bits are MBZ.

Fixes: 761e41693465 ("KVM: nSVM: Check that MBZ bits in CR3 and CR4 are not set on vmrun of nested guests")

Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
---
 arch/x86/kvm/svm/svm.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index a798e1731709..c0d75b1e0664 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -345,7 +345,7 @@ static inline bool gif_set(struct vcpu_svm *svm)
 /* svm.c */
 #define MSR_CR3_LEGACY_RESERVED_MASK		0xfe7U
 #define MSR_CR3_LEGACY_PAE_RESERVED_MASK	0x7U
-#define MSR_CR3_LONG_RESERVED_MASK		0xfff0000000000fe7U
+#define MSR_CR3_LONG_MBZ_MASK			0xfff0000000000000U
 #define MSR_INVALID				0xffffffffU
 
 u32 svm_msrpm_offset(u32 msr);
-- 
2.18.4


^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [PATCH 2/3] KVM: nSVM: Add check for CR3 and CR4 reserved bits to svm_set_nested_state()
  2020-08-29  0:48 [PATCH 0/3] KVM: nSVM: Add checks for CR3 and CR4 reserved bits to svm_set_nested_state() and test CR3 non-MBZ reserved bits Krish Sadhukhan
  2020-08-29  0:48 ` [PATCH 1/3] KVM: nSVM: CR3 MBZ bits are only 63:52 Krish Sadhukhan
@ 2020-08-29  0:48 ` Krish Sadhukhan
  2020-09-23 15:53   ` Paolo Bonzini
  2020-08-29  0:48 ` [PATCH 3/3] nSVM: Test non-MBZ reserved bits in CR3 in long mode Krish Sadhukhan
  2020-09-23 15:54 ` [PATCH 0/3] KVM: nSVM: Add checks for CR3 and CR4 reserved bits to svm_set_nested_state() and test CR3 non-MBZ reserved bits Paolo Bonzini
  3 siblings, 1 reply; 7+ messages in thread
From: Krish Sadhukhan @ 2020-08-29  0:48 UTC (permalink / raw)
  To: kvm; +Cc: pbonzini, jmattson

Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
---
 arch/x86/kvm/svm/nested.c | 51 ++++++++++++++++++++++-----------------
 1 file changed, 29 insertions(+), 22 deletions(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index fb68467e6049..7a51ce465f3e 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -215,9 +215,35 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
 	return true;
 }
 
+static bool nested_vmcb_check_cr3_cr4(struct vcpu_svm *svm,
+				      struct vmcb_save_area *save)
+{
+	bool nested_vmcb_lma =
+	        (save->efer & EFER_LME) &&
+		(save->cr0 & X86_CR0_PG);
+
+	if (!nested_vmcb_lma) {
+		if (save->cr4 & X86_CR4_PAE) {
+			if (save->cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
+				return false;
+		} else {
+			if (save->cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
+				return false;
+		}
+	} else {
+		if (!(save->cr4 & X86_CR4_PAE) ||
+		    !(save->cr0 & X86_CR0_PE) ||
+		    (save->cr3 & MSR_CR3_LONG_MBZ_MASK))
+			return false;
+	}
+	if (kvm_valid_cr4(&svm->vcpu, save->cr4))
+		return false;
+
+	return true;
+}
+
 static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb)
 {
-	bool nested_vmcb_lma;
 	if ((vmcb->save.efer & EFER_SVME) == 0)
 		return false;
 
@@ -228,25 +254,7 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb)
 	if (!kvm_dr6_valid(vmcb->save.dr6) || !kvm_dr7_valid(vmcb->save.dr7))
 		return false;
 
-	nested_vmcb_lma =
-	        (vmcb->save.efer & EFER_LME) &&
-		(vmcb->save.cr0 & X86_CR0_PG);
-
-	if (!nested_vmcb_lma) {
-		if (vmcb->save.cr4 & X86_CR4_PAE) {
-			if (vmcb->save.cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
-				return false;
-		} else {
-			if (vmcb->save.cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
-				return false;
-		}
-	} else {
-		if (!(vmcb->save.cr4 & X86_CR4_PAE) ||
-		    !(vmcb->save.cr0 & X86_CR0_PE) ||
-		    (vmcb->save.cr3 & MSR_CR3_LONG_RESERVED_MASK))
-			return false;
-	}
-	if (kvm_valid_cr4(&svm->vcpu, vmcb->save.cr4))
+	if (!nested_vmcb_check_cr3_cr4(svm, &(vmcb->save)))
 		return false;
 
 	return nested_vmcb_check_controls(&vmcb->control);
@@ -1114,9 +1122,8 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
 	/*
 	 * Validate host state saved from before VMRUN (see
 	 * nested_svm_check_permissions).
-	 * TODO: validate reserved bits for all saved state.
 	 */
-	if (!(save.cr0 & X86_CR0_PG))
+	if (!nested_vmcb_check_cr3_cr4(svm, &save))
 		return -EINVAL;
 
 	/*
-- 
2.18.4


^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [PATCH 3/3] nSVM: Test non-MBZ reserved bits in CR3 in long mode
  2020-08-29  0:48 [PATCH 0/3] KVM: nSVM: Add checks for CR3 and CR4 reserved bits to svm_set_nested_state() and test CR3 non-MBZ reserved bits Krish Sadhukhan
  2020-08-29  0:48 ` [PATCH 1/3] KVM: nSVM: CR3 MBZ bits are only 63:52 Krish Sadhukhan
  2020-08-29  0:48 ` [PATCH 2/3] KVM: nSVM: Add check for CR3 and CR4 reserved bits to svm_set_nested_state() Krish Sadhukhan
@ 2020-08-29  0:48 ` Krish Sadhukhan
  2020-09-23 15:54 ` [PATCH 0/3] KVM: nSVM: Add checks for CR3 and CR4 reserved bits to svm_set_nested_state() and test CR3 non-MBZ reserved bits Paolo Bonzini
  3 siblings, 0 replies; 7+ messages in thread
From: Krish Sadhukhan @ 2020-08-29  0:48 UTC (permalink / raw)
  To: kvm; +Cc: pbonzini, jmattson

According to section "CR3" in APM vol. 2, the non-MBZ reserved bits in CR3
need to be set by software as follows:

	"Reserved Bits. Reserved fields should be cleared to 0 by software
	when writing CR3."

Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
---
 x86/svm.h       |  3 ++-
 x86/svm_tests.c | 54 ++++++++++++++++++++++++++++++++++++++++++-------
 2 files changed, 49 insertions(+), 8 deletions(-)

diff --git a/x86/svm.h b/x86/svm.h
index 15e0f18..465d794 100644
--- a/x86/svm.h
+++ b/x86/svm.h
@@ -325,7 +325,8 @@ struct __attribute__ ((__packed__)) vmcb {
 #define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP)
 
 #define	SVM_CR0_RESERVED_MASK			0xffffffff00000000U
-#define	SVM_CR3_LONG_RESERVED_MASK		0xfff0000000000000U
+#define	SVM_CR3_LONG_MBZ_MASK			0xfff0000000000000U
+#define	SVM_CR3_LONG_RESERVED_MASK		0x0000000000000fe7U
 #define	SVM_CR4_LEGACY_RESERVED_MASK		0xff88f000U
 #define	SVM_CR4_RESERVED_MASK			0xffffffffff88f000U
 #define	SVM_DR6_RESERVED_MASK			0xffffffffffff1ff0U
diff --git a/x86/svm_tests.c b/x86/svm_tests.c
index 1908c7c..af8684b 100644
--- a/x86/svm_tests.c
+++ b/x86/svm_tests.c
@@ -1891,11 +1891,11 @@ static bool reg_corruption_check(struct svm_test *test)
  * v2 tests
  */
 
+int KRISH_step = 0;
 static void basic_guest_main(struct svm_test *test)
 {
 }
 
-
 #define SVM_TEST_REG_RESERVED_BITS(start, end, inc, str_name, reg, val,	\
 				   resv_mask)				\
 {									\
@@ -1913,7 +1913,8 @@ static void basic_guest_main(struct svm_test *test)
         }								\
 }
 
-#define SVM_TEST_CR_RESERVED_BITS(start, end, inc, cr, val, resv_mask)	\
+#define SVM_TEST_CR_RESERVED_BITS(start, end, inc, cr, val, resv_mask,	\
+				  exit_code)				\
 {									\
 	u64 tmp, mask;							\
 	int i;								\
@@ -1933,7 +1934,7 @@ static void basic_guest_main(struct svm_test *test)
 		case 4:							\
 			vmcb->save.cr4 = tmp;				\
 		}							\
-		report(svm_vmrun() == SVM_EXIT_ERR, "Test CR%d %d:%d: %lx",\
+		report(svm_vmrun() == exit_code, "Test CR%d %d:%d: %lx",\
 		    cr, end, start, tmp);				\
 	}								\
 }
@@ -2012,9 +2013,48 @@ static void test_cr3(void)
 	u64 cr3_saved = vmcb->save.cr3;
 
 	SVM_TEST_CR_RESERVED_BITS(0, 63, 1, 3, cr3_saved,
-	    SVM_CR3_LONG_RESERVED_MASK);
+	    SVM_CR3_LONG_MBZ_MASK, SVM_EXIT_ERR);
+
+	vmcb->save.cr3 = cr3_saved & ~SVM_CR3_LONG_MBZ_MASK;
+	report(svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR3 63:0: %lx",
+	    vmcb->save.cr3);
+
+	/*
+	 * CR3 non-MBZ reserved bits based on different modes:
+	 *   [11:5] [2:0] - long mode
+	 */
+	u64 cr4_saved = vmcb->save.cr4;
+
+	/*
+	 * Long mode
+	 */
+	if (this_cpu_has(X86_FEATURE_PCID)) {
+		vmcb->save.cr4 = cr4_saved | X86_CR4_PCIDE;
+		SVM_TEST_CR_RESERVED_BITS(0, 11, 1, 3, cr3_saved,
+		    SVM_CR3_LONG_RESERVED_MASK, SVM_EXIT_VMMCALL);
+
+		vmcb->save.cr3 = cr3_saved & ~SVM_CR3_LONG_RESERVED_MASK;
+		report(svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR3 63:0: %lx",
+		    vmcb->save.cr3);
+	} else {
+		u64 *pdpe = npt_get_pml4e();
+
+		vmcb->save.cr4 = cr4_saved & ~X86_CR4_PCIDE;
+
+		/* Clear P (Present) bit in NPT in order to trigger #NPF */
+		pdpe[0] &= ~1ULL;
+
+		SVM_TEST_CR_RESERVED_BITS(0, 11, 1, 3, cr3_saved,
+		    SVM_CR3_LONG_RESERVED_MASK, SVM_EXIT_NPF);
+
+		pdpe[0] |= 1ULL;
+		vmcb->save.cr3 = cr3_saved & ~SVM_CR3_LONG_RESERVED_MASK;
+		report(svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR3 63:0: %lx",
+		    vmcb->save.cr3);
+	}
 
 	vmcb->save.cr3 = cr3_saved;
+	vmcb->save.cr4 = cr4_saved;
 }
 
 static void test_cr4(void)
@@ -2031,14 +2071,14 @@ static void test_cr4(void)
 	efer &= ~EFER_LME;
 	vmcb->save.efer = efer;
 	SVM_TEST_CR_RESERVED_BITS(12, 31, 1, 4, cr4_saved,
-	    SVM_CR4_LEGACY_RESERVED_MASK);
+	    SVM_CR4_LEGACY_RESERVED_MASK, SVM_EXIT_ERR);
 
 	efer |= EFER_LME;
 	vmcb->save.efer = efer;
 	SVM_TEST_CR_RESERVED_BITS(12, 31, 1, 4, cr4_saved,
-	    SVM_CR4_RESERVED_MASK);
+	    SVM_CR4_RESERVED_MASK, SVM_EXIT_ERR);
 	SVM_TEST_CR_RESERVED_BITS(32, 63, 4, 4, cr4_saved,
-	    SVM_CR4_RESERVED_MASK);
+	    SVM_CR4_RESERVED_MASK, SVM_EXIT_ERR);
 
 	vmcb->save.cr4 = cr4_saved;
 	vmcb->save.efer = efer_saved;
-- 
2.18.4


^ permalink raw reply related	[flat|nested] 7+ messages in thread

* Re: [PATCH 2/3] KVM: nSVM: Add check for CR3 and CR4 reserved bits to svm_set_nested_state()
  2020-08-29  0:48 ` [PATCH 2/3] KVM: nSVM: Add check for CR3 and CR4 reserved bits to svm_set_nested_state() Krish Sadhukhan
@ 2020-09-23 15:53   ` Paolo Bonzini
  2020-09-28  7:23     ` Krish Sadhukhan
  0 siblings, 1 reply; 7+ messages in thread
From: Paolo Bonzini @ 2020-09-23 15:53 UTC (permalink / raw)
  To: Krish Sadhukhan, kvm; +Cc: jmattson

On 29/08/20 02:48, Krish Sadhukhan wrote:
> Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
> ---
>  arch/x86/kvm/svm/nested.c | 51 ++++++++++++++++++++++-----------------
>  1 file changed, 29 insertions(+), 22 deletions(-)
> 
> diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
> index fb68467e6049..7a51ce465f3e 100644
> --- a/arch/x86/kvm/svm/nested.c
> +++ b/arch/x86/kvm/svm/nested.c
> @@ -215,9 +215,35 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
>  	return true;
>  }
>  
> +static bool nested_vmcb_check_cr3_cr4(struct vcpu_svm *svm,
> +				      struct vmcb_save_area *save)
> +{
> +	bool nested_vmcb_lma =
> +	        (save->efer & EFER_LME) &&
> +		(save->cr0 & X86_CR0_PG);
> +
> +	if (!nested_vmcb_lma) {
> +		if (save->cr4 & X86_CR4_PAE) {
> +			if (save->cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
> +				return false;
> +		} else {
> +			if (save->cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
> +				return false;
> +		}
> +	} else {
> +		if (!(save->cr4 & X86_CR4_PAE) ||
> +		    !(save->cr0 & X86_CR0_PE) ||
> +		    (save->cr3 & MSR_CR3_LONG_MBZ_MASK))
> +			return false;
> +	}
> +	if (kvm_valid_cr4(&svm->vcpu, save->cr4))
> +		return false;
> +
> +	return true;
> +}
> +
>  static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb)
>  {
> -	bool nested_vmcb_lma;
>  	if ((vmcb->save.efer & EFER_SVME) == 0)
>  		return false;
>  
> @@ -228,25 +254,7 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb)
>  	if (!kvm_dr6_valid(vmcb->save.dr6) || !kvm_dr7_valid(vmcb->save.dr7))
>  		return false;
>  
> -	nested_vmcb_lma =
> -	        (vmcb->save.efer & EFER_LME) &&
> -		(vmcb->save.cr0 & X86_CR0_PG);
> -
> -	if (!nested_vmcb_lma) {
> -		if (vmcb->save.cr4 & X86_CR4_PAE) {
> -			if (vmcb->save.cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
> -				return false;
> -		} else {
> -			if (vmcb->save.cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
> -				return false;
> -		}
> -	} else {
> -		if (!(vmcb->save.cr4 & X86_CR4_PAE) ||
> -		    !(vmcb->save.cr0 & X86_CR0_PE) ||
> -		    (vmcb->save.cr3 & MSR_CR3_LONG_RESERVED_MASK))
> -			return false;
> -	}
> -	if (kvm_valid_cr4(&svm->vcpu, vmcb->save.cr4))
> +	if (!nested_vmcb_check_cr3_cr4(svm, &(vmcb->save)))
>  		return false;
>  
>  	return nested_vmcb_check_controls(&vmcb->control);
> @@ -1114,9 +1122,8 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
>  	/*
>  	 * Validate host state saved from before VMRUN (see
>  	 * nested_svm_check_permissions).
> -	 * TODO: validate reserved bits for all saved state.
>  	 */
> -	if (!(save.cr0 & X86_CR0_PG))
> +	if (!nested_vmcb_check_cr3_cr4(svm, &save))
>  		return -EINVAL;

Removing the "if" is incorrect.  Also are there really no more reserved
bits to check?

Paolo


^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH 0/3] KVM: nSVM: Add checks for CR3 and CR4 reserved bits to svm_set_nested_state() and test CR3 non-MBZ reserved bits
  2020-08-29  0:48 [PATCH 0/3] KVM: nSVM: Add checks for CR3 and CR4 reserved bits to svm_set_nested_state() and test CR3 non-MBZ reserved bits Krish Sadhukhan
                   ` (2 preceding siblings ...)
  2020-08-29  0:48 ` [PATCH 3/3] nSVM: Test non-MBZ reserved bits in CR3 in long mode Krish Sadhukhan
@ 2020-09-23 15:54 ` Paolo Bonzini
  3 siblings, 0 replies; 7+ messages in thread
From: Paolo Bonzini @ 2020-09-23 15:54 UTC (permalink / raw)
  To: Krish Sadhukhan, kvm; +Cc: jmattson

On 29/08/20 02:48, Krish Sadhukhan wrote:
> Patch# 1: Fixes the MBZ mask for CR3
> Patch# 2: Adds checks for the reserved bits for CR3 and CR4 to
> 	  svm_set_nested_state() since these bits need to be checked before
> 	  VMRUN of the nested guest on the destination.
> Patch# 3: Adds a test for the non-MBZ reserved bits in CR3 in long mode.
> 
> 
> [PATCH 1/3] KVM: nSVM: CR3 MBZ bits are only 63:52
> [PATCH 2/3] KVM: nSVM: Add check for CR3 and CR4 reserved bits to
> [PATCH 3/3] nSVM: Test non-MBZ reserved bits in CR3 in long mode
> 
>  arch/x86/kvm/svm/nested.c | 51 +++++++++++++++++++++++++++--------------------
>  arch/x86/kvm/svm/svm.h    |  2 +-
>  2 files changed, 30 insertions(+), 23 deletions(-)
> 
> Krish Sadhukhan (2):
>       KVM: nSVM: CR3 MBZ bits are only 63:52
>       KVM: nSVM: Add check for CR3 and CR4 reserved bits to svm_set_nested_state()
> 
>  x86/svm.h       |  3 ++-
>  x86/svm_tests.c | 54 +++++++++++++++++++++++++++++++++++++++++++++++-------
>  2 files changed, 49 insertions(+), 8 deletions(-)
> 
> Krish Sadhukhan (1):
>       KVM: nSVM: Test non-MBZ reserved bits in CR3 in long mode
> 

The patches do not apply anymore; please resend on top of kvm/queue.

Paolo


^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH 2/3] KVM: nSVM: Add check for CR3 and CR4 reserved bits to svm_set_nested_state()
  2020-09-23 15:53   ` Paolo Bonzini
@ 2020-09-28  7:23     ` Krish Sadhukhan
  0 siblings, 0 replies; 7+ messages in thread
From: Krish Sadhukhan @ 2020-09-28  7:23 UTC (permalink / raw)
  To: Paolo Bonzini, kvm; +Cc: jmattson


On 9/23/20 8:53 AM, Paolo Bonzini wrote:
> On 29/08/20 02:48, Krish Sadhukhan wrote:
>> Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
>> ---
>>   arch/x86/kvm/svm/nested.c | 51 ++++++++++++++++++++++-----------------
>>   1 file changed, 29 insertions(+), 22 deletions(-)
>>
>> diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
>> index fb68467e6049..7a51ce465f3e 100644
>> --- a/arch/x86/kvm/svm/nested.c
>> +++ b/arch/x86/kvm/svm/nested.c
>> @@ -215,9 +215,35 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
>>   	return true;
>>   }
>>   
>> +static bool nested_vmcb_check_cr3_cr4(struct vcpu_svm *svm,
>> +				      struct vmcb_save_area *save)
>> +{
>> +	bool nested_vmcb_lma =
>> +	        (save->efer & EFER_LME) &&
>> +		(save->cr0 & X86_CR0_PG);
>> +
>> +	if (!nested_vmcb_lma) {
>> +		if (save->cr4 & X86_CR4_PAE) {
>> +			if (save->cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
>> +				return false;
>> +		} else {
>> +			if (save->cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
>> +				return false;
>> +		}
>> +	} else {
>> +		if (!(save->cr4 & X86_CR4_PAE) ||
>> +		    !(save->cr0 & X86_CR0_PE) ||
>> +		    (save->cr3 & MSR_CR3_LONG_MBZ_MASK))
>> +			return false;
>> +	}
>> +	if (kvm_valid_cr4(&svm->vcpu, save->cr4))
>> +		return false;
>> +
>> +	return true;
>> +}
>> +
>>   static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb)
>>   {
>> -	bool nested_vmcb_lma;
>>   	if ((vmcb->save.efer & EFER_SVME) == 0)
>>   		return false;
>>   
>> @@ -228,25 +254,7 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb)
>>   	if (!kvm_dr6_valid(vmcb->save.dr6) || !kvm_dr7_valid(vmcb->save.dr7))
>>   		return false;
>>   
>> -	nested_vmcb_lma =
>> -	        (vmcb->save.efer & EFER_LME) &&
>> -		(vmcb->save.cr0 & X86_CR0_PG);
>> -
>> -	if (!nested_vmcb_lma) {
>> -		if (vmcb->save.cr4 & X86_CR4_PAE) {
>> -			if (vmcb->save.cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
>> -				return false;
>> -		} else {
>> -			if (vmcb->save.cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
>> -				return false;
>> -		}
>> -	} else {
>> -		if (!(vmcb->save.cr4 & X86_CR4_PAE) ||
>> -		    !(vmcb->save.cr0 & X86_CR0_PE) ||
>> -		    (vmcb->save.cr3 & MSR_CR3_LONG_RESERVED_MASK))
>> -			return false;
>> -	}
>> -	if (kvm_valid_cr4(&svm->vcpu, vmcb->save.cr4))
>> +	if (!nested_vmcb_check_cr3_cr4(svm, &(vmcb->save)))
>>   		return false;
>>   
>>   	return nested_vmcb_check_controls(&vmcb->control);
>> @@ -1114,9 +1122,8 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
>>   	/*
>>   	 * Validate host state saved from before VMRUN (see
>>   	 * nested_svm_check_permissions).
>> -	 * TODO: validate reserved bits for all saved state.
>>   	 */
>> -	if (!(save.cr0 & X86_CR0_PG))
>> +	if (!nested_vmcb_check_cr3_cr4(svm, &save))
>>   		return -EINVAL;
> Removing the "if" is incorrect.
Sorry, my mistake.
>   Also are there really no more reserved
> bits to check?
I have sent v2 which has added checks for DR6, DR7 and EFER.
>
> Paolo
>

^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2020-09-28  7:26 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-08-29  0:48 [PATCH 0/3] KVM: nSVM: Add checks for CR3 and CR4 reserved bits to svm_set_nested_state() and test CR3 non-MBZ reserved bits Krish Sadhukhan
2020-08-29  0:48 ` [PATCH 1/3] KVM: nSVM: CR3 MBZ bits are only 63:52 Krish Sadhukhan
2020-08-29  0:48 ` [PATCH 2/3] KVM: nSVM: Add check for CR3 and CR4 reserved bits to svm_set_nested_state() Krish Sadhukhan
2020-09-23 15:53   ` Paolo Bonzini
2020-09-28  7:23     ` Krish Sadhukhan
2020-08-29  0:48 ` [PATCH 3/3] nSVM: Test non-MBZ reserved bits in CR3 in long mode Krish Sadhukhan
2020-09-23 15:54 ` [PATCH 0/3] KVM: nSVM: Add checks for CR3 and CR4 reserved bits to svm_set_nested_state() and test CR3 non-MBZ reserved bits Paolo Bonzini

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).