* [PATCH 0/3 v2] KVM: nSVM: Check MBZ bits in CR3 and CR4 on vmrun of nested guests
@ 2020-05-09  0:36 Krish Sadhukhan
  2020-05-09  0:36 ` [PATCH 1/3 v2] KVM: x86: Create mask for guest CR4 reserved bits in kvm_update_cpuid() Krish Sadhukhan
                   ` (2 more replies)
  0 siblings, 3 replies; 5+ messages in thread
From: Krish Sadhukhan @ 2020-05-09  0:36 UTC (permalink / raw)
  To: kvm; +Cc: pbonzini

v1 -> v2:
	1. Moved the formation of the mask for guest CR4 bits from
	   kvm_valid_cr4() to kvm_update_cpuid(). The mask is stashed
	   in a global variable called '__guest_cr4_reserved_bits'.
	   Patch# 1 contains these changes.
	2. nested_vmcb_checks() now uses is_long_mode() on the vCPU,
	   instead of the guest EFER in the nested VMCB, to check for
	   Long Mode (see the reference sketch below). Patch# 2
	   contains these changes.
	3. Patch# 3 contains the kvm-unit-tests. No changes have been
	   made to the tests.
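
For reference, is_long_mode() (arch/x86/kvm/x86.h) tests the L1 vCPU's
cached EFER.LMA rather than the EFER image in the nested VMCB; its
definition is roughly:

	static inline int is_long_mode(struct kvm_vcpu *vcpu)
	{
	#ifdef CONFIG_X86_64
		return vcpu->arch.efer & EFER_LMA;
	#else
		return 0;
	#endif
	}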


[PATCH 1/3 v2] KVM: x86: Create mask for guest CR4 reserved bits in kvm_update_cpuid()
[PATCH 2/3 v2] KVM: nSVM: Check that MBZ bits in CR3 and CR4 are not set on vmrun of nested guests
[PATCH 3/3 v2] KVM: nSVM: Test that MBZ bits in CR3 and CR4 are not set on vmrun of nested guests

 arch/x86/kvm/cpuid.c      |  3 +++
 arch/x86/kvm/svm/nested.c | 22 ++++++++++++++++++++--
 arch/x86/kvm/svm/svm.h    |  5 ++++-
 arch/x86/kvm/x86.c        | 27 ++++-----------------------
 arch/x86/kvm/x86.h        | 21 +++++++++++++++++++++
 5 files changed, 52 insertions(+), 26 deletions(-)

Krish Sadhukhan (2):
      KVM: x86: Create mask for guest CR4 reserved bits in kvm_update_cpuid()
      nSVM: Check that MBZ bits in CR3 and CR4 are not set on vmrun of nested guests

 x86/svm.h       |   6 ++++
 x86/svm_tests.c | 105 +++++++++++++++++++++++++++++++++++++++++++++++++-------
 2 files changed, 99 insertions(+), 12 deletions(-)

Krish Sadhukhan (1):
      nSVM: Test that MBZ bits in CR3 and CR4 are not set on vmrun of nested guests



* [PATCH 1/3 v2] KVM: x86: Create mask for guest CR4 reserved bits in kvm_update_cpuid()
  2020-05-09  0:36 [PATCH 0/3 v2] KVM: nSVM: Check MBZ bits in CR3 and CR4 on vmrun of nested guests Krish Sadhukhan
@ 2020-05-09  0:36 ` Krish Sadhukhan
  2020-05-09 12:56   ` Paolo Bonzini
  2020-05-09  0:36 ` [PATCH 2/3 v2] KVM: nSVM: Check that MBZ bits in CR3 and CR4 are not set on vmrun of nested guests Krish Sadhukhan
  2020-05-09  0:36 ` [PATCH 3/3 v2] KVM: nSVM: Test " Krish Sadhukhan
  2 siblings, 1 reply; 5+ messages in thread
From: Krish Sadhukhan @ 2020-05-09  0:36 UTC (permalink / raw)
  To: kvm; +Cc: pbonzini

Instead of creating the mask for guest CR4 reserved bits in kvm_valid_cr4(),
do it in kvm_update_cpuid() so that it can be reused instead of creating it
each time kvm_valid_cr4() is called.
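
With the mask cached at CPUID-update time, the guest half of
kvm_valid_cr4() reduces to a single test against the precomputed value
(as in the diff below):

	if (cr4 & __guest_cr4_reserved_bits)
		return -EINVAL;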

Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
---
 arch/x86/kvm/cpuid.c |  3 +++
 arch/x86/kvm/x86.c   | 24 ++----------------------
 arch/x86/kvm/x86.h   | 21 +++++++++++++++++++++
 3 files changed, 26 insertions(+), 22 deletions(-)

diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 901cd1f..c4e4e87 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -54,6 +54,8 @@ static u32 xstate_required_size(u64 xstate_bv, bool compacted)
 
 #define F feature_bit
 
+extern u64 __guest_cr4_reserved_bits;
+
 int kvm_update_cpuid(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
@@ -129,6 +131,7 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
 	kvm_mmu_reset_context(vcpu);
 
 	kvm_pmu_refresh(vcpu);
+	__guest_cr4_reserved_bits = __cr4_reserved_bits(guest_cpuid_has, vcpu);
 	return 0;
 }
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c5835f9..6030d65 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -96,6 +96,7 @@
 #endif
 
 static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;
+u64 __guest_cr4_reserved_bits;
 
 #define VM_STAT(x, ...) offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__
 #define VCPU_STAT(x, ...) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__
@@ -905,27 +906,6 @@ int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 }
 EXPORT_SYMBOL_GPL(kvm_set_xcr);
 
-#define __cr4_reserved_bits(__cpu_has, __c)		\
-({							\
-	u64 __reserved_bits = CR4_RESERVED_BITS;	\
-							\
-	if (!__cpu_has(__c, X86_FEATURE_XSAVE))		\
-		__reserved_bits |= X86_CR4_OSXSAVE;	\
-	if (!__cpu_has(__c, X86_FEATURE_SMEP))		\
-		__reserved_bits |= X86_CR4_SMEP;	\
-	if (!__cpu_has(__c, X86_FEATURE_SMAP))		\
-		__reserved_bits |= X86_CR4_SMAP;	\
-	if (!__cpu_has(__c, X86_FEATURE_FSGSBASE))	\
-		__reserved_bits |= X86_CR4_FSGSBASE;	\
-	if (!__cpu_has(__c, X86_FEATURE_PKU))		\
-		__reserved_bits |= X86_CR4_PKE;		\
-	if (!__cpu_has(__c, X86_FEATURE_LA57))		\
-		__reserved_bits |= X86_CR4_LA57;	\
-	if (!__cpu_has(__c, X86_FEATURE_UMIP))		\
-		__reserved_bits |= X86_CR4_UMIP;	\
-	__reserved_bits;				\
-})
-
 static u64 kvm_host_cr4_reserved_bits(struct cpuinfo_x86 *c)
 {
 	u64 reserved_bits = __cr4_reserved_bits(cpu_has, c);
@@ -944,7 +924,7 @@ static int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	if (cr4 & cr4_reserved_bits)
 		return -EINVAL;
 
-	if (cr4 & __cr4_reserved_bits(guest_cpuid_has, vcpu))
+	if (cr4 & __guest_cr4_reserved_bits)
 		return -EINVAL;
 
 	return 0;
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index b968acc..3a7310f 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -359,4 +359,25 @@ static inline bool kvm_dr7_valid(u64 data)
 void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
 u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu);
 
+#define __cr4_reserved_bits(__cpu_has, __c)             \
+({                                                      \
+	u64 __reserved_bits = CR4_RESERVED_BITS;        \
+                                                        \
+	if (!__cpu_has(__c, X86_FEATURE_XSAVE))         \
+		__reserved_bits |= X86_CR4_OSXSAVE;     \
+	if (!__cpu_has(__c, X86_FEATURE_SMEP))          \
+		__reserved_bits |= X86_CR4_SMEP;        \
+	if (!__cpu_has(__c, X86_FEATURE_SMAP))          \
+		__reserved_bits |= X86_CR4_SMAP;        \
+	if (!__cpu_has(__c, X86_FEATURE_FSGSBASE))      \
+		__reserved_bits |= X86_CR4_FSGSBASE;    \
+	if (!__cpu_has(__c, X86_FEATURE_PKU))           \
+		__reserved_bits |= X86_CR4_PKE;         \
+	if (!__cpu_has(__c, X86_FEATURE_LA57))          \
+		__reserved_bits |= X86_CR4_LA57;        \
+	if (!__cpu_has(__c, X86_FEATURE_UMIP))          \
+		__reserved_bits |= X86_CR4_UMIP;        \
+	__reserved_bits;                                \
+})
+
 #endif
-- 
1.8.3.1



* [PATCH 2/3 v2] KVM: nSVM: Check that MBZ bits in CR3 and CR4 are not set on vmrun of nested guests
  2020-05-09  0:36 [PATCH 0/3 v2] KVM: nSVM: Check MBZ bits in CR3 and CR4 on vmrun of nested guests Krish Sadhukhan
  2020-05-09  0:36 ` [PATCH 1/3 v2] KVM: x86: Create mask for guest CR4 reserved bits in kvm_update_cpuid() Krish Sadhukhan
@ 2020-05-09  0:36 ` Krish Sadhukhan
  2020-05-09  0:36 ` [PATCH 3/3 v2] KVM: nSVM: Test " Krish Sadhukhan
  2 siblings, 0 replies; 5+ messages in thread
From: Krish Sadhukhan @ 2020-05-09  0:36 UTC (permalink / raw)
  To: kvm; +Cc: pbonzini

According to section "Canonicalization and Consistency Checks" in APM vol. 2,
the following guest state is illegal:

    "Any MBZ bit of CR3 is set."
    "Any MBZ bit of CR4 is set."

Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
---
 arch/x86/kvm/svm/nested.c | 22 ++++++++++++++++++++--
 arch/x86/kvm/svm/svm.h    |  5 ++++-
 arch/x86/kvm/x86.c        |  3 ++-
 3 files changed, 26 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 90a1ca9..9824de9 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -202,11 +202,29 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
 	return true;
 }
 
-static bool nested_vmcb_checks(struct vmcb *vmcb)
+extern int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
+
+static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb)
 {
 	if ((vmcb->save.efer & EFER_SVME) == 0)
 		return false;
 
+	if (!is_long_mode(&(svm->vcpu))) {
+		if (vmcb->save.cr4 & X86_CR4_PAE) {
+			if (vmcb->save.cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
+				return false;
+		} else {
+			if (vmcb->save.cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
+				return false;
+		}
+	} else {
+		if ((vmcb->save.cr4 & X86_CR4_PAE) &&
+		    (vmcb->save.cr3 & MSR_CR3_LONG_RESERVED_MASK))
+			return false;
+	}
+	if (kvm_valid_cr4(&(svm->vcpu), vmcb->save.cr4))
+		return false;
+
 	if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
 		return false;
 
@@ -355,7 +373,7 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
 
 	nested_vmcb = map.hva;
 
-	if (!nested_vmcb_checks(nested_vmcb)) {
+	if (!nested_vmcb_checks(svm, nested_vmcb)) {
 		nested_vmcb->control.exit_code    = SVM_EXIT_ERR;
 		nested_vmcb->control.exit_code_hi = 0;
 		nested_vmcb->control.exit_info_1  = 0;
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index df3474f..83b480f 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -354,7 +354,10 @@ static inline bool gif_set(struct vcpu_svm *svm)
 }
 
 /* svm.c */
-#define MSR_INVALID			0xffffffffU
+#define MSR_CR3_LEGACY_RESERVED_MASK		0xfe7U
+#define MSR_CR3_LEGACY_PAE_RESERVED_MASK	0x7U
+#define MSR_CR3_LONG_RESERVED_MASK		0xfff0000000000fe7U
+#define MSR_INVALID				0xffffffffU
 
 u32 svm_msrpm_offset(u32 msr);
 void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6030d65..236301b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -919,7 +919,7 @@ static u64 kvm_host_cr4_reserved_bits(struct cpuinfo_x86 *c)
 	return reserved_bits;
 }
 
-static int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	if (cr4 & cr4_reserved_bits)
 		return -EINVAL;
@@ -929,6 +929,7 @@ static int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(kvm_valid_cr4);
 
 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
-- 
1.8.3.1



* [PATCH 3/3 v2] KVM: nSVM: Test that MBZ bits in CR3 and CR4 are not set on vmrun of nested guests
  2020-05-09  0:36 [PATCH 0/3 v2] KVM: nSVM: Check MBZ bits in CR3 and CR4 on vmrun of nested guests Krish Sadhukhan
  2020-05-09  0:36 ` [PATCH 1/3 v2] KVM: x86: Create mask for guest CR4 reserved bits in kvm_update_cpuid() Krish Sadhukhan
  2020-05-09  0:36 ` [PATCH 2/3 v2] KVM: nSVM: Check that MBZ bits in CR3 and CR4 are not set on vmrun of nested guests Krish Sadhukhan
@ 2020-05-09  0:36 ` Krish Sadhukhan
  2 siblings, 0 replies; 5+ messages in thread
From: Krish Sadhukhan @ 2020-05-09  0:36 UTC (permalink / raw)
  To: kvm; +Cc: pbonzini

According to section "Canonicalization and Consistency Checks" in APM vol. 2,
the following guest state is illegal:

	"Any MBZ bit of CR3 is set."
	"Any MBZ bit of CR4 is set."

Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
---
 x86/svm.h       |   6 ++++
 x86/svm_tests.c | 105 +++++++++++++++++++++++++++++++++++++++++++++++++-------
 2 files changed, 99 insertions(+), 12 deletions(-)

diff --git a/x86/svm.h b/x86/svm.h
index 645deb7..3d5b79c 100644
--- a/x86/svm.h
+++ b/x86/svm.h
@@ -323,6 +323,12 @@ struct __attribute__ ((__packed__)) vmcb {
 #define SVM_EXIT_ERR		-1
 
 #define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP)
+#define	SVM_CR0_RESERVED_MASK			0xffffffff00000000U
+#define	SVM_CR3_LEGACY_RESERVED_MASK		0xfe7U
+#define	SVM_CR3_LEGACY_PAE_RESERVED_MASK	0x7U
+#define	SVM_CR3_LONG_RESERVED_MASK		0xfff0000000000fe7U
+#define	SVM_CR4_LEGACY_RESERVED_MASK		0xffbaf000U
+#define	SVM_CR4_RESERVED_MASK			0xffffffffffbaf000U
 
 #define MSR_BITMAP_SIZE 8192
 
diff --git a/x86/svm_tests.c b/x86/svm_tests.c
index 5a571eb..5d57ad0 100644
--- a/x86/svm_tests.c
+++ b/x86/svm_tests.c
@@ -1596,6 +1596,31 @@ static void basic_guest_main(struct svm_test *test)
 {
 }
 
+#define SVM_TEST_CR_RESERVED_BITS(start, end, inc, cr, val, resv_mask)	\
+{									\
+        u64 tmp, mask;							\
+        int i;								\
+									\
+        for (i = start; i <= end; i = i + inc) {			\
+                mask = 1ull << i;					\
+                if (!(mask & resv_mask))				\
+                        continue;					\
+                tmp = val | mask;					\
+                switch (cr) {						\
+                        case 0:						\
+                                vmcb->save.cr0 = tmp;			\
+                                break;					\
+                        case 3:						\
+                                vmcb->save.cr3 = tmp;			\
+                                break;					\
+                        case 4:						\
+                                vmcb->save.cr4 = tmp;			\
+                }							\
+                report(svm_vmrun() == SVM_EXIT_ERR, "Test CR%d %d:%d: %lx",\
+		    cr, end, start, tmp);				\
+        }								\
+}
+
 static void svm_guest_state_test(void)
 {
 	test_set_guest(basic_guest_main);
@@ -1621,32 +1646,88 @@ static void svm_guest_state_test(void)
 	cr0 |= X86_CR0_CD;
 	cr0 &= ~X86_CR0_NW;
 	vmcb->save.cr0 = cr0;
-	report (svm_vmrun() == SVM_EXIT_VMMCALL, "CR0: %lx", cr0);
+	report (svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR0 CD=1,NW=0: %lx",
+	    cr0);
 	cr0 |= X86_CR0_NW;
 	vmcb->save.cr0 = cr0;
-	report (svm_vmrun() == SVM_EXIT_VMMCALL, "CR0: %lx", cr0);
+	report (svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR0 CD=1,NW=1: %lx",
+	    cr0);
 	cr0 &= ~X86_CR0_NW;
 	cr0 &= ~X86_CR0_CD;
 	vmcb->save.cr0 = cr0;
-	report (svm_vmrun() == SVM_EXIT_VMMCALL, "CR0: %lx", cr0);
+	report (svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR0 CD=0,NW=0: %lx",
+	    cr0);
 	cr0 |= X86_CR0_NW;
 	vmcb->save.cr0 = cr0;
-	report (svm_vmrun() == SVM_EXIT_ERR, "CR0: %lx", cr0);
+	report (svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR0 CD=0,NW=1: %lx",
+	    cr0);
 	vmcb->save.cr0 = cr0_saved;
 
 	/*
 	 * CR0[63:32] are not zero
 	 */
-	int i;
-
 	cr0 = cr0_saved;
-	for (i = 32; i < 63; i = i + 4) {
-		cr0 = cr0_saved | (1ull << i);
-		vmcb->save.cr0 = cr0;
-		report (svm_vmrun() == SVM_EXIT_ERR, "CR0[63:32]: %lx",
-		    cr0 >> 32);
-	}
+
+	SVM_TEST_CR_RESERVED_BITS(32, 63, 4, 0, cr0_saved,
+	    SVM_CR0_RESERVED_MASK);
 	vmcb->save.cr0 = cr0_saved;
+
+	/*
+	 * CR3 MBZ bits based on different modes:
+	 *   [2:0]		    - legacy PAE
+	 *   [2:0], [11:5]	    - legacy non-PAE
+	 *   [2:0], [11:5], [63:52] - long mode
+	 */
+	u64 cr3_saved = vmcb->save.cr3;
+	u64 cr4_saved = vmcb->save.cr4;
+	u64 cr4 = cr4_saved;
+	efer_saved = vmcb->save.efer;
+	efer = efer_saved;
+
+	efer &= ~EFER_LMA;
+	vmcb->save.efer = efer;
+	cr4 |= X86_CR4_PAE;
+	vmcb->save.cr4 = cr4;
+	SVM_TEST_CR_RESERVED_BITS(0, 2, 1, 3, cr3_saved,
+	    SVM_CR3_LEGACY_PAE_RESERVED_MASK);
+
+	cr4 = cr4_saved & ~X86_CR4_PAE;
+	vmcb->save.cr4 = cr4;
+	SVM_TEST_CR_RESERVED_BITS(0, 11, 2, 3, cr3_saved,
+	    SVM_CR3_LEGACY_RESERVED_MASK);
+
+	cr4 |= X86_CR4_PAE;
+	vmcb->save.cr4 = cr4;
+	efer |= EFER_LMA;
+	vmcb->save.efer = efer;
+	SVM_TEST_CR_RESERVED_BITS(0, 63, 2, 3, cr3_saved,
+	    SVM_CR3_LONG_RESERVED_MASK);
+
+	vmcb->save.cr4 = cr4_saved;
+	vmcb->save.cr3 = cr3_saved;
+	vmcb->save.efer = efer_saved;
+
+	/*
+	 * CR4 MBZ bits based on different modes:
+	 *   [15:12], 17, 19, [31:22] - legacy mode
+	 *   [15:12], 17, 19, [63:22] - long mode
+	 */
+	cr4_saved = vmcb->save.cr4;
+	efer_saved = vmcb->save.efer;
+	efer &= ~EFER_LMA;
+	vmcb->save.efer = efer;
+	SVM_TEST_CR_RESERVED_BITS(12, 31, 2, 4, cr4_saved,
+	    SVM_CR4_LEGACY_RESERVED_MASK);
+
+	efer |= EFER_LMA;
+	vmcb->save.efer = efer;
+	SVM_TEST_CR_RESERVED_BITS(12, 31, 2, 4, cr4_saved,
+	    SVM_CR4_RESERVED_MASK);
+	SVM_TEST_CR_RESERVED_BITS(32, 63, 4, 4, cr4_saved,
+	    SVM_CR4_RESERVED_MASK);
+
+	vmcb->save.cr4 = cr4_saved;
+	vmcb->save.efer = efer_saved;
 }
 
 struct svm_test svm_tests[] = {
-- 
1.8.3.1



* Re: [PATCH 1/3 v2] KVM: x86: Create mask for guest CR4 reserved bits in kvm_update_cpuid()
  2020-05-09  0:36 ` [PATCH 1/3 v2] KVM: x86: Create mask for guest CR4 reserved bits in kvm_update_cpuid() Krish Sadhukhan
@ 2020-05-09 12:56   ` Paolo Bonzini
  0 siblings, 0 replies; 5+ messages in thread
From: Paolo Bonzini @ 2020-05-09 12:56 UTC (permalink / raw)
  To: Krish Sadhukhan, kvm

On 09/05/20 02:36, Krish Sadhukhan wrote:
> +extern u64 __guest_cr4_reserved_bits;
> +
>  int kvm_update_cpuid(struct kvm_vcpu *vcpu)

This cannot be a global; it is different for each guest, since it
depends on the guest's CPUID.  Please put it in vcpu->arch.

Paolo
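
A minimal sketch of the per-vCPU approach suggested above (field
placement and name are illustrative, not from this series):

	/* in struct kvm_vcpu_arch: cache the mask per vCPU */
	u64 cr4_guest_rsvd_bits;

	/* in kvm_update_cpuid(): recompute whenever guest CPUID changes */
	vcpu->arch.cr4_guest_rsvd_bits =
		__cr4_reserved_bits(guest_cpuid_has, vcpu);

	/* in kvm_valid_cr4(): test the cached per-vCPU mask */
	if (cr4 & vcpu->arch.cr4_guest_rsvd_bits)
		return -EINVAL;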



