* [kvm-unit-tests PATCH 00/10] x86: nVMX: Add VMXON #UD test
@ 2022-06-08 23:52 Sean Christopherson
  2022-06-08 23:52 ` [kvm-unit-tests PATCH 01/10] x86: Use BIT() to define architectural bits Sean Christopherson
                   ` (9 more replies)
  0 siblings, 10 replies; 12+ messages in thread
From: Sean Christopherson @ 2022-06-08 23:52 UTC
  To: Paolo Bonzini; +Cc: kvm, Sean Christopherson

Add a testcase to verify VMXON #UDs when attempted with an incompatible
CR0 or CR4.

Patches 1-9 are cleanups to make implementing that relatively simple test
less painful.

Sean Christopherson (10):
  x86: Use BIT() to define architectural bits
  x86: Replace spaces with tabs in processor.h
  x86: Use "safe" terminology instead of "checking"
  x86: Use "safe" helpers to implement unsafe CRs accessors
  x86: Provide result of RDMSR from "safe" variant
  nVMX: Check the results of VMXON/VMXOFF in feature control test
  nVMX: Check result of VMXON in INIT/SIPI tests
  nVMX: Wrap VMXON in ASM_TRY(), a.k.a. in exception fixup
  nVMX: Simplify test_vmxon() by returning directly on failure
  nVMX: Add subtest to verify VMXON succeeds/#UDs on good/bad CR0/CR4

 lib/x86/desc.c      |   8 -
 lib/x86/desc.h      |   1 -
 lib/x86/processor.h | 403 +++++++++++++++++++++++++-------------------
 x86/access.c        |   8 +-
 x86/la57.c          |   2 +-
 x86/msr.c           |   5 +-
 x86/pcid.c          |  28 ++-
 x86/rdpru.c         |   4 +-
 x86/vmx.c           | 141 +++++++++++++---
 x86/vmx.h           |  31 +++-
 x86/vmx_tests.c     |  12 +-
 x86/xsave.c         |  31 ++--
 12 files changed, 411 insertions(+), 263 deletions(-)


base-commit: 2eed0bf1096077144cc3a0dd9974689487f9511a
-- 
2.36.1.255.ge46751e96f-goog


* [kvm-unit-tests PATCH 01/10] x86: Use BIT() to define architectural bits
  2022-06-08 23:52 [kvm-unit-tests PATCH 00/10] x86: nVMX: Add VMXON #UD test Sean Christopherson
@ 2022-06-08 23:52 ` Sean Christopherson
  2022-06-08 23:52 ` [kvm-unit-tests PATCH 02/10] x86: Replace spaces with tabs in processor.h Sean Christopherson
                   ` (8 subsequent siblings)
  9 siblings, 0 replies; 12+ messages in thread
From: Sean Christopherson @ 2022-06-08 23:52 UTC
  To: Paolo Bonzini; +Cc: kvm, Sean Christopherson

Use BIT() to define bits in EFLAGS, CR0, and CR4.  Intel's SDM and AMD's
APM reference flags/features by the bit number, not by their mask, making
it absurdly difficult to audit and/or add definitions.

No functional change intended.
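
For reference, BIT() and GENMASK() come from the newly included
<bitops.h>; kernel-style definitions boil down to the following (a
sketch, see lib/bitops.h for KUT's actual versions):

	#define BIT(nr)		(1ul << (nr))
	#define GENMASK(hi, lo) \
		((~0ul << (lo)) & (~0ul >> (BITS_PER_LONG - 1 - (hi))))

e.g. X86_CR0_PG becomes BIT(31) == 0x80000000, matching the mask it
replaces.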

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 lib/x86/processor.h | 103 ++++++++++++++++++++++++++------------------
 x86/xsave.c         |   1 -
 2 files changed, 61 insertions(+), 43 deletions(-)

diff --git a/lib/x86/processor.h b/lib/x86/processor.h
index 9a0dad67..1dfd5285 100644
--- a/lib/x86/processor.h
+++ b/lib/x86/processor.h
@@ -4,6 +4,7 @@
 #include "libcflat.h"
 #include "desc.h"
 #include "msr.h"
+#include <bitops.h>
 #include <stdint.h>
 
 #define NONCANONICAL            0xaaaaaaaaaaaaaaaaull
@@ -30,49 +31,67 @@
 #define AC_VECTOR 17
 #define CP_VECTOR 21
 
-#define X86_CR0_PE	0x00000001
-#define X86_CR0_MP	0x00000002
-#define X86_CR0_EM	0x00000004
-#define X86_CR0_TS	0x00000008
-#define X86_CR0_WP	0x00010000
-#define X86_CR0_AM	0x00040000
-#define X86_CR0_NW	0x20000000
-#define X86_CR0_CD	0x40000000
-#define X86_CR0_PG	0x80000000
-#define X86_CR3_PCID_MASK 0x00000fff
-#define X86_CR4_TSD	0x00000004
-#define X86_CR4_DE	0x00000008
-#define X86_CR4_PSE	0x00000010
-#define X86_CR4_PAE	0x00000020
-#define X86_CR4_MCE	0x00000040
-#define X86_CR4_PGE	0x00000080
-#define X86_CR4_PCE	0x00000100
-#define X86_CR4_UMIP	0x00000800
-#define X86_CR4_LA57	0x00001000
-#define X86_CR4_VMXE	0x00002000
-#define X86_CR4_PCIDE	0x00020000
-#define X86_CR4_OSXSAVE	0x00040000
-#define X86_CR4_SMEP	0x00100000
-#define X86_CR4_SMAP	0x00200000
-#define X86_CR4_PKE	0x00400000
-#define X86_CR4_CET	0x00800000
-#define X86_CR4_PKS	0x01000000
+#define X86_CR0_PE	BIT(0)
+#define X86_CR0_MP	BIT(1)
+#define X86_CR0_EM	BIT(2)
+#define X86_CR0_TS	BIT(3)
+#define X86_CR0_ET	BIT(4)
+#define X86_CR0_NE	BIT(5)
+#define X86_CR0_WP	BIT(16)
+#define X86_CR0_AM	BIT(18)
+#define X86_CR0_NW	BIT(29)
+#define X86_CR0_CD	BIT(30)
+#define X86_CR0_PG	BIT(31)
 
-#define X86_EFLAGS_CF    0x00000001
-#define X86_EFLAGS_FIXED 0x00000002
-#define X86_EFLAGS_PF    0x00000004
-#define X86_EFLAGS_AF    0x00000010
-#define X86_EFLAGS_ZF    0x00000040
-#define X86_EFLAGS_SF    0x00000080
-#define X86_EFLAGS_TF    0x00000100
-#define X86_EFLAGS_IF    0x00000200
-#define X86_EFLAGS_DF    0x00000400
-#define X86_EFLAGS_OF    0x00000800
-#define X86_EFLAGS_IOPL  0x00003000
-#define X86_EFLAGS_NT    0x00004000
-#define X86_EFLAGS_RF    0x00010000
-#define X86_EFLAGS_VM    0x00020000
-#define X86_EFLAGS_AC    0x00040000
+#define X86_CR3_PCID_MASK	GENMASK(11, 0)
+
+#define X86_CR4_VME		BIT(0)
+#define X86_CR4_PVI		BIT(1)
+#define X86_CR4_TSD		BIT(2)
+#define X86_CR4_DE		BIT(3)
+#define X86_CR4_PSE		BIT(4)
+#define X86_CR4_PAE		BIT(5)
+#define X86_CR4_MCE		BIT(6)
+#define X86_CR4_PGE		BIT(7)
+#define X86_CR4_PCE		BIT(8)
+#define X86_CR4_OSFXSR		BIT(9)
+#define X86_CR4_OSXMMEXCPT	BIT(10)
+#define X86_CR4_UMIP		BIT(11)
+#define X86_CR4_LA57		BIT(12)
+#define X86_CR4_VMXE		BIT(13)
+#define X86_CR4_SMXE		BIT(14)
+/* UNUSED			BIT(15) */
+#define X86_CR4_FSGSBASE	BIT(16)
+#define X86_CR4_PCIDE		BIT(17)
+#define X86_CR4_OSXSAVE		BIT(18)
+#define X86_CR4_KL		BIT(19)
+#define X86_CR4_SMEP		BIT(20)
+#define X86_CR4_SMAP		BIT(21)
+#define X86_CR4_PKE		BIT(22)
+#define X86_CR4_CET		BIT(23)
+#define X86_CR4_PKS		BIT(24)
+
+#define X86_EFLAGS_CF		BIT(0)
+#define X86_EFLAGS_FIXED	BIT(1)
+#define X86_EFLAGS_PF		BIT(2)
+/* RESERVED 0			BIT(3) */
+#define X86_EFLAGS_AF		BIT(4)
+/* RESERVED 0			BIT(5) */
+#define X86_EFLAGS_ZF		BIT(6)
+#define X86_EFLAGS_SF		BIT(7)
+#define X86_EFLAGS_TF		BIT(8)
+#define X86_EFLAGS_IF		BIT(9)
+#define X86_EFLAGS_DF		BIT(10)
+#define X86_EFLAGS_OF		BIT(11)
+#define X86_EFLAGS_IOPL		GENMASK(13, 12)
+#define X86_EFLAGS_NT		BIT(14)
+/* RESERVED 0			BIT(15) */
+#define X86_EFLAGS_RF		BIT(16)
+#define X86_EFLAGS_VM		BIT(17)
+#define X86_EFLAGS_AC		BIT(18)
+#define X86_EFLAGS_VIF		BIT(19)
+#define X86_EFLAGS_VIP		BIT(20)
+#define X86_EFLAGS_ID		BIT(21)
 
 #define X86_EFLAGS_ALU (X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | \
 			X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF)
diff --git a/x86/xsave.c b/x86/xsave.c
index 892bf561..84170033 100644
--- a/x86/xsave.c
+++ b/x86/xsave.c
@@ -42,7 +42,6 @@ static uint64_t get_supported_xcr0(void)
     return r.a + ((u64)r.d << 32);
 }
 
-#define X86_CR4_OSXSAVE			0x00040000
 #define XCR_XFEATURE_ENABLED_MASK       0x00000000
 #define XCR_XFEATURE_ILLEGAL_MASK       0x00000010
 
-- 
2.36.1.255.ge46751e96f-goog


* [kvm-unit-tests PATCH 02/10] x86: Replace spaces with tabs in processor.h
  2022-06-08 23:52 [kvm-unit-tests PATCH 00/10] x86: nVMX: Add VMXON #UD test Sean Christopherson
  2022-06-08 23:52 ` [kvm-unit-tests PATCH 01/10] x86: Use BIT() to define architectural bits Sean Christopherson
@ 2022-06-08 23:52 ` Sean Christopherson
  2022-06-08 23:52 ` [kvm-unit-tests PATCH 03/10] x86: Use "safe" terminology instead of "checking" Sean Christopherson
                   ` (7 subsequent siblings)
  9 siblings, 0 replies; 12+ messages in thread
From: Sean Christopherson @ 2022-06-08 23:52 UTC
  To: Paolo Bonzini; +Cc: kvm, Sean Christopherson

Fix the myriad instances of using spaces instead of tabs in processor.h.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 lib/x86/processor.h | 250 ++++++++++++++++++++++----------------------
 1 file changed, 125 insertions(+), 125 deletions(-)

diff --git a/lib/x86/processor.h b/lib/x86/processor.h
index 1dfd5285..13d9d9bb 100644
--- a/lib/x86/processor.h
+++ b/lib/x86/processor.h
@@ -7,7 +7,7 @@
 #include <bitops.h>
 #include <stdint.h>
 
-#define NONCANONICAL            0xaaaaaaaaaaaaaaaaull
+#define NONCANONICAL	0xaaaaaaaaaaaaaaaaull
 
 #ifdef __x86_64__
 #  define R "r"
@@ -112,31 +112,31 @@ struct cpuid { u32 a, b, c, d; };
 
 static inline struct cpuid raw_cpuid(u32 function, u32 index)
 {
-    struct cpuid r;
-    asm volatile ("cpuid"
-                  : "=a"(r.a), "=b"(r.b), "=c"(r.c), "=d"(r.d)
-                  : "0"(function), "2"(index));
-    return r;
+	struct cpuid r;
+	asm volatile ("cpuid"
+		      : "=a"(r.a), "=b"(r.b), "=c"(r.c), "=d"(r.d)
+		      : "0"(function), "2"(index));
+	return r;
 }
 
 static inline struct cpuid cpuid_indexed(u32 function, u32 index)
 {
-    u32 level = raw_cpuid(function & 0xf0000000, 0).a;
-    if (level < function)
-        return (struct cpuid) { 0, 0, 0, 0 };
-    return raw_cpuid(function, index);
+	u32 level = raw_cpuid(function & 0xf0000000, 0).a;
+	if (level < function)
+		return (struct cpuid) { 0, 0, 0, 0 };
+	return raw_cpuid(function, index);
 }
 
 static inline struct cpuid cpuid(u32 function)
 {
-    return cpuid_indexed(function, 0);
+	return cpuid_indexed(function, 0);
 }
 
 static inline u8 cpuid_maxphyaddr(void)
 {
-    if (raw_cpuid(0x80000000, 0).a < 0x80000008)
-        return 36;
-    return raw_cpuid(0x80000008, 0).a & 0xff;
+	if (raw_cpuid(0x80000000, 0).a < 0x80000008)
+		return 36;
+	return raw_cpuid(0x80000008, 0).a & 0xff;
 }
 
 static inline bool is_intel(void)
@@ -178,7 +178,7 @@ static inline bool is_intel(void)
 #define	X86_FEATURE_XMM2		(CPUID(0x1, 0, EDX, 26))
 #define	X86_FEATURE_TSC_ADJUST		(CPUID(0x7, 0, EBX, 1))
 #define	X86_FEATURE_HLE			(CPUID(0x7, 0, EBX, 4))
-#define	X86_FEATURE_SMEP	        (CPUID(0x7, 0, EBX, 7))
+#define	X86_FEATURE_SMEP		(CPUID(0x7, 0, EBX, 7))
 #define	X86_FEATURE_INVPCID		(CPUID(0x7, 0, EBX, 10))
 #define	X86_FEATURE_RTM			(CPUID(0x7, 0, EBX, 11))
 #define	X86_FEATURE_SMAP		(CPUID(0x7, 0, EBX, 20))
@@ -208,9 +208,9 @@ static inline bool is_intel(void)
 #define	X86_FEATURE_NPT			(CPUID(0x8000000A, 0, EDX, 0))
 #define	X86_FEATURE_LBRV		(CPUID(0x8000000A, 0, EDX, 1))
 #define	X86_FEATURE_NRIPS		(CPUID(0x8000000A, 0, EDX, 3))
-#define X86_FEATURE_TSCRATEMSR  (CPUID(0x8000000A, 0, EDX, 4))
-#define X86_FEATURE_PAUSEFILTER     (CPUID(0x8000000A, 0, EDX, 10))
-#define X86_FEATURE_PFTHRESHOLD     (CPUID(0x8000000A, 0, EDX, 12))
+#define X86_FEATURE_TSCRATEMSR		(CPUID(0x8000000A, 0, EDX, 4))
+#define X86_FEATURE_PAUSEFILTER		(CPUID(0x8000000A, 0, EDX, 10))
+#define X86_FEATURE_PFTHRESHOLD		(CPUID(0x8000000A, 0, EDX, 12))
 #define	X86_FEATURE_VGIF		(CPUID(0x8000000A, 0, EDX, 16))
 
 
@@ -235,66 +235,66 @@ struct far_pointer32 {
 } __attribute__((packed));
 
 struct descriptor_table_ptr {
-    u16 limit;
-    ulong base;
+	u16 limit;
+	ulong base;
 } __attribute__((packed));
 
 static inline void clac(void)
 {
-    asm volatile (".byte 0x0f, 0x01, 0xca" : : : "memory");
+	asm volatile (".byte 0x0f, 0x01, 0xca" : : : "memory");
 }
 
 static inline void stac(void)
 {
-    asm volatile (".byte 0x0f, 0x01, 0xcb" : : : "memory");
+	asm volatile (".byte 0x0f, 0x01, 0xcb" : : : "memory");
 }
 
 static inline u16 read_cs(void)
 {
-    unsigned val;
+	unsigned val;
 
-    asm volatile ("mov %%cs, %0" : "=mr"(val));
-    return val;
+	asm volatile ("mov %%cs, %0" : "=mr"(val));
+	return val;
 }
 
 static inline u16 read_ds(void)
 {
-    unsigned val;
+	unsigned val;
 
-    asm volatile ("mov %%ds, %0" : "=mr"(val));
-    return val;
+	asm volatile ("mov %%ds, %0" : "=mr"(val));
+	return val;
 }
 
 static inline u16 read_es(void)
 {
-    unsigned val;
+	unsigned val;
 
-    asm volatile ("mov %%es, %0" : "=mr"(val));
-    return val;
+	asm volatile ("mov %%es, %0" : "=mr"(val));
+	return val;
 }
 
 static inline u16 read_ss(void)
 {
-    unsigned val;
+	unsigned val;
 
-    asm volatile ("mov %%ss, %0" : "=mr"(val));
-    return val;
+	asm volatile ("mov %%ss, %0" : "=mr"(val));
+	return val;
 }
 
 static inline u16 read_fs(void)
 {
-    unsigned val;
+	unsigned val;
 
-    asm volatile ("mov %%fs, %0" : "=mr"(val));
-    return val;
+	asm volatile ("mov %%fs, %0" : "=mr"(val));
+	return val;
 }
 
 static inline u16 read_gs(void)
 {
-    unsigned val;
+	unsigned val;
 
-    asm volatile ("mov %%gs, %0" : "=mr"(val));
-    return val;
+	asm volatile ("mov %%gs, %0" : "=mr"(val));
+	return val;
 }
 
 static inline unsigned long read_rflags(void)
@@ -306,32 +306,32 @@ static inline unsigned long read_rflags(void)
 
 static inline void write_ds(unsigned val)
 {
-    asm volatile ("mov %0, %%ds" : : "rm"(val) : "memory");
+	asm volatile ("mov %0, %%ds" : : "rm"(val) : "memory");
 }
 
 static inline void write_es(unsigned val)
 {
-    asm volatile ("mov %0, %%es" : : "rm"(val) : "memory");
+	asm volatile ("mov %0, %%es" : : "rm"(val) : "memory");
 }
 
 static inline void write_ss(unsigned val)
 {
-    asm volatile ("mov %0, %%ss" : : "rm"(val) : "memory");
+	asm volatile ("mov %0, %%ss" : : "rm"(val) : "memory");
 }
 
 static inline void write_fs(unsigned val)
 {
-    asm volatile ("mov %0, %%fs" : : "rm"(val) : "memory");
+	asm volatile ("mov %0, %%fs" : : "rm"(val) : "memory");
 }
 
 static inline void write_gs(unsigned val)
 {
-    asm volatile ("mov %0, %%gs" : : "rm"(val) : "memory");
+	asm volatile ("mov %0, %%gs" : : "rm"(val) : "memory");
 }
 
 static inline void write_rflags(unsigned long f)
 {
-    asm volatile ("push %0; popf\n\t" : : "rm"(f));
+	asm volatile ("push %0; popf\n\t" : : "rm"(f));
 }
 
 static inline void set_iopl(int iopl)
@@ -343,15 +343,15 @@ static inline void set_iopl(int iopl)
 
 static inline u64 rdmsr(u32 index)
 {
-    u32 a, d;
-    asm volatile ("rdmsr" : "=a"(a), "=d"(d) : "c"(index) : "memory");
-    return a | ((u64)d << 32);
+	u32 a, d;
+	asm volatile ("rdmsr" : "=a"(a), "=d"(d) : "c"(index) : "memory");
+	return a | ((u64)d << 32);
 }
 
 static inline void wrmsr(u32 index, u64 val)
 {
-    u32 a = val, d = val >> 32;
-    asm volatile ("wrmsr" : : "a"(a), "d"(d), "c"(index) : "memory");
+	u32 a = val, d = val >> 32;
+	asm volatile ("wrmsr" : : "a"(a), "d"(d), "c"(index) : "memory");
 }
 
 static inline int rdmsr_checking(u32 index)
@@ -365,7 +365,7 @@ static inline int rdmsr_checking(u32 index)
 
 static inline int wrmsr_checking(u32 index, u64 val)
 {
-        u32 a = val, d = val >> 32;
+	u32 a = val, d = val >> 32;
 
 	asm volatile (ASM_TRY("1f")
 		      "wrmsr\n\t"
@@ -376,177 +376,177 @@ static inline int wrmsr_checking(u32 index, u64 val)
 
 static inline uint64_t rdpmc(uint32_t index)
 {
-    uint32_t a, d;
-    asm volatile ("rdpmc" : "=a"(a), "=d"(d) : "c"(index));
-    return a | ((uint64_t)d << 32);
+	uint32_t a, d;
+	asm volatile ("rdpmc" : "=a"(a), "=d"(d) : "c"(index));
+	return a | ((uint64_t)d << 32);
 }
 
 static inline void write_cr0(ulong val)
 {
-    asm volatile ("mov %0, %%cr0" : : "r"(val) : "memory");
+	asm volatile ("mov %0, %%cr0" : : "r"(val) : "memory");
 }
 
 static inline ulong read_cr0(void)
 {
-    ulong val;
-    asm volatile ("mov %%cr0, %0" : "=r"(val) : : "memory");
-    return val;
+	ulong val;
+	asm volatile ("mov %%cr0, %0" : "=r"(val) : : "memory");
+	return val;
 }
 
 static inline void write_cr2(ulong val)
 {
-    asm volatile ("mov %0, %%cr2" : : "r"(val) : "memory");
+	asm volatile ("mov %0, %%cr2" : : "r"(val) : "memory");
 }
 
 static inline ulong read_cr2(void)
 {
-    ulong val;
-    asm volatile ("mov %%cr2, %0" : "=r"(val) : : "memory");
-    return val;
+	ulong val;
+	asm volatile ("mov %%cr2, %0" : "=r"(val) : : "memory");
+	return val;
 }
 
 static inline void write_cr3(ulong val)
 {
-    asm volatile ("mov %0, %%cr3" : : "r"(val) : "memory");
+	asm volatile ("mov %0, %%cr3" : : "r"(val) : "memory");
 }
 
 static inline ulong read_cr3(void)
 {
-    ulong val;
-    asm volatile ("mov %%cr3, %0" : "=r"(val) : : "memory");
-    return val;
+	ulong val;
+	asm volatile ("mov %%cr3, %0" : "=r"(val) : : "memory");
+	return val;
 }
 
 static inline void update_cr3(void *cr3)
 {
-    write_cr3((ulong)cr3);
+	write_cr3((ulong)cr3);
 }
 
 static inline void write_cr4(ulong val)
 {
-    asm volatile ("mov %0, %%cr4" : : "r"(val) : "memory");
+	asm volatile ("mov %0, %%cr4" : : "r"(val) : "memory");
 }
 
 static inline ulong read_cr4(void)
 {
-    ulong val;
-    asm volatile ("mov %%cr4, %0" : "=r"(val) : : "memory");
-    return val;
+	ulong val;
+	asm volatile ("mov %%cr4, %0" : "=r"(val) : : "memory");
+	return val;
 }
 
 static inline void write_cr8(ulong val)
 {
-    asm volatile ("mov %0, %%cr8" : : "r"(val) : "memory");
+	asm volatile ("mov %0, %%cr8" : : "r"(val) : "memory");
 }
 
 static inline ulong read_cr8(void)
 {
-    ulong val;
-    asm volatile ("mov %%cr8, %0" : "=r"(val) : : "memory");
-    return val;
+	ulong val;
+	asm volatile ("mov %%cr8, %0" : "=r"(val) : : "memory");
+	return val;
 }
 
 static inline void lgdt(const struct descriptor_table_ptr *ptr)
 {
-    asm volatile ("lgdt %0" : : "m"(*ptr));
+	asm volatile ("lgdt %0" : : "m"(*ptr));
 }
 
 static inline void sgdt(struct descriptor_table_ptr *ptr)
 {
-    asm volatile ("sgdt %0" : "=m"(*ptr));
+	asm volatile ("sgdt %0" : "=m"(*ptr));
 }
 
 static inline void lidt(const struct descriptor_table_ptr *ptr)
 {
-    asm volatile ("lidt %0" : : "m"(*ptr));
+	asm volatile ("lidt %0" : : "m"(*ptr));
 }
 
 static inline void sidt(struct descriptor_table_ptr *ptr)
 {
-    asm volatile ("sidt %0" : "=m"(*ptr));
+	asm volatile ("sidt %0" : "=m"(*ptr));
 }
 
 static inline void lldt(u16 val)
 {
-    asm volatile ("lldt %0" : : "rm"(val));
+	asm volatile ("lldt %0" : : "rm"(val));
 }
 
 static inline u16 sldt(void)
 {
-    u16 val;
-    asm volatile ("sldt %0" : "=rm"(val));
-    return val;
+	u16 val;
+	asm volatile ("sldt %0" : "=rm"(val));
+	return val;
 }
 
 static inline void ltr(u16 val)
 {
-    asm volatile ("ltr %0" : : "rm"(val));
+	asm volatile ("ltr %0" : : "rm"(val));
 }
 
 static inline u16 str(void)
 {
-    u16 val;
-    asm volatile ("str %0" : "=rm"(val));
-    return val;
+	u16 val;
+	asm volatile ("str %0" : "=rm"(val));
+	return val;
 }
 
 static inline void write_dr0(void *val)
 {
-    asm volatile ("mov %0, %%dr0" : : "r"(val) : "memory");
+	asm volatile ("mov %0, %%dr0" : : "r"(val) : "memory");
 }
 
 static inline void write_dr1(void *val)
 {
-    asm volatile ("mov %0, %%dr1" : : "r"(val) : "memory");
+	asm volatile ("mov %0, %%dr1" : : "r"(val) : "memory");
 }
 
 static inline void write_dr2(void *val)
 {
-    asm volatile ("mov %0, %%dr2" : : "r"(val) : "memory");
+	asm volatile ("mov %0, %%dr2" : : "r"(val) : "memory");
 }
 
 static inline void write_dr3(void *val)
 {
-    asm volatile ("mov %0, %%dr3" : : "r"(val) : "memory");
+	asm volatile ("mov %0, %%dr3" : : "r"(val) : "memory");
 }
 
 static inline void write_dr6(ulong val)
 {
-    asm volatile ("mov %0, %%dr6" : : "r"(val) : "memory");
+	asm volatile ("mov %0, %%dr6" : : "r"(val) : "memory");
 }
 
 static inline ulong read_dr6(void)
 {
-    ulong val;
-    asm volatile ("mov %%dr6, %0" : "=r"(val));
-    return val;
+	ulong val;
+	asm volatile ("mov %%dr6, %0" : "=r"(val));
+	return val;
 }
 
 static inline void write_dr7(ulong val)
 {
-    asm volatile ("mov %0, %%dr7" : : "r"(val) : "memory");
+	asm volatile ("mov %0, %%dr7" : : "r"(val) : "memory");
 }
 
 static inline ulong read_dr7(void)
 {
-    ulong val;
-    asm volatile ("mov %%dr7, %0" : "=r"(val));
-    return val;
+	ulong val;
+	asm volatile ("mov %%dr7, %0" : "=r"(val));
+	return val;
 }
 
 static inline void pause(void)
 {
-    asm volatile ("pause");
+	asm volatile ("pause");
 }
 
 static inline void cli(void)
 {
-    asm volatile ("cli");
+	asm volatile ("cli");
 }
 
 static inline void sti(void)
 {
-    asm volatile ("sti");
+	asm volatile ("sti");
 }
 
 static inline unsigned long long rdrand(void)
@@ -600,17 +600,17 @@ static inline unsigned long long fenced_rdtsc(void)
 
 static inline unsigned long long rdtscp(u32 *aux)
 {
-       long long r;
+	long long r;
 
 #ifdef __x86_64__
-       unsigned a, d;
+	unsigned a, d;
 
-       asm volatile ("rdtscp" : "=a"(a), "=d"(d), "=c"(*aux));
-       r = a | ((long long)d << 32);
+	asm volatile ("rdtscp" : "=a"(a), "=d"(d), "=c"(*aux));
+	r = a | ((long long)d << 32);
 #else
-       asm volatile ("rdtscp" : "=A"(r), "=c"(*aux));
+	asm volatile ("rdtscp" : "=A"(r), "=c"(*aux));
 #endif
-       return r;
+	return r;
 }
 
 static inline void wrtsc(u64 tsc)
@@ -620,7 +620,7 @@ static inline void wrtsc(u64 tsc)
 
 static inline void irq_disable(void)
 {
-    asm volatile("cli");
+	asm volatile("cli");
 }
 
 /* Note that irq_enable() does not ensure an interrupt shadow due
@@ -629,7 +629,7 @@ static inline void irq_disable(void)
  */
 static inline void irq_enable(void)
 {
-    asm volatile("sti");
+	asm volatile("sti");
 }
 
 static inline void invlpg(volatile void *va)
@@ -644,25 +644,25 @@ static inline void safe_halt(void)
 
 static inline u32 read_pkru(void)
 {
-    unsigned int eax, edx;
-    unsigned int ecx = 0;
-    unsigned int pkru;
+	unsigned int eax, edx;
+	unsigned int ecx = 0;
+	unsigned int pkru;
 
-    asm volatile(".byte 0x0f,0x01,0xee\n\t"
-                 : "=a" (eax), "=d" (edx)
-                 : "c" (ecx));
-    pkru = eax;
-    return pkru;
+	asm volatile(".byte 0x0f,0x01,0xee\n\t"
+		     : "=a" (eax), "=d" (edx)
+		     : "c" (ecx));
+	pkru = eax;
+	return pkru;
 }
 
 static inline void write_pkru(u32 pkru)
 {
-    unsigned int eax = pkru;
-    unsigned int ecx = 0;
-    unsigned int edx = 0;
+	unsigned int eax = pkru;
+	unsigned int ecx = 0;
+	unsigned int edx = 0;
 
-    asm volatile(".byte 0x0f,0x01,0xef\n\t"
-        : : "a" (eax), "c" (ecx), "d" (edx));
+	asm volatile(".byte 0x0f,0x01,0xef\n\t"
+		     : : "a" (eax), "c" (ecx), "d" (edx));
 }
 
 static inline bool is_canonical(u64 addr)
@@ -696,7 +696,7 @@ static inline void flush_tlb(void)
 
 static inline int has_spec_ctrl(void)
 {
-    return !!(this_cpu_has(X86_FEATURE_SPEC_CTRL));
+	return !!(this_cpu_has(X86_FEATURE_SPEC_CTRL));
 }
 
 static inline int cpu_has_efer_nx(void)
-- 
2.36.1.255.ge46751e96f-goog


* [kvm-unit-tests PATCH 03/10] x86: Use "safe" terminology instead of "checking"
  2022-06-08 23:52 [kvm-unit-tests PATCH 00/10] x86: nVMX: Add VMXON #UD test Sean Christopherson
  2022-06-08 23:52 ` [kvm-unit-tests PATCH 01/10] x86: Use BIT() to define architectural bits Sean Christopherson
  2022-06-08 23:52 ` [kvm-unit-tests PATCH 02/10] x86: Replace spaces with tabs in processor.h Sean Christopherson
@ 2022-06-08 23:52 ` Sean Christopherson
  2023-08-22  8:09   ` Like Xu
  2022-06-08 23:52 ` [kvm-unit-tests PATCH 04/10] x86: Use "safe" helpers to implement unsafe CRs accessors Sean Christopherson
                   ` (6 subsequent siblings)
  9 siblings, 1 reply; 12+ messages in thread
From: Sean Christopherson @ 2022-06-08 23:52 UTC
  To: Paolo Bonzini; +Cc: kvm, Sean Christopherson

Rename all helpers that eat (and return) exceptions to use "safe" instead
of "checking".  This aligns KUT with the kernel and KVM selftests.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 lib/x86/desc.c      |  2 +-
 lib/x86/desc.h      |  2 +-
 lib/x86/processor.h |  4 ++--
 x86/access.c        |  8 ++++----
 x86/la57.c          |  2 +-
 x86/msr.c           |  4 ++--
 x86/pcid.c          | 22 +++++++++++-----------
 x86/rdpru.c         |  4 ++--
 x86/vmx.c           |  4 ++--
 x86/vmx.h           |  2 +-
 x86/vmx_tests.c     |  6 +++---
 x86/xsave.c         | 30 +++++++++++++++---------------
 12 files changed, 45 insertions(+), 45 deletions(-)

diff --git a/lib/x86/desc.c b/lib/x86/desc.c
index 0677fcd9..ff9bd6b7 100644
--- a/lib/x86/desc.c
+++ b/lib/x86/desc.c
@@ -299,7 +299,7 @@ unsigned exception_vector(void)
 	return this_cpu_read_exception_vector();
 }
 
-int write_cr4_checking(unsigned long val)
+int write_cr4_safe(unsigned long val)
 {
 	asm volatile(ASM_TRY("1f")
 		"mov %0,%%cr4\n\t"
diff --git a/lib/x86/desc.h b/lib/x86/desc.h
index 5224b58b..0bd44445 100644
--- a/lib/x86/desc.h
+++ b/lib/x86/desc.h
@@ -213,7 +213,7 @@ extern tss64_t tss[];
 extern gdt_entry_t gdt[];
 
 unsigned exception_vector(void);
-int write_cr4_checking(unsigned long val);
+int write_cr4_safe(unsigned long val);
 unsigned exception_error_code(void);
 bool exception_rflags_rf(void);
 void set_idt_entry(int vec, void *addr, int dpl);
diff --git a/lib/x86/processor.h b/lib/x86/processor.h
index 13d9d9bb..e169aac8 100644
--- a/lib/x86/processor.h
+++ b/lib/x86/processor.h
@@ -354,7 +354,7 @@ static inline void wrmsr(u32 index, u64 val)
 	asm volatile ("wrmsr" : : "a"(a), "d"(d), "c"(index) : "memory");
 }
 
-static inline int rdmsr_checking(u32 index)
+static inline int rdmsr_safe(u32 index)
 {
 	asm volatile (ASM_TRY("1f")
 		      "rdmsr\n\t"
@@ -363,7 +363,7 @@ static inline int rdmsr_checking(u32 index)
 	return exception_vector();
 }
 
-static inline int wrmsr_checking(u32 index, u64 val)
+static inline int wrmsr_safe(u32 index, u64 val)
 {
 	u32 a = val, d = val >> 32;
 
diff --git a/x86/access.c b/x86/access.c
index 3832e2ef..4dfec230 100644
--- a/x86/access.c
+++ b/x86/access.c
@@ -280,7 +280,7 @@ static unsigned set_cr4_smep(ac_test_t *at, int smep)
 
 	if (smep)
 		walk_ptes(at, code_start, code_end, clear_user_mask);
-	r = write_cr4_checking(cr4);
+	r = write_cr4_safe(cr4);
 	if (r || !smep)
 		walk_ptes(at, code_start, code_end, set_user_mask);
 	if (!r)
@@ -1188,7 +1188,7 @@ int ac_test_run(int pt_levels)
 		/* Now PKRU = 0xFFFFFFFF.  */
 	} else {
 		tests++;
-		if (write_cr4_checking(shadow_cr4 | X86_CR4_PKE) == GP_VECTOR) {
+		if (write_cr4_safe(shadow_cr4 | X86_CR4_PKE) == GP_VECTOR) {
 			successes++;
 			invalid_mask |= AC_PKU_AD_MASK;
 			invalid_mask |= AC_PKU_WD_MASK;
@@ -1216,12 +1216,12 @@ int ac_test_run(int pt_levels)
 	/* Toggling LA57 in 64-bit mode (guaranteed for this test) is illegal. */
 	if (this_cpu_has(X86_FEATURE_LA57)) {
 		tests++;
-		if (write_cr4_checking(shadow_cr4 ^ X86_CR4_LA57) == GP_VECTOR)
+		if (write_cr4_safe(shadow_cr4 ^ X86_CR4_LA57) == GP_VECTOR)
 			successes++;
 
 		/* Force a VM-Exit on KVM, which doesn't intercept LA57 itself. */
 		tests++;
-		if (write_cr4_checking(shadow_cr4 ^ (X86_CR4_LA57 | X86_CR4_PSE)) == GP_VECTOR)
+		if (write_cr4_safe(shadow_cr4 ^ (X86_CR4_LA57 | X86_CR4_PSE)) == GP_VECTOR)
 			successes++;
 	}
 
diff --git a/x86/la57.c b/x86/la57.c
index b537bb22..1f11412c 100644
--- a/x86/la57.c
+++ b/x86/la57.c
@@ -4,7 +4,7 @@
 
 int main(int ac, char **av)
 {
-	int vector = write_cr4_checking(read_cr4() | X86_CR4_LA57);
+	int vector = write_cr4_safe(read_cr4() | X86_CR4_LA57);
 	int expected = this_cpu_has(X86_FEATURE_LA57) ? 0 : 13;
 
 	report(vector == expected, "%s when CR4.LA57 %ssupported",
diff --git a/x86/msr.c b/x86/msr.c
index 44fbb3b2..eaca19ed 100644
--- a/x86/msr.c
+++ b/x86/msr.c
@@ -72,7 +72,7 @@ static void test_msr_rw(struct msr_info *msr, unsigned long long val)
 
 static void test_wrmsr_fault(struct msr_info *msr, unsigned long long val)
 {
-	unsigned char vector = wrmsr_checking(msr->index, val);
+	unsigned char vector = wrmsr_safe(msr->index, val);
 
 	report(vector == GP_VECTOR,
 	       "Expected #GP on WRSMR(%s, 0x%llx), got vector %d",
@@ -81,7 +81,7 @@ static void test_wrmsr_fault(struct msr_info *msr, unsigned long long val)
 
 static void test_rdmsr_fault(struct msr_info *msr)
 {
-	unsigned char vector = rdmsr_checking(msr->index);
+	unsigned char vector = rdmsr_safe(msr->index);
 
 	report(vector == GP_VECTOR,
 	       "Expected #GP on RDSMR(%s), got vector %d", msr->name, vector);
diff --git a/x86/pcid.c b/x86/pcid.c
index 80a4611d..5e08f576 100644
--- a/x86/pcid.c
+++ b/x86/pcid.c
@@ -10,7 +10,7 @@ struct invpcid_desc {
     u64 addr : 64;
 };
 
-static int write_cr0_checking(unsigned long val)
+static int write_cr0_safe(unsigned long val)
 {
     asm volatile(ASM_TRY("1f")
                  "mov %0, %%cr0\n\t"
@@ -18,7 +18,7 @@ static int write_cr0_checking(unsigned long val)
     return exception_vector();
 }
 
-static int invpcid_checking(unsigned long type, void *desc)
+static int invpcid_safe(unsigned long type, void *desc)
 {
     asm volatile (ASM_TRY("1f")
                   ".byte 0x66,0x0f,0x38,0x82,0x18 \n\t" /* invpcid (%rax), %rbx */
@@ -32,18 +32,18 @@ static void test_pcid_enabled(void)
     ulong cr0 = read_cr0(), cr3 = read_cr3(), cr4 = read_cr4();
 
     /* try setting CR4.PCIDE, no exception expected */
-    if (write_cr4_checking(cr4 | X86_CR4_PCIDE) != 0)
+    if (write_cr4_safe(cr4 | X86_CR4_PCIDE) != 0)
         goto report;
 
     /* try clearing CR0.PG when CR4.PCIDE=1, #GP expected */
-    if (write_cr0_checking(cr0 & ~X86_CR0_PG) != GP_VECTOR)
+    if (write_cr0_safe(cr0 & ~X86_CR0_PG) != GP_VECTOR)
         goto report;
 
     write_cr4(cr4);
 
     /* try setting CR4.PCIDE when CR3[11:0] != 0 , #GP expected */
     write_cr3(cr3 | 0x001);
-    if (write_cr4_checking(cr4 | X86_CR4_PCIDE) != GP_VECTOR)
+    if (write_cr4_safe(cr4 | X86_CR4_PCIDE) != GP_VECTOR)
         goto report;
     write_cr3(cr3);
 
@@ -59,7 +59,7 @@ static void test_pcid_disabled(void)
     ulong cr4 = read_cr4();
 
     /* try setting CR4.PCIDE, #GP expected */
-    if (write_cr4_checking(cr4 | X86_CR4_PCIDE) != GP_VECTOR)
+    if (write_cr4_safe(cr4 | X86_CR4_PCIDE) != GP_VECTOR)
         goto report;
 
     passed = 1;
@@ -80,7 +80,7 @@ static void test_invpcid_enabled(int pcid_enabled)
      * no exception expected
      */
     for (i = 0; i < 4; i++) {
-        if (invpcid_checking(i, &desc) != 0)
+        if (invpcid_safe(i, &desc) != 0)
             goto report;
     }
 
@@ -89,7 +89,7 @@ static void test_invpcid_enabled(int pcid_enabled)
      */
     desc.pcid = 1;
     for (i = 0; i < 2; i++) {
-        if (invpcid_checking(i, &desc) != GP_VECTOR)
+        if (invpcid_safe(i, &desc) != GP_VECTOR)
             goto report;
     }
 
@@ -97,14 +97,14 @@ static void test_invpcid_enabled(int pcid_enabled)
     if (!pcid_enabled)
         goto success;
 
-    if (write_cr4_checking(cr4 | X86_CR4_PCIDE) != 0)
+    if (write_cr4_safe(cr4 | X86_CR4_PCIDE) != 0)
         goto report;
 
     /* try executing invpcid when CR4.PCIDE=1
      * no exception expected
      */
     desc.pcid = 10;
-    if (invpcid_checking(2, &desc) != 0)
+    if (invpcid_safe(2, &desc) != 0)
         goto report;
 
 success:
@@ -120,7 +120,7 @@ static void test_invpcid_disabled(void)
     struct invpcid_desc desc;
 
     /* try executing invpcid, #UD expected */
-    if (invpcid_checking(2, &desc) != UD_VECTOR)
+    if (invpcid_safe(2, &desc) != UD_VECTOR)
         goto report;
 
     passed = 1;
diff --git a/x86/rdpru.c b/x86/rdpru.c
index 5cb69cb6..85c5edd6 100644
--- a/x86/rdpru.c
+++ b/x86/rdpru.c
@@ -4,7 +4,7 @@
 #include "processor.h"
 #include "desc.h"
 
-static int rdpru_checking(void)
+static int rdpru_safe(void)
 {
 	asm volatile (ASM_TRY("1f")
 		      ".byte 0x0f,0x01,0xfd \n\t" /* rdpru */
@@ -17,7 +17,7 @@ int main(int ac, char **av)
 	if (this_cpu_has(X86_FEATURE_RDPRU))
 		report_skip("RDPRU raises #UD");
 	else
-		report(rdpru_checking() == UD_VECTOR, "RDPRU raises #UD");
+		report(rdpru_safe() == UD_VECTOR, "RDPRU raises #UD");
 
 	return report_summary();
 }
diff --git a/x86/vmx.c b/x86/vmx.c
index c0242986..65305238 100644
--- a/x86/vmx.c
+++ b/x86/vmx.c
@@ -292,7 +292,7 @@ static bool check_vmcs_field(struct vmcs_field *f, u8 cookie)
 		return true;
 	}
 
-	ret = vmcs_read_checking(f->encoding, &actual);
+	ret = vmcs_read_safe(f->encoding, &actual);
 	assert(!(ret & X86_EFLAGS_CF));
 	/* Skip VMCS fields that aren't recognized by the CPU */
 	if (ret & X86_EFLAGS_ZF)
@@ -352,7 +352,7 @@ static u32 find_vmcs_max_index(void)
 				      (type << VMCS_FIELD_TYPE_SHIFT) |
 				      (width << VMCS_FIELD_WIDTH_SHIFT);
 
-				ret = vmcs_read_checking(enc, &actual);
+				ret = vmcs_read_safe(enc, &actual);
 				assert(!(ret & X86_EFLAGS_CF));
 				if (!(ret & X86_EFLAGS_ZF))
 					return idx;
diff --git a/x86/vmx.h b/x86/vmx.h
index 11cb6651..7cd02410 100644
--- a/x86/vmx.h
+++ b/x86/vmx.h
@@ -933,7 +933,7 @@ static inline u64 vmcs_readm(enum Encoding enc)
 	return val;
 }
 
-static inline int vmcs_read_checking(enum Encoding enc, u64 *value)
+static inline int vmcs_read_safe(enum Encoding enc, u64 *value)
 {
 	u64 rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF;
 	u64 encoding = enc;
diff --git a/x86/vmx_tests.c b/x86/vmx_tests.c
index 4d581e70..1b277cfb 100644
--- a/x86/vmx_tests.c
+++ b/x86/vmx_tests.c
@@ -8311,7 +8311,7 @@ static void vmx_cr_load_test(void)
 	/* Enable PCID for L1. */
 	cr4 = orig_cr4 | X86_CR4_PCIDE;
 	cr3 = orig_cr3 | 0x1;
-	TEST_ASSERT(!write_cr4_checking(cr4));
+	TEST_ASSERT(!write_cr4_safe(cr4));
 	write_cr3(cr3);
 
 	test_set_guest(vmx_single_vmcall_guest);
@@ -8328,11 +8328,11 @@ static void vmx_cr_load_test(void)
 	 *     have no side effect because normally no guest MCE (e.g., as the
 	 *     result of bad memory) would happen during this test.
 	 */
-	TEST_ASSERT(!write_cr4_checking(cr4 ^ X86_CR4_MCE));
+	TEST_ASSERT(!write_cr4_safe(cr4 ^ X86_CR4_MCE));
 
 	/* Cleanup L1 state. */
 	write_cr3(orig_cr3);
-	TEST_ASSERT(!write_cr4_checking(orig_cr4));
+	TEST_ASSERT(!write_cr4_safe(orig_cr4));
 
 	if (!this_cpu_has(X86_FEATURE_LA57))
 		goto done;
diff --git a/x86/xsave.c b/x86/xsave.c
index 84170033..39a55d66 100644
--- a/x86/xsave.c
+++ b/x86/xsave.c
@@ -21,7 +21,7 @@ static int xgetbv_checking(u32 index, u64 *result)
     return exception_vector();
 }
 
-static int xsetbv_checking(u32 index, u64 value)
+static int xsetbv_safe(u32 index, u64 value)
 {
     u32 eax = value;
     u32 edx = value >> 32;
@@ -66,60 +66,60 @@ static void test_xsave(void)
            "Check minimal XSAVE required bits");
 
     cr4 = read_cr4();
-    report(write_cr4_checking(cr4 | X86_CR4_OSXSAVE) == 0, "Set CR4 OSXSAVE");
+    report(write_cr4_safe(cr4 | X86_CR4_OSXSAVE) == 0, "Set CR4 OSXSAVE");
     report(this_cpu_has(X86_FEATURE_OSXSAVE),
            "Check CPUID.1.ECX.OSXSAVE - expect 1");
 
     printf("\tLegal tests\n");
     test_bits = XSTATE_FP;
-    report(xsetbv_checking(XCR_XFEATURE_ENABLED_MASK, test_bits) == 0,
+    report(xsetbv_safe(XCR_XFEATURE_ENABLED_MASK, test_bits) == 0,
            "\t\txsetbv(XCR_XFEATURE_ENABLED_MASK, XSTATE_FP)");
 
     test_bits = XSTATE_FP | XSTATE_SSE;
-    report(xsetbv_checking(XCR_XFEATURE_ENABLED_MASK, test_bits) == 0,
+    report(xsetbv_safe(XCR_XFEATURE_ENABLED_MASK, test_bits) == 0,
            "\t\txsetbv(XCR_XFEATURE_ENABLED_MASK, XSTATE_FP | XSTATE_SSE)");
     report(xgetbv_checking(XCR_XFEATURE_ENABLED_MASK, &xcr0) == 0,
            "        xgetbv(XCR_XFEATURE_ENABLED_MASK)");
 
     printf("\tIllegal tests\n");
     test_bits = 0;
-    report(xsetbv_checking(XCR_XFEATURE_ENABLED_MASK, test_bits) == GP_VECTOR,
+    report(xsetbv_safe(XCR_XFEATURE_ENABLED_MASK, test_bits) == GP_VECTOR,
            "\t\txsetbv(XCR_XFEATURE_ENABLED_MASK, 0) - expect #GP");
 
     test_bits = XSTATE_SSE;
-    report(xsetbv_checking(XCR_XFEATURE_ENABLED_MASK, test_bits) == GP_VECTOR,
+    report(xsetbv_safe(XCR_XFEATURE_ENABLED_MASK, test_bits) == GP_VECTOR,
            "\t\txsetbv(XCR_XFEATURE_ENABLED_MASK, XSTATE_SSE) - expect #GP");
 
     if (supported_xcr0 & XSTATE_YMM) {
         test_bits = XSTATE_YMM;
-        report(xsetbv_checking(XCR_XFEATURE_ENABLED_MASK, test_bits) == GP_VECTOR,
+        report(xsetbv_safe(XCR_XFEATURE_ENABLED_MASK, test_bits) == GP_VECTOR,
                "\t\txsetbv(XCR_XFEATURE_ENABLED_MASK, XSTATE_YMM) - expect #GP");
 
         test_bits = XSTATE_FP | XSTATE_YMM;
-        report(xsetbv_checking(XCR_XFEATURE_ENABLED_MASK, test_bits) == GP_VECTOR,
+        report(xsetbv_safe(XCR_XFEATURE_ENABLED_MASK, test_bits) == GP_VECTOR,
                "\t\txsetbv(XCR_XFEATURE_ENABLED_MASK, XSTATE_FP | XSTATE_YMM) - expect #GP");
     }
 
     test_bits = XSTATE_SSE;
-    report(xsetbv_checking(XCR_XFEATURE_ILLEGAL_MASK, test_bits) == GP_VECTOR,
+    report(xsetbv_safe(XCR_XFEATURE_ILLEGAL_MASK, test_bits) == GP_VECTOR,
            "\t\txsetbv(XCR_XFEATURE_ILLEGAL_MASK, XSTATE_FP) - expect #GP");
 
     test_bits = XSTATE_SSE;
-    report(xsetbv_checking(XCR_XFEATURE_ILLEGAL_MASK, test_bits) == GP_VECTOR,
+    report(xsetbv_safe(XCR_XFEATURE_ILLEGAL_MASK, test_bits) == GP_VECTOR,
            "\t\txgetbv(XCR_XFEATURE_ILLEGAL_MASK, XSTATE_FP) - expect #GP");
 
     cr4 &= ~X86_CR4_OSXSAVE;
-    report(write_cr4_checking(cr4) == 0, "Unset CR4 OSXSAVE");
+    report(write_cr4_safe(cr4) == 0, "Unset CR4 OSXSAVE");
     report(this_cpu_has(X86_FEATURE_OSXSAVE) == 0,
            "Check CPUID.1.ECX.OSXSAVE - expect 0");
 
     printf("\tIllegal tests:\n");
     test_bits = XSTATE_FP;
-    report(xsetbv_checking(XCR_XFEATURE_ENABLED_MASK, test_bits) == UD_VECTOR,
+    report(xsetbv_safe(XCR_XFEATURE_ENABLED_MASK, test_bits) == UD_VECTOR,
            "\t\txsetbv(XCR_XFEATURE_ENABLED_MASK, XSTATE_FP) - expect #UD");
 
     test_bits = XSTATE_FP | XSTATE_SSE;
-    report(xsetbv_checking(XCR_XFEATURE_ENABLED_MASK, test_bits) == UD_VECTOR,
+    report(xsetbv_safe(XCR_XFEATURE_ENABLED_MASK, test_bits) == UD_VECTOR,
            "\t\txsetbv(XCR_XFEATURE_ENABLED_MASK, XSTATE_FP | XSTATE_SSE) - expect #UD");
 
     printf("\tIllegal tests:\n");
@@ -138,13 +138,13 @@ static void test_no_xsave(void)
     printf("Illegal instruction testing:\n");
 
     cr4 = read_cr4();
-    report(write_cr4_checking(cr4 | X86_CR4_OSXSAVE) == GP_VECTOR,
+    report(write_cr4_safe(cr4 | X86_CR4_OSXSAVE) == GP_VECTOR,
            "Set OSXSAVE in CR4 - expect #GP");
 
     report(xgetbv_checking(XCR_XFEATURE_ENABLED_MASK, &xcr0) == UD_VECTOR,
            "Execute xgetbv - expect #UD");
 
-    report(xsetbv_checking(XCR_XFEATURE_ENABLED_MASK, 0x3) == UD_VECTOR,
+    report(xsetbv_safe(XCR_XFEATURE_ENABLED_MASK, 0x3) == UD_VECTOR,
            "Execute xsetbv - expect #UD");
 }
 
-- 
2.36.1.255.ge46751e96f-goog


* [kvm-unit-tests PATCH 04/10] x86: Use "safe" helpers to implement unsafe CRs accessors
  2022-06-08 23:52 [kvm-unit-tests PATCH 00/10] x86: nVMX: Add VMXON #UD test Sean Christopherson
                   ` (2 preceding siblings ...)
  2022-06-08 23:52 ` [kvm-unit-tests PATCH 03/10] x86: Use "safe" terminology instead of "checking" Sean Christopherson
@ 2022-06-08 23:52 ` Sean Christopherson
  2022-06-08 23:52 ` [kvm-unit-tests PATCH 05/10] x86: Provide result of RDMSR from "safe" variant Sean Christopherson
                   ` (5 subsequent siblings)
  9 siblings, 0 replies; 12+ messages in thread
From: Sean Christopherson @ 2022-06-08 23:52 UTC
  To: Paolo Bonzini; +Cc: kvm, Sean Christopherson

Use the "safe" helpers to read and write CR0, CR3, and CR4, so that an
unexpected fault results in a detailed message instead of a generic
"unexpected fault" explosion.

Do not give RDMSR/WRMSR the same treatment.  KUT's exception fixup uses
per-CPU data and thus needs a stable GS.base.  Various tests modify
MSR_GS_BASE, and routing the MSR accessors through the safe variants
would cause fireworks when trying to clear/read the exception vector
with a garbage GS.base.
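
To illustrate the hazard (hypothetical sequence, not taken from any
test):

	u64 orig_base = rdmsr(MSR_GS_BASE);

	wrmsr(MSR_GS_BASE, 0);		/* per-CPU data now unreachable */
	rdmsr(MSR_GS_BASE);		/* a "safe" variant would consult the
					 * exception vector via GS.base here
					 * and explode */
	wrmsr(MSR_GS_BASE, orig_base);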

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 lib/x86/desc.c      |  8 --------
 lib/x86/desc.h      |  1 -
 lib/x86/processor.h | 45 ++++++++++++++++++++++++++++++++++++++++++---
 x86/pcid.c          |  8 --------
 4 files changed, 42 insertions(+), 20 deletions(-)

diff --git a/lib/x86/desc.c b/lib/x86/desc.c
index ff9bd6b7..7620dc81 100644
--- a/lib/x86/desc.c
+++ b/lib/x86/desc.c
@@ -299,14 +299,6 @@ unsigned exception_vector(void)
 	return this_cpu_read_exception_vector();
 }
 
-int write_cr4_safe(unsigned long val)
-{
-	asm volatile(ASM_TRY("1f")
-		"mov %0,%%cr4\n\t"
-		"1:": : "r" (val));
-	return exception_vector();
-}
-
 unsigned exception_error_code(void)
 {
 	return this_cpu_read_exception_error_code();
diff --git a/lib/x86/desc.h b/lib/x86/desc.h
index 0bd44445..ac843c35 100644
--- a/lib/x86/desc.h
+++ b/lib/x86/desc.h
@@ -213,7 +213,6 @@ extern tss64_t tss[];
 extern gdt_entry_t gdt[];
 
 unsigned exception_vector(void);
-int write_cr4_safe(unsigned long val);
 unsigned exception_error_code(void);
 bool exception_rflags_rf(void);
 void set_idt_entry(int vec, void *addr, int dpl);
diff --git a/lib/x86/processor.h b/lib/x86/processor.h
index e169aac8..bc6c8d94 100644
--- a/lib/x86/processor.h
+++ b/lib/x86/processor.h
@@ -341,6 +341,12 @@ static inline void set_iopl(int iopl)
 	write_rflags(flags);
 }
 
+/*
+ * Don't use the safe variants for rdmsr() or wrmsr().  The exception fixup
+ * infrastructure uses per-CPU data and thus consumes GS.base.  Various tests
+ * temporarily modify MSR_GS_BASE and will explode when trying to determine
+ * whether or not RDMSR/WRMSR faulted.
+ */
 static inline u64 rdmsr(u32 index)
 {
 	u32 a, d;
@@ -381,9 +387,20 @@ static inline uint64_t rdpmc(uint32_t index)
 	return a | ((uint64_t)d << 32);
 }
 
+static inline int write_cr0_safe(ulong val)
+{
+	asm volatile(ASM_TRY("1f")
+		     "mov %0,%%cr0\n\t"
+		     "1:": : "r" (val));
+	return exception_vector();
+}
+
 static inline void write_cr0(ulong val)
 {
-	asm volatile ("mov %0, %%cr0" : : "r"(val) : "memory");
+	int vector = write_cr0_safe(val);
+
+	assert_msg(!vector, "Unexpected fault '%d' writing CR0 = %lx",
+		   vector, val);
 }
 
 static inline ulong read_cr0(void)
@@ -405,9 +422,20 @@ static inline ulong read_cr2(void)
 	return val;
 }
 
+static inline int write_cr3_safe(ulong val)
+{
+	asm volatile(ASM_TRY("1f")
+		     "mov %0,%%cr3\n\t"
+		     "1:": : "r" (val));
+	return exception_vector();
+}
+
 static inline void write_cr3(ulong val)
 {
-	asm volatile ("mov %0, %%cr3" : : "r"(val) : "memory");
+	int vector = write_cr3_safe(val);
+
+	assert_msg(!vector, "Unexpected fault '%d' writing CR3 = %lx",
+		   vector, val);
 }
 
 static inline ulong read_cr3(void)
@@ -422,9 +450,20 @@ static inline void update_cr3(void *cr3)
 	write_cr3((ulong)cr3);
 }
 
+static inline int write_cr4_safe(ulong val)
+{
+	asm volatile(ASM_TRY("1f")
+		     "mov %0,%%cr4\n\t"
+		     "1:": : "r" (val));
+	return exception_vector();
+}
+
 static inline void write_cr4(ulong val)
 {
-	asm volatile ("mov %0, %%cr4" : : "r"(val) : "memory");
+	int vector = write_cr4_safe(val);
+
+	assert_msg(!vector, "Unexpected fault '%d' writing CR4 = %lx",
+		   vector, val);
 }
 
 static inline ulong read_cr4(void)
diff --git a/x86/pcid.c b/x86/pcid.c
index 5e08f576..4dfc6fd0 100644
--- a/x86/pcid.c
+++ b/x86/pcid.c
@@ -10,14 +10,6 @@ struct invpcid_desc {
     u64 addr : 64;
 };
 
-static int write_cr0_safe(unsigned long val)
-{
-    asm volatile(ASM_TRY("1f")
-                 "mov %0, %%cr0\n\t"
-                 "1:": : "r" (val));
-    return exception_vector();
-}
-
 static int invpcid_safe(unsigned long type, void *desc)
 {
     asm volatile (ASM_TRY("1f")
-- 
2.36.1.255.ge46751e96f-goog


* [kvm-unit-tests PATCH 05/10] x86: Provide result of RDMSR from "safe" variant
  2022-06-08 23:52 [kvm-unit-tests PATCH 00/10] x86: nVMX: Add VMXON #UD test Sean Christopherson
                   ` (3 preceding siblings ...)
  2022-06-08 23:52 ` [kvm-unit-tests PATCH 04/10] x86: Use "safe" helpers to implement unsafe CRs accessors Sean Christopherson
@ 2022-06-08 23:52 ` Sean Christopherson
  2022-06-08 23:52 ` [kvm-unit-tests PATCH 06/10] nVMX: Check the results of VMXON/VMXOFF in feature control test Sean Christopherson
                   ` (4 subsequent siblings)
  9 siblings, 0 replies; 12+ messages in thread
From: Sean Christopherson @ 2022-06-08 23:52 UTC
  To: Paolo Bonzini; +Cc: kvm, Sean Christopherson

Provide the result of RDMSR from rdmsr_safe() so that it can be used by
tests that are unsure whether or not RDMSR will fault, but want the value
if it doesn't fault.
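
Usage sketch (illustrative caller, msr->index/msr->name as in x86/msr.c):

	uint64_t val;
	unsigned char vector = rdmsr_safe(msr->index, &val);

	if (!vector)
		printf("RDMSR(%s) = 0x%lx\n", msr->name, (unsigned long)val);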

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 lib/x86/processor.h | 9 +++++++--
 x86/msr.c           | 3 ++-
 2 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/lib/x86/processor.h b/lib/x86/processor.h
index bc6c8d94..82f8df58 100644
--- a/lib/x86/processor.h
+++ b/lib/x86/processor.h
@@ -360,12 +360,17 @@ static inline void wrmsr(u32 index, u64 val)
 	asm volatile ("wrmsr" : : "a"(a), "d"(d), "c"(index) : "memory");
 }
 
-static inline int rdmsr_safe(u32 index)
+static inline int rdmsr_safe(u32 index, uint64_t *val)
 {
+	uint32_t a, d;
+
 	asm volatile (ASM_TRY("1f")
 		      "rdmsr\n\t"
 		      "1:"
-		      : : "c"(index) : "memory", "eax", "edx");
+		      : "=a"(a), "=d"(d)
+		      : "c"(index) : "memory");
+
+	*val = (uint64_t)a | ((uint64_t)d << 32);
 	return exception_vector();
 }
 
diff --git a/x86/msr.c b/x86/msr.c
index eaca19ed..ee1d3984 100644
--- a/x86/msr.c
+++ b/x86/msr.c
@@ -81,7 +81,8 @@ static void test_wrmsr_fault(struct msr_info *msr, unsigned long long val)
 
 static void test_rdmsr_fault(struct msr_info *msr)
 {
-	unsigned char vector = rdmsr_safe(msr->index);
+	uint64_t ignored;
+	unsigned char vector = rdmsr_safe(msr->index, &ignored);
 
 	report(vector == GP_VECTOR,
 	       "Expected #GP on RDSMR(%s), got vector %d", msr->name, vector);
-- 
2.36.1.255.ge46751e96f-goog


* [kvm-unit-tests PATCH 06/10] nVMX: Check the results of VMXON/VMXOFF in feature control test
  2022-06-08 23:52 [kvm-unit-tests PATCH 00/10] x86: nVMX: Add VMXON #UD test Sean Christopherson
                   ` (4 preceding siblings ...)
  2022-06-08 23:52 ` [kvm-unit-tests PATCH 05/10] x86: Provide result of RDMSR from "safe" variant Sean Christopherson
@ 2022-06-08 23:52 ` Sean Christopherson
  2022-06-08 23:52 ` [kvm-unit-tests PATCH 07/10] nVMX: Check result of VMXON in INIT/SIPI tests Sean Christopherson
                   ` (3 subsequent siblings)
  9 siblings, 0 replies; 12+ messages in thread
From: Sean Christopherson @ 2022-06-08 23:52 UTC
  To: Paolo Bonzini; +Cc: kvm, Sean Christopherson

Assert that VMXON and VMXOFF succeed in the feature control test.
Despite a lack of "safe" or underscores, vmx_on() and vmx_off() do not
guarantee success.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 x86/vmx.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/x86/vmx.c b/x86/vmx.c
index 65305238..8475bf3b 100644
--- a/x86/vmx.c
+++ b/x86/vmx.c
@@ -1401,8 +1401,8 @@ static void init_bsp_vmx(void)
 
 static void do_vmxon_off(void *data)
 {
-	vmx_on();
-	vmx_off();
+	TEST_ASSERT(!vmx_on());
+	TEST_ASSERT(!vmx_off());
 }
 
 static void do_write_feature_control(void *data)
-- 
2.36.1.255.ge46751e96f-goog


* [kvm-unit-tests PATCH 07/10] nVMX: Check result of VMXON in INIT/SIPI tests
  2022-06-08 23:52 [kvm-unit-tests PATCH 00/10] x86: nVMX: Add VMXON #UD test Sean Christopherson
                   ` (5 preceding siblings ...)
  2022-06-08 23:52 ` [kvm-unit-tests PATCH 06/10] nVMX: Check the results of VMXON/VMXOFF in feature control test Sean Christopherson
@ 2022-06-08 23:52 ` Sean Christopherson
  2022-06-08 23:52 ` [kvm-unit-tests PATCH 08/10] nVMX: Wrap VMXON in ASM_TRY(), a.k.a. in exception fixup Sean Christopherson
                   ` (2 subsequent siblings)
  9 siblings, 0 replies; 12+ messages in thread
From: Sean Christopherson @ 2022-06-08 23:52 UTC
  To: Paolo Bonzini; +Cc: kvm, Sean Christopherson

Assert that VMXON succeeds in the INIT/SIPI tests; _vmx_on() doesn't
check the result, i.e. doesn't guarantee success.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 x86/vmx_tests.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/x86/vmx_tests.c b/x86/vmx_tests.c
index 1b277cfb..4c963b96 100644
--- a/x86/vmx_tests.c
+++ b/x86/vmx_tests.c
@@ -9695,7 +9695,7 @@ static void init_signal_test_thread(void *data)
 	u64 *ap_vmxon_region = alloc_page();
 	enable_vmx();
 	init_vmx(ap_vmxon_region);
-	_vmx_on(ap_vmxon_region);
+	TEST_ASSERT(!_vmx_on(ap_vmxon_region));
 
 	/* Signal CPU have entered VMX operation */
 	vmx_set_test_stage(1);
@@ -9743,7 +9743,7 @@ static void init_signal_test_thread(void *data)
 	while (vmx_get_test_stage() != 8)
 		;
 	/* Enter VMX operation (i.e. exec VMXON) */
-	_vmx_on(ap_vmxon_region);
+	TEST_ASSERT(!_vmx_on(ap_vmxon_region));
 	/* Signal to BSP we are in VMX operation */
 	vmx_set_test_stage(9);
 
@@ -9920,7 +9920,7 @@ static void sipi_test_ap_thread(void *data)
 	ap_vmxon_region = alloc_page();
 	enable_vmx();
 	init_vmx(ap_vmxon_region);
-	_vmx_on(ap_vmxon_region);
+	TEST_ASSERT(!_vmx_on(ap_vmxon_region));
 	init_vmcs(&ap_vmcs);
 	make_vmcs_current(ap_vmcs);
 
-- 
2.36.1.255.ge46751e96f-goog


* [kvm-unit-tests PATCH 08/10] nVMX: Wrap VMXON in ASM_TRY(), a.k.a. in exception fixup
  2022-06-08 23:52 [kvm-unit-tests PATCH 00/10] x86: nVMX: Add VMXON #UD test Sean Christopherson
                   ` (6 preceding siblings ...)
  2022-06-08 23:52 ` [kvm-unit-tests PATCH 07/10] nVMX: Check result of VMXON in INIT/SIPI tests Sean Christopherson
@ 2022-06-08 23:52 ` Sean Christopherson
  2022-06-08 23:52 ` [kvm-unit-tests PATCH 09/10] nVMX: Simplify test_vmxon() by returning directly on failure Sean Christopherson
  2022-06-08 23:52 ` [kvm-unit-tests PATCH 10/10] nVMX: Add subtest to verify VMXON succeeds/#UDs on good/bad CR0/CR4 Sean Christopherson
  9 siblings, 0 replies; 12+ messages in thread
From: Sean Christopherson @ 2022-06-08 23:52 UTC
  To: Paolo Bonzini; +Cc: kvm, Sean Christopherson

Gracefully handle and return faults on VMXON instead of letting VMXON
explode.  The primary motivation is to be able to reuse the helper in
tests that verify VMXON faults when it's supposed to; printing a
descriptive error message on an unexpected fault is a nice bonus.
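
The three possible outcomes decode as follows (caller-side sketch):

	int ret = __vmxon_safe(vmxon_region);

	if (ret < 0)
		printf("VM-Fail: VMXON executed but set RFLAGS.CF/ZF\n");
	else if (!ret)
		printf("Success: the CPU is now in VMX operation\n");
	else
		printf("Fault: vector %d, e.g. UD_VECTOR\n", ret);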

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 x86/vmx.c       | 20 ++++++++++----------
 x86/vmx.h       | 29 +++++++++++++++++++++++------
 x86/vmx_tests.c |  6 +++---
 3 files changed, 36 insertions(+), 19 deletions(-)

diff --git a/x86/vmx.c b/x86/vmx.c
index 8475bf3b..09e54332 100644
--- a/x86/vmx.c
+++ b/x86/vmx.c
@@ -1461,34 +1461,34 @@ static int test_vmxon(void)
 
 	/* Unaligned page access */
 	vmxon_region = (u64 *)((intptr_t)bsp_vmxon_region + 1);
-	ret1 = _vmx_on(vmxon_region);
-	report(ret1, "test vmxon with unaligned vmxon region");
-	if (!ret1) {
+	ret1 = __vmxon_safe(vmxon_region);
+	report(ret1 < 0, "test vmxon with unaligned vmxon region");
+	if (ret1 >= 0) {
 		ret = 1;
 		goto out;
 	}
 
 	/* gpa bits beyond physical address width are set*/
 	vmxon_region = (u64 *)((intptr_t)bsp_vmxon_region | ((u64)1 << (width+1)));
-	ret1 = _vmx_on(vmxon_region);
-	report(ret1, "test vmxon with bits set beyond physical address width");
-	if (!ret1) {
+	ret1 = __vmxon_safe(vmxon_region);
+	report(ret1 < 0, "test vmxon with bits set beyond physical address width");
+	if (ret1 >= 0) {
 		ret = 1;
 		goto out;
 	}
 
 	/* invalid revision identifier */
 	*bsp_vmxon_region = 0xba9da9;
-	ret1 = vmx_on();
-	report(ret1, "test vmxon with invalid revision identifier");
-	if (!ret1) {
+	ret1 = vmxon_safe();
+	report(ret1 < 0, "test vmxon with invalid revision identifier");
+	if (ret1 >= 0) {
 		ret = 1;
 		goto out;
 	}
 
 	/* and finally a valid region */
 	*bsp_vmxon_region = basic.revision;
-	ret = vmx_on();
+	ret = vmxon_safe();
 	report(!ret, "test vmxon with valid vmxon region");
 
 out:
diff --git a/x86/vmx.h b/x86/vmx.h
index 7cd02410..604c78f6 100644
--- a/x86/vmx.h
+++ b/x86/vmx.h
@@ -870,18 +870,35 @@ void vmx_set_test_stage(u32 s);
 u32 vmx_get_test_stage(void);
 void vmx_inc_test_stage(void);
 
-static int _vmx_on(u64 *vmxon_region)
+/* -1 on VM-Fail, 0 on success, >0 (the exception vector) on fault */
+static int __vmxon_safe(u64 *vmxon_region)
 {
-	bool ret;
+	bool vmfail;
 	u64 rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF;
-	asm volatile ("push %1; popf; vmxon %2; setbe %0\n\t"
-		      : "=q" (ret) : "q" (rflags), "m" (vmxon_region) : "cc");
-	return ret;
+
+	asm volatile ("push %1\n\t"
+		      "popf\n\t"
+		      ASM_TRY("1f") "vmxon %2\n\t"
+		      "setbe %0\n\t"
+		      "jmp 2f\n\t"
+		      "1: movb $0, %0\n\t"
+		      "2:\n\t"
+		      : "=q" (vmfail) : "q" (rflags), "m" (vmxon_region) : "cc");
+
+	if (vmfail)
+		return -1;
+
+	return exception_vector();
+}
+
+static int vmxon_safe(void)
+{
+	return __vmxon_safe(bsp_vmxon_region);
 }
 
 static int vmx_on(void)
 {
-	return _vmx_on(bsp_vmxon_region);
+	return vmxon_safe();
 }
 
 static int vmx_off(void)
diff --git a/x86/vmx_tests.c b/x86/vmx_tests.c
index 4c963b96..60762285 100644
--- a/x86/vmx_tests.c
+++ b/x86/vmx_tests.c
@@ -9695,7 +9695,7 @@ static void init_signal_test_thread(void *data)
 	u64 *ap_vmxon_region = alloc_page();
 	enable_vmx();
 	init_vmx(ap_vmxon_region);
-	TEST_ASSERT(!_vmx_on(ap_vmxon_region));
+	TEST_ASSERT(!__vmxon_safe(ap_vmxon_region));
 
 	/* Signal CPU have entered VMX operation */
 	vmx_set_test_stage(1);
@@ -9743,7 +9743,7 @@ static void init_signal_test_thread(void *data)
 	while (vmx_get_test_stage() != 8)
 		;
 	/* Enter VMX operation (i.e. exec VMXON) */
-	TEST_ASSERT(!_vmx_on(ap_vmxon_region));
+	TEST_ASSERT(!__vmxon_safe(ap_vmxon_region));
 	/* Signal to BSP we are in VMX operation */
 	vmx_set_test_stage(9);
 
@@ -9920,7 +9920,7 @@ static void sipi_test_ap_thread(void *data)
 	ap_vmxon_region = alloc_page();
 	enable_vmx();
 	init_vmx(ap_vmxon_region);
-	TEST_ASSERT(!_vmx_on(ap_vmxon_region));
+	TEST_ASSERT(!__vmxon_safe(ap_vmxon_region));
 	init_vmcs(&ap_vmcs);
 	make_vmcs_current(ap_vmcs);
 
-- 
2.36.1.255.ge46751e96f-goog


* [kvm-unit-tests PATCH 09/10] nVMX: Simplify test_vmxon() by returning directly on failure
  2022-06-08 23:52 [kvm-unit-tests PATCH 00/10] x86: nVMX: Add VMXON #UD test Sean Christopherson
                   ` (7 preceding siblings ...)
  2022-06-08 23:52 ` [kvm-unit-tests PATCH 08/10] nVMX: Wrap VMXON in ASM_TRY(), a.k.a. in exception fixup Sean Christopherson
@ 2022-06-08 23:52 ` Sean Christopherson
  2022-06-08 23:52 ` [kvm-unit-tests PATCH 10/10] nVMX: Add subtest to verify VMXON succeeds/#UDs on good/bad CR0/CR4 Sean Christopherson
  9 siblings, 0 replies; 12+ messages in thread
From: Sean Christopherson @ 2022-06-08 23:52 UTC
  To: Paolo Bonzini; +Cc: kvm, Sean Christopherson

Return '1' directly if a VMXON subtest fails; using a second variable
and a goto is silly.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 x86/vmx.c | 34 +++++++++++++---------------------
 1 file changed, 13 insertions(+), 21 deletions(-)

diff --git a/x86/vmx.c b/x86/vmx.c
index 09e54332..4562e91f 100644
--- a/x86/vmx.c
+++ b/x86/vmx.c
@@ -1455,43 +1455,35 @@ static int test_vmx_feature_control(void)
 
 static int test_vmxon(void)
 {
-	int ret, ret1;
 	u64 *vmxon_region;
 	int width = cpuid_maxphyaddr();
+	int ret;
 
 	/* Unaligned page access */
 	vmxon_region = (u64 *)((intptr_t)bsp_vmxon_region + 1);
-	ret1 = __vmxon_safe(vmxon_region);
-	report(ret1 < 0, "test vmxon with unaligned vmxon region");
-	if (ret1 >= 0) {
-		ret = 1;
-		goto out;
-	}
+	ret = __vmxon_safe(vmxon_region);
+	report(ret < 0, "test vmxon with unaligned vmxon region");
+	if (ret >= 0)
+		return 1;
 
 	/* gpa bits beyond physical address width are set*/
 	vmxon_region = (u64 *)((intptr_t)bsp_vmxon_region | ((u64)1 << (width+1)));
-	ret1 = __vmxon_safe(vmxon_region);
-	report(ret1 < 0, "test vmxon with bits set beyond physical address width");
-	if (ret1 >= 0) {
-		ret = 1;
-		goto out;
-	}
+	ret = __vmxon_safe(vmxon_region);
+	report(ret < 0, "test vmxon with bits set beyond physical address width");
+	if (ret >= 0)
+		return 1;
 
 	/* invalid revision identifier */
 	*bsp_vmxon_region = 0xba9da9;
-	ret1 = vmxon_safe();
-	report(ret1 < 0, "test vmxon with invalid revision identifier");
-	if (ret1 >= 0) {
-		ret = 1;
-		goto out;
-	}
+	ret = vmxon_safe();
+	report(ret < 0, "test vmxon with invalid revision identifier");
+	if (ret >= 0)
+		return 1;
 
 	/* and finally a valid region */
 	*bsp_vmxon_region = basic.revision;
 	ret = vmxon_safe();
 	report(!ret, "test vmxon with valid vmxon region");
-
-out:
 	return ret;
 }
 
-- 
2.36.1.255.ge46751e96f-goog


* [kvm-unit-tests PATCH 10/10] nVMX: Add subtest to verify VMXON succeeds/#UDs on good/bad CR0/CR4
  2022-06-08 23:52 [kvm-unit-tests PATCH 00/10] x86: nVMX: Add VMXON #UD test Sean Christopherson
                   ` (8 preceding siblings ...)
  2022-06-08 23:52 ` [kvm-unit-tests PATCH 09/10] nVMX: Simplify test_vmxon() by returning directly on failure Sean Christopherson
@ 2022-06-08 23:52 ` Sean Christopherson
  9 siblings, 0 replies; 12+ messages in thread
From: Sean Christopherson @ 2022-06-08 23:52 UTC
  To: Paolo Bonzini; +Cc: kvm, Sean Christopherson

Verify that VMXON #UDs on CR0 and CR4 bits that are either fixed-0 or
fixed-1, and toggle flexible CR0/CR4 bits for the final, successful VMXON
to gain some amount of coverage for VMXON with variable CR0/CR4 values.
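
For reference, the bit classification mirrors the FIXED0/FIXED1 MSR
semantics (sketch of the derivation used below; CR4 shown, CR0 is
analogous):

	required1   = rdmsr(MSR_IA32_VMX_CR4_FIXED0);	/* must be 1 */
	disallowed1 = ~rdmsr(MSR_IA32_VMX_CR4_FIXED1);	/* must be 0 */
	flexible    = ~required1 & ~disallowed1;	/* free to toggle */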

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 x86/vmx.c | 97 +++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 95 insertions(+), 2 deletions(-)

diff --git a/x86/vmx.c b/x86/vmx.c
index 4562e91f..fd845d7e 100644
--- a/x86/vmx.c
+++ b/x86/vmx.c
@@ -1453,12 +1453,101 @@ static int test_vmx_feature_control(void)
 	return !vmx_enabled;
 }
 
+
+static void write_cr(int cr_number, unsigned long val)
+{
+	if (!cr_number)
+		write_cr0(val);
+	else
+		write_cr4(val);
+}
+
+static int write_cr_safe(int cr_number, unsigned long val)
+{
+	if (!cr_number)
+		return write_cr0_safe(val);
+	else
+		return write_cr4_safe(val);
+}
+
+static int test_vmxon_bad_cr(int cr_number, unsigned long orig_cr,
+			     unsigned long *flexible_bits)
+{
+	unsigned long required1, disallowed1, val, bit;
+	int ret, i;
+
+	if (!cr_number) {
+		required1 =  rdmsr(MSR_IA32_VMX_CR0_FIXED0);
+		disallowed1 = ~rdmsr(MSR_IA32_VMX_CR0_FIXED1);
+	} else {
+		required1 =  rdmsr(MSR_IA32_VMX_CR4_FIXED0);
+		disallowed1 = ~rdmsr(MSR_IA32_VMX_CR4_FIXED1);
+	}
+
+	*flexible_bits = 0;
+
+	for (i = 0; i < BITS_PER_LONG; i++) {
+		bit = BIT(i);
+
+		/*
+		 * Don't touch bits that will affect the current paging mode,
+		 * toggling them will send the test into the weeds before it
+		 * gets to VMXON.  nVMX tests are 64-bit only, so CR4.PAE is
+		 * guaranteed to be '1', i.e. PSE is fair game.  PKU/PKS are
+		 * also fair game as KVM doesn't configure any keys.  SMAP and
+		 * SMEP are off limits because the page tables have the USER
+		 * bit set at all levels.
+		 */
+		if ((cr_number == 0 && (bit == X86_CR0_PE || bit == X86_CR0_PG)) ||
+		    (cr_number == 4 && (bit == X86_CR4_PAE || bit == X86_CR4_SMAP || bit == X86_CR4_SMEP)))
+			continue;
+
+		if (!(bit & required1) && !(bit & disallowed1)) {
+			if (!write_cr_safe(cr_number, orig_cr ^ bit)) {
+				*flexible_bits |= bit;
+				write_cr(cr_number, orig_cr);
+			}
+			continue;
+		}
+
+		assert(!(required1 & disallowed1));
+
+		if (required1 & bit)
+			val = orig_cr & ~bit;
+		else
+			val = orig_cr | bit;
+
+		if (write_cr_safe(cr_number, val))
+			continue;
+
+		ret = vmx_on();
+		report(ret == UD_VECTOR,
+		       "VMXON with CR%d bit %d %s should #UD, got '%d'",
+		       cr_number, i, (required1 & bit) ? "cleared" : "set", ret);
+
+		write_cr(cr_number, orig_cr);
+
+		if (ret <= 0)
+			return 1;
+	}
+	return 0;
+}
+
 static int test_vmxon(void)
 {
+	unsigned long orig_cr0, flexible_cr0, orig_cr4, flexible_cr4;
+	int width = cpuid_maxphyaddr();
 	u64 *vmxon_region;
-	int width = cpuid_maxphyaddr();
 	int ret;
 
+	orig_cr0 = read_cr0();
+	if (test_vmxon_bad_cr(0, orig_cr0, &flexible_cr0))
+		return 1;
+
+	orig_cr4 = read_cr4();
+	if (test_vmxon_bad_cr(4, orig_cr4, &flexible_cr4))
+		return 1;
+
 	/* Unaligned page access */
 	vmxon_region = (u64 *)((intptr_t)bsp_vmxon_region + 1);
 	ret = __vmxon_safe(vmxon_region);
@@ -1480,10 +1569,14 @@ static int test_vmxon(void)
 	if (ret >= 0)
 		return 1;
 
-	/* and finally a valid region */
+	/* and finally a valid region, with valid-but-tweaked cr0/cr4 */
+	write_cr0(orig_cr0 ^ flexible_cr0);
+	write_cr4(orig_cr4 ^ flexible_cr4);
 	*bsp_vmxon_region = basic.revision;
 	ret = vmxon_safe();
 	report(!ret, "test vmxon with valid vmxon region");
+	write_cr0(orig_cr0);
+	write_cr4(orig_cr4);
 	return ret;
 }
 
-- 
2.36.1.255.ge46751e96f-goog


* Re: [kvm-unit-tests PATCH 03/10] x86: Use "safe" terminology instead of "checking"
  2022-06-08 23:52 ` [kvm-unit-tests PATCH 03/10] x86: Use "safe" terminology instead of "checking" Sean Christopherson
@ 2023-08-22  8:09   ` Like Xu
  0 siblings, 0 replies; 12+ messages in thread
From: Like Xu @ 2023-08-22  8:09 UTC
  To: Sean Christopherson; +Cc: kvm, Paolo Bonzini

On 9/6/2022 7:52 am, Sean Christopherson wrote:
> @@ -138,13 +138,13 @@ static void test_no_xsave(void)
>       printf("Illegal instruction testing:\n");
>   
>       cr4 = read_cr4();
> -    report(write_cr4_checking(cr4 | X86_CR4_OSXSAVE) == GP_VECTOR,
> +    report(write_cr4_safe(cr4 | X86_CR4_OSXSAVE) == GP_VECTOR,
>              "Set OSXSAVE in CR4 - expect #GP");
>   
>       report(xgetbv_checking(XCR_XFEATURE_ENABLED_MASK, &xcr0) == UD_VECTOR,
>              "Execute xgetbv - expect #UD");

Oops, xgetbv_checking() has not been renamed to xgetbv_safe().

>   
> -    report(xsetbv_checking(XCR_XFEATURE_ENABLED_MASK, 0x3) == UD_VECTOR,
> +    report(xsetbv_safe(XCR_XFEATURE_ENABLED_MASK, 0x3) == UD_VECTOR,
>              "Execute xsetbv - expect #UD");
>   }

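A completing rename would look something like (hypothetical follow-up
sketch, mirroring the xsetbv_safe() rename quoted above):

	-static int xgetbv_checking(u32 index, u64 *result)
	+static int xgetbv_safe(u32 index, u64 *result)

plus the two call sites in test_xsave() and test_no_xsave().
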
Thread overview: 12+ messages
2022-06-08 23:52 [kvm-unit-tests PATCH 00/10] x86: nVMX: Add VMXON #UD test Sean Christopherson
2022-06-08 23:52 ` [kvm-unit-tests PATCH 01/10] x86: Use BIT() to define architectural bits Sean Christopherson
2022-06-08 23:52 ` [kvm-unit-tests PATCH 02/10] x86: Replace spaces with tabs in processor.h Sean Christopherson
2022-06-08 23:52 ` [kvm-unit-tests PATCH 03/10] x86: Use "safe" terminology instead of "checking" Sean Christopherson
2023-08-22  8:09   ` Like Xu
2022-06-08 23:52 ` [kvm-unit-tests PATCH 04/10] x86: Use "safe" helpers to implement unsafe CRs accessors Sean Christopherson
2022-06-08 23:52 ` [kvm-unit-tests PATCH 05/10] x86: Provide result of RDMSR from "safe" variant Sean Christopherson
2022-06-08 23:52 ` [kvm-unit-tests PATCH 06/10] nVMX: Check the results of VMXON/VMXOFF in feature control test Sean Christopherson
2022-06-08 23:52 ` [kvm-unit-tests PATCH 07/10] nVMX: Check result of VMXON in INIT/SIPI tests Sean Christopherson
2022-06-08 23:52 ` [kvm-unit-tests PATCH 08/10] nVMX: Wrap VMXON in ASM_TRY(), a.k.a. in exception fixup Sean Christopherson
2022-06-08 23:52 ` [kvm-unit-tests PATCH 09/10] nVMX: Simplify test_vmxon() by returning directly on failure Sean Christopherson
2022-06-08 23:52 ` [kvm-unit-tests PATCH 10/10] nVMX: Add subtest to verify VMXON succeeds/#UDs on good/bad CR0/CR4 Sean Christopherson
