* [PATCH 0/4] AMD's LBR test for nesting and a few more fixes
From: Maxim Levitsky @ 2021-09-14 16:30 UTC
  To: kvm; +Cc: Maxim Levitsky

This patch series adds a test for nested LBR usage
on AMD, and a few more fixes.

Best regards,
	Maxim Levitsky

Maxim Levitsky (4):
  svm: add SVM_BARE_VMRUN
  svm: intercept shutdown in all svm tests by default
  few fixes for pmu_lbr test
  svm: add tests for LBR virtualization

 lib/x86/processor.h |   1 +
 x86/pmu_lbr.c       |   8 ++
 x86/svm.c           |  41 ++------
 x86/svm.h           |  52 +++++++++-
 x86/svm_tests.c     | 239 ++++++++++++++++++++++++++++++++++++++++++++
 x86/unittests.cfg   |   1 +
 6 files changed, 308 insertions(+), 34 deletions(-)

-- 
2.26.3




* [PATCH 1/4] svm: add SVM_BARE_VMRUN
From: Maxim Levitsky @ 2021-09-14 16:30 UTC
  To: kvm; +Cc: Maxim Levitsky

This will be useful in the nested LBR tests to ensure that no extra
branches are taken on guest entry.
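
For illustration, the nested LBR tests added later in this series use it
roughly like this (a condensed sketch of one of those tests):

	wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR);	/* enable LBR in the host */
	SVM_BARE_VMRUN;		/* enter the guest with no call/ret around VMRUN */
	dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);

	if (vmcb->control.exit_code != SVM_EXIT_VMMCALL)
		report(false, "VMEXIT not due to vmmcall");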

Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
---
 x86/svm.c | 32 --------------------------------
 x86/svm.h | 47 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 47 insertions(+), 32 deletions(-)

diff --git a/x86/svm.c b/x86/svm.c
index beb40b7..f109caa 100644
--- a/x86/svm.c
+++ b/x86/svm.c
@@ -194,41 +194,9 @@ struct regs get_regs(void)
 
 // rax handled specially below
 
-#define SAVE_GPR_C                              \
-        "xchg %%rbx, regs+0x8\n\t"              \
-        "xchg %%rcx, regs+0x10\n\t"             \
-        "xchg %%rdx, regs+0x18\n\t"             \
-        "xchg %%rbp, regs+0x28\n\t"             \
-        "xchg %%rsi, regs+0x30\n\t"             \
-        "xchg %%rdi, regs+0x38\n\t"             \
-        "xchg %%r8, regs+0x40\n\t"              \
-        "xchg %%r9, regs+0x48\n\t"              \
-        "xchg %%r10, regs+0x50\n\t"             \
-        "xchg %%r11, regs+0x58\n\t"             \
-        "xchg %%r12, regs+0x60\n\t"             \
-        "xchg %%r13, regs+0x68\n\t"             \
-        "xchg %%r14, regs+0x70\n\t"             \
-        "xchg %%r15, regs+0x78\n\t"
-
-#define LOAD_GPR_C      SAVE_GPR_C
 
 struct svm_test *v2_test;
 
-#define ASM_PRE_VMRUN_CMD                       \
-                "vmload %%rax\n\t"              \
-                "mov regs+0x80, %%r15\n\t"      \
-                "mov %%r15, 0x170(%%rax)\n\t"   \
-                "mov regs, %%r15\n\t"           \
-                "mov %%r15, 0x1f8(%%rax)\n\t"   \
-                LOAD_GPR_C                      \
-
-#define ASM_POST_VMRUN_CMD                      \
-                SAVE_GPR_C                      \
-                "mov 0x170(%%rax), %%r15\n\t"   \
-                "mov %%r15, regs+0x80\n\t"      \
-                "mov 0x1f8(%%rax), %%r15\n\t"   \
-                "mov %%r15, regs\n\t"           \
-                "vmsave %%rax\n\t"              \
 
 u64 guest_stack[10000];
 
diff --git a/x86/svm.h b/x86/svm.h
index ae35d08..74ba818 100644
--- a/x86/svm.h
+++ b/x86/svm.h
@@ -416,10 +416,57 @@ void vmcb_ident(struct vmcb *vmcb);
 struct regs get_regs(void);
 void vmmcall(void);
 int __svm_vmrun(u64 rip);
+void __svm_bare_vmrun(void);
 int svm_vmrun(void);
 void test_set_guest(test_guest_func func);
 
 extern struct vmcb *vmcb;
 extern struct svm_test svm_tests[];
 
+
+#define SAVE_GPR_C                              \
+        "xchg %%rbx, regs+0x8\n\t"              \
+        "xchg %%rcx, regs+0x10\n\t"             \
+        "xchg %%rdx, regs+0x18\n\t"             \
+        "xchg %%rbp, regs+0x28\n\t"             \
+        "xchg %%rsi, regs+0x30\n\t"             \
+        "xchg %%rdi, regs+0x38\n\t"             \
+        "xchg %%r8, regs+0x40\n\t"              \
+        "xchg %%r9, regs+0x48\n\t"              \
+        "xchg %%r10, regs+0x50\n\t"             \
+        "xchg %%r11, regs+0x58\n\t"             \
+        "xchg %%r12, regs+0x60\n\t"             \
+        "xchg %%r13, regs+0x68\n\t"             \
+        "xchg %%r14, regs+0x70\n\t"             \
+        "xchg %%r15, regs+0x78\n\t"
+
+#define LOAD_GPR_C      SAVE_GPR_C
+
+#define ASM_PRE_VMRUN_CMD                       \
+                "vmload %%rax\n\t"              \
+                "mov regs+0x80, %%r15\n\t"      \
+                "mov %%r15, 0x170(%%rax)\n\t"   \
+                "mov regs, %%r15\n\t"           \
+                "mov %%r15, 0x1f8(%%rax)\n\t"   \
+                LOAD_GPR_C                      \
+
+#define ASM_POST_VMRUN_CMD                      \
+                SAVE_GPR_C                      \
+                "mov 0x170(%%rax), %%r15\n\t"   \
+                "mov %%r15, regs+0x80\n\t"      \
+                "mov 0x1f8(%%rax), %%r15\n\t"   \
+                "mov %%r15, regs\n\t"           \
+                "vmsave %%rax\n\t"              \
+
+
+
+#define SVM_BARE_VMRUN \
+	asm volatile ( \
+		ASM_PRE_VMRUN_CMD \
+		"vmrun %%rax\n\t" \
+		ASM_POST_VMRUN_CMD \
+		: \
+		: "a" (virt_to_phys(vmcb)) \
+		: "memory", "r15") \
+
 #endif
-- 
2.26.3



* [PATCH 2/4] svm: intercept shutdown in all svm tests by default
From: Maxim Levitsky @ 2021-09-14 16:30 UTC
  To: kvm; +Cc: Maxim Levitsky

If L1 doesn't intercept shutdown, then a shutdown in the nested guest
hits L1 itself, which prevents it from reporting the error that caused it.
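
With the shutdown intercept in place, a triple fault in the nested guest
now produces a regular VMEXIT that the test can report, along the lines
of (a sketch, not part of this patch):

	if (vmcb->control.exit_code == SVM_EXIT_SHUTDOWN)
		report(false, "unexpected shutdown (triple fault) in the guest");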

Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
---
 x86/svm.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/x86/svm.c b/x86/svm.c
index f109caa..2210d68 100644
--- a/x86/svm.c
+++ b/x86/svm.c
@@ -174,7 +174,9 @@ void vmcb_ident(struct vmcb *vmcb)
 	save->cr2 = read_cr2();
 	save->g_pat = rdmsr(MSR_IA32_CR_PAT);
 	save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
-	ctrl->intercept = (1ULL << INTERCEPT_VMRUN) | (1ULL << INTERCEPT_VMMCALL);
+	ctrl->intercept = (1ULL << INTERCEPT_VMRUN) |
+			  (1ULL << INTERCEPT_VMMCALL) |
+			  (1ULL << INTERCEPT_SHUTDOWN);
 	ctrl->iopm_base_pa = virt_to_phys(io_bitmap);
 	ctrl->msrpm_base_pa = virt_to_phys(msr_bitmap);
 
-- 
2.26.3



* [PATCH 3/4] few fixes for pmu_lbr test
From: Maxim Levitsky @ 2021-09-14 16:30 UTC
  To: kvm; +Cc: Maxim Levitsky

* Don't run this test on AMD, since AMD's LBR is not the same as Intel's
  and needs a different test.

* Don't run this test on 32 bit, as it is not built for 32 bit anyway.

Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
---
 x86/pmu_lbr.c     | 8 ++++++++
 x86/unittests.cfg | 1 +
 2 files changed, 9 insertions(+)

diff --git a/x86/pmu_lbr.c b/x86/pmu_lbr.c
index 3bd9e9f..5d6c424 100644
--- a/x86/pmu_lbr.c
+++ b/x86/pmu_lbr.c
@@ -68,6 +68,12 @@ int main(int ac, char **av)
 	int max, i;
 
 	setup_vm();
+
+	if (!is_intel()) {
+		report_skip("PMU_LBR test is for Intel CPUs only");
+		return 0;
+	}
+
 	perf_cap = rdmsr(MSR_IA32_PERF_CAPABILITIES);
 	eax.full = id.a;
 
@@ -83,6 +89,8 @@ int main(int ac, char **av)
 	printf("PMU version:		 %d\n", eax.split.version_id);
 	printf("LBR version:		 %ld\n", perf_cap & PMU_CAP_LBR_FMT);
 
+
+
 	/* Look for LBR from and to MSRs */
 	lbr_from = MSR_LBR_CORE_FROM;
 	lbr_to = MSR_LBR_CORE_TO;
diff --git a/x86/unittests.cfg b/x86/unittests.cfg
index d5efab0..e3c8a98 100644
--- a/x86/unittests.cfg
+++ b/x86/unittests.cfg
@@ -180,6 +180,7 @@ extra_params = -cpu max
 check = /proc/sys/kernel/nmi_watchdog=0
 
 [pmu_lbr]
+arch = x86_64
 file = pmu_lbr.flat
 extra_params = -cpu host,migratable=no
 check = /sys/module/kvm/parameters/ignore_msrs=N
-- 
2.26.3



* [PATCH 4/4] svm: add tests for LBR virtualization
From: Maxim Levitsky @ 2021-09-14 16:30 UTC
  To: kvm; +Cc: Maxim Levitsky

Add tests for the LBR virtualization feature of AMD SVM: a basic host LBR
test, tests that run a nested guest without LBRV enabled and verify that
the guest's LBR state is visible to the host after the VMRUN, and tests
that run with LBRV enabled and verify that the host's LBR state survives
the guest run.

Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
---
 lib/x86/processor.h |   1 +
 x86/svm.c           |   5 +
 x86/svm.h           |   5 +-
 x86/svm_tests.c     | 239 ++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 249 insertions(+), 1 deletion(-)

diff --git a/lib/x86/processor.h b/lib/x86/processor.h
index f380321..6454951 100644
--- a/lib/x86/processor.h
+++ b/lib/x86/processor.h
@@ -187,6 +187,7 @@ static inline bool is_intel(void)
 #define	X86_FEATURE_RDPRU		(CPUID(0x80000008, 0, EBX, 4))
 #define	X86_FEATURE_AMD_IBPB		(CPUID(0x80000008, 0, EBX, 12))
 #define	X86_FEATURE_NPT			(CPUID(0x8000000A, 0, EDX, 0))
+#define	X86_FEATURE_LBRV		(CPUID(0x8000000A, 0, EDX, 1))
 #define	X86_FEATURE_NRIPS		(CPUID(0x8000000A, 0, EDX, 3))
 #define	X86_FEATURE_VGIF		(CPUID(0x8000000A, 0, EDX, 16))
 
diff --git a/x86/svm.c b/x86/svm.c
index 2210d68..13f857e 100644
--- a/x86/svm.c
+++ b/x86/svm.c
@@ -70,6 +70,11 @@ bool vgif_supported(void)
 	return this_cpu_has(X86_FEATURE_VGIF);
 }
 
+bool lbrv_supported(void)
+{
+    return this_cpu_has(X86_FEATURE_LBRV);
+}
+
 void default_prepare(struct svm_test *test)
 {
 	vmcb_ident(vmcb);
diff --git a/x86/svm.h b/x86/svm.h
index 74ba818..7f1b031 100644
--- a/x86/svm.h
+++ b/x86/svm.h
@@ -98,7 +98,7 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 	u32 event_inj;
 	u32 event_inj_err;
 	u64 nested_cr3;
-	u64 lbr_ctl;
+	u64 virt_ext;
 	u32 clean;
 	u32 reserved_5;
 	u64 next_rip;
@@ -360,6 +360,8 @@ struct __attribute__ ((__packed__)) vmcb {
 
 #define MSR_BITMAP_SIZE 8192
 
+#define LBR_CTL_ENABLE_MASK BIT_ULL(0)
+
 struct svm_test {
 	const char *name;
 	bool (*supported)(void);
@@ -405,6 +407,7 @@ u64 *npt_get_pml4e(void);
 bool smp_supported(void);
 bool default_supported(void);
 bool vgif_supported(void);
+bool lbrv_supported(void);
 void default_prepare(struct svm_test *test);
 void default_prepare_gif_clear(struct svm_test *test);
 bool default_finished(struct svm_test *test);
diff --git a/x86/svm_tests.c b/x86/svm_tests.c
index b998b24..774a7c5 100644
--- a/x86/svm_tests.c
+++ b/x86/svm_tests.c
@@ -2978,6 +2978,240 @@ static bool vgif_check(struct svm_test *test)
 }
 
 
+
+static bool check_lbr(u64 *from_expected, u64 *to_expected)
+{
+	u64 from = rdmsr(MSR_IA32_LASTBRANCHFROMIP);
+	u64 to = rdmsr(MSR_IA32_LASTBRANCHTOIP);
+
+	if ((u64)from_expected != from) {
+		report(false, "MSR_IA32_LASTBRANCHFROMIP, expected=0x%lx, actual=0x%lx",
+			(u64)from_expected, from);
+		return false;
+	}
+
+	if ((u64)to_expected != to) {
+		report(false, "MSR_IA32_LASTBRANCHTOIP, expected=0x%lx, actual=0x%lx",
+			(u64)to_expected, to);
+		return false;
+	}
+
+	return true;
+}
+
+static bool check_dbgctl(u64 dbgctl, u64 dbgctl_expected)
+{
+	if (dbgctl != dbgctl_expected) {
+		report(false, "Unexpected MSR_IA32_DEBUGCTLMSR value 0x%lx", dbgctl);
+		return false;
+	}
+	return true;
+}
+
+
+#define DO_BRANCH(branch_name) \
+	asm volatile ( \
+		# branch_name "_from:" \
+		"jmp " # branch_name  "_to\n" \
+		"nop\n" \
+		"nop\n" \
+		# branch_name  "_to:" \
+		"nop\n" \
+	)
+
+
+extern u64 guest_branch0_from, guest_branch0_to;
+extern u64 guest_branch2_from, guest_branch2_to;
+
+extern u64 host_branch0_from, host_branch0_to;
+extern u64 host_branch2_from, host_branch2_to;
+extern u64 host_branch3_from, host_branch3_to;
+extern u64 host_branch4_from, host_branch4_to;
+
+u64 dbgctl;
+
+static void svm_lbrv_test_guest1(void)
+{
+	/*
+	 * This guest expects the LBR to be already enabled when it starts,
+	 * it does a branch, and then disables the LBR and then checks.
+	 */
+
+	DO_BRANCH(guest_branch0);
+
+	dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
+	wrmsr(MSR_IA32_DEBUGCTLMSR, 0);
+
+	if (dbgctl != DEBUGCTLMSR_LBR)
+		asm volatile("ud2\n");
+	if (rdmsr(MSR_IA32_DEBUGCTLMSR) != 0)
+		asm volatile("ud2\n");
+	if (rdmsr(MSR_IA32_LASTBRANCHFROMIP) != (u64)&guest_branch0_from)
+		asm volatile("ud2\n");
+	if (rdmsr(MSR_IA32_LASTBRANCHTOIP) != (u64)&guest_branch0_to)
+		asm volatile("ud2\n");
+
+	asm volatile ("vmmcall\n");
+}
+
+static void svm_lbrv_test_guest2(void)
+{
+	/*
+	 * This guest expects the LBR to be disabled when it starts,
+	 * enables it, does a branch, disables it and then checks.
+	 */
+
+	DO_BRANCH(guest_branch1);
+	dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
+
+	if (dbgctl != 0)
+		asm volatile("ud2\n");
+
+	if (rdmsr(MSR_IA32_LASTBRANCHFROMIP) != (u64)&host_branch2_from)
+		asm volatile("ud2\n");
+	if (rdmsr(MSR_IA32_LASTBRANCHTOIP) != (u64)&host_branch2_to)
+		asm volatile("ud2\n");
+
+
+	wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR);
+	dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
+	DO_BRANCH(guest_branch2);
+	wrmsr(MSR_IA32_DEBUGCTLMSR, 0);
+
+	if (dbgctl != DEBUGCTLMSR_LBR)
+		asm volatile("ud2\n");
+	if (rdmsr(MSR_IA32_LASTBRANCHFROMIP) != (u64)&guest_branch2_from)
+		asm volatile("ud2\n");
+	if (rdmsr(MSR_IA32_LASTBRANCHTOIP) != (u64)&guest_branch2_to)
+		asm volatile("ud2\n");
+
+	asm volatile ("vmmcall\n");
+}
+
+static void svm_lbrv_test0(void)
+{
+	report(true, "Basic LBR test");
+	wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR);
+	DO_BRANCH(host_branch0);
+	dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
+	wrmsr(MSR_IA32_DEBUGCTLMSR, 0);
+
+	check_dbgctl(dbgctl, DEBUGCTLMSR_LBR);
+	dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
+	check_dbgctl(dbgctl, 0);
+
+	check_lbr(&host_branch0_from, &host_branch0_to);
+}
+
+static void svm_lbrv_test1(void)
+{
+	report(true, "Test that without LBRV enabled, guest LBR state does 'leak' to the host (1)");
+
+	vmcb->save.rip = (ulong)svm_lbrv_test_guest1;
+	vmcb->control.virt_ext = 0;
+
+	wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR);
+	DO_BRANCH(host_branch1);
+	SVM_BARE_VMRUN;
+	dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
+
+	if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
+		report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
+		vmcb->control.exit_code);
+		return;
+	}
+
+	check_dbgctl(dbgctl, 0);
+	check_lbr(&guest_branch0_from, &guest_branch0_to);
+}
+
+static void svm_lbrv_test2(void)
+{
+	report(true, "Test that without LBRV enabled, guest LBR state does 'leak' to the host (2)");
+
+	vmcb->save.rip = (ulong)svm_lbrv_test_guest2;
+	vmcb->control.virt_ext = 0;
+
+	wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR);
+	DO_BRANCH(host_branch2);
+	wrmsr(MSR_IA32_DEBUGCTLMSR, 0);
+	SVM_BARE_VMRUN;
+	dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
+	wrmsr(MSR_IA32_DEBUGCTLMSR, 0);
+
+	if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
+		report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
+		vmcb->control.exit_code);
+		return;
+	}
+
+	check_dbgctl(dbgctl, 0);
+	check_lbr(&guest_branch2_from, &guest_branch2_to);
+}
+
+static void svm_lbrv_nested_test1(void)
+{
+	if (!lbrv_supported()) {
+		report_skip("LBRV not supported in the guest");
+		return;
+	}
+
+	report(true, "Test that with LBRV enabled, guest LBR state doesn't leak (1)");
+	vmcb->save.rip = (ulong)svm_lbrv_test_guest1;
+	vmcb->control.virt_ext = LBR_CTL_ENABLE_MASK;
+	vmcb->save.dbgctl = DEBUGCTLMSR_LBR;
+
+	wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR);
+	DO_BRANCH(host_branch3);
+	SVM_BARE_VMRUN;
+	dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
+	wrmsr(MSR_IA32_DEBUGCTLMSR, 0);
+
+	if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
+		report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
+		vmcb->control.exit_code);
+		return;
+	}
+
+	if (vmcb->save.dbgctl != 0) {
+		report(false, "unexpected virtual guest MSR_IA32_DEBUGCTLMSR value 0x%lx", vmcb->save.dbgctl);
+		return;
+	}
+
+	check_dbgctl(dbgctl, DEBUGCTLMSR_LBR);
+	check_lbr(&host_branch3_from, &host_branch3_to);
+}
+static void svm_lbrv_nested_test2(void)
+{
+	if (!lbrv_supported()) {
+		report_skip("LBRV not supported in the guest");
+		return;
+	}
+
+	report(true, "Test that with LBRV enabled, guest LBR state doesn't leak (2)");
+	vmcb->save.rip = (ulong)svm_lbrv_test_guest2;
+	vmcb->control.virt_ext = LBR_CTL_ENABLE_MASK;
+
+	vmcb->save.dbgctl = 0;
+	vmcb->save.br_from = (u64)&host_branch2_from;
+	vmcb->save.br_to = (u64)&host_branch2_to;
+
+	wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR);
+	DO_BRANCH(host_branch4);
+	SVM_BARE_VMRUN;
+	dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
+	wrmsr(MSR_IA32_DEBUGCTLMSR, 0);
+
+	if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
+		report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
+		vmcb->control.exit_code);
+		return;
+	}
+
+	check_dbgctl(dbgctl, DEBUGCTLMSR_LBR);
+	check_lbr(&host_branch4_from, &host_branch4_to);
+}
+
 struct svm_test svm_tests[] = {
     { "null", default_supported, default_prepare,
       default_prepare_gif_clear, null_test,
@@ -3097,5 +3331,10 @@ struct svm_test svm_tests[] = {
     TEST(svm_vmrun_errata_test),
     TEST(svm_vmload_vmsave),
     TEST(svm_test_singlestep),
+    TEST(svm_lbrv_test0),
+    TEST(svm_lbrv_test1),
+    TEST(svm_lbrv_test2),
+    TEST(svm_lbrv_nested_test1),
+    TEST(svm_lbrv_nested_test2),
     { NULL, NULL, NULL, NULL, NULL, NULL, NULL }
 };
-- 
2.26.3

