All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH kvm-unit-tests] nSVM: Modified canonicalization tests
@ 2021-08-04 11:25 Lara Lazier
  2021-08-04 12:22 ` Paolo Bonzini
  0 siblings, 1 reply; 2+ messages in thread
From: Lara Lazier @ 2021-08-04 11:25 UTC (permalink / raw)
  To: kvm; +Cc: pbonzini, Lara Lazier

APM2 states that VMRUN and VMLOAD should canonicalize all base
addresses in the segment registers that have been loaded respectively.

Split up in test_canonicalization the TEST_CANONICAL for VMLOAD and
VMRUN. Added the respective test for KERNEL_GS.

In general the canonicalization should be from 48/57 to 64 based on LA57.

Signed-off-by: Lara Lazier <laramglazier@gmail.com>
---
 lib/x86/processor.h |  4 +++-
 x86/svm_tests.c     | 54 +++++++++++++++++++++++++++++++--------------
 2 files changed, 41 insertions(+), 17 deletions(-)

diff --git a/lib/x86/processor.h b/lib/x86/processor.h
index f4d1757..ae708ac 100644
--- a/lib/x86/processor.h
+++ b/lib/x86/processor.h
@@ -619,7 +619,9 @@ static inline void write_pkru(u32 pkru)
 
 static inline bool is_canonical(u64 addr)
 {
-	return (s64)(addr << 16) >> 16 == addr;
+	int shift_amt = (this_cpu_has(X86_FEATURE_LA57)) ? 7 /* 57 bits virtual */
+                        : 16 /* 48 bits virtual */;
+	return (s64)(addr << shift_amt) >> shift_amt == addr;
 }
 
 static inline void clear_bit(int bit, u8 *addr)
diff --git a/x86/svm_tests.c b/x86/svm_tests.c
index 7c7b19d..273b80b 100644
--- a/x86/svm_tests.c
+++ b/x86/svm_tests.c
@@ -2460,32 +2460,54 @@ static void test_msrpm_iopm_bitmap_addrs(void)
 	vmcb->control.intercept = saved_intercept;
 }
 
-#define TEST_CANONICAL(seg_base, msg)					\
-	saved_addr = seg_base;						\
+#define TEST_CANONICAL_VMRUN(seg_base, msg)					\
+	saved_addr = seg_base;					\
 	seg_base = (seg_base & ((1ul << addr_limit) - 1)) | noncanonical_mask; \
-	report(svm_vmrun() == SVM_EXIT_VMMCALL, "Test %s.base for canonical form: %lx", msg, seg_base);							\
+	return_value = svm_vmrun(); \
+	if (is_canonical(seg_base)) { \
+		report(return_value == SVM_EXIT_VMMCALL, \
+			"Test %s.base for canonical form: %lx", msg, seg_base); \
+	} else { \
+		report(false, \
+			"Test a %s.base not canonicalized: %lx", msg, seg_base); \
+	} \
+	seg_base = saved_addr;
+
+
+#define TEST_CANONICAL_VMLOAD(seg_base, msg)					\
+	saved_addr = seg_base;					\
+	seg_base = (seg_base & ((1ul << addr_limit) - 1)) | noncanonical_mask; \
+	asm volatile ("vmload %0" : : "a"(vmcb_phys) : "memory"); \
+	asm volatile ("vmsave %0" : : "a"(vmcb_phys) : "memory"); \
+	report(is_canonical(seg_base), \
+			"Test %s.base for canonical form: %lx", msg, seg_base); \
 	seg_base = saved_addr;
 
 /*
  * VMRUN canonicalizes (i.e., sign-extend to bit 63) all base addresses
  * in the segment registers that have been loaded.
  */
-static void test_vmrun_canonicalization(void)
+static void test_canonicalization(void)
 {
 	u64 saved_addr;
-	u8 addr_limit = cpuid_maxphyaddr();
+	u64 return_value;
+	u64 addr_limit;
+	u64 vmcb_phys = virt_to_phys(vmcb);
+
+	addr_limit = (this_cpu_has(X86_FEATURE_LA57)) ? 57 : 48;
 	u64 noncanonical_mask = NONCANONICAL & ~((1ul << addr_limit) - 1);
 
-	TEST_CANONICAL(vmcb->save.es.base, "ES");
-	TEST_CANONICAL(vmcb->save.cs.base, "CS");
-	TEST_CANONICAL(vmcb->save.ss.base, "SS");
-	TEST_CANONICAL(vmcb->save.ds.base, "DS");
-	TEST_CANONICAL(vmcb->save.fs.base, "FS");
-	TEST_CANONICAL(vmcb->save.gs.base, "GS");
-	TEST_CANONICAL(vmcb->save.gdtr.base, "GDTR");
-	TEST_CANONICAL(vmcb->save.ldtr.base, "LDTR");
-	TEST_CANONICAL(vmcb->save.idtr.base, "IDTR");
-	TEST_CANONICAL(vmcb->save.tr.base, "TR");
+	TEST_CANONICAL_VMLOAD(vmcb->save.fs.base, "FS");
+	TEST_CANONICAL_VMLOAD(vmcb->save.gs.base, "GS");
+	TEST_CANONICAL_VMLOAD(vmcb->save.ldtr.base, "LDTR");
+	TEST_CANONICAL_VMLOAD(vmcb->save.tr.base, "TR");
+	TEST_CANONICAL_VMLOAD(vmcb->save.kernel_gs_base, "KERNEL GS");
+	TEST_CANONICAL_VMRUN(vmcb->save.es.base, "ES");
+	TEST_CANONICAL_VMRUN(vmcb->save.cs.base, "CS");
+	TEST_CANONICAL_VMRUN(vmcb->save.ss.base, "SS");
+	TEST_CANONICAL_VMRUN(vmcb->save.ds.base, "DS");
+	TEST_CANONICAL_VMRUN(vmcb->save.gdtr.base, "GDTR");
+	TEST_CANONICAL_VMRUN(vmcb->save.idtr.base, "IDTR");
 }
 
 static void svm_guest_state_test(void)
@@ -2497,7 +2519,7 @@ static void svm_guest_state_test(void)
 	test_cr4();
 	test_dr();
 	test_msrpm_iopm_bitmap_addrs();
-	test_vmrun_canonicalization();
+	test_canonicalization();
 }
 
 static void __svm_npt_rsvd_bits_test(u64 *pxe, u64 rsvd_bits, u64 efer,
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 2+ messages in thread

* Re: [PATCH kvm-unit-tests] nSVM: Modified canonicalization tests
  2021-08-04 11:25 [PATCH kvm-unit-tests] nSVM: Modified canonicalization tests Lara Lazier
@ 2021-08-04 12:22 ` Paolo Bonzini
  0 siblings, 0 replies; 2+ messages in thread
From: Paolo Bonzini @ 2021-08-04 12:22 UTC (permalink / raw)
  To: Lara Lazier, kvm

Better subject "nSVM: test canonicalization of segment bases in VMLOAD".

On 04/08/21 13:25, Lara Lazier wrote:
> APM2 states that VMRUN and VMLOAD should canonicalize all base
> addresses in the segment registers that have been loaded respectively.
> 
> Split up in test_canonicalization the TEST_CANONICAL for VMLOAD and
> VMRUN. Added the respective test for KERNEL_GS.
> 
> In general the canonicalization should be from 48/57 to 64 based on LA57.

It is correct to improve is_canonical like that, because it is not in an 
AMD-specific file so it should support virtual address width.  However, 
AMD processors do not yet support LA57; therefore, the change to 
is_canonical is not really related to nSVM.  I would split this change 
in two patches, one for processor.h and one for svm_tests.c, because of 
this reason.

> Signed-off-by: Lara Lazier <laramglazier@gmail.com>
> ---
>   lib/x86/processor.h |  4 +++-
>   x86/svm_tests.c     | 54 +++++++++++++++++++++++++++++++--------------
>   2 files changed, 41 insertions(+), 17 deletions(-)
> 
> diff --git a/lib/x86/processor.h b/lib/x86/processor.h
> index f4d1757..ae708ac 100644
> --- a/lib/x86/processor.h
> +++ b/lib/x86/processor.h
> @@ -619,7 +619,9 @@ static inline void write_pkru(u32 pkru)
>   
>   static inline bool is_canonical(u64 addr)
>   {
> -	return (s64)(addr << 16) >> 16 == addr;
> +	int shift_amt = (this_cpu_has(X86_FEATURE_LA57)) ? 7 /* 57 bits virtual */
> +                        : 16 /* 48 bits virtual */;

You can use the virtual address width from CPUID instead of LA57:

int va_width = (raw_cpuid(0x80000008, 0).a & 0xff00) >> 8;
int shift_amt = 64 - va_width;

> +	return (s64)(addr << shift_amt) >> shift_amt == addr;
>   }
>   
>   static inline void clear_bit(int bit, u8 *addr)
> diff --git a/x86/svm_tests.c b/x86/svm_tests.c
> index 7c7b19d..273b80b 100644
> --- a/x86/svm_tests.c
> +++ b/x86/svm_tests.c
> @@ -2460,32 +2460,54 @@ static void test_msrpm_iopm_bitmap_addrs(void)
>   	vmcb->control.intercept = saved_intercept;
>   }
>   
> -#define TEST_CANONICAL(seg_base, msg)					\
> -	saved_addr = seg_base;						\
> +#define TEST_CANONICAL_VMRUN(seg_base, msg)					\
> +	saved_addr = seg_base;					\
>   	seg_base = (seg_base & ((1ul << addr_limit) - 1)) | noncanonical_mask; \
> -	report(svm_vmrun() == SVM_EXIT_VMMCALL, "Test %s.base for canonical form: %lx", msg, seg_base);							\
> +	return_value = svm_vmrun(); \
> +	if (is_canonical(seg_base)) { \
> +		report(return_value == SVM_EXIT_VMMCALL, \
> +			"Test %s.base for canonical form: %lx", msg, seg_base); \
> +	} else { \
> +		report(false, \
> +			"Test a %s.base not canonicalized: %lx", msg, seg_base); \
> +	} \

You can also split the tests in two different "report"s:

report(svm_vmrun() == SVM_EXIT_VMMCALL,
        "Successful VMRUN with noncanonical %s.base", msg);
report(is_canonical(seg_base),
        "Test %s.base for canonical form: %lx", msg, seg_base);

> -	TEST_CANONICAL(vmcb->save.es.base, "ES");
> -	TEST_CANONICAL(vmcb->save.cs.base, "CS");
> -	TEST_CANONICAL(vmcb->save.ss.base, "SS");
> -	TEST_CANONICAL(vmcb->save.ds.base, "DS");
> -	TEST_CANONICAL(vmcb->save.fs.base, "FS");
> -	TEST_CANONICAL(vmcb->save.gs.base, "GS");
> -	TEST_CANONICAL(vmcb->save.gdtr.base, "GDTR");
> -	TEST_CANONICAL(vmcb->save.ldtr.base, "LDTR");
> -	TEST_CANONICAL(vmcb->save.idtr.base, "IDTR");
> -	TEST_CANONICAL(vmcb->save.tr.base, "TR");
> +	TEST_CANONICAL_VMLOAD(vmcb->save.fs.base, "FS");
> +	TEST_CANONICAL_VMLOAD(vmcb->save.gs.base, "GS");
> +	TEST_CANONICAL_VMLOAD(vmcb->save.ldtr.base, "LDTR");
> +	TEST_CANONICAL_VMLOAD(vmcb->save.tr.base, "TR");
> +	TEST_CANONICAL_VMLOAD(vmcb->save.kernel_gs_base, "KERNEL GS");

For kernel_gs_base, we might even use VMLOAD without VMSAVE, and check 
rdmsr(MSR_KERNEL_GS_BASE).  This checks that the canonicalization is 
actually performed by VMLOAD and not VMSAVE.

(The same could be done also for gs.base using the SWAPGS instruction, 
and for fs.base if the processor has the RDFSBASE instruction, but one 
thing at a time).

But, one thing at a time.  You can keep the tests like this, and just 
fix the other things that I reported above.

Thanks!

Paolo

> +	TEST_CANONICAL_VMRUN(vmcb->save.es.base, "ES");
> +	TEST_CANONICAL_VMRUN(vmcb->save.cs.base, "CS");
> +	TEST_CANONICAL_VMRUN(vmcb->save.ss.base, "SS");
> +	TEST_CANONICAL_VMRUN(vmcb->save.ds.base, "DS");
> +	TEST_CANONICAL_VMRUN(vmcb->save.gdtr.base, "GDTR");
> +	TEST_CANONICAL_VMRUN(vmcb->save.idtr.base, "IDTR");
>   }
>   
>   static void svm_guest_state_test(void)
> @@ -2497,7 +2519,7 @@ static void svm_guest_state_test(void)
>   	test_cr4();
>   	test_dr();
>   	test_msrpm_iopm_bitmap_addrs();
> -	test_vmrun_canonicalization();
> +	test_canonicalization();
>   }
>   
>   static void __svm_npt_rsvd_bits_test(u64 *pxe, u64 rsvd_bits, u64 efer,
> 


^ permalink raw reply	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2021-08-04 12:22 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-08-04 11:25 [PATCH kvm-unit-tests] nSVM: Modified canonicalization tests Lara Lazier
2021-08-04 12:22 ` Paolo Bonzini

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.