All of lore.kernel.org
 help / color / mirror / Atom feed
* [LTP] [PATCH 1/5] KVM: Add helper functions for accessing GDT/LDT
@ 2023-04-21 14:57 Martin Doucha
  2023-04-21 14:57 ` [LTP] [PATCH 2/5] KVM: Add helper function for reading x86 segment registers Martin Doucha
                   ` (5 more replies)
  0 siblings, 6 replies; 19+ messages in thread
From: Martin Doucha @ 2023-04-21 14:57 UTC (permalink / raw)
  To: nstange, ltp

The fragmented structure of GDT and LDT requires helper functions
to work with descriptors. Add the necessary structure definitions,
constants and functions. Also increase GDT size to 32 descriptors
for later use.

Signed-off-by: Martin Doucha <mdoucha@suse.cz>
---
 doc/kvm-test-api.txt                    | 20 ++++++
 testcases/kernel/kvm/bootstrap_x86.S    |  3 +-
 testcases/kernel/kvm/bootstrap_x86_64.S |  3 +-
 testcases/kernel/kvm/include/kvm_x86.h  | 53 ++++++++++++++
 testcases/kernel/kvm/lib_x86.c          | 92 +++++++++++++++++++++++++
 5 files changed, 169 insertions(+), 2 deletions(-)

diff --git a/doc/kvm-test-api.txt b/doc/kvm-test-api.txt
index 25b72172d..f62819764 100644
--- a/doc/kvm-test-api.txt
+++ b/doc/kvm-test-api.txt
@@ -343,6 +343,26 @@ Developer's Manual, Volume 3, Chapter 4 for explanation of the fields.
   the known position in page table hierarchy and `entry->page_type`. Returns
   zero if the `entry` does not reference any memory page.
 
+- `void kvm_set_segment_descriptor(struct segment_descriptor *dst, uint64_t baseaddr, uint32_t limit, unsigned int flags)` -
+  Fill the `dst` segment descriptor with given values. The maximum value
+  of `limit` is `0xfffff` (inclusive) regardless of `flags`.
+
+- `void kvm_parse_segment_descriptor(struct segment_descriptor *src, uint64_t *baseaddr, uint32_t *limit, unsigned int *flags)` -
+  Parse data in the `src` segment descriptor and copy them to variables
+  pointed to by the other arguments. Any parameter except the first one can
+  be `NULL`.
+
+- `int kvm_find_free_descriptor(const struct segment_descriptor *table, size_t size)` -
+  Find the first segment descriptor in `table` which does not have
+  the `SEGFLAG_PRESENT` bit set. The function handles double-size descriptors
+  correctly. Returns index of the first available descriptor or -1 if all
+  `size` descriptors are taken.
+
+- `unsigned int kvm_create_stack_descriptor(struct segment_descriptor *table, size_t tabsize, void *stack_base)` -
+  Convenience function for registering a stack segment descriptor. It'll
+  automatically find a free slot in `table` and fill the necessary flags.
+  The `stack_base` pointer must point to the bottom of the stack.
+
 - `void kvm_get_cpuid(unsigned int eax, unsigned int ecx,
   struct kvm_cpuid *buf)` – Executes the CPUID instruction with the given
   `eax` and `ecx` arguments and stores the results in `buf`.
diff --git a/testcases/kernel/kvm/bootstrap_x86.S b/testcases/kernel/kvm/bootstrap_x86.S
index 1aaf0a4d1..3c17a2b47 100644
--- a/testcases/kernel/kvm/bootstrap_x86.S
+++ b/testcases/kernel/kvm/bootstrap_x86.S
@@ -7,6 +7,7 @@
 
 .set KVM_TEXIT, 0xff
 .set RESULT_ADDRESS, 0xfffff000
+.set KVM_GDT_SIZE, 32
 
 /*
  * This section will be allocated at address 0x1000 and
@@ -44,7 +45,7 @@ kvm_gdt:
 	.8byte 0
 	gdt32_entry type=0x1a l=0 d=1 /* Code segment protected_mode, 32bits */
 	gdt32_entry type=0x12 /* Data segment, writable */
-	.skip 16 /* Stack and TSS segment descriptors */
+	.skip (KVM_GDT_SIZE-3)*8 /* Stack, TSS and other segment descriptors */
 
 .Lgdt_end:
 .global kvm_gdt_desc
diff --git a/testcases/kernel/kvm/bootstrap_x86_64.S b/testcases/kernel/kvm/bootstrap_x86_64.S
index 0cffd5a12..3d8c49b10 100644
--- a/testcases/kernel/kvm/bootstrap_x86_64.S
+++ b/testcases/kernel/kvm/bootstrap_x86_64.S
@@ -8,6 +8,7 @@
 .set KVM_TCONF, 32
 .set KVM_TEXIT, 0xff
 .set RESULT_ADDRESS, 0xfffff000
+.set KVM_GDT_SIZE, 32
 
 /*
  * This section will be allocated at address 0x1000 and
@@ -478,7 +479,7 @@ kvm_pgtable_l4:
 kvm_gdt:
 	.8byte 0
 	gdt32_entry type=0x1a l=1 limit=0 g=0 /* Code segment long mode */
-	.skip 16 /* TSS segment descriptor */
+	.skip (KVM_GDT_SIZE-2)*8 /* TSS and other segment descriptors */
 
 .Lgdt_end:
 .global kvm_gdt_desc
diff --git a/testcases/kernel/kvm/include/kvm_x86.h b/testcases/kernel/kvm/include/kvm_x86.h
index 4f3671135..a655c9834 100644
--- a/testcases/kernel/kvm/include/kvm_x86.h
+++ b/testcases/kernel/kvm/include/kvm_x86.h
@@ -10,6 +10,9 @@
 
 #include "kvm_test.h"
 
+#define PAGESIZE 0x1000
+#define KVM_GDT_SIZE 32
+
 /* Interrupts */
 #define X86_INTR_COUNT 256
 
@@ -38,6 +41,26 @@
 #define INTR_SECURITY_ERROR 30
 
 
+/* Segment descriptor flags */
+#define SEGTYPE_LDT 0x02
+#define SEGTYPE_TSS 0x09
+#define SEGTYPE_TSS_BUSY 0x0b
+#define SEGTYPE_CALL_GATE 0x0c
+#define SEGTYPE_INTR_GATE 0x0e
+#define SEGTYPE_TRAP_GATE 0x0f
+#define SEGTYPE_RODATA 0x10
+#define SEGTYPE_RWDATA 0x12
+#define SEGTYPE_STACK 0x16
+#define SEGTYPE_CODE 0x1a
+#define SEGTYPE_MASK 0x1f
+
+#define SEGFLAG_NSYSTEM 0x10
+#define SEGFLAG_PRESENT 0x80
+#define SEGFLAG_CODE64 0x200
+#define SEGFLAG_32BIT 0x400
+#define SEGFLAG_PAGE_LIMIT 0x800
+
+
 /* CPUID constants */
 #define CPUID_GET_INPUT_RANGE 0x80000000
 #define CPUID_GET_EXT_FEATURES 0x80000001
@@ -91,6 +114,25 @@ struct intr_descriptor {
 #endif /* defined(__x86_64__) */
 } __attribute__((__packed__));
 
+struct segment_descriptor {
+	unsigned int limit_lo : 16;
+	unsigned int baseaddr_lo : 24;
+	unsigned int flags_lo : 8;
+	unsigned int limit_hi : 4;
+	unsigned int flags_hi : 4;
+	unsigned int baseaddr_hi : 8;
+} __attribute__((__packed__));
+
+struct segment_descriptor64 {
+	unsigned int limit_lo : 16;
+	unsigned int baseaddr_lo : 24;
+	unsigned int flags_lo : 8;
+	unsigned int limit_hi : 4;
+	unsigned int flags_hi : 4;
+	uint64_t baseaddr_hi : 40;
+	uint32_t reserved;
+} __attribute__((__packed__));
+
 struct page_table_entry_pae {
 	unsigned int present: 1;
 	unsigned int writable: 1;
@@ -118,10 +160,21 @@ struct kvm_cregs {
 
 extern struct page_table_entry_pae kvm_pagetable[];
 extern struct intr_descriptor kvm_idt[X86_INTR_COUNT];
+extern struct segment_descriptor kvm_gdt[KVM_GDT_SIZE];
 
 /* Page table helper functions */
 uintptr_t kvm_get_page_address_pae(const struct page_table_entry_pae *entry);
 
+/* Segment descriptor table functions */
+void kvm_set_segment_descriptor(struct segment_descriptor *dst,
+	uint64_t baseaddr, uint32_t limit, unsigned int flags);
+void kvm_parse_segment_descriptor(struct segment_descriptor *src,
+	uint64_t *baseaddr, uint32_t *limit, unsigned int *flags);
+int kvm_find_free_descriptor(const struct segment_descriptor *table,
+	size_t size);
+unsigned int kvm_create_stack_descriptor(struct segment_descriptor *table,
+	size_t tabsize, void *stack_base);
+
 /* Functions for querying CPU info and status */
 void kvm_get_cpuid(unsigned int eax, unsigned int ecx, struct kvm_cpuid *buf);
 void kvm_read_cregs(struct kvm_cregs *buf);
diff --git a/testcases/kernel/kvm/lib_x86.c b/testcases/kernel/kvm/lib_x86.c
index dc2354b10..d206072ee 100644
--- a/testcases/kernel/kvm/lib_x86.c
+++ b/testcases/kernel/kvm/lib_x86.c
@@ -110,6 +110,98 @@ uintptr_t kvm_get_page_address_pae(const struct page_table_entry_pae *entry)
 	return entry->address << 12;
 }
 
+#ifdef __x86_64__
+static void kvm_set_segment_descriptor64(struct segment_descriptor64 *dst,
+	uint64_t baseaddr, uint32_t limit, unsigned int flags)
+{
+
+	dst->baseaddr_lo = baseaddr & 0xffffff;
+	dst->baseaddr_hi = baseaddr >> 24;
+	dst->limit_lo = limit & 0xffff;
+	dst->limit_hi = limit >> 16;
+	dst->flags_lo = flags & 0xff;
+	dst->flags_hi = (flags >> 8) & 0xf;
+	dst->reserved = 0;
+}
+#endif
+
+void kvm_set_segment_descriptor(struct segment_descriptor *dst,
+	uint64_t baseaddr, uint32_t limit, unsigned int flags)
+{
+	if (limit >> 20)
+		tst_brk(TBROK, "Segment limit out of range");
+
+#ifdef __x86_64__
+	/* System descriptors have double size in 64bit mode */
+	if (!(flags & SEGFLAG_NSYSTEM)) {
+		kvm_set_segment_descriptor64((struct segment_descriptor64 *)dst,
+			baseaddr, limit, flags);
+		return;
+	}
+#endif
+
+	if (baseaddr >> 32)
+		tst_brk(TBROK, "Segment base address out of range");
+
+	dst->baseaddr_lo = baseaddr & 0xffffff;
+	dst->baseaddr_hi = baseaddr >> 24;
+	dst->limit_lo = limit & 0xffff;
+	dst->limit_hi = limit >> 16;
+	dst->flags_lo = flags & 0xff;
+	dst->flags_hi = (flags >> 8) & 0xf;
+}
+
+void kvm_parse_segment_descriptor(struct segment_descriptor *src,
+	uint64_t *baseaddr, uint32_t *limit, unsigned int *flags)
+{
+	if (baseaddr) {
+		*baseaddr = (((uint64_t)src->baseaddr_hi) << 24) |
+			src->baseaddr_lo;
+	}
+
+	if (limit)
+		*limit = (((uint32_t)src->limit_hi) << 16) | src->limit_lo;
+
+	if (flags)
+		*flags = (((uint32_t)src->flags_hi) << 8) | src->flags_lo;
+}
+
+int kvm_find_free_descriptor(const struct segment_descriptor *table,
+	size_t size)
+{
+	const struct segment_descriptor *ptr;
+	size_t i;
+
+	for (i = 0, ptr = table; i < size; i++, ptr++) {
+		if (!(ptr->flags_lo & SEGFLAG_PRESENT))
+			return i;
+
+#ifdef __x86_64__
+		/* System descriptors have double size in 64bit mode */
+		if (!(ptr->flags_lo & SEGFLAG_NSYSTEM)) {
+			ptr++;
+			i++;
+		}
+#endif
+	}
+
+	return -1;
+}
+
+unsigned int kvm_create_stack_descriptor(struct segment_descriptor *table,
+	size_t tabsize, void *stack_base)
+{
+	int ret = kvm_find_free_descriptor(table, tabsize);
+
+	if (ret < 0)
+		tst_brk(TBROK, "Descriptor table is full");
+
+	kvm_set_segment_descriptor(table + ret, 0,
+		(((uintptr_t)stack_base) - 1) >> 12, SEGTYPE_STACK |
+		SEGFLAG_PRESENT | SEGFLAG_32BIT | SEGFLAG_PAGE_LIMIT);
+	return ret;
+}
+
 void kvm_get_cpuid(unsigned int eax, unsigned int ecx, struct kvm_cpuid *buf)
 {
 	asm (
-- 
2.40.0


-- 
Mailing list info: https://lists.linux.it/listinfo/ltp

^ permalink raw reply related	[flat|nested] 19+ messages in thread

* [LTP] [PATCH 2/5] KVM: Add helper function for reading x86 segment registers
  2023-04-21 14:57 [LTP] [PATCH 1/5] KVM: Add helper functions for accessing GDT/LDT Martin Doucha
@ 2023-04-21 14:57 ` Martin Doucha
  2023-05-02 14:24   ` Petr Vorel
  2023-04-21 14:57 ` [LTP] [PATCH 3/5] KVM: Fix infinite loop in ptr2hex() Martin Doucha
                   ` (4 subsequent siblings)
  5 siblings, 1 reply; 19+ messages in thread
From: Martin Doucha @ 2023-04-21 14:57 UTC (permalink / raw)
  To: nstange, ltp

Signed-off-by: Martin Doucha <mdoucha@suse.cz>
---
 doc/kvm-test-api.txt                    |  7 +++++++
 testcases/kernel/kvm/bootstrap_x86.S    | 19 +++++++++++++++++++
 testcases/kernel/kvm/bootstrap_x86_64.S | 16 ++++++++++++++++
 testcases/kernel/kvm/include/kvm_x86.h  |  5 +++++
 4 files changed, 47 insertions(+)

diff --git a/doc/kvm-test-api.txt b/doc/kvm-test-api.txt
index f62819764..0aede5eea 100644
--- a/doc/kvm-test-api.txt
+++ b/doc/kvm-test-api.txt
@@ -329,6 +329,10 @@ struct kvm_cpuid {
 struct kvm_cregs {
 	unsigned long cr0, cr2, cr3, cr4;
 };
+
+struct kvm_sregs {
+	uint16_t cs, ds, es, fs, gs, ss;
+};
 -------------------------------------------------------------------------------
 
 `struct page_table_entry_pae` is the page table entry structure for PAE and
@@ -370,6 +374,9 @@ Developer's Manual, Volume 3, Chapter 4 for explanation of the fields.
 - `void kvm_read_cregs(struct kvm_cregs *buf)` – Copies the current values
   of control registers to `buf`.
 
+- `void kvm_read_sregs(struct kvm_sregs *buf)` - Copies the current values
+  of segment registers to `buf`.
+
 - `uint64_t kvm_rdmsr(unsigned int msr)` – Returns the current value
   of model-specific register `msr`.
 
diff --git a/testcases/kernel/kvm/bootstrap_x86.S b/testcases/kernel/kvm/bootstrap_x86.S
index 3c17a2b47..89f73eba1 100644
--- a/testcases/kernel/kvm/bootstrap_x86.S
+++ b/testcases/kernel/kvm/bootstrap_x86.S
@@ -197,6 +197,25 @@ kvm_read_cregs:
 	pop %edi
 	ret
 
+.global kvm_read_sregs
+kvm_read_sregs:
+	push %edi
+	mov 8(%esp), %edi
+	mov %cs, %ax
+	movw %ax, (%edi)
+	mov %ds, %ax
+	movw %ax, 2(%edi)
+	mov %es, %ax
+	movw %ax, 4(%edi)
+	mov %fs, %ax
+	movw %ax, 6(%edi)
+	mov %gs, %ax
+	movw %ax, 8(%edi)
+	mov %ss, %ax
+	movw %ax, 10(%edi)
+	pop %edi
+	ret
+
 handle_interrupt:
 	/* save CPU state */
 	push %ebp
diff --git a/testcases/kernel/kvm/bootstrap_x86_64.S b/testcases/kernel/kvm/bootstrap_x86_64.S
index 3d8c49b10..e4d160b2e 100644
--- a/testcases/kernel/kvm/bootstrap_x86_64.S
+++ b/testcases/kernel/kvm/bootstrap_x86_64.S
@@ -303,6 +303,22 @@ kvm_read_cregs:
 	mov %rax, 24(%rdi)
 	retq
 
+.global kvm_read_sregs
+kvm_read_sregs:
+	mov %cs, %ax
+	movw %ax, (%rdi)
+	mov %ds, %ax
+	movw %ax, 2(%rdi)
+	mov %es, %ax
+	movw %ax, 4(%rdi)
+	mov %fs, %ax
+	movw %ax, 6(%rdi)
+	mov %gs, %ax
+	movw %ax, 8(%rdi)
+	mov %ss, %ax
+	movw %ax, 10(%rdi)
+	retq
+
 handle_interrupt:
 	/* push CPU state */
 	push %rbp
diff --git a/testcases/kernel/kvm/include/kvm_x86.h b/testcases/kernel/kvm/include/kvm_x86.h
index a655c9834..a94f05451 100644
--- a/testcases/kernel/kvm/include/kvm_x86.h
+++ b/testcases/kernel/kvm/include/kvm_x86.h
@@ -158,6 +158,10 @@ struct kvm_cregs {
 	unsigned long cr0, cr2, cr3, cr4;
 };
 
+struct kvm_sregs {
+	uint16_t cs, ds, es, fs, gs, ss;
+};
+
 extern struct page_table_entry_pae kvm_pagetable[];
 extern struct intr_descriptor kvm_idt[X86_INTR_COUNT];
 extern struct segment_descriptor kvm_gdt[KVM_GDT_SIZE];
@@ -178,6 +182,7 @@ unsigned int kvm_create_stack_descriptor(struct segment_descriptor *table,
 /* Functions for querying CPU info and status */
 void kvm_get_cpuid(unsigned int eax, unsigned int ecx, struct kvm_cpuid *buf);
 void kvm_read_cregs(struct kvm_cregs *buf);
+void kvm_read_sregs(struct kvm_sregs *buf);
 uint64_t kvm_rdmsr(unsigned int msr);
 void kvm_wrmsr(unsigned int msr, uint64_t value);
 
-- 
2.40.0


-- 
Mailing list info: https://lists.linux.it/listinfo/ltp

^ permalink raw reply related	[flat|nested] 19+ messages in thread

* [LTP] [PATCH 3/5] KVM: Fix infinite loop in ptr2hex()
  2023-04-21 14:57 [LTP] [PATCH 1/5] KVM: Add helper functions for accessing GDT/LDT Martin Doucha
  2023-04-21 14:57 ` [LTP] [PATCH 2/5] KVM: Add helper function for reading x86 segment registers Martin Doucha
@ 2023-04-21 14:57 ` Martin Doucha
  2023-04-26  7:57   ` Cyril Hrubis
  2023-05-02 14:26   ` Petr Vorel
  2023-04-21 14:57 ` [LTP] [PATCH 4/5] Add KVM helper functions for AMD SVM Martin Doucha
                   ` (3 subsequent siblings)
  5 siblings, 2 replies; 19+ messages in thread
From: Martin Doucha @ 2023-04-21 14:57 UTC (permalink / raw)
  To: nstange, ltp

The C standard leaves (x >> 64) undefined for a 64-bit x, and on x86
the shift count is masked to the operand width, so (x >> 64) actually
behaves like (x >> 0). This can cause an infinite loop in ptr2hex()
if the highest nibble of the second argument is non-zero. Use a
temporary variable to avoid shifting by values equal to or larger
than the type width.

Signed-off-by: Martin Doucha <mdoucha@suse.cz>
---
 testcases/kernel/kvm/lib_guest.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/testcases/kernel/kvm/lib_guest.c b/testcases/kernel/kvm/lib_guest.c
index d237293fc..d3b2ac3d5 100644
--- a/testcases/kernel/kvm/lib_guest.c
+++ b/testcases/kernel/kvm/lib_guest.c
@@ -82,7 +82,7 @@ char *ptr2hex(char *dest, uintptr_t val)
 	uintptr_t tmp;
 	char *ret = dest;
 
-	for (i = 4; val >> i; i += 4)
+	for (i = 4, tmp = val >> 4; tmp; i += 4, tmp >>= 4)
 		;
 
 	do {
-- 
2.40.0


-- 
Mailing list info: https://lists.linux.it/listinfo/ltp

^ permalink raw reply related	[flat|nested] 19+ messages in thread

* [LTP] [PATCH 4/5] Add KVM helper functions for AMD SVM
  2023-04-21 14:57 [LTP] [PATCH 1/5] KVM: Add helper functions for accessing GDT/LDT Martin Doucha
  2023-04-21 14:57 ` [LTP] [PATCH 2/5] KVM: Add helper function for reading x86 segment registers Martin Doucha
  2023-04-21 14:57 ` [LTP] [PATCH 3/5] KVM: Fix infinite loop in ptr2hex() Martin Doucha
@ 2023-04-21 14:57 ` Martin Doucha
  2023-05-02 14:29   ` Petr Vorel
  2023-04-21 14:57 ` [LTP] [PATCH 5/5] Add test for CVE 2021-3653 Martin Doucha
                   ` (2 subsequent siblings)
  5 siblings, 1 reply; 19+ messages in thread
From: Martin Doucha @ 2023-04-21 14:57 UTC (permalink / raw)
  To: nstange, ltp

Signed-off-by: Martin Doucha <mdoucha@suse.cz>
---
 doc/kvm-test-api.txt                       |  73 ++++++++++
 testcases/kernel/kvm/bootstrap_x86.S       |  79 +++++++++++
 testcases/kernel/kvm/bootstrap_x86_64.S    |  89 ++++++++++++
 testcases/kernel/kvm/include/kvm_x86.h     |  14 ++
 testcases/kernel/kvm/include/kvm_x86_svm.h | 157 +++++++++++++++++++++
 testcases/kernel/kvm/lib_x86.c             | 155 +++++++++++++++++++-
 6 files changed, 566 insertions(+), 1 deletion(-)
 create mode 100644 testcases/kernel/kvm/include/kvm_x86_svm.h

diff --git a/doc/kvm-test-api.txt b/doc/kvm-test-api.txt
index 0aede5eea..812e12b38 100644
--- a/doc/kvm-test-api.txt
+++ b/doc/kvm-test-api.txt
@@ -390,6 +390,79 @@ Developer's Manual, Volume 3, Chapter 4 for explanation of the fields.
 See Intel(R) 64 and IA-32 Architectures Software Developer's Manual
 for documentation of standard and model-specific x86 registers.
 
+3.5 AMD SVM helper functions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+`#include "kvm_test.h"` +
+`#include "kvm_x86.h"` +
+`#include "kvm_x86_svm.h"`
+
+The KVM guest library provides basic helper functions for creating and running
+nested virtual machines using the AMD SVM technology.
+
+.Example code to execute nested VM
+[source,c]
+-------------------------------------------------------------------------------
+int guest_main(void)
+{
+	...
+	return 0;
+}
+
+void main(void)
+{
+	struct kvm_svm_vcpu *vm;
+
+	kvm_init_svm();
+	vm = kvm_create_svm_vcpu(guest_main, 1);
+	kvm_svm_vmrun(vm);
+}
+-------------------------------------------------------------------------------
+
+- `int kvm_is_svm_supported(void)` - Returns non-zero value if the CPU
+  supports AMD SVM, otherwise returns 0.
+
+- `int kvm_get_svm_state(void)` - Returns non-zero value if SVM is currently
+  enabled, otherwise returns 0.
+
+- `void kvm_set_svm_state(int enabled)` - Enable or disable SVM according
+  to argument. If SVM is disabled by host or not supported, the test will exit
+  with `TCONF`.
+
+- `void kvm_init_svm(void)` - Enable and fully initialize SVM, including
+  allocating and setting up host save area VMCB. If SVM is disabled by host or
+  not supported, the test will exit with `TCONF`.
+
+- `struct kvm_vmcb *kvm_alloc_vmcb(void)` - Allocate new VMCB structure
+  with correct memory alignment and fill it with zeroes.
+
+- `void kvm_vmcb_set_intercept(struct kvm_vmcb *vmcb, unsigned int id, unsigned int state)` -
+  Set SVM intercept bit `id` to given `state`.
+
+- `void kvm_init_guest_vmcb(struct kvm_vmcb *vmcb, uint32_t asid, uint16_t ss, void *rsp, int (*guest_main)(void))` -
+  Initialize new SVM virtual machine. The `asid` parameter is the nested
+  page table ID. The `ss` and `rsp` parameters set the stack segment and stack
+  pointer values, respectively. The `guest_main` parameter sets the code entry
+  point of the virtual machine. All control registers, segment registers
+  (except stack segment register), GDTR and IDTR will be copied
+  from the current CPU state.
+
+- `struct kvm_svm_vcpu *kvm_create_svm_vcpu(int (*guest_main)(void), int alloc_stack)` -
+  Convenience function for allocating and initializing new SVM virtual CPU.
+  The `guest_main` parameter is passed to `kvm_init_guest_vmcb()`,
+  the `alloc_stack` parameter controls whether a new 8KB stack will be
+  allocated and registered in GDT. Interception will be enabled for `VMSAVE`
+  and `HLT` instructions. If you set `alloc_stack` to zero, you must configure
+  the stack segment register and stack pointer manually.
+
+- `void kvm_svm_vmrun(struct kvm_svm_vcpu *cpu)` - Start or continue execution
+  of a nested virtual machine. Beware that FPU state is not saved. Do not use
+  floating-point types or values in nested guest code. Also do not use
+  `tst_res()` or `tst_brk()` functions in nested guest code.
+
+See AMD64 Architecture Programmer's Manual Volume 2 for documentation
+of the Secure Virtual Machine (SVM) technology.
+
 4. KVM guest environment
 ------------------------
 
diff --git a/testcases/kernel/kvm/bootstrap_x86.S b/testcases/kernel/kvm/bootstrap_x86.S
index 89f73eba1..a39c6bea7 100644
--- a/testcases/kernel/kvm/bootstrap_x86.S
+++ b/testcases/kernel/kvm/bootstrap_x86.S
@@ -9,6 +9,8 @@
 .set RESULT_ADDRESS, 0xfffff000
 .set KVM_GDT_SIZE, 32
 
+.set MSR_VM_HSAVE_PA, 0xc0010117
+
 /*
  * This section will be allocated at address 0x1000 and
  * jumped to from the reset stub provided by kvm_run.
@@ -351,6 +353,83 @@ kvm_yield:
 	hlt
 	ret
 
+.global kvm_svm_guest_entry
+kvm_svm_guest_entry:
+	call *%eax
+1:	hlt
+	jmp 1b
+
+.global kvm_svm_vmrun
+kvm_svm_vmrun:
+	push %edi
+	mov 8(%esp), %edi
+	push %ebx
+	push %esi
+	push %ebp
+
+	clgi
+
+	/* Save full host state */
+	movl $MSR_VM_HSAVE_PA, %ecx
+	rdmsr
+	vmsave
+	push %eax
+
+	/* Load guest registers */
+	push %edi
+	movl (%edi), %eax
+	/* %eax is loaded by vmrun from VMCB */
+	movl 0x0c(%edi), %ebx
+	movl 0x14(%edi), %ecx
+	movl 0x1c(%edi), %edx
+	movl 0x2c(%edi), %esi
+	movl 0x34(%edi), %ebp
+	/* %esp is loaded by vmrun from VMCB */
+	movl 0x24(%edi), %edi
+
+	vmload
+	vmrun
+	vmsave
+
+	/* Clear guest register buffer */
+	push %edi
+	push %ecx
+	movl 8(%esp), %edi
+	addl $4, %edi
+	xorl %eax, %eax
+	mov $32, %ecx
+	pushfl
+	cld
+	rep stosl
+	popfl
+
+	/* Save guest registers */
+	pop %ecx
+	pop %eax
+	pop %edi
+	movl %ebx, 0x0c(%edi)
+	movl %ecx, 0x14(%edi)
+	movl %edx, 0x1c(%edi)
+	movl %eax, 0x24(%edi)
+	movl %esi, 0x2c(%edi)
+	movl %ebp, 0x34(%edi)
+	/* Copy %eax and %esp from VMCB */
+	movl (%edi), %esi
+	movl 0x5f8(%esi), %eax
+	movl %eax, 0x04(%edi)
+	movl 0x5d8(%esi), %eax
+	movl %eax, 0x3c(%edi)
+
+	pop %eax
+	vmload
+	stgi
+
+	pop %ebp
+	pop %esi
+	pop %ebx
+	pop %edi
+	ret
+
 
 .section .bss.pgtables, "aw", @nobits
 .global kvm_pagetable
diff --git a/testcases/kernel/kvm/bootstrap_x86_64.S b/testcases/kernel/kvm/bootstrap_x86_64.S
index e4d160b2e..b02dd4d92 100644
--- a/testcases/kernel/kvm/bootstrap_x86_64.S
+++ b/testcases/kernel/kvm/bootstrap_x86_64.S
@@ -10,6 +10,8 @@
 .set RESULT_ADDRESS, 0xfffff000
 .set KVM_GDT_SIZE, 32
 
+.set MSR_VM_HSAVE_PA, 0xc0010117
+
 /*
  * This section will be allocated at address 0x1000 and
  * jumped to from the reset stub provided by kvm_run.
@@ -474,6 +476,93 @@ kvm_yield:
 	hlt
 	ret
 
+.global kvm_svm_guest_entry
+kvm_svm_guest_entry:
+	call *%rax
+1:	hlt
+	jmp 1b
+
+.global kvm_svm_vmrun
+kvm_svm_vmrun:
+	pushq %rbx
+	pushq %rbp
+	pushq %r12
+	pushq %r13
+	pushq %r14
+	pushq %r15
+
+	clgi
+
+	/* Save full host state */
+	movq $MSR_VM_HSAVE_PA, %rcx
+	rdmsr
+	shlq $32, %rdx
+	orq %rdx, %rax
+	vmsave
+	pushq %rax
+
+	/* Load guest registers */
+	pushq %rdi
+	movq (%rdi), %rax
+	/* %rax is loaded by vmrun from VMCB */
+	movq 0x10(%rdi), %rbx
+	movq 0x18(%rdi), %rcx
+	movq 0x20(%rdi), %rdx
+	movq 0x30(%rdi), %rsi
+	movq 0x38(%rdi), %rbp
+	/* %rsp is loaded by vmrun from VMCB */
+	movq 0x48(%rdi), %r8
+	movq 0x50(%rdi), %r9
+	movq 0x58(%rdi), %r10
+	movq 0x60(%rdi), %r11
+	movq 0x68(%rdi), %r12
+	movq 0x70(%rdi), %r13
+	movq 0x78(%rdi), %r14
+	movq 0x80(%rdi), %r15
+	movq 0x28(%rdi), %rdi
+
+	vmload
+	vmrun
+	vmsave
+
+	/* Save guest registers */
+	movq %rdi, %rax
+	popq %rdi
+	movq %rbx, 0x10(%rdi)
+	movq %rcx, 0x18(%rdi)
+	movq %rdx, 0x20(%rdi)
+	/* %rax contains guest %rdi */
+	movq %rax, 0x28(%rdi)
+	movq %rsi, 0x30(%rdi)
+	movq %rbp, 0x38(%rdi)
+	movq %r8,  0x48(%rdi)
+	movq %r9,  0x50(%rdi)
+	movq %r10, 0x58(%rdi)
+	movq %r11, 0x60(%rdi)
+	movq %r12, 0x68(%rdi)
+	movq %r13, 0x70(%rdi)
+	movq %r14, 0x78(%rdi)
+	movq %r15, 0x80(%rdi)
+	/* Copy guest %rax and %rsp from VMCB */
+	movq (%rdi), %rsi
+	movq 0x5f8(%rsi), %rax
+	movq %rax, 0x08(%rdi)
+	movq 0x5d8(%rsi), %rax
+	movq %rax, 0x40(%rdi)
+
+	/* Reload host state */
+	popq %rax
+	vmload
+
+	stgi
+
+	popq %r15
+	popq %r14
+	popq %r13
+	popq %r12
+	popq %rbp
+	popq %rbx
+	retq
 
 .section .bss.pgtables, "aw", @nobits
 .global kvm_pagetable
diff --git a/testcases/kernel/kvm/include/kvm_x86.h b/testcases/kernel/kvm/include/kvm_x86.h
index a94f05451..bc36c0e0f 100644
--- a/testcases/kernel/kvm/include/kvm_x86.h
+++ b/testcases/kernel/kvm/include/kvm_x86.h
@@ -64,16 +64,25 @@
 /* CPUID constants */
 #define CPUID_GET_INPUT_RANGE 0x80000000
 #define CPUID_GET_EXT_FEATURES 0x80000001
+#define CPUID_GET_SVM_FEATURES 0x8000000a
 
 
 /* Model-specific CPU register constants */
 #define MSR_EFER 0xc0000080
+#define MSR_VM_CR 0xc0010114
+#define MSR_VM_HSAVE_PA 0xc0010117
 
 #define EFER_SCE (1 << 0)	/* SYSCALL/SYSRET instructions enabled */
 #define EFER_LME (1 << 8)	/* CPU is running in 64bit mode */
 #define EFER_LMA (1 << 10)	/* CPU uses 64bit memory paging (read-only) */
 #define EFER_NXE (1 << 11)	/* Execute disable bit active */
+#define EFER_SVME (1 << 12)	/* AMD SVM instructions enabled */
 
+#define VM_CR_DPD (1 << 0)
+#define VM_CR_R_INIT (1 << 1)
+#define VM_CR_DIS_A20M (1 << 2)
+#define VM_CR_LOCK (1 << 3)
+#define VM_CR_SVMDIS (1 << 4)
 
 /* Control register constants */
 #define CR4_VME (1 << 0)
@@ -162,6 +171,11 @@ struct kvm_sregs {
 	uint16_t cs, ds, es, fs, gs, ss;
 };
 
+struct kvm_regs64 {
+	uint64_t rax, rbx, rcx, rdx, rdi, rsi, rbp, rsp;
+	uint64_t r8, r9, r10, r11, r12, r13, r14, r15;
+};
+
 extern struct page_table_entry_pae kvm_pagetable[];
 extern struct intr_descriptor kvm_idt[X86_INTR_COUNT];
 extern struct segment_descriptor kvm_gdt[KVM_GDT_SIZE];
diff --git a/testcases/kernel/kvm/include/kvm_x86_svm.h b/testcases/kernel/kvm/include/kvm_x86_svm.h
new file mode 100644
index 000000000..965d1e716
--- /dev/null
+++ b/testcases/kernel/kvm/include/kvm_x86_svm.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2023 SUSE LLC <mdoucha@suse.cz>
+ *
+ * x86-specific KVM helper functions and structures for AMD SVM
+ */
+
+#ifndef KVM_X86_SVM_H_
+#define KVM_X86_SVM_H_
+
+#include "kvm_x86.h"
+
+/* CPUID_GET_SVM_FEATURES flags returned in EDX */
+#define SVM_CPUID_NESTED_PAGING (1 << 0)
+#define SVM_CPUID_LBR_VIRT (1 << 1)
+#define SVM_CPUID_LOCK (1 << 2)
+#define SVM_CPUID_NRIP_SAVE (1 << 3)
+#define SVM_CPUID_TSC_RATE_MSR (1 << 4)
+#define SVM_CPUID_VMCB_CLEAN (1 << 5)
+#define SVM_CPUID_FLUSH_ASID (1 << 6)
+#define SVM_CPUID_DECODE_ASSIST (1 << 7)
+#define SVM_CPUID_PAUSE_FILTER (1 << 10)
+#define SVM_CPUID_PAUSE_THRESHOLD (1 << 12)
+#define SVM_CPUID_AVIC (1 << 13)
+#define SVM_CPUID_VMSAVE_VIRT (1 << 15)
+#define SVM_CPUID_VGIF (1 << 16)
+#define SVM_CPUID_GMET (1 << 17)
+#define SVM_CPUID_X2AVIC (1 << 18)
+#define SVM_CPUID_SSSCHECK (1 << 19)
+#define SVM_CPUID_SPEC_CTRL (1 << 20)
+#define SVM_CPUID_ROGPT (1 << 21)
+#define SVM_CPUID_HOST_MCE_OVERRIDE (1 << 23)
+#define SVM_CPUID_TLBI_CTL (1 << 24)
+#define SVM_CPUID_NMI_VIRT (1 << 25)
+#define SVM_CPUID_IBS_VIRT (1 << 26)
+
+/* SVM event intercept IDs */
+#define SVM_INTERCEPT_HLT 0x78
+#define SVM_INTERCEPT_VMRUN 0x80
+#define SVM_INTERCEPT_MAX 0x95
+
+/* SVM vmrun exit codes */
+#define SVM_EXIT_HLT 0x78
+#define SVM_EXIT_AVIC_NOACCEL 0x402
+#define SVM_EXIT_INVALID ((uint64_t)-1)
+
+/* SVM VMCB flags */
+#define SVM_INTR_AVIC (1 << 7)
+
+struct kvm_vmcb_descriptor {
+	uint16_t selector;
+	uint16_t attrib;
+	uint32_t limit;
+	uint64_t base;
+};
+
+struct kvm_vmcb {
+	/* VMCB control area */
+	uint8_t intercepts[20];
+	uint8_t reserved1[44];
+	uint64_t iopm_base_addr;
+	uint64_t msrpm_base_addr;
+	uint64_t tsc_offset;
+	uint32_t guest_asid;
+	uint32_t tlb_control;
+	uint8_t virtual_tpr;
+	uint8_t virtual_irq;
+	unsigned char virt_intr_prio: 4;
+	unsigned char virt_ignore_tpr: 4;
+	uint8_t virt_intr_ctl;
+	uint8_t virt_intr_vector;
+	uint8_t reserved2[3];
+	uint64_t interrupt_shadow;
+	uint64_t exitcode;
+	uint64_t exitinfo1;
+	uint64_t exitinfo2;
+	uint64_t exit_int_info;
+	uint64_t enable_nested_paging;
+	uint64_t avic_bar;
+	uint64_t ghcb_gpa;
+	uint64_t event_injection;
+	uint64_t nested_cr3;
+	uint64_t virt_ext;
+	uint32_t vmcb_clean;
+	uint8_t reserved3[4];
+	uint64_t next_rip;
+	uint8_t instr_len;
+	uint8_t instr_bytes[15];
+	uint64_t avic_backing_page;
+	uint8_t reserved4[8];
+	uint64_t avic_logical_ptr;
+	uint64_t avic_physical_ptr;
+	uint8_t reserved5[8];
+	uint64_t vmsa_pa;
+	uint64_t vmgexit_rax;
+	uint8_t vmgexit_cpl;
+	uint8_t reserved6[0x2e7];
+
+	/* VMCB state save area */
+	struct kvm_vmcb_descriptor es, cs, ss, ds, fs, gs;
+	struct kvm_vmcb_descriptor gdtr, ldtr, idtr, tr;
+	uint8_t reserved7[43];
+	uint8_t cpl;
+	uint8_t reserved8[4];
+	uint64_t efer;
+	uint8_t reserved9[112];
+	uint64_t cr4;
+	uint64_t cr3;
+	uint64_t cr0;
+	uint64_t dr7;
+	uint64_t dr6;
+	uint64_t rflags;
+	uint64_t rip;
+	uint8_t reserved10[88];
+	uint64_t rsp;
+	uint64_t s_cet;
+	uint64_t ssp;
+	uint64_t isst_addr;
+	uint64_t rax;
+	uint64_t star;
+	uint64_t lstar;
+	uint64_t cstar;
+	uint64_t sfmask;
+	uint64_t kernel_gs_base;
+	uint64_t sysenter_cs;
+	uint64_t sysenter_esp;
+	uint64_t sysenter_eip;
+	uint64_t cr2;
+	uint8_t reserved11[32];
+	uint64_t guest_pat;
+	uint8_t padding[0x990];
+};
+
+struct kvm_svm_vcpu {
+	struct kvm_vmcb *vmcb;
+	struct kvm_regs64 regs;
+};
+
+/* AMD SVM virtualization helper functions */
+int kvm_is_svm_supported(void);
+int kvm_get_svm_state(void);
+void kvm_set_svm_state(int enabled);
+
+void kvm_init_svm(void);	/* Fully initialize host SVM environment */
+struct kvm_vmcb *kvm_alloc_vmcb(void);
+void kvm_vmcb_copy_gdt_descriptor(struct kvm_vmcb_descriptor *dst,
+	unsigned int gdt_id);
+void kvm_vmcb_set_intercept(struct kvm_vmcb *vmcb, unsigned int id,
+	unsigned int state);
+void kvm_init_guest_vmcb(struct kvm_vmcb *vmcb, uint32_t asid, uint16_t ss,
+	void *rsp, int (*guest_main)(void));
+struct kvm_svm_vcpu *kvm_create_svm_vcpu(int (*guest_main)(void),
+	int alloc_stack);
+
+void kvm_svm_vmrun(struct kvm_svm_vcpu *cpu);
+
+#endif /* KVM_X86_SVM_H_ */
diff --git a/testcases/kernel/kvm/lib_x86.c b/testcases/kernel/kvm/lib_x86.c
index d206072ee..3e6656f11 100644
--- a/testcases/kernel/kvm/lib_x86.c
+++ b/testcases/kernel/kvm/lib_x86.c
@@ -5,7 +5,9 @@
  * x86-specific KVM helper functions
  */
 
-#include "kvm_x86.h"
+#include "kvm_x86_svm.h"
+
+void kvm_svm_guest_entry(void);
 
 struct kvm_interrupt_frame {
 	uintptr_t eip, cs, eflags, esp, ss;
@@ -240,3 +242,154 @@ uintptr_t kvm_get_interrupt_ip(const struct kvm_interrupt_frame *ifrm)
 {
 	return ifrm->eip;
 }
+
+/*
+ * Check whether the CPU advertises AMD SVM support via CPUID.
+ * Returns non-zero if SVM is supported, 0 otherwise.
+ */
+int kvm_is_svm_supported(void)
+{
+	struct kvm_cpuid buf;
+
+	kvm_get_cpuid(CPUID_GET_INPUT_RANGE, 0, &buf);
+
+	/* EAX holds the highest supported leaf; bail out if the extended
+	 * feature leaf is not available at all. */
+	if (buf.eax < CPUID_GET_EXT_FEATURES)
+		return 0;
+
+	kvm_get_cpuid(CPUID_GET_EXT_FEATURES, 0, &buf);
+	/* ECX bit 2 is the SVM feature flag */
+	return buf.ecx & 0x4;
+}
+
+/* Return non-zero if SVM is currently enabled (EFER.SVME bit set). */
+int kvm_get_svm_state(void)
+{
+	return kvm_rdmsr(MSR_EFER) & EFER_SVME;
+}
+
+/*
+ * Enable or disable SVM by toggling the EFER.SVME bit.
+ * Exits the test with TCONF if the CPU does not support SVM or if SVM
+ * has been disabled (e.g. by firmware) via the VM_CR MSR.
+ */
+void kvm_set_svm_state(int enabled)
+{
+	uint64_t value;
+
+	if (!kvm_is_svm_supported())
+		tst_brk(TCONF, "CPU does not support SVM");
+
+	/* VM_CR.SVMDIS set means SVM is locked off regardless of EFER */
+	if (kvm_rdmsr(MSR_VM_CR) & VM_CR_SVMDIS)
+		tst_brk(TCONF, "SVM is supported but disabled");
+
+	value = kvm_rdmsr(MSR_EFER);
+
+	if (enabled)
+		value |= EFER_SVME;
+	else
+		value &= ~EFER_SVME;
+
+	kvm_wrmsr(MSR_EFER, value);
+}
+
+/*
+ * Allocate a zeroed, page-aligned VMCB structure on the test heap.
+ * Page alignment is required because the VMCB address is passed to
+ * hardware (VMRUN / VM_HSAVE_PA) as a physical page address.
+ */
+struct kvm_vmcb *kvm_alloc_vmcb(void)
+{
+	struct kvm_vmcb *ret;
+
+	ret = tst_heap_alloc_aligned(sizeof(struct kvm_vmcb), PAGESIZE);
+	memset(ret, 0, sizeof(struct kvm_vmcb));
+	return ret;
+}
+
+/*
+ * Fully initialize the host SVM environment: enable SVM in EFER and
+ * register a host state save area (required before VMRUN can be used).
+ */
+void kvm_init_svm(void)
+{
+	kvm_set_svm_state(1);
+	/* The host save area has the same size/alignment needs as a VMCB */
+	kvm_wrmsr(MSR_VM_HSAVE_PA, (uintptr_t)kvm_alloc_vmcb());
+}
+
+/*
+ * Convert GDT entry gdt_id into the VMCB segment descriptor format and
+ * store it in dst. A non-present GDT entry yields a zeroed descriptor.
+ * Exits the test with TBROK if gdt_id is out of range.
+ */
+void kvm_vmcb_copy_gdt_descriptor(struct kvm_vmcb_descriptor *dst,
+	unsigned int gdt_id)
+{
+	uint64_t baseaddr;
+	uint32_t limit;
+	unsigned int flags;
+
+	if (gdt_id >= KVM_GDT_SIZE)
+		tst_brk(TBROK, "GDT descriptor ID out of range");
+
+	kvm_parse_segment_descriptor(kvm_gdt + gdt_id, &baseaddr, &limit,
+		&flags);
+
+	if (!(flags & SEGFLAG_PRESENT)) {
+		memset(dst, 0, sizeof(struct kvm_vmcb_descriptor));
+		return;
+	}
+
+	/* Page-granularity limit: expand from 4KB units to a byte count */
+	if (flags & SEGFLAG_PAGE_LIMIT)
+		limit = (limit << 12) | 0xfff;
+
+	/* Selector value is the GDT index shifted past the RPL/TI bits */
+	dst->selector = gdt_id << 3;
+	dst->attrib = flags;
+	dst->limit = limit;
+	dst->base = baseaddr;
+}
+
+/*
+ * Set (state != 0) or clear (state == 0) intercept bit id in the VMCB
+ * intercept bitmap. Exits the test with TBROK for an invalid id.
+ */
+void kvm_vmcb_set_intercept(struct kvm_vmcb *vmcb, unsigned int id,
+	unsigned int state)
+{
+	/* Bitmap addressing: byte index and bit mask within that byte */
+	unsigned int addr = id / 8, bit = 1 << (id % 8);
+
+	if (id >= SVM_INTERCEPT_MAX)
+		tst_brk(TBROK, "Invalid SVM intercept ID");
+
+	if (state)
+		vmcb->intercepts[addr] |= bit;
+	else
+		vmcb->intercepts[addr] &= ~bit;
+}
+
+/*
+ * Initialize vmcb for running a nested guest: the guest inherits the
+ * current control registers, EFER, GDT and IDT, and most segment
+ * registers; the stack segment, stack pointer and entry point are set
+ * from the arguments.
+ */
+void kvm_init_guest_vmcb(struct kvm_vmcb *vmcb, uint32_t asid, uint16_t ss,
+	void *rsp, int (*guest_main)(void))
+{
+	struct kvm_cregs cregs;
+	struct kvm_sregs sregs;
+
+	kvm_read_cregs(&cregs);
+	kvm_read_sregs(&sregs);
+
+	/* VMRUN intercept is mandatory; HLT intercept lets the guest exit */
+	kvm_vmcb_set_intercept(vmcb, SVM_INTERCEPT_VMRUN, 1);
+	kvm_vmcb_set_intercept(vmcb, SVM_INTERCEPT_HLT, 1);
+
+	/* Segment selectors hold (GDT index << 3), hence the >> 3 */
+	kvm_vmcb_copy_gdt_descriptor(&vmcb->es, sregs.es >> 3);
+	kvm_vmcb_copy_gdt_descriptor(&vmcb->cs, sregs.cs >> 3);
+	kvm_vmcb_copy_gdt_descriptor(&vmcb->ss, ss);
+	kvm_vmcb_copy_gdt_descriptor(&vmcb->ds, sregs.ds >> 3);
+	kvm_vmcb_copy_gdt_descriptor(&vmcb->fs, sregs.fs >> 3);
+	kvm_vmcb_copy_gdt_descriptor(&vmcb->gs, sregs.gs >> 3);
+	vmcb->gdtr.base = (uintptr_t)kvm_gdt;
+	vmcb->gdtr.limit = (KVM_GDT_SIZE*sizeof(struct segment_descriptor)) - 1;
+	vmcb->idtr.base = (uintptr_t)kvm_idt;
+	vmcb->idtr.limit = (X86_INTR_COUNT*sizeof(struct intr_descriptor)) - 1;
+
+	vmcb->guest_asid = asid;
+	vmcb->efer = kvm_rdmsr(MSR_EFER);
+	vmcb->cr0 = cregs.cr0;
+	vmcb->cr3 = cregs.cr3;
+	vmcb->cr4 = cregs.cr4;
+	/* The guest starts in the entry trampoline; RAX carries the real
+	 * guest_main() address for the trampoline to call — see
+	 * kvm_svm_guest_entry (assumption based on usage; defined in
+	 * bootstrap asm). */
+	vmcb->rip = (uintptr_t)kvm_svm_guest_entry;
+	vmcb->rax = (uintptr_t)guest_main;
+	vmcb->rsp = (uintptr_t)rsp;
+	vmcb->rflags = 0x200;	/* Interrupts enabled */
+}
+
+/*
+ * Allocate and initialize a vCPU ready to run guest_main() via
+ * kvm_svm_vmrun(). If alloc_stack is non-zero, a 2-page guest stack and
+ * a matching stack segment descriptor are created as well; otherwise
+ * the guest runs with a null SS and RSP.
+ */
+struct kvm_svm_vcpu *kvm_create_svm_vcpu(int (*guest_main)(void),
+	int alloc_stack)
+{
+	uint16_t ss = 0;
+	char *stack = NULL;
+	struct kvm_vmcb *vmcb;
+	struct kvm_svm_vcpu *ret;
+
+	vmcb = kvm_alloc_vmcb();
+
+	if (alloc_stack) {
+		stack = tst_heap_alloc_aligned(2 * PAGESIZE, PAGESIZE);
+		ss = kvm_create_stack_descriptor(kvm_gdt, KVM_GDT_SIZE, stack);
+		/* Stack grows down: point RSP at the top of the allocation */
+		stack += 2 * PAGESIZE;
+	}
+
+	kvm_init_guest_vmcb(vmcb, 1, ss, stack, guest_main);
+	ret = tst_heap_alloc(sizeof(struct kvm_svm_vcpu));
+	memset(ret, 0, sizeof(struct kvm_svm_vcpu));
+	ret->vmcb = vmcb;
+	return ret;
+}
-- 
2.40.0


-- 
Mailing list info: https://lists.linux.it/listinfo/ltp

^ permalink raw reply related	[flat|nested] 19+ messages in thread

* [LTP] [PATCH 5/5] Add test for CVE 2021-3653
  2023-04-21 14:57 [LTP] [PATCH 1/5] KVM: Add helper functions for accessing GDT/LDT Martin Doucha
                   ` (2 preceding siblings ...)
  2023-04-21 14:57 ` [LTP] [PATCH 4/5] Add KVM helper functions for AMD SVM Martin Doucha
@ 2023-04-21 14:57 ` Martin Doucha
  2023-04-24  9:09   ` Martin Doucha
  2023-04-25 12:48   ` Petr Vorel
  2023-04-26  8:08 ` [LTP] [PATCH 1/5] KVM: Add helper functions for accessing GDT/LDT Cyril Hrubis
  2023-05-02 14:24 ` Petr Vorel
  5 siblings, 2 replies; 19+ messages in thread
From: Martin Doucha @ 2023-04-21 14:57 UTC (permalink / raw)
  To: nstange, ltp

Signed-off-by: Martin Doucha <mdoucha@suse.cz>
---
 testcases/kernel/kvm/kvm_svm01.c | 123 +++++++++++++++++++++++++++++++
 1 file changed, 123 insertions(+)
 create mode 100644 testcases/kernel/kvm/kvm_svm01.c

diff --git a/testcases/kernel/kvm/kvm_svm01.c b/testcases/kernel/kvm/kvm_svm01.c
new file mode 100644
index 000000000..32d15526b
--- /dev/null
+++ b/testcases/kernel/kvm/kvm_svm01.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2023 SUSE LLC
+ * Author: Nicolai Stange <nstange@suse.de>
+ * LTP port: Martin Doucha <mdoucha@suse.cz>
+ */
+
+/*\
+ * CVE 2021-3653
+ *
+ * Check that KVM either blocks enabling virtual interrupt controller (AVIC)
+ * in nested VMs or correctly sets up the required memory address translation.
+ * If AVIC is enabled without address translation in the host kernel,
+ * the nested VM will be able to read and write an arbitrary physical memory
+ * page specified by the parent VM. Unauthorized memory access fixed in:
+ *
+ *  commit 0f923e07124df069ba68d8bb12324398f4b6b709
+ *  Author: Maxim Levitsky <mlevitsk@redhat.com>
+ *  Date:   Thu Jul 15 01:56:24 2021 +0300
+ *
+ *  KVM: nSVM: avoid picking up unsupported bits from L2 in int_ctl (CVE-2021-3653)
+ */
+
+#include "kvm_test.h"
+
+#ifdef COMPILE_PAYLOAD
+#if defined(__i386__) || defined(__x86_64__)
+
+#include "kvm_x86_svm.h"
+
+#define AVIC_REG_ADDR 0x280
+#define AVIC_TEST_VAL 0xec
+#define AVIC_READ_FAIL 0x12ead
+
+#define AVIC_INFO_MASK ((1ULL << 32) | 0xff0)
+#define AVIC_INFO_EXP ((1ULL << 32) | AVIC_REG_ADDR)
+
+static uint32_t * const avic_ptr = (uint32_t *)AVIC_REG_ADDR;
+
+/*
+ * Payload run inside the nested VM: verify the AVIC page contains the
+ * pattern the parent wrote, then store a test value into the AVIC
+ * register for the parent to check after VMEXIT.
+ */
+static int guest_main(void)
+{
+	/* The parent filled this page with 0xaa before VMRUN; any other
+	 * value means the read was serviced from somewhere else. */
+	if (*avic_ptr != 0xaaaaaaaa)
+		return AVIC_READ_FAIL;
+
+	*avic_ptr = AVIC_TEST_VAL;
+	return 0;
+}
+
+/*
+ * Host-side test body: enable AVIC for the nested VM with both the AVIC
+ * base and backing page addresses set to 0, run the guest and verify its
+ * AVIC accesses were either intercepted (VMEXIT) or at least not
+ * redirected into host physical memory (CVE-2021-3653).
+ */
+void main(void)
+{
+	struct kvm_svm_vcpu *vcpu;
+
+	kvm_init_svm();
+	vcpu = kvm_create_svm_vcpu(guest_main, 1);
+
+	/*
+	 * Enable AVIC and set both the AVIC base address (where the nested VM
+	 * will write) and backing page address (where the parent VM expects
+	 * to see the changes) to 0
+	 */
+	vcpu->vmcb->virt_intr_ctl |= SVM_INTR_AVIC;
+	vcpu->vmcb->avic_backing_page = 0;
+	vcpu->vmcb->avic_bar = 0;
+	/* Fill page 0 (minus the first 8 bytes) with a known pattern
+	 * which guest_main() checks before writing */
+	memset((void *)8, 0xaa, PAGESIZE - 8);
+
+	/* Write into AVIC backing page in the nested VM */
+	kvm_svm_vmrun(vcpu);
+
+	switch (vcpu->vmcb->exitcode) {
+	case SVM_EXIT_HLT:
+		/* Guest finished normally; its return value is in RAX */
+		if (vcpu->vmcb->rax == AVIC_READ_FAIL) {
+			tst_res(TFAIL, "Nested VM can read host memory");
+			return;
+		}
+
+		if (vcpu->vmcb->rax)
+			tst_brk(TBROK, "Unexpected guest_main() return value");
+
+		break;
+
+	case SVM_EXIT_AVIC_NOACCEL:
+		/* Exit info must identify a write to the expected register */
+		if ((vcpu->vmcb->exitinfo1 & AVIC_INFO_MASK) == AVIC_INFO_EXP) {
+			tst_res(TPASS, "AVIC register write caused VMEXIT");
+			break;
+		}
+
+		/* unexpected exit, fall through */
+
+	default:
+		tst_brk(TBROK, "Nested VM exited unexpectedly");
+	}
+
+	/* On SVM_EXIT_HLT the guest's write must be visible here only if
+	 * it stayed within guest memory */
+	if (*avic_ptr != AVIC_TEST_VAL) {
+		tst_res(TFAIL, "Write into AVIC ESR redirected to host memory");
+		return;
+	}
+
+	tst_res(TPASS, "Writes into AVIC backing page were not redirected");
+}
+
+#else /* defined(__i386__) || defined(__x86_64__) */
+TST_TEST_TCONF("Test supported only on x86");
+#endif /* defined(__i386__) || defined(__x86_64__) */
+
+#else /* COMPILE_PAYLOAD */
+
+static struct tst_test test = {
+	.test_all = tst_kvm_run,
+	.setup = tst_kvm_setup,
+	.cleanup = tst_kvm_cleanup,
+	.supported_archs = (const char *const []) {
+		"x86_64",
+		"x86",
+		NULL
+	},
+	.tags = (struct tst_tag[]){
+		{"linux-git", "0f923e07124d"},
+		{"CVE", "2021-3653"},
+		{}
+	}
+};
+
+#endif /* COMPILE_PAYLOAD */
-- 
2.40.0


-- 
Mailing list info: https://lists.linux.it/listinfo/ltp

^ permalink raw reply related	[flat|nested] 19+ messages in thread

* Re: [LTP] [PATCH 5/5] Add test for CVE 2021-3653
  2023-04-21 14:57 ` [LTP] [PATCH 5/5] Add test for CVE 2021-3653 Martin Doucha
@ 2023-04-24  9:09   ` Martin Doucha
  2023-05-02 14:32     ` Petr Vorel
  2023-04-25 12:48   ` Petr Vorel
  1 sibling, 1 reply; 19+ messages in thread
From: Martin Doucha @ 2023-04-24  9:09 UTC (permalink / raw)
  To: ltp

Sorry, I forgot to update .gitignore and add the test to the KVM 
runfile. I'll send a v2 for the last patch either when the first four 
patches get merged or together with any other fixes.

-- 
Martin Doucha   mdoucha@suse.cz
QA Engineer for Software Maintenance
SUSE LINUX, s.r.o.
CORSO IIa
Krizikova 148/34
186 00 Prague 8
Czech Republic


-- 
Mailing list info: https://lists.linux.it/listinfo/ltp

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [LTP] [PATCH 5/5] Add test for CVE 2021-3653
  2023-04-21 14:57 ` [LTP] [PATCH 5/5] Add test for CVE 2021-3653 Martin Doucha
  2023-04-24  9:09   ` Martin Doucha
@ 2023-04-25 12:48   ` Petr Vorel
  1 sibling, 0 replies; 19+ messages in thread
From: Petr Vorel @ 2023-04-25 12:48 UTC (permalink / raw)
  To: Martin Doucha; +Cc: ltp, nstange

Hi Martin,

> +/*\
> + * CVE 2021-3653
> + *
> + * Check that KVM either blocks enabling virtual interrupt controller (AVIC)
> + * in nested VMs or correctly sets up the required memory address translation.
> + * If AVIC is enabled without address translation in the host kernel,
> + * the nested VM will be able to read and write an arbitrary physical memory
> + * page specified by the parent VM. Unauthorized memory access fixed in:
> + *
> + *  commit 0f923e07124df069ba68d8bb12324398f4b6b709
> + *  Author: Maxim Levitsky <mlevitsk@redhat.com>
> + *  Date:   Thu Jul 15 01:56:24 2021 +0300
> + *
> + *  KVM: nSVM: avoid picking up unsupported bits from L2 in int_ctl (CVE-2021-3653)
very nit: I'd prefer just:
0f923e07124d ("KVM: nSVM: avoid picking up unsupported bits from L2 in int_ctl (CVE-2021-3653)")

which looks better in docparse generated output.

Kind regards,
Petr


-- 
Mailing list info: https://lists.linux.it/listinfo/ltp

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [LTP] [PATCH 3/5] KVM: Fix infinite loop in ptr2hex()
  2023-04-21 14:57 ` [LTP] [PATCH 3/5] KVM: Fix infinite loop in ptr2hex() Martin Doucha
@ 2023-04-26  7:57   ` Cyril Hrubis
  2023-04-26  8:09     ` Martin Doucha
  2023-05-02 14:26   ` Petr Vorel
  1 sibling, 1 reply; 19+ messages in thread
From: Cyril Hrubis @ 2023-04-26  7:57 UTC (permalink / raw)
  To: Martin Doucha; +Cc: ltp, nstange

Hi!
> Contrary to the C standard, (x >> 64) is equivalent to (x >> 0) on x86.

As far as I can tell right shift larger than the left operand size are
undefined, at least in C99.

Other than that the patch looks good.

Reviewed-by: Cyril Hrubis <chrubis@suse.cz>

> This can cause infinite loop in ptr2hex() if the highest nibble
> in the second argument is non-zero. Use temporary variable to avoid
> bit-shifting by large values.
> 
> Signed-off-by: Martin Doucha <mdoucha@suse.cz>
> ---
>  testcases/kernel/kvm/lib_guest.c | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
> diff --git a/testcases/kernel/kvm/lib_guest.c b/testcases/kernel/kvm/lib_guest.c
> index d237293fc..d3b2ac3d5 100644
> --- a/testcases/kernel/kvm/lib_guest.c
> +++ b/testcases/kernel/kvm/lib_guest.c
> @@ -82,7 +82,7 @@ char *ptr2hex(char *dest, uintptr_t val)
>  	uintptr_t tmp;
>  	char *ret = dest;
>  
> -	for (i = 4; val >> i; i += 4)
> +	for (i = 4, tmp = val >> 4; tmp; i += 4, tmp >>= 4)
>  		;
>  
>  	do {
> -- 
> 2.40.0
> 
> 
> -- 
> Mailing list info: https://lists.linux.it/listinfo/ltp

-- 
Cyril Hrubis
chrubis@suse.cz

-- 
Mailing list info: https://lists.linux.it/listinfo/ltp

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [LTP] [PATCH 1/5] KVM: Add helper functions for accessing GDT/LDT
  2023-04-21 14:57 [LTP] [PATCH 1/5] KVM: Add helper functions for accessing GDT/LDT Martin Doucha
                   ` (3 preceding siblings ...)
  2023-04-21 14:57 ` [LTP] [PATCH 5/5] Add test for CVE 2021-3653 Martin Doucha
@ 2023-04-26  8:08 ` Cyril Hrubis
  2023-04-26  9:19   ` Martin Doucha
  2023-05-02 14:24 ` Petr Vorel
  5 siblings, 1 reply; 19+ messages in thread
From: Cyril Hrubis @ 2023-04-26  8:08 UTC (permalink / raw)
  To: Martin Doucha; +Cc: ltp, nstange

Hi!
I did have a look at the patchset and I do not see anything wrong, but I
cannot really review the code without spending a weeks reading the AMD
CPU manuals. What about sending v2 with the runtest file change and
CCing the x86@kernel.org mailing list? Hopefully some of the kernel devs
on that list will have a bit of time to check the actual correctness of
the code...

-- 
Cyril Hrubis
chrubis@suse.cz

-- 
Mailing list info: https://lists.linux.it/listinfo/ltp

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [LTP] [PATCH 3/5] KVM: Fix infinite loop in ptr2hex()
  2023-04-26  7:57   ` Cyril Hrubis
@ 2023-04-26  8:09     ` Martin Doucha
  0 siblings, 0 replies; 19+ messages in thread
From: Martin Doucha @ 2023-04-26  8:09 UTC (permalink / raw)
  To: Cyril Hrubis; +Cc: ltp

On 26. 04. 23 9:57, Cyril Hrubis wrote:
> Hi!
>> Contrary to the C standard, (x >> 64) is equivalent to (x >> 0) on x86.
> 
> As far as I can tell right shift larger than the left operand size are
> undefined, at least in C99.
> 
> Other than that the patch looks good.

You're right, sorry. Should I send a v2 with fixed commit message?

-- 
Martin Doucha   mdoucha@suse.cz
QA Engineer for Software Maintenance
SUSE LINUX, s.r.o.
CORSO IIa
Krizikova 148/34
186 00 Prague 8
Czech Republic


-- 
Mailing list info: https://lists.linux.it/listinfo/ltp

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [LTP] [PATCH 1/5] KVM: Add helper functions for accessing GDT/LDT
  2023-04-26  8:08 ` [LTP] [PATCH 1/5] KVM: Add helper functions for accessing GDT/LDT Cyril Hrubis
@ 2023-04-26  9:19   ` Martin Doucha
  2023-04-26  9:20     ` Cyril Hrubis
  0 siblings, 1 reply; 19+ messages in thread
From: Martin Doucha @ 2023-04-26  9:19 UTC (permalink / raw)
  To: Cyril Hrubis; +Cc: ltp, nstange

On 26. 04. 23 10:08, Cyril Hrubis wrote:
> Hi!
> I did have a look at the patchset and I do not see anything wrong, but I
> cannot really review the code without spending a weeks reading the AMD
> CPU manuals. What about sending v2 with the runtest file change and
> CCing the x86@kernel.org mailing list? Hopefully some of the kernel devs
> on that list will have a bit of time to check the actual correctness of
> the code...

I've CC'd Nicolai who wrote the original KVM tests. If he approves, I'd 
say it'd be good enough to merge. I've tested the code on AMD machines and 
it works and successfully reproduces the CVE on affected kernels.

-- 
Martin Doucha   mdoucha@suse.cz
QA Engineer for Software Maintenance
SUSE LINUX, s.r.o.
CORSO IIa
Krizikova 148/34
186 00 Prague 8
Czech Republic


-- 
Mailing list info: https://lists.linux.it/listinfo/ltp

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [LTP] [PATCH 1/5] KVM: Add helper functions for accessing GDT/LDT
  2023-04-26  9:19   ` Martin Doucha
@ 2023-04-26  9:20     ` Cyril Hrubis
  0 siblings, 0 replies; 19+ messages in thread
From: Cyril Hrubis @ 2023-04-26  9:20 UTC (permalink / raw)
  To: Martin Doucha; +Cc: ltp, nstange

Hi!
> > I did have a look at the patchset and I do not see anything wrong, but I
> > cannot really review the code without spending a weeks reading the AMD
> > CPU manuals. What about sending v2 with the runtest file change and
> > CCing the x86@kernel.org mailing list? Hopefully some of the kernel devs
> > on that list will have a bit of time to check the actual correctness of
> > the code...
> 
> I've CC'd Nicolai who wrote the original KVM tests. If he approves, I'd 
> say it'd good enough to merge. I've tested the code on AMD machines and 
> it works and successfully reproduces the CVE on affected kernels.

Fair enough, Nicolai, can you add your Reviewed-by?

-- 
Cyril Hrubis
chrubis@suse.cz

-- 
Mailing list info: https://lists.linux.it/listinfo/ltp

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [LTP] [PATCH 1/5] KVM: Add helper functions for accessing GDT/LDT
  2023-04-21 14:57 [LTP] [PATCH 1/5] KVM: Add helper functions for accessing GDT/LDT Martin Doucha
                   ` (4 preceding siblings ...)
  2023-04-26  8:08 ` [LTP] [PATCH 1/5] KVM: Add helper functions for accessing GDT/LDT Cyril Hrubis
@ 2023-05-02 14:24 ` Petr Vorel
  5 siblings, 0 replies; 19+ messages in thread
From: Petr Vorel @ 2023-05-02 14:24 UTC (permalink / raw)
  To: Martin Doucha; +Cc: ltp, nstange

Hi Martin,

Acked-by: Petr Vorel <pvorel@suse.cz>
(LGTM, but my knowledge here is very limited)

Kind regards,
Petr

-- 
Mailing list info: https://lists.linux.it/listinfo/ltp

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [LTP] [PATCH 2/5] KVM: Add helper function for reading x86 segment registers
  2023-04-21 14:57 ` [LTP] [PATCH 2/5] KVM: Add helper function for reading x86 segment registers Martin Doucha
@ 2023-05-02 14:24   ` Petr Vorel
  0 siblings, 0 replies; 19+ messages in thread
From: Petr Vorel @ 2023-05-02 14:24 UTC (permalink / raw)
  To: Martin Doucha; +Cc: ltp, nstange

Hi Martin,

Acked-by: Petr Vorel <pvorel@suse.cz>
(LGTM, but my knowledge here is very limited)

Kind regards,
Petr

-- 
Mailing list info: https://lists.linux.it/listinfo/ltp

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [LTP] [PATCH 3/5] KVM: Fix infinite loop in ptr2hex()
  2023-04-21 14:57 ` [LTP] [PATCH 3/5] KVM: Fix infinite loop in ptr2hex() Martin Doucha
  2023-04-26  7:57   ` Cyril Hrubis
@ 2023-05-02 14:26   ` Petr Vorel
  2023-05-02 19:46     ` Petr Vorel
  1 sibling, 1 reply; 19+ messages in thread
From: Petr Vorel @ 2023-05-02 14:26 UTC (permalink / raw)
  To: Martin Doucha; +Cc: ltp, nstange

Hi Martin,

Reviewed-by: Petr Vorel <pvorel@suse.cz>

Kind regards,
Petr

-- 
Mailing list info: https://lists.linux.it/listinfo/ltp

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [LTP] [PATCH 4/5] Add KVM helper functions for AMD SVM
  2023-04-21 14:57 ` [LTP] [PATCH 4/5] Add KVM helper functions for AMD SVM Martin Doucha
@ 2023-05-02 14:29   ` Petr Vorel
  0 siblings, 0 replies; 19+ messages in thread
From: Petr Vorel @ 2023-05-02 14:29 UTC (permalink / raw)
  To: Martin Doucha; +Cc: ltp, nstange

Hi Martin,

Acked-by: Petr Vorel <pvorel@suse.cz>

Kind regards,
Petr

-- 
Mailing list info: https://lists.linux.it/listinfo/ltp

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [LTP] [PATCH 5/5] Add test for CVE 2021-3653
  2023-04-24  9:09   ` Martin Doucha
@ 2023-05-02 14:32     ` Petr Vorel
  2023-05-03 12:42       ` Petr Vorel
  0 siblings, 1 reply; 19+ messages in thread
From: Petr Vorel @ 2023-05-02 14:32 UTC (permalink / raw)
  To: Martin Doucha; +Cc: ltp

Hi Martin,

Reviewed-by: Petr Vorel <pvorel@suse.cz>

> Sorry, I forgot to update .gitignore and add the test to the KVM runfile.
> I'll send a v2 for the last patch either when the first four patches get
> merged or together with any other fixes.
+1, please, anybody who merges this, don't forget to add these two changes.

Kind regards,
Petr

-- 
Mailing list info: https://lists.linux.it/listinfo/ltp

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [LTP] [PATCH 3/5] KVM: Fix infinite loop in ptr2hex()
  2023-05-02 14:26   ` Petr Vorel
@ 2023-05-02 19:46     ` Petr Vorel
  0 siblings, 0 replies; 19+ messages in thread
From: Petr Vorel @ 2023-05-02 19:46 UTC (permalink / raw)
  To: Martin Doucha, ltp, nstange

Hi Martin,

Thanks a lot, merged with amended commit message.

Kind regards,
Petr

-- 
Mailing list info: https://lists.linux.it/listinfo/ltp

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [LTP] [PATCH 5/5] Add test for CVE 2021-3653
  2023-05-02 14:32     ` Petr Vorel
@ 2023-05-03 12:42       ` Petr Vorel
  0 siblings, 0 replies; 19+ messages in thread
From: Petr Vorel @ 2023-05-03 12:42 UTC (permalink / raw)
  To: Martin Doucha, ltp; +Cc: Nicolai Stange

Hi Martin,

because Nicolai is busy, patches looks good and Cyril also acked them [1], I
merge first four patches.

The test itself introduces warnings. Could you please have look if they could be
easily fixed?

kvm_svm01.c: In function ‘guest_main’:
kvm_svm01.c:42:13: warning: array subscript 0 is outside array bounds of ‘uint32_t[0]’ {aka ‘unsigned int[]’} [-Warray-bounds]
   42 |         if (*avic_ptr != 0xaaaaaaaa)
      |             ^~~~~~~~~
kvm_svm01.c:45:9: warning: array subscript 0 is outside array bounds of ‘uint32_t[0]’ {aka ‘unsigned int[]’} [-Warray-bounds]
   45 |         *avic_ptr = AVIC_TEST_VAL;
      |         ^~~~~~~~~
kvm_svm01.c: In function ‘main’:
kvm_svm01.c:93:13: warning: array subscript 0 is outside array bounds of ‘uint32_t[0]’ {aka ‘unsigned int[]’} [-Warray-bounds]
   93 |         if (*avic_ptr != AVIC_TEST_VAL) {

Kind regards,
Petr

[1] https://lore.kernel.org/ltp/ZFIp4Sulcy20GWkc@rei/

-- 
Mailing list info: https://lists.linux.it/listinfo/ltp

^ permalink raw reply	[flat|nested] 19+ messages in thread

end of thread, other threads:[~2023-05-03 12:42 UTC | newest]

Thread overview: 19+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-04-21 14:57 [LTP] [PATCH 1/5] KVM: Add helper functions for accessing GDT/LDT Martin Doucha
2023-04-21 14:57 ` [LTP] [PATCH 2/5] KVM: Add helper function for reading x86 segment registers Martin Doucha
2023-05-02 14:24   ` Petr Vorel
2023-04-21 14:57 ` [LTP] [PATCH 3/5] KVM: Fix infinite loop in ptr2hex() Martin Doucha
2023-04-26  7:57   ` Cyril Hrubis
2023-04-26  8:09     ` Martin Doucha
2023-05-02 14:26   ` Petr Vorel
2023-05-02 19:46     ` Petr Vorel
2023-04-21 14:57 ` [LTP] [PATCH 4/5] Add KVM helper functions for AMD SVM Martin Doucha
2023-05-02 14:29   ` Petr Vorel
2023-04-21 14:57 ` [LTP] [PATCH 5/5] Add test for CVE 2021-3653 Martin Doucha
2023-04-24  9:09   ` Martin Doucha
2023-05-02 14:32     ` Petr Vorel
2023-05-03 12:42       ` Petr Vorel
2023-04-25 12:48   ` Petr Vorel
2023-04-26  8:08 ` [LTP] [PATCH 1/5] KVM: Add helper functions for accessing GDT/LDT Cyril Hrubis
2023-04-26  9:19   ` Martin Doucha
2023-04-26  9:20     ` Cyril Hrubis
2023-05-02 14:24 ` Petr Vorel

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.