All of lore.kernel.org
 help / color / mirror / Atom feed
* [LTP] [PATCH 1/2] kvm: Add VM memory access helper functions to host library
@ 2022-05-06 14:21 Martin Doucha
  2022-05-06 14:21 ` [LTP] [PATCH 2/2] kvm: Allow running multiple iterations of a test Martin Doucha
  2022-05-19 14:28 ` [LTP] [PATCH 1/2] kvm: Add VM memory access helper functions to host library Cyril Hrubis
  0 siblings, 2 replies; 4+ messages in thread
From: Martin Doucha @ 2022-05-06 14:21 UTC (permalink / raw)
  To: ltp

Signed-off-by: Martin Doucha <mdoucha@suse.cz>
---
 testcases/kernel/kvm/include/kvm_host.h |  28 +++++-
 testcases/kernel/kvm/lib_host.c         | 112 +++++++++++++++++++-----
 2 files changed, 117 insertions(+), 23 deletions(-)

diff --git a/testcases/kernel/kvm/include/kvm_host.h b/testcases/kernel/kvm/include/kvm_host.h
index 52cc3f5e9..2359944fd 100644
--- a/testcases/kernel/kvm/include/kvm_host.h
+++ b/testcases/kernel/kvm/include/kvm_host.h
@@ -45,12 +45,13 @@
 
 #define MIN_FREE_RAM (10 * 1024 * 1024)
 #define DEFAULT_RAM_SIZE (16 * 1024 * 1024)
+#define MAX_KVM_MEMSLOTS 8
 
 struct tst_kvm_instance {
 	int vm_fd, vcpu_fd;
 	struct kvm_run *vcpu_info;
 	size_t vcpu_info_size;
-	void *ram;
+	struct kvm_userspace_memory_region ram[MAX_KVM_MEMSLOTS];
 	struct tst_kvm_result *result;
 };
 
@@ -82,8 +83,29 @@ void tst_kvm_validate_result(int value);
  * to free() it. Any extra space added at the beginning or end for page
  * alignment will be writable.
  */
-void *tst_kvm_alloc_memory(int vm, unsigned int slot, uint64_t baseaddr,
-	size_t size, unsigned int flags);
+void *tst_kvm_alloc_memory(struct tst_kvm_instance *inst, unsigned int slot,
+	uint64_t baseaddr, size_t size, unsigned int flags);
+
+/*
+ * Translate VM virtual memory address to the corresponding physical address.
+ * Returns 0 if the virtual address is unmapped or otherwise invalid.
+ */
+uint64_t tst_kvm_get_phys_address(const struct tst_kvm_instance *inst,
+	uint64_t addr);
+
+/*
+ * Find the struct tst_kvm_instance memory slot ID for the given virtual
+ * or physical VM memory address. Returns -1 if the address is not backed
+ * by any memory buffer.
+ */
+int tst_kvm_find_phys_memslot(const struct tst_kvm_instance *inst,
+	uint64_t paddr);
+int tst_kvm_find_memslot(const struct tst_kvm_instance *inst, uint64_t addr);
+
+/*
+ * Convert VM virtual memory address to a directly usable pointer.
+ */
+void *tst_kvm_get_memptr(const struct tst_kvm_instance *inst, uint64_t addr);
 
 /*
  * Find CPUIDs supported by KVM. x86_64 tests must set non-default CPUID,
diff --git a/testcases/kernel/kvm/lib_host.c b/testcases/kernel/kvm/lib_host.c
index a52722b7b..b8994f34e 100644
--- a/testcases/kernel/kvm/lib_host.c
+++ b/testcases/kernel/kvm/lib_host.c
@@ -36,15 +36,85 @@ void tst_kvm_validate_result(int value)
 	tst_brk(TBROK, "KVM test returned invalid result value %d", value);
 }
 
+uint64_t tst_kvm_get_phys_address(const struct tst_kvm_instance *inst,
+	uint64_t addr)
+{
+	struct kvm_translation trans = { .linear_address = addr };
+
+	TEST(ioctl(inst->vcpu_fd, KVM_TRANSLATE, &trans));
+
+	/* ioctl(KVM_TRANSLATE) is not implemented for this arch */
+	if (TST_RET == -1 && TST_ERR == EINVAL)
+		return addr;
+
+	if (TST_RET == -1)
+		tst_brk(TBROK | TTERRNO, "ioctl(KVM_TRANSLATE) failed");
+
+	if (TST_RET) {
+		tst_brk(TBROK | TTERRNO,
+			"Invalid ioctl(KVM_TRANSLATE) return value");
+	}
+
+	return trans.valid ? trans.physical_address : 0;
+}
+
+int tst_kvm_find_phys_memslot(const struct tst_kvm_instance *inst,
+	uint64_t paddr)
+{
+	int i;
+	uint64_t base;
+
+	for (i = 0; i < MAX_KVM_MEMSLOTS; i++) {
+		if (!inst->ram[i].userspace_addr)
+			continue;
+
+		base = inst->ram[i].guest_phys_addr;
+
+		if (paddr >= base && paddr - base < inst->ram[i].memory_size)
+			return i;
+	}
+
+	return -1;
+}
+
+int tst_kvm_find_memslot(const struct tst_kvm_instance *inst, uint64_t addr)
+{
+	addr = tst_kvm_get_phys_address(inst, addr);
+
+	if (!addr)
+		return -1;
+
+	return tst_kvm_find_phys_memslot(inst, addr);
+}
+
+void *tst_kvm_get_memptr(const struct tst_kvm_instance *inst, uint64_t addr)
+{
+	int slot;
+	char *ret;
+
+	addr = tst_kvm_get_phys_address(inst, addr);
+
+	if (!addr)
+		return NULL;
+
+	slot = tst_kvm_find_phys_memslot(inst, addr);
+
+	if (slot < 0)
+		return NULL;
+
+	ret = (char *)(uintptr_t)inst->ram[slot].userspace_addr;
+	return ret + (addr - inst->ram[slot].guest_phys_addr);
+}
+
 void tst_kvm_print_result(const struct tst_kvm_instance *inst)
 {
 	int ttype;
 	const struct tst_kvm_result *result = inst->result;
-	const char *file = inst->ram;
+	const char *file;
 
 	tst_kvm_validate_result(result->result);
 	ttype = TTYPE_RESULT(result->result);
-	file += result->file_addr;
+	file = tst_kvm_get_memptr(inst, result->file_addr);
 
 	if (ttype == TBROK)
 		tst_brk_(file, result->lineno, ttype, "%s", result->message);
@@ -52,26 +122,29 @@ void tst_kvm_print_result(const struct tst_kvm_instance *inst)
 		tst_res_(file, result->lineno, ttype, "%s", result->message);
 }
 
-void *tst_kvm_alloc_memory(int vm, unsigned int slot, uint64_t baseaddr,
-	size_t size, unsigned int flags)
+void *tst_kvm_alloc_memory(struct tst_kvm_instance *inst, unsigned int slot,
+	uint64_t baseaddr, size_t size, unsigned int flags)
 {
-	size_t pagesize;
-	void *ret;
+	size_t pagesize, offset;
+	char *ret;
 	struct kvm_userspace_memory_region memslot = {
 		.slot = slot,
 		.flags = flags
 	};
 
+	if (slot >= MAX_KVM_MEMSLOTS)
+		tst_brk(TBROK, "Invalid KVM memory slot %u", slot);
+
 	pagesize = SAFE_SYSCONF(_SC_PAGESIZE);
-	size += (baseaddr % pagesize) + pagesize - 1;
-	baseaddr -= baseaddr % pagesize;
-	size -= size % pagesize;
+	offset = baseaddr % pagesize;
+	size = LTP_ALIGN(size + offset, pagesize);
 	ret = tst_alloc(size);
 
-	memslot.guest_phys_addr = baseaddr;
+	memslot.guest_phys_addr = baseaddr - offset;
 	memslot.memory_size = size;
 	memslot.userspace_addr = (uintptr_t)ret;
-	SAFE_IOCTL(vm, KVM_SET_USER_MEMORY_REGION, &memslot);
+	SAFE_IOCTL(inst->vm_fd, KVM_SET_USER_MEMORY_REGION, &memslot);
+	inst->ram[slot] = memslot;
 	return ret;
 }
 
@@ -108,7 +181,7 @@ void tst_kvm_create_instance(struct tst_kvm_instance *inst, size_t ram_size)
 {
 	int sys_fd;
 	size_t pagesize, result_pageaddr = KVM_RESULT_BASEADDR;
-	char *vm_result, *reset_ptr;
+	char *buf, *reset_ptr;
 	struct kvm_cpuid2 *cpuid_data;
 	const size_t payload_size = kvm_payload_end - kvm_payload_start;
 
@@ -122,8 +195,7 @@ void tst_kvm_create_instance(struct tst_kvm_instance *inst, size_t ram_size)
 
 	if (payload_size + MIN_FREE_RAM > ram_size - VM_KERNEL_BASEADDR) {
 		ram_size = payload_size + MIN_FREE_RAM + VM_KERNEL_BASEADDR;
-		ram_size += 1024 * 1024 - 1;
-		ram_size -= ram_size % (1024 * 1024);
+		ram_size = LTP_ALIGN(ram_size, 1024 * 1024);
 		tst_res(TWARN, "RAM size increased to %zu bytes", ram_size);
 	}
 
@@ -148,15 +220,15 @@ void tst_kvm_create_instance(struct tst_kvm_instance *inst, size_t ram_size)
 	inst->vcpu_info = SAFE_MMAP(NULL, inst->vcpu_info_size,
 		PROT_READ | PROT_WRITE, MAP_SHARED, inst->vcpu_fd, 0);
 
-	inst->ram = tst_kvm_alloc_memory(inst->vm_fd, 0, 0, ram_size, 0);
-	vm_result = tst_kvm_alloc_memory(inst->vm_fd, 1, KVM_RESULT_BASEADDR,
+	buf = tst_kvm_alloc_memory(inst, 0, 0, ram_size, 0);
+	memcpy(buf + VM_KERNEL_BASEADDR, kvm_payload_start, payload_size);
+	buf = tst_kvm_alloc_memory(inst, 1, KVM_RESULT_BASEADDR,
 		KVM_RESULT_SIZE, 0);
-	memset(vm_result, 0, KVM_RESULT_SIZE);
-	memcpy(inst->ram + VM_KERNEL_BASEADDR, kvm_payload_start, payload_size);
+	memset(buf, 0, KVM_RESULT_SIZE);
 
-	reset_ptr = vm_result + (VM_RESET_BASEADDR % pagesize);
+	reset_ptr = buf + (VM_RESET_BASEADDR % pagesize);
 	memcpy(reset_ptr, tst_kvm_reset_code, sizeof(tst_kvm_reset_code));
-	inst->result = (struct tst_kvm_result *)(vm_result +
+	inst->result = (struct tst_kvm_result *)(buf +
 		(KVM_RESULT_BASEADDR % pagesize));
 	inst->result->result = KVM_TNONE;
 	inst->result->message[0] = '\0';
-- 
2.36.0


-- 
Mailing list info: https://lists.linux.it/listinfo/ltp

^ permalink raw reply related	[flat|nested] 4+ messages in thread

* [LTP] [PATCH 2/2] kvm: Allow running multiple iterations of a test
  2022-05-06 14:21 [LTP] [PATCH 1/2] kvm: Add VM memory access helper functions to host library Martin Doucha
@ 2022-05-06 14:21 ` Martin Doucha
  2022-05-19 12:52   ` Cyril Hrubis
  2022-05-19 14:28 ` [LTP] [PATCH 1/2] kvm: Add VM memory access helper functions to host library Cyril Hrubis
  1 sibling, 1 reply; 4+ messages in thread
From: Martin Doucha @ 2022-05-06 14:21 UTC (permalink / raw)
  To: ltp

Signed-off-by: Martin Doucha <mdoucha@suse.cz>
---

This fixes running the test with "-i N" for N>1. It's a lazy fix but
the alternative would be saving the initial CPU state in up to 3 separate
buffers and then restoring it after each test run. And the number of buffers
differs on different archs. And ARM64 has an extra ioctl() instead...

I also wonder whether I should implement LTP library function to free
individual guarded buffers because I don't like calling tst_free_all()
directly.

 testcases/kernel/kvm/lib_host.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/testcases/kernel/kvm/lib_host.c b/testcases/kernel/kvm/lib_host.c
index b8994f34e..2782e68b0 100644
--- a/testcases/kernel/kvm/lib_host.c
+++ b/testcases/kernel/kvm/lib_host.c
@@ -269,16 +269,20 @@ void tst_kvm_destroy_instance(struct tst_kvm_instance *inst)
 		SAFE_CLOSE(inst->vcpu_fd);
 
 	SAFE_CLOSE(inst->vm_fd);
+	memset(inst->ram, 0, sizeof(inst->ram));
 }
 
 void tst_kvm_setup(void)
 {
-	tst_kvm_create_instance(&test_vm, DEFAULT_RAM_SIZE);
+
 }
 
 void tst_kvm_run(void)
 {
+	tst_kvm_create_instance(&test_vm, DEFAULT_RAM_SIZE);
 	tst_kvm_run_instance(&test_vm);
+	tst_kvm_destroy_instance(&test_vm);
+	tst_free_all();
 }
 
 void tst_kvm_cleanup(void)
-- 
2.36.0


-- 
Mailing list info: https://lists.linux.it/listinfo/ltp

^ permalink raw reply related	[flat|nested] 4+ messages in thread

* Re: [LTP] [PATCH 2/2] kvm: Allow running multiple iterations of a test
  2022-05-06 14:21 ` [LTP] [PATCH 2/2] kvm: Allow running multiple iterations of a test Martin Doucha
@ 2022-05-19 12:52   ` Cyril Hrubis
  0 siblings, 0 replies; 4+ messages in thread
From: Cyril Hrubis @ 2022-05-19 12:52 UTC (permalink / raw)
  To: Martin Doucha; +Cc: ltp

Hi!
> This fixes running the test with "-i N" for N>1. It's a lazy fix but
> the alternative would be saving the initial CPU state in up to 3 separate
> buffers and then restoring it after each test run. And the number of buffers
> differs on different archs. And ARM64 has an extra ioctl() instead...
> 
> I also wonder whether I should implement LTP library function to free
> individual guarded buffers because I don't like calling tst_free_all()
> directly.

Well I guess that we can do that later on. It should be easy enough if
we switch to a double linked list and do a linear search, after all I
doubt that we will allocate more than a few buffers this way.

>  testcases/kernel/kvm/lib_host.c | 6 +++++-
>  1 file changed, 5 insertions(+), 1 deletion(-)
> 
> diff --git a/testcases/kernel/kvm/lib_host.c b/testcases/kernel/kvm/lib_host.c
> index b8994f34e..2782e68b0 100644
> --- a/testcases/kernel/kvm/lib_host.c
> +++ b/testcases/kernel/kvm/lib_host.c
> @@ -269,16 +269,20 @@ void tst_kvm_destroy_instance(struct tst_kvm_instance *inst)
>  		SAFE_CLOSE(inst->vcpu_fd);
>  
>  	SAFE_CLOSE(inst->vm_fd);
> +	memset(inst->ram, 0, sizeof(inst->ram));
>  }
>  
>  void tst_kvm_setup(void)
>  {
> -	tst_kvm_create_instance(&test_vm, DEFAULT_RAM_SIZE);
> +
>  }
>  
>  void tst_kvm_run(void)
>  {
> +	tst_kvm_create_instance(&test_vm, DEFAULT_RAM_SIZE);
>  	tst_kvm_run_instance(&test_vm);
> +	tst_kvm_destroy_instance(&test_vm);
> +	tst_free_all();
>  }

Pushed, thanks.

-- 
Cyril Hrubis
chrubis@suse.cz

-- 
Mailing list info: https://lists.linux.it/listinfo/ltp

^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [LTP] [PATCH 1/2] kvm: Add VM memory access helper functions to host library
  2022-05-06 14:21 [LTP] [PATCH 1/2] kvm: Add VM memory access helper functions to host library Martin Doucha
  2022-05-06 14:21 ` [LTP] [PATCH 2/2] kvm: Allow running multiple iterations of a test Martin Doucha
@ 2022-05-19 14:28 ` Cyril Hrubis
  1 sibling, 0 replies; 4+ messages in thread
From: Cyril Hrubis @ 2022-05-19 14:28 UTC (permalink / raw)
  To: Martin Doucha; +Cc: ltp

Hi!
Pushed as well, since the change affects only the newly added test and
looked good to me.

-- 
Cyril Hrubis
chrubis@suse.cz

-- 
Mailing list info: https://lists.linux.it/listinfo/ltp

^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2022-05-19 14:26 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-05-06 14:21 [LTP] [PATCH 1/2] kvm: Add VM memory access helper functions to host library Martin Doucha
2022-05-06 14:21 ` [LTP] [PATCH 2/2] kvm: Allow running multiple iterations of a test Martin Doucha
2022-05-19 12:52   ` Cyril Hrubis
2022-05-19 14:28 ` [LTP] [PATCH 1/2] kvm: Add VM memory access helper functions to host library Cyril Hrubis

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.