From: Sean Christopherson <seanjc@google.com>
To: Paolo Bonzini <pbonzini@redhat.com>,
	Christian Borntraeger <borntraeger@de.ibm.com>,
	Janosch Frank <frankja@linux.ibm.com>
Cc: David Hildenbrand <david@redhat.com>,
	Cornelia Huck <cohuck@redhat.com>,
	Claudio Imbrenda <imbrenda@linux.ibm.com>,
	kvm@vger.kernel.org, linux-kernel@vger.kernel.org,
	Peter Xu <peterx@redhat.com>,
	Sean Christopherson <seanjc@google.com>,
	Peter Shier <pshier@google.com>
Subject: [PATCH 03/19] KVM: selftests: Unconditionally use memslot 0 when loading elf binary
Date: Tue, 22 Jun 2021 13:05:13 -0700	[thread overview]
Message-ID: <20210622200529.3650424-4-seanjc@google.com> (raw)
In-Reply-To: <20210622200529.3650424-1-seanjc@google.com>

Use memslot '0' for all vm_vaddr_alloc() calls when loading the test
binary.  This is the first step toward adding a helper to handle page
allocations with a default value for the target memslot.

No functional change intended.
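
As a rough illustration of where the series is headed, such a default-memslot
helper could simply hardcode memslot 0 for both the data pages and the page
tables (a minimal sketch against the current five-argument vm_vaddr_alloc();
the vm_vaddr_alloc_memslot0() name is hypothetical, not something this patch
adds):

  #include "kvm_util.h"

  /*
   * Allocate guest virtual memory, backing both the data pages and the
   * page tables with memslot 0, as kvm_vm_elf_load() now hardcodes below.
   */
  static inline vm_vaddr_t vm_vaddr_alloc_memslot0(struct kvm_vm *vm,
  						   size_t sz,
  						   vm_vaddr_t vaddr_min)
  {
  	return vm_vaddr_alloc(vm, sz, vaddr_min, 0, 0);
  }

With a helper along those lines, call sites such as the
vm_vaddr_alloc(vm, seg_size, seg_vstart, 0, 0) in kvm_vm_elf_load() below
could drop the trailing memslot arguments entirely.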

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 tools/testing/selftests/kvm/dirty_log_test.c         | 2 +-
 tools/testing/selftests/kvm/hardware_disable_test.c  | 2 +-
 tools/testing/selftests/kvm/include/kvm_util.h       | 3 +--
 tools/testing/selftests/kvm/lib/elf.c                | 6 ++----
 tools/testing/selftests/kvm/lib/kvm_util.c           | 2 +-
 tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c | 2 +-
 6 files changed, 7 insertions(+), 10 deletions(-)

diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index b4d24f50aca6..9026fa4ea133 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -680,7 +680,7 @@ static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid,
 	pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
 
 	vm = vm_create(mode, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);
-	kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
+	kvm_vm_elf_load(vm, program_invocation_name);
 #ifdef __x86_64__
 	vm_create_irqchip(vm);
 #endif
diff --git a/tools/testing/selftests/kvm/hardware_disable_test.c b/tools/testing/selftests/kvm/hardware_disable_test.c
index 4b8db3bce610..b21c69a56daa 100644
--- a/tools/testing/selftests/kvm/hardware_disable_test.c
+++ b/tools/testing/selftests/kvm/hardware_disable_test.c
@@ -105,7 +105,7 @@ static void run_test(uint32_t run)
 		CPU_SET(i, &cpu_set);
 
 	vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
-	kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
+	kvm_vm_elf_load(vm, program_invocation_name);
 	vm_create_irqchip(vm);
 
 	pr_debug("%s: [%d] start vcpus\n", __func__, run);
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index 35739567189e..59608b17707d 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -98,8 +98,7 @@ uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm);
 int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva,
 		       size_t len);
 
-void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename,
-		     uint32_t data_memslot, uint32_t pgd_memslot);
+void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename);
 
 void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
 
diff --git a/tools/testing/selftests/kvm/lib/elf.c b/tools/testing/selftests/kvm/lib/elf.c
index bc75a91e00a6..edeeaf73d3b1 100644
--- a/tools/testing/selftests/kvm/lib/elf.c
+++ b/tools/testing/selftests/kvm/lib/elf.c
@@ -111,8 +111,7 @@ static void elfhdr_get(const char *filename, Elf64_Ehdr *hdrp)
  * by the image and it needs to have sufficient available physical pages, to
  * back the virtual pages used to load the image.
  */
-void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename,
-	uint32_t data_memslot, uint32_t pgd_memslot)
+void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename)
 {
 	off_t offset, offset_rv;
 	Elf64_Ehdr hdr;
@@ -164,8 +163,7 @@ void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename,
 		seg_vend |= vm->page_size - 1;
 		size_t seg_size = seg_vend - seg_vstart + 1;
 
-		vm_vaddr_t vaddr = vm_vaddr_alloc(vm, seg_size, seg_vstart,
-			data_memslot, pgd_memslot);
+		vm_vaddr_t vaddr = vm_vaddr_alloc(vm, seg_size, seg_vstart, 0, 0);
 		TEST_ASSERT(vaddr == seg_vstart, "Unable to allocate "
 			"virtual memory for segment at requested min addr,\n"
 			"  segment idx: %u\n"
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index a2b732cf96ea..15a8527b15db 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -365,7 +365,7 @@ struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
 	pages = vm_adjust_num_guest_pages(mode, pages);
 	vm = vm_create(mode, pages, O_RDWR);
 
-	kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
+	kvm_vm_elf_load(vm, program_invocation_name);
 
 #ifdef __x86_64__
 	vm_create_irqchip(vm);
diff --git a/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c b/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c
index 5f8dd74d415f..e5535a0c8a35 100644
--- a/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c
+++ b/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c
@@ -90,7 +90,7 @@ static struct kvm_vm *create_vm(void)
 	pages = vm_adjust_num_guest_pages(VM_MODE_DEFAULT, pages);
 	vm = vm_create(VM_MODE_DEFAULT, pages, O_RDWR);
 
-	kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
+	kvm_vm_elf_load(vm, program_invocation_name);
 	vm_create_irqchip(vm);
 
 	return vm;
-- 
2.32.0.288.g62a8d224e6-goog


Thread overview: 22+ messages
2021-06-22 20:05 [PATCH 00/19] KVM: selftests: Add x86 mmu_role test and cleanups Sean Christopherson
2021-06-22 20:05 ` [PATCH 01/19] KVM: selftests: Remove errant asm/barrier.h include to fix arm64 build Sean Christopherson
2021-06-22 20:05 ` [PATCH 02/19] KVM: selftests: Zero out the correct page in the Hyper-V features test Sean Christopherson
2021-06-22 20:05 ` [PATCH 03/19] KVM: selftests: Unconditionally use memslot 0 when loading elf binary Sean Christopherson [this message]
2021-06-22 20:05 ` [PATCH 04/19] KVM: selftests: Unconditionally use memslot 0 for x86's GDT/TSS setup Sean Christopherson
2021-06-22 20:05 ` [PATCH 05/19] KVM: selftests: Use "standard" min virtual address for Hyper-V pages Sean Christopherson
2021-06-22 20:05 ` [PATCH 06/19] KVM: selftests: Add helpers to allocate N pages of virtual memory Sean Christopherson
2021-06-22 20:05 ` [PATCH 07/19] KVM: selftests: Lower the min virtual address for misc page allocations Sean Christopherson
2021-06-22 20:05 ` [PATCH 08/19] KVM: selftests: Use alloc_page helper for x86-64's GDT/ITD/TSS allocations Sean Christopherson
2021-06-22 20:05 ` [PATCH 09/19] KVM: selftests: Use alloc page helper for xAPIC IPI test Sean Christopherson
2021-06-22 20:05 ` [PATCH 10/19] KVM: selftests: Use "standard" min virtual address for CPUID test alloc Sean Christopherson
2021-06-22 20:05 ` [PATCH 11/19] KVM: selftest: Unconditionally use memslot 0 for vaddr allocations Sean Christopherson
2021-06-22 20:05 ` [PATCH 12/19] KVM: selftests: Unconditionally use memslot '0' for page table allocations Sean Christopherson
2021-06-22 20:05 ` [PATCH 13/19] KVM: selftests: Unconditionally allocate EPT tables in memslot 0 Sean Christopherson
2021-06-22 20:05 ` [PATCH 14/19] KVM: selftests: Add wrapper to allocate page table page Sean Christopherson
2021-06-22 20:05 ` [PATCH 15/19] KVM: selftests: Rename x86's page table "address" to "pfn" Sean Christopherson
2021-06-22 20:05 ` [PATCH 16/19] KVM: selfests: Add PTE helper for x86-64 in preparation for hugepages Sean Christopherson
2021-06-22 20:05 ` [PATCH 17/19] KVM: selftests: Genericize upper level page table entry struct Sean Christopherson
2021-06-22 20:05 ` [PATCH 18/19] KVM: selftests: Add hugepage support for x86-64 Sean Christopherson
2021-06-22 20:05 ` [PATCH 19/19] KVM: sefltests: Add x86-64 test to verify MMU reacts to CPUID updates Sean Christopherson
2022-03-02  7:45   ` Like Xu
2021-06-23 13:07 ` [PATCH 00/19] KVM: selftests: Add x86 mmu_role test and cleanups Paolo Bonzini
