From: Sean Christopherson <seanjc@google.com>
To: Paolo Bonzini <pbonzini@redhat.com>
Cc: kvm@vger.kernel.org, linux-kernel@vger.kernel.org,
	Sean Christopherson <seanjc@google.com>,
	Ben Gardon <bgardon@google.com>,
	Yanan Wang <wangyanan55@huawei.com>,
	Andrew Jones <drjones@redhat.com>, Peter Xu <peterx@redhat.com>,
	Aaron Lewis <aaronlewis@google.com>
Subject: [PATCH 12/15] KVM: selftests: Fill per-vCPU struct during "perf_test" VM creation
Date: Wed, 10 Feb 2021 15:06:22 -0800	[thread overview]
Message-ID: <20210210230625.550939-13-seanjc@google.com> (raw)
In-Reply-To: <20210210230625.550939-1-seanjc@google.com>

Fill the per-vCPU args when creating the perf_test VM instead of having
the caller do so.  This helps ensure that any adjustments to the number
of pages (and thus vcpu_memory_bytes) are reflected in the per-VM args.
Automatically filling the per-vCPU args will also allow a future patch
to do the sync to the guest during creation.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
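Note for reviewers: a rough sketch of what a caller's setup path ends up
looking like with this change applied.  It is distilled from the
demand_paging_test.c hunk below; the run_test_sketch() wrapper and the way
its parameters are plumbed in are illustrative only, not code added by this
patch.

  #include "kvm_util.h"
  #include "perf_test_util.h"

  /*
   * perf_test_create_vm() now takes partition_vcpu_memory_access and fills
   * the per-vCPU args itself, so the separate perf_test_setup_vcpus() call
   * disappears from the callers.
   */
  static void run_test_sketch(enum vm_guest_mode mode, int nr_vcpus,
                              uint64_t guest_percpu_mem_size,
                              bool partition_vcpu_memory_access)
  {
          struct kvm_vm *vm;

          vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
                                   VM_MEM_SRC_ANONYMOUS,
                                   partition_vcpu_memory_access);

          /* Per-vCPU gva/gpa/pages are already populated at this point. */
          perf_test_args.wr_fract = 1;
          sync_global_to_guest(vm, perf_test_args);

          /* ... spawn vCPU worker threads and run the test here ... */

          perf_test_destroy_vm(vm);
  }
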
 .../selftests/kvm/demand_paging_test.c        |  6 +-
 .../selftests/kvm/dirty_log_perf_test.c       |  6 +-
 .../selftests/kvm/include/perf_test_util.h    |  6 +-
 .../selftests/kvm/lib/perf_test_util.c        | 74 ++++++++++---------
 .../kvm/memslot_modification_stress_test.c    |  6 +-
 5 files changed, 49 insertions(+), 49 deletions(-)

diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c
index b937a65b0e6d..00f2c795b68d 100644
--- a/tools/testing/selftests/kvm/demand_paging_test.c
+++ b/tools/testing/selftests/kvm/demand_paging_test.c
@@ -267,7 +267,8 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	int r;
 
 	vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
-				 VM_MEM_SRC_ANONYMOUS);
+				 VM_MEM_SRC_ANONYMOUS,
+				 p->partition_vcpu_memory_access);
 
 	perf_test_args.wr_fract = 1;
 
@@ -279,9 +280,6 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
 	TEST_ASSERT(vcpu_threads, "Memory allocation failed");
 
-	perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size,
-			      p->partition_vcpu_memory_access);
-
 	if (p->use_uffd) {
 		uffd_handler_threads =
 			malloc(nr_vcpus * sizeof(*uffd_handler_threads));
diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c
index 04a2641261be..2c809452eac1 100644
--- a/tools/testing/selftests/kvm/dirty_log_perf_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c
@@ -114,7 +114,8 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	struct timespec clear_dirty_log_total = (struct timespec){0};
 
 	vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
-				 p->backing_src);
+				 p->backing_src,
+				 p->partition_vcpu_memory_access);
 
 	perf_test_args.wr_fract = p->wr_fract;
 
@@ -132,9 +133,6 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
 	TEST_ASSERT(vcpu_threads, "Memory allocation failed");
 
-	perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size,
-			      p->partition_vcpu_memory_access);
-
 	sync_global_to_guest(vm, perf_test_args);
 
 	/* Start the iterations */
diff --git a/tools/testing/selftests/kvm/include/perf_test_util.h b/tools/testing/selftests/kvm/include/perf_test_util.h
index 223fe6b79a04..3a21e82a0173 100644
--- a/tools/testing/selftests/kvm/include/perf_test_util.h
+++ b/tools/testing/selftests/kvm/include/perf_test_util.h
@@ -39,10 +39,8 @@ extern struct perf_test_args perf_test_args;
 
 struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
 				   uint64_t vcpu_memory_bytes,
-				   enum vm_mem_backing_src_type backing_src);
+				   enum vm_mem_backing_src_type backing_src,
+				   bool partition_vcpu_memory_access);
 void perf_test_destroy_vm(struct kvm_vm *vm);
-void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus,
-			   uint64_t vcpu_memory_bytes,
-			   bool partition_vcpu_memory_access);
 
 #endif /* SELFTEST_KVM_PERF_TEST_UTIL_H */
diff --git a/tools/testing/selftests/kvm/lib/perf_test_util.c b/tools/testing/selftests/kvm/lib/perf_test_util.c
index 9b0cfdf10772..3aa99365726b 100644
--- a/tools/testing/selftests/kvm/lib/perf_test_util.c
+++ b/tools/testing/selftests/kvm/lib/perf_test_util.c
@@ -47,9 +47,45 @@ static void guest_code(uint32_t vcpu_id)
 	}
 }
 
+
+static void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus,
+				  uint64_t vcpu_memory_bytes,
+				  bool partition_vcpu_memory_access)
+{
+	struct perf_test_args *pta = &perf_test_args;
+	struct perf_test_vcpu_args *vcpu_args;
+	int vcpu_id;
+
+	for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
+		vcpu_args = &pta->vcpu_args[vcpu_id];
+
+		vcpu_args->vcpu_id = vcpu_id;
+		if (partition_vcpu_memory_access) {
+			vcpu_args->gva = guest_test_virt_mem +
+					 (vcpu_id * vcpu_memory_bytes);
+			vcpu_args->pages = vcpu_memory_bytes /
+					   pta->guest_page_size;
+			vcpu_args->gpa = pta->gpa +
+					 (vcpu_id * vcpu_memory_bytes);
+		} else {
+			vcpu_args->gva = guest_test_virt_mem;
+			vcpu_args->pages = (vcpus * vcpu_memory_bytes) /
+					   pta->guest_page_size;
+			vcpu_args->gpa = pta->gpa;
+		}
+
+		pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n",
+			 vcpu_id, vcpu_args->gpa, vcpu_args->gpa +
+			 (vcpu_args->pages * pta->guest_page_size));
+	}
+}
+
+
+
 struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
 				   uint64_t vcpu_memory_bytes,
-				   enum vm_mem_backing_src_type backing_src)
+				   enum vm_mem_backing_src_type backing_src,
+				   bool partition_vcpu_memory_access)
 {
 	struct perf_test_args *pta = &perf_test_args;
 	struct kvm_vm *vm;
@@ -65,6 +101,7 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
 
 	guest_num_pages = vm_adjust_num_guest_pages(mode,
 				(vcpus * vcpu_memory_bytes) / pta->guest_page_size);
+	vcpu_memory_bytes = (guest_num_pages * pta->guest_page_size) / vcpus;
 
 	TEST_ASSERT(vcpu_memory_bytes % getpagesize() == 0,
 		    "Guest memory size is not host page size aligned.");
@@ -106,6 +143,9 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
 	/* Do mapping for the demand paging memory slot */
 	virt_map(vm, guest_test_virt_mem, pta->gpa, guest_num_pages, 0);
 
+	perf_test_setup_vcpus(vm, vcpus, vcpu_memory_bytes,
+			      partition_vcpu_memory_access);
+
 	ucall_init(vm, NULL);
 
 	return vm;
@@ -116,35 +156,3 @@ void perf_test_destroy_vm(struct kvm_vm *vm)
 	ucall_uninit(vm);
 	kvm_vm_free(vm);
 }
-
-void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus,
-			   uint64_t vcpu_memory_bytes,
-			   bool partition_vcpu_memory_access)
-{
-	struct perf_test_args *pta = &perf_test_args;
-	struct perf_test_vcpu_args *vcpu_args;
-	int vcpu_id;
-
-	for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
-		vcpu_args = &pta->vcpu_args[vcpu_id];
-
-		vcpu_args->vcpu_id = vcpu_id;
-		if (partition_vcpu_memory_access) {
-			vcpu_args->gva = guest_test_virt_mem +
-					 (vcpu_id * vcpu_memory_bytes);
-			vcpu_args->pages = vcpu_memory_bytes /
-					   pta->guest_page_size;
-			vcpu_args->gpa = pta->gpa +
-					 (vcpu_id * vcpu_memory_bytes);
-		} else {
-			vcpu_args->gva = guest_test_virt_mem;
-			vcpu_args->pages = (vcpus * vcpu_memory_bytes) /
-					   pta->guest_page_size;
-			vcpu_args->gpa = pta->gpa;
-		}
-
-		pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n",
-			 vcpu_id, vcpu_args->gpa, vcpu_args->gpa +
-			 (vcpu_args->pages * pta->guest_page_size));
-	}
-}
diff --git a/tools/testing/selftests/kvm/memslot_modification_stress_test.c b/tools/testing/selftests/kvm/memslot_modification_stress_test.c
index b3b8f08e91ad..949822833b6b 100644
--- a/tools/testing/selftests/kvm/memslot_modification_stress_test.c
+++ b/tools/testing/selftests/kvm/memslot_modification_stress_test.c
@@ -98,16 +98,14 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	int vcpu_id;
 
 	vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
-				 VM_MEM_SRC_ANONYMOUS);
+				 VM_MEM_SRC_ANONYMOUS,
+				 p->partition_vcpu_memory_access);
 
 	perf_test_args.wr_fract = 1;
 
 	vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
 	TEST_ASSERT(vcpu_threads, "Memory allocation failed");
 
-	perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size,
-			      p->partition_vcpu_memory_access);
-
 	/* Export the shared variables to the guest */
 	sync_global_to_guest(vm, perf_test_args);
 
-- 
2.30.0.478.g8a0d178c01-goog



Thread overview: 35+ messages
2021-02-10 23:06 [PATCH 00/15] VM: selftests: Hugepage fixes and cleanups Sean Christopherson
2021-02-10 23:06 ` [PATCH 01/15] KVM: selftests: Explicitly state indicies for vm_guest_mode_params array Sean Christopherson
2021-02-11  0:50   ` Ben Gardon
2021-02-10 23:06 ` [PATCH 02/15] KVM: selftests: Expose align() helpers to tests Sean Christopherson
2021-02-11  0:49   ` Ben Gardon
2021-02-10 23:06 ` [PATCH 03/15] KVM: selftests: Align HVA for HugeTLB-backed memslots Sean Christopherson
2021-02-11  0:52   ` Ben Gardon
2021-02-25  7:40   ` wangyanan (Y)
2021-03-13  0:17     ` Sean Christopherson
2021-02-10 23:06 ` [PATCH 04/15] KVM: selftests: Force stronger HVA alignment (1gb) for hugepages Sean Christopherson
2021-02-25  7:57   ` wangyanan (Y)
2021-03-13  0:26     ` Sean Christopherson
2021-02-10 23:06 ` [PATCH 05/15] KVM: selftests: Require GPA to be aligned when backed by hugepages Sean Christopherson
2021-02-11  1:01   ` Ben Gardon
2021-02-10 23:06 ` [PATCH 06/15] KVM: selftests: Use shorthand local var to access struct perf_tests_args Sean Christopherson
2021-02-11  1:09   ` Ben Gardon
2021-02-10 23:06 ` [PATCH 07/15] KVM: selftests: Capture per-vCPU GPA in perf_test_vcpu_args Sean Christopherson
2021-02-11  1:24   ` Ben Gardon
2021-02-10 23:06 ` [PATCH 08/15] KVM: selftests: Use perf util's per-vCPU GPA/pages in demand paging test Sean Christopherson
2021-02-11  1:23   ` Ben Gardon
2021-02-10 23:06 ` [PATCH 09/15] KVM: selftests: Move per-VM GPA into perf_test_args Sean Christopherson
2021-02-11  1:22   ` Ben Gardon
2021-02-11  1:56     ` Sean Christopherson
2021-02-11 13:12       ` Paolo Bonzini
2021-02-11 15:57         ` Sean Christopherson
2021-02-11 17:33           ` Ben Gardon
2021-02-10 23:06 ` [PATCH 10/15] KVM: selftests: Remove perf_test_args.host_page_size Sean Christopherson
2021-02-11  1:26   ` Ben Gardon
2021-02-10 23:06 ` [PATCH 11/15] KVM: selftests: Create VM with adjusted number of guest pages for perf tests Sean Christopherson
2021-02-11  1:32   ` Ben Gardon
2021-02-10 23:06 ` Sean Christopherson [this message]
2021-02-10 23:06 ` [PATCH 13/15] KVM: selftests: Sync perf_test_args to guest during VM creation Sean Christopherson
2021-02-10 23:06 ` [PATCH 14/15] KVM: selftests: Track size of per-VM memslot in perf_test_args Sean Christopherson
2021-02-10 23:06 ` [PATCH 15/15] KVM: selftests: Get rid of gorilla math in memslots modification test Sean Christopherson
2021-02-11 11:58 ` [PATCH 00/15] VM: selftests: Hugepage fixes and cleanups Andrew Jones
