From: Sean Christopherson <seanjc@google.com>
To: Paolo Bonzini <pbonzini@redhat.com>
Cc: linux-kernel@vger.kernel.org, kvm@vger.kernel.org,
	Vitaly Kuznetsov <vkuznets@redhat.com>,
	Andrew Jones <drjones@redhat.com>,
	David Matlack <dmatlack@google.com>,
	Ben Gardon <bgardon@google.com>, Oliver Upton <oupton@google.com>,
	Sean Christopherson <seanjc@google.com>
Subject: [PATCH 117/128] KVM: selftests: Stop conflating vCPU index and ID in perf tests
Date: Wed,  4 May 2022 22:49:03 +0000
Message-ID: <20220504224914.1654036-118-seanjc@google.com>
In-Reply-To: <20220504224914.1654036-1-seanjc@google.com>

Track vCPUs by their 'struct kvm_vcpu' object, and stop assuming that a
vCPU's ID is the same as its index when referencing a vCPU's metadata.
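For illustration only (not part of this patch), a minimal standalone
sketch of the distinction: per-vCPU metadata is keyed by the vCPU's
index (its position in the test's arrays), while KVM ioctls are keyed
by the vCPU's ID, and the two coincide only when IDs happen to be
assigned 0..N-1.  'struct fake_vcpu' and 'last_completed_iteration'
below are hypothetical stand-ins for the real 'struct kvm_vcpu' and
the tests' per-vCPU arrays:

	#include <stdio.h>

	/* Hypothetical stand-in for 'struct kvm_vcpu'. */
	struct fake_vcpu {
		int id;
	};

	int main(void)
	{
		/* vCPU IDs need not be 0..N-1; a test may pick sparse IDs. */
		struct fake_vcpu vcpus[] = { {.id = 0}, {.id = 8}, {.id = 9} };
		int last_completed_iteration[3];
		int i;

		for (i = 0; i < 3; i++) {
			/* Key per-vCPU metadata by index... */
			last_completed_iteration[i] = 0;

			/* ...and hand only vcpus[i].id to KVM ioctls. */
			printf("idx %d -> ID %d (iteration %d)\n",
			       i, vcpus[i].id, last_completed_iteration[i]);
		}
		return 0;
	}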

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 .../selftests/kvm/access_tracking_perf_test.c | 81 ++++++++++---------
 .../selftests/kvm/demand_paging_test.c        | 36 ++++-----
 .../selftests/kvm/dirty_log_perf_test.c       | 39 ++++-----
 .../selftests/kvm/include/perf_test_util.h    |  5 +-
 .../selftests/kvm/lib/perf_test_util.c        | 79 +++++++++---------
 .../kvm/memslot_modification_stress_test.c    | 10 +--
 6 files changed, 129 insertions(+), 121 deletions(-)

diff --git a/tools/testing/selftests/kvm/access_tracking_perf_test.c b/tools/testing/selftests/kvm/access_tracking_perf_test.c
index d8909032317a..86a90222f913 100644
--- a/tools/testing/selftests/kvm/access_tracking_perf_test.c
+++ b/tools/testing/selftests/kvm/access_tracking_perf_test.c
@@ -74,7 +74,7 @@ struct test_params {
 	uint64_t vcpu_memory_bytes;
 
 	/* The number of vCPUs to create in the VM. */
-	int vcpus;
+	int nr_vcpus;
 };
 
 static uint64_t pread_uint64(int fd, const char *filename, uint64_t index)
@@ -127,10 +127,12 @@ static void mark_page_idle(int page_idle_fd, uint64_t pfn)
 		    "Set page_idle bits for PFN 0x%" PRIx64, pfn);
 }
 
-static void mark_vcpu_memory_idle(struct kvm_vm *vm, int vcpu_id)
+static void mark_vcpu_memory_idle(struct kvm_vm *vm,
+				  struct perf_test_vcpu_args *vcpu_args)
 {
-	uint64_t base_gva = perf_test_args.vcpu_args[vcpu_id].gva;
-	uint64_t pages = perf_test_args.vcpu_args[vcpu_id].pages;
+	int vcpu_idx = vcpu_args->vcpu_idx;
+	uint64_t base_gva = vcpu_args->gva;
+	uint64_t pages = vcpu_args->pages;
 	uint64_t page;
 	uint64_t still_idle = 0;
 	uint64_t no_pfn = 0;
@@ -138,7 +140,7 @@ static void mark_vcpu_memory_idle(struct kvm_vm *vm, int vcpu_id)
 	int pagemap_fd;
 
 	/* If vCPUs are using an overlapping region, let vCPU 0 mark it idle. */
-	if (overlap_memory_access && vcpu_id)
+	if (overlap_memory_access && vcpu_idx)
 		return;
 
 	page_idle_fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
@@ -170,7 +172,7 @@ static void mark_vcpu_memory_idle(struct kvm_vm *vm, int vcpu_id)
 	 */
 	TEST_ASSERT(no_pfn < pages / 100,
 		    "vCPU %d: No PFN for %" PRIu64 " out of %" PRIu64 " pages.",
-		    vcpu_id, no_pfn, pages);
+		    vcpu_idx, no_pfn, pages);
 
 	/*
 	 * Test that at least 90% of memory has been marked idle (the rest might
@@ -183,17 +185,16 @@ static void mark_vcpu_memory_idle(struct kvm_vm *vm, int vcpu_id)
 	TEST_ASSERT(still_idle < pages / 10,
 		    "vCPU%d: Too many pages still idle (%"PRIu64 " out of %"
 		    PRIu64 ").\n",
-		    vcpu_id, still_idle, pages);
+		    vcpu_idx, still_idle, pages);
 
 	close(page_idle_fd);
 	close(pagemap_fd);
 }
 
-static void assert_ucall(struct kvm_vm *vm, uint32_t vcpu_id,
-			 uint64_t expected_ucall)
+static void assert_ucall(struct kvm_vcpu *vcpu, uint64_t expected_ucall)
 {
 	struct ucall uc;
-	uint64_t actual_ucall = get_ucall(vm, vcpu_id, &uc);
+	uint64_t actual_ucall = get_ucall(vcpu->vm, vcpu->id, &uc);
 
 	TEST_ASSERT(expected_ucall == actual_ucall,
 		    "Guest exited unexpectedly (expected ucall %" PRIu64
@@ -217,28 +218,29 @@ static bool spin_wait_for_next_iteration(int *current_iteration)
 
 static void vcpu_thread_main(struct perf_test_vcpu_args *vcpu_args)
 {
+	struct kvm_vcpu *vcpu = vcpu_args->vcpu;
 	struct kvm_vm *vm = perf_test_args.vm;
-	int vcpu_id = vcpu_args->vcpu_id;
+	int vcpu_idx = vcpu_args->vcpu_idx;
 	int current_iteration = 0;
 
 	while (spin_wait_for_next_iteration(&current_iteration)) {
 		switch (READ_ONCE(iteration_work)) {
 		case ITERATION_ACCESS_MEMORY:
-			vcpu_run(vm, vcpu_id);
-			assert_ucall(vm, vcpu_id, UCALL_SYNC);
+			vcpu_run(vm, vcpu->id);
+			assert_ucall(vcpu, UCALL_SYNC);
 			break;
 		case ITERATION_MARK_IDLE:
-			mark_vcpu_memory_idle(vm, vcpu_id);
+			mark_vcpu_memory_idle(vm, vcpu_args);
 			break;
 		};
 
-		vcpu_last_completed_iteration[vcpu_id] = current_iteration;
+		vcpu_last_completed_iteration[vcpu_idx] = current_iteration;
 	}
 }
 
-static void spin_wait_for_vcpu(int vcpu_id, int target_iteration)
+static void spin_wait_for_vcpu(int vcpu_idx, int target_iteration)
 {
-	while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id]) !=
+	while (READ_ONCE(vcpu_last_completed_iteration[vcpu_idx]) !=
 	       target_iteration) {
 		continue;
 	}
@@ -250,12 +252,11 @@ enum access_type {
 	ACCESS_WRITE,
 };
 
-static void run_iteration(struct kvm_vm *vm, int vcpus, const char *description)
+static void run_iteration(struct kvm_vm *vm, int nr_vcpus, const char *description)
 {
 	struct timespec ts_start;
 	struct timespec ts_elapsed;
-	int next_iteration;
-	int vcpu_id;
+	int next_iteration, i;
 
 	/* Kick off the vCPUs by incrementing iteration. */
 	next_iteration = ++iteration;
@@ -263,23 +264,23 @@ static void run_iteration(struct kvm_vm *vm, int vcpus, const char *description)
 	clock_gettime(CLOCK_MONOTONIC, &ts_start);
 
 	/* Wait for all vCPUs to finish the iteration. */
-	for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++)
-		spin_wait_for_vcpu(vcpu_id, next_iteration);
+	for (i = 0; i < nr_vcpus; i++)
+		spin_wait_for_vcpu(i, next_iteration);
 
 	ts_elapsed = timespec_elapsed(ts_start);
 	pr_info("%-30s: %ld.%09lds\n",
 		description, ts_elapsed.tv_sec, ts_elapsed.tv_nsec);
 }
 
-static void access_memory(struct kvm_vm *vm, int vcpus, enum access_type access,
-			  const char *description)
+static void access_memory(struct kvm_vm *vm, int nr_vcpus,
+			  enum access_type access, const char *description)
 {
 	perf_test_set_wr_fract(vm, (access == ACCESS_READ) ? INT_MAX : 1);
 	iteration_work = ITERATION_ACCESS_MEMORY;
-	run_iteration(vm, vcpus, description);
+	run_iteration(vm, nr_vcpus, description);
 }
 
-static void mark_memory_idle(struct kvm_vm *vm, int vcpus)
+static void mark_memory_idle(struct kvm_vm *vm, int nr_vcpus)
 {
 	/*
 	 * Even though this parallelizes the work across vCPUs, this is still a
@@ -289,37 +290,37 @@ static void mark_memory_idle(struct kvm_vm *vm, int vcpus)
 	 */
 	pr_debug("Marking VM memory idle (slow)...\n");
 	iteration_work = ITERATION_MARK_IDLE;
-	run_iteration(vm, vcpus, "Mark memory idle");
+	run_iteration(vm, nr_vcpus, "Mark memory idle");
 }
 
 static void run_test(enum vm_guest_mode mode, void *arg)
 {
 	struct test_params *params = arg;
 	struct kvm_vm *vm;
-	int vcpus = params->vcpus;
+	int nr_vcpus = params->nr_vcpus;
 
-	vm = perf_test_create_vm(mode, vcpus, params->vcpu_memory_bytes, 1,
+	vm = perf_test_create_vm(mode, nr_vcpus, params->vcpu_memory_bytes, 1,
 				 params->backing_src, !overlap_memory_access);
 
-	perf_test_start_vcpu_threads(vcpus, vcpu_thread_main);
+	perf_test_start_vcpu_threads(nr_vcpus, vcpu_thread_main);
 
 	pr_info("\n");
-	access_memory(vm, vcpus, ACCESS_WRITE, "Populating memory");
+	access_memory(vm, nr_vcpus, ACCESS_WRITE, "Populating memory");
 
 	/* As a control, read and write to the populated memory first. */
-	access_memory(vm, vcpus, ACCESS_WRITE, "Writing to populated memory");
-	access_memory(vm, vcpus, ACCESS_READ, "Reading from populated memory");
+	access_memory(vm, nr_vcpus, ACCESS_WRITE, "Writing to populated memory");
+	access_memory(vm, nr_vcpus, ACCESS_READ, "Reading from populated memory");
 
 	/* Repeat on memory that has been marked as idle. */
-	mark_memory_idle(vm, vcpus);
-	access_memory(vm, vcpus, ACCESS_WRITE, "Writing to idle memory");
-	mark_memory_idle(vm, vcpus);
-	access_memory(vm, vcpus, ACCESS_READ, "Reading from idle memory");
+	mark_memory_idle(vm, nr_vcpus);
+	access_memory(vm, nr_vcpus, ACCESS_WRITE, "Writing to idle memory");
+	mark_memory_idle(vm, nr_vcpus);
+	access_memory(vm, nr_vcpus, ACCESS_READ, "Reading from idle memory");
 
 	/* Set done to signal the vCPU threads to exit */
 	done = true;
 
-	perf_test_join_vcpu_threads(vcpus);
+	perf_test_join_vcpu_threads(nr_vcpus);
 	perf_test_destroy_vm(vm);
 }
 
@@ -347,7 +348,7 @@ int main(int argc, char *argv[])
 	struct test_params params = {
 		.backing_src = DEFAULT_VM_MEM_SRC,
 		.vcpu_memory_bytes = DEFAULT_PER_VCPU_MEM_SIZE,
-		.vcpus = 1,
+		.nr_vcpus = 1,
 	};
 	int page_idle_fd;
 	int opt;
@@ -363,7 +364,7 @@ int main(int argc, char *argv[])
 			params.vcpu_memory_bytes = parse_size(optarg);
 			break;
 		case 'v':
-			params.vcpus = atoi(optarg);
+			params.nr_vcpus = atoi(optarg);
 			break;
 		case 'o':
 			overlap_memory_access = true;
diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c
index d8db0a37e973..c46110721088 100644
--- a/tools/testing/selftests/kvm/demand_paging_test.c
+++ b/tools/testing/selftests/kvm/demand_paging_test.c
@@ -44,28 +44,27 @@ static char *guest_data_prototype;
 
 static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
 {
-	int ret;
-	int vcpu_id = vcpu_args->vcpu_id;
+	struct kvm_vcpu *vcpu = vcpu_args->vcpu;
 	struct kvm_vm *vm = perf_test_args.vm;
-	struct kvm_run *run;
+	int vcpu_idx = vcpu_args->vcpu_idx;
+	struct kvm_run *run = vcpu->run;
 	struct timespec start;
 	struct timespec ts_diff;
-
-	run = vcpu_state(vm, vcpu_id);
+	int ret;
 
 	clock_gettime(CLOCK_MONOTONIC, &start);
 
 	/* Let the guest access its memory */
-	ret = _vcpu_run(vm, vcpu_id);
+	ret = _vcpu_run(vm, vcpu->id);
 	TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
-	if (get_ucall(vm, vcpu_id, NULL) != UCALL_SYNC) {
+	if (get_ucall(vm, vcpu->id, NULL) != UCALL_SYNC) {
 		TEST_ASSERT(false,
 			    "Invalid guest sync status: exit_reason=%s\n",
 			    exit_reason_str(run->exit_reason));
 	}
 
 	ts_diff = timespec_elapsed(start);
-	PER_VCPU_DEBUG("vCPU %d execution time: %ld.%.9lds\n", vcpu_id,
+	PER_VCPU_DEBUG("vCPU %d execution time: %ld.%.9lds\n", vcpu_idx,
 		       ts_diff.tv_sec, ts_diff.tv_nsec);
 }
 
@@ -285,8 +284,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	struct timespec ts_diff;
 	int *pipefds = NULL;
 	struct kvm_vm *vm;
-	int vcpu_id;
-	int r;
+	int r, i;
 
 	vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1,
 				 p->src_type, p->partition_vcpu_memory_access);
@@ -309,12 +307,12 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 		pipefds = malloc(sizeof(int) * nr_vcpus * 2);
 		TEST_ASSERT(pipefds, "Unable to allocate memory for pipefd");
 
-		for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
+		for (i = 0; i < nr_vcpus; i++) {
 			struct perf_test_vcpu_args *vcpu_args;
 			void *vcpu_hva;
 			void *vcpu_alias;
 
-			vcpu_args = &perf_test_args.vcpu_args[vcpu_id];
+			vcpu_args = &perf_test_args.vcpu_args[i];
 
 			/* Cache the host addresses of the region */
 			vcpu_hva = addr_gpa2hva(vm, vcpu_args->gpa);
@@ -324,13 +322,13 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 			 * Set up user fault fd to handle demand paging
 			 * requests.
 			 */
-			r = pipe2(&pipefds[vcpu_id * 2],
+			r = pipe2(&pipefds[i * 2],
 				  O_CLOEXEC | O_NONBLOCK);
 			TEST_ASSERT(!r, "Failed to set up pipefd");
 
-			setup_demand_paging(vm, &uffd_handler_threads[vcpu_id],
-					    pipefds[vcpu_id * 2], p->uffd_mode,
-					    p->uffd_delay, &uffd_args[vcpu_id],
+			setup_demand_paging(vm, &uffd_handler_threads[i],
+					    pipefds[i * 2], p->uffd_mode,
+					    p->uffd_delay, &uffd_args[i],
 					    vcpu_hva, vcpu_alias,
 					    vcpu_args->pages * perf_test_args.guest_page_size);
 		}
@@ -350,11 +348,11 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 		char c;
 
 		/* Tell the user fault fd handler threads to quit */
-		for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
-			r = write(pipefds[vcpu_id * 2 + 1], &c, 1);
+		for (i = 0; i < nr_vcpus; i++) {
+			r = write(pipefds[i * 2 + 1], &c, 1);
 			TEST_ASSERT(r == 1, "Unable to write to pipefd");
 
-			pthread_join(uffd_handler_threads[vcpu_id], NULL);
+			pthread_join(uffd_handler_threads[i], NULL);
 		}
 	}
 
diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c
index 7b47ae4f952e..e0b3639c47b9 100644
--- a/tools/testing/selftests/kvm/dirty_log_perf_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c
@@ -68,44 +68,45 @@ static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];
 
 static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
 {
-	int ret;
+	struct kvm_vcpu *vcpu = vcpu_args->vcpu;
 	struct kvm_vm *vm = perf_test_args.vm;
+	int vcpu_idx = vcpu_args->vcpu_idx;
 	uint64_t pages_count = 0;
 	struct kvm_run *run;
 	struct timespec start;
 	struct timespec ts_diff;
 	struct timespec total = (struct timespec){0};
 	struct timespec avg;
-	int vcpu_id = vcpu_args->vcpu_id;
+	int ret;
 
-	run = vcpu_state(vm, vcpu_id);
+	run = vcpu->run;
 
 	while (!READ_ONCE(host_quit)) {
 		int current_iteration = READ_ONCE(iteration);
 
 		clock_gettime(CLOCK_MONOTONIC, &start);
-		ret = _vcpu_run(vm, vcpu_id);
+		ret = _vcpu_run(vm, vcpu->id);
 		ts_diff = timespec_elapsed(start);
 
 		TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
-		TEST_ASSERT(get_ucall(vm, vcpu_id, NULL) == UCALL_SYNC,
+		TEST_ASSERT(get_ucall(vm, vcpu->id, NULL) == UCALL_SYNC,
 			    "Invalid guest sync status: exit_reason=%s\n",
 			    exit_reason_str(run->exit_reason));
 
-		pr_debug("Got sync event from vCPU %d\n", vcpu_id);
-		vcpu_last_completed_iteration[vcpu_id] = current_iteration;
+		pr_debug("Got sync event from vCPU %d\n", vcpu_idx);
+		vcpu_last_completed_iteration[vcpu_idx] = current_iteration;
 		pr_debug("vCPU %d updated last completed iteration to %d\n",
-			 vcpu_id, vcpu_last_completed_iteration[vcpu_id]);
+			 vcpu->id, vcpu_last_completed_iteration[vcpu_idx]);
 
 		if (current_iteration) {
 			pages_count += vcpu_args->pages;
 			total = timespec_add(total, ts_diff);
 			pr_debug("vCPU %d iteration %d dirty memory time: %ld.%.9lds\n",
-				vcpu_id, current_iteration, ts_diff.tv_sec,
+				vcpu_idx, current_iteration, ts_diff.tv_sec,
 				ts_diff.tv_nsec);
 		} else {
 			pr_debug("vCPU %d iteration %d populate memory time: %ld.%.9lds\n",
-				vcpu_id, current_iteration, ts_diff.tv_sec,
+				vcpu_idx, current_iteration, ts_diff.tv_sec,
 				ts_diff.tv_nsec);
 		}
 
@@ -113,9 +114,9 @@ static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
 		       !READ_ONCE(host_quit)) {}
 	}
 
-	avg = timespec_div(total, vcpu_last_completed_iteration[vcpu_id]);
+	avg = timespec_div(total, vcpu_last_completed_iteration[vcpu_idx]);
 	pr_debug("\nvCPU %d dirtied 0x%lx pages over %d iterations in %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
-		vcpu_id, pages_count, vcpu_last_completed_iteration[vcpu_id],
+		vcpu_idx, pages_count, vcpu_last_completed_iteration[vcpu_idx],
 		total.tv_sec, total.tv_nsec, avg.tv_sec, avg.tv_nsec);
 }
 
@@ -207,7 +208,6 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	uint64_t guest_num_pages;
 	uint64_t host_num_pages;
 	uint64_t pages_per_slot;
-	int vcpu_id;
 	struct timespec start;
 	struct timespec ts_diff;
 	struct timespec get_dirty_log_total = (struct timespec){0};
@@ -215,6 +215,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	struct timespec avg;
 	struct kvm_enable_cap cap = {};
 	struct timespec clear_dirty_log_total = (struct timespec){0};
+	int i;
 
 	vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
 				 p->slots, p->backing_src,
@@ -242,15 +243,15 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	host_quit = false;
 
 	clock_gettime(CLOCK_MONOTONIC, &start);
-	for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++)
-		vcpu_last_completed_iteration[vcpu_id] = -1;
+	for (i = 0; i < nr_vcpus; i++)
+		vcpu_last_completed_iteration[i] = -1;
 
 	perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker);
 
 	/* Allow the vCPUs to populate memory */
 	pr_debug("Starting iteration %d - Populating\n", iteration);
-	for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
-		while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id]) !=
+	for (i = 0; i < nr_vcpus; i++) {
+		while (READ_ONCE(vcpu_last_completed_iteration[i]) !=
 		       iteration)
 			;
 	}
@@ -275,8 +276,8 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 		iteration++;
 
 		pr_debug("Starting iteration %d\n", iteration);
-		for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
-			while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id])
+		for (i = 0; i < nr_vcpus; i++) {
+			while (READ_ONCE(vcpu_last_completed_iteration[i])
 			       != iteration)
 				;
 		}
diff --git a/tools/testing/selftests/kvm/include/perf_test_util.h b/tools/testing/selftests/kvm/include/perf_test_util.h
index a86f953d8d36..9a6cdaed33f6 100644
--- a/tools/testing/selftests/kvm/include/perf_test_util.h
+++ b/tools/testing/selftests/kvm/include/perf_test_util.h
@@ -25,7 +25,8 @@ struct perf_test_vcpu_args {
 	uint64_t pages;
 
 	/* Only used by the host userspace part of the vCPU thread */
-	int vcpu_id;
+	struct kvm_vcpu *vcpu;
+	int vcpu_idx;
 };
 
 struct perf_test_args {
@@ -39,7 +40,7 @@ struct perf_test_args {
 
 extern struct perf_test_args perf_test_args;
 
-struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
+struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus,
 				   uint64_t vcpu_memory_bytes, int slots,
 				   enum vm_mem_backing_src_type backing_src,
 				   bool partition_vcpu_memory_access);
diff --git a/tools/testing/selftests/kvm/lib/perf_test_util.c b/tools/testing/selftests/kvm/lib/perf_test_util.c
index ffbd3664e162..679f64527f1a 100644
--- a/tools/testing/selftests/kvm/lib/perf_test_util.c
+++ b/tools/testing/selftests/kvm/lib/perf_test_util.c
@@ -17,8 +17,8 @@ struct perf_test_args perf_test_args;
 static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
 
 struct vcpu_thread {
-	/* The id of the vCPU. */
-	int vcpu_id;
+	/* The index of the vCPU. */
+	int vcpu_idx;
 
 	/* The pthread backing the vCPU. */
 	pthread_t thread;
@@ -36,24 +36,26 @@ static void (*vcpu_thread_fn)(struct perf_test_vcpu_args *);
 /* Set to true once all vCPU threads are up and running. */
 static bool all_vcpu_threads_running;
 
+static struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
+
 /*
  * Continuously write to the first 8 bytes of each page in the
  * specified region.
  */
-static void guest_code(uint32_t vcpu_id)
+static void guest_code(uint32_t vcpu_idx)
 {
 	struct perf_test_args *pta = &perf_test_args;
-	struct perf_test_vcpu_args *vcpu_args = &pta->vcpu_args[vcpu_id];
+	struct perf_test_vcpu_args *vcpu_args = &pta->vcpu_args[vcpu_idx];
 	uint64_t gva;
 	uint64_t pages;
 	int i;
 
-	/* Make sure vCPU args data structure is not corrupt. */
-	GUEST_ASSERT(vcpu_args->vcpu_id == vcpu_id);
-
 	gva = vcpu_args->gva;
 	pages = vcpu_args->pages;
 
+	/* Make sure vCPU args data structure is not corrupt. */
+	GUEST_ASSERT(vcpu_args->vcpu_idx == vcpu_idx);
+
 	while (true) {
 		for (i = 0; i < pages; i++) {
 			uint64_t addr = gva + (i * pta->guest_page_size);
@@ -68,40 +70,43 @@ static void guest_code(uint32_t vcpu_id)
 	}
 }
 
-void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus,
+void perf_test_setup_vcpus(struct kvm_vm *vm, int nr_vcpus,
+			   struct kvm_vcpu *vcpus[],
 			   uint64_t vcpu_memory_bytes,
 			   bool partition_vcpu_memory_access)
 {
 	struct perf_test_args *pta = &perf_test_args;
 	struct perf_test_vcpu_args *vcpu_args;
-	int vcpu_id;
+	int i;
 
-	for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
-		vcpu_args = &pta->vcpu_args[vcpu_id];
+	for (i = 0; i < nr_vcpus; i++) {
+		vcpu_args = &pta->vcpu_args[i];
+
+		vcpu_args->vcpu = vcpus[i];
+		vcpu_args->vcpu_idx = i;
 
-		vcpu_args->vcpu_id = vcpu_id;
 		if (partition_vcpu_memory_access) {
 			vcpu_args->gva = guest_test_virt_mem +
-					 (vcpu_id * vcpu_memory_bytes);
+					 (i * vcpu_memory_bytes);
 			vcpu_args->pages = vcpu_memory_bytes /
 					   pta->guest_page_size;
-			vcpu_args->gpa = pta->gpa + (vcpu_id * vcpu_memory_bytes);
+			vcpu_args->gpa = pta->gpa + (i * vcpu_memory_bytes);
 		} else {
 			vcpu_args->gva = guest_test_virt_mem;
-			vcpu_args->pages = (vcpus * vcpu_memory_bytes) /
+			vcpu_args->pages = (nr_vcpus * vcpu_memory_bytes) /
 					   pta->guest_page_size;
 			vcpu_args->gpa = pta->gpa;
 		}
 
-		vcpu_args_set(vm, vcpu_id, 1, vcpu_id);
+		vcpu_args_set(vm, vcpus[i]->id, 1, i);
 
 		pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n",
-			 vcpu_id, vcpu_args->gpa, vcpu_args->gpa +
+			 i, vcpu_args->gpa, vcpu_args->gpa +
 			 (vcpu_args->pages * pta->guest_page_size));
 	}
 }
 
-struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
+struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus,
 				   uint64_t vcpu_memory_bytes, int slots,
 				   enum vm_mem_backing_src_type backing_src,
 				   bool partition_vcpu_memory_access)
@@ -124,7 +129,7 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
 	pta->guest_page_size = vm_guest_mode_params[mode].page_size;
 
 	guest_num_pages = vm_adjust_num_guest_pages(mode,
-				(vcpus * vcpu_memory_bytes) / pta->guest_page_size);
+				(nr_vcpus * vcpu_memory_bytes) / pta->guest_page_size);
 
 	TEST_ASSERT(vcpu_memory_bytes % getpagesize() == 0,
 		    "Guest memory size is not host page size aligned.");
@@ -139,8 +144,8 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
 	 * The memory is also added to memslot 0, but that's a benign side
 	 * effect as KVM allows aliasing HVAs in memslots.
 	 */
-	vm = __vm_create_with_vcpus(mode, vcpus, DEFAULT_GUEST_PHY_PAGES,
-				    guest_num_pages, 0, guest_code, NULL);
+	vm = __vm_create_with_vcpus(mode, nr_vcpus, DEFAULT_GUEST_PHY_PAGES,
+				    guest_num_pages, 0, guest_code, vcpus);
 
 	pta->vm = vm;
 
@@ -151,8 +156,8 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
 	TEST_ASSERT(guest_num_pages < vm_get_max_gfn(vm),
 		    "Requested more guest memory than address space allows.\n"
 		    "    guest pages: %" PRIx64 " max gfn: %" PRIx64
-		    " vcpus: %d wss: %" PRIx64 "]\n",
-		    guest_num_pages, vm_get_max_gfn(vm), vcpus,
+		    " nr_vcpus: %d wss: %" PRIx64 "]\n",
+		    guest_num_pages, vm_get_max_gfn(vm), nr_vcpus,
 		    vcpu_memory_bytes);
 
 	pta->gpa = (vm_get_max_gfn(vm) - guest_num_pages) * pta->guest_page_size;
@@ -176,7 +181,8 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
 	/* Do mapping for the demand paging memory slot */
 	virt_map(vm, guest_test_virt_mem, pta->gpa, guest_num_pages);
 
-	perf_test_setup_vcpus(vm, vcpus, vcpu_memory_bytes, partition_vcpu_memory_access);
+	perf_test_setup_vcpus(vm, nr_vcpus, vcpus, vcpu_memory_bytes,
+			      partition_vcpu_memory_access);
 
 	ucall_init(vm, NULL);
 
@@ -213,39 +219,40 @@ static void *vcpu_thread_main(void *data)
 	while (!READ_ONCE(all_vcpu_threads_running))
 		;
 
-	vcpu_thread_fn(&perf_test_args.vcpu_args[vcpu->vcpu_id]);
+	vcpu_thread_fn(&perf_test_args.vcpu_args[vcpu->vcpu_idx]);
 
 	return NULL;
 }
 
-void perf_test_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct perf_test_vcpu_args *))
+void perf_test_start_vcpu_threads(int nr_vcpus,
+				  void (*vcpu_fn)(struct perf_test_vcpu_args *))
 {
-	int vcpu_id;
+	int i;
 
 	vcpu_thread_fn = vcpu_fn;
 	WRITE_ONCE(all_vcpu_threads_running, false);
 
-	for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
-		struct vcpu_thread *vcpu = &vcpu_threads[vcpu_id];
+	for (i = 0; i < nr_vcpus; i++) {
+		struct vcpu_thread *vcpu = &vcpu_threads[i];
 
-		vcpu->vcpu_id = vcpu_id;
+		vcpu->vcpu_idx = i;
 		WRITE_ONCE(vcpu->running, false);
 
 		pthread_create(&vcpu->thread, NULL, vcpu_thread_main, vcpu);
 	}
 
-	for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
-		while (!READ_ONCE(vcpu_threads[vcpu_id].running))
+	for (i = 0; i < nr_vcpus; i++) {
+		while (!READ_ONCE(vcpu_threads[i].running))
 			;
 	}
 
 	WRITE_ONCE(all_vcpu_threads_running, true);
 }
 
-void perf_test_join_vcpu_threads(int vcpus)
+void perf_test_join_vcpu_threads(int nr_vcpus)
 {
-	int vcpu_id;
+	int i;
 
-	for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++)
-		pthread_join(vcpu_threads[vcpu_id].thread, NULL);
+	for (i = 0; i < nr_vcpus; i++)
+		pthread_join(vcpu_threads[i].thread, NULL);
 }
diff --git a/tools/testing/selftests/kvm/memslot_modification_stress_test.c b/tools/testing/selftests/kvm/memslot_modification_stress_test.c
index 1410d0a9141a..a3efb3182119 100644
--- a/tools/testing/selftests/kvm/memslot_modification_stress_test.c
+++ b/tools/testing/selftests/kvm/memslot_modification_stress_test.c
@@ -38,19 +38,19 @@ static bool run_vcpus = true;
 
 static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
 {
-	int ret;
-	int vcpu_id = vcpu_args->vcpu_id;
+	struct kvm_vcpu *vcpu = vcpu_args->vcpu;
 	struct kvm_vm *vm = perf_test_args.vm;
 	struct kvm_run *run;
+	int ret;
 
-	run = vcpu_state(vm, vcpu_id);
+	run = vcpu->run;
 
 	/* Let the guest access its memory until a stop signal is received */
 	while (READ_ONCE(run_vcpus)) {
-		ret = _vcpu_run(vm, vcpu_id);
+		ret = _vcpu_run(vm, vcpu->id);
 		TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
 
-		if (get_ucall(vm, vcpu_id, NULL) == UCALL_SYNC)
+		if (get_ucall(vm, vcpu->id, NULL) == UCALL_SYNC)
 			continue;
 
 		TEST_ASSERT(false,
-- 
2.36.0.464.gb9c8b46e94-goog


Thread overview: 132+ messages
2022-05-04 22:47 [PATCH 000/128] KVM: selftests: Overhaul APIs, purge VCPU_ID Sean Christopherson
2022-05-04 22:47 ` [PATCH 001/128] KVM: selftests: Fix buggy-but-benign check in test_v3_new_redist_regions() Sean Christopherson
2022-05-04 22:47 ` [PATCH 002/128] KVM: selftests: Drop stale declarations from kvm_util_base.h Sean Christopherson
2022-05-04 22:47 ` [PATCH 003/128] KVM: selftests: Unconditionally compile KVM selftests with -Werror Sean Christopherson
2022-05-05 14:08   ` Vitaly Kuznetsov
2022-05-06 21:37     ` Sean Christopherson
2022-05-04 22:47 ` [PATCH 004/128] KVM: selftests: Always open VM file descriptors with O_RDWR Sean Christopherson
2022-05-04 22:47 ` [PATCH 005/128] KVM: selftests: Add another underscore to inner ioctl() helpers Sean Christopherson
2022-05-04 22:47 ` [PATCH 006/128] KVM: selftests: Make vcpu_ioctl() a wrapper to pretty print ioctl name Sean Christopherson
2022-05-04 22:47 ` [PATCH 007/128] KVM: selftests: Drop @mode from common vm_create() helper Sean Christopherson
2022-05-04 22:47 ` [PATCH 008/128] KVM: selftests: Split vcpu_set_nested_state() into two helpers Sean Christopherson
2022-05-04 22:47 ` [PATCH 009/128] KVM: sefltests: Use vcpu_ioctl() and __vcpu_ioctl() helpers Sean Christopherson
2022-05-04 22:47 ` [PATCH 010/128] KVM: selftests: Add __vcpu_run() helper Sean Christopherson
2022-05-04 22:47 ` [PATCH 011/128] KVM: selftests: Use vcpu_access_device_attr() in arm64 code Sean Christopherson
2022-05-04 22:47 ` [PATCH 012/128] KVM: selftests: Remove vcpu_get_fd() Sean Christopherson
2022-05-04 22:47 ` [PATCH 013/128] KVM: selftests: Add vcpu_get() to retrieve and assert on vCPU existence Sean Christopherson
2022-05-04 22:47 ` [PATCH 014/128] KVM: selftests: Make vm_ioctl() a wrapper to pretty print ioctl name Sean Christopherson
2022-05-04 22:47 ` [PATCH 015/128] KVM: sefltests: Use vm_ioctl() and __vm_ioctl() helpers Sean Christopherson
2022-05-04 22:47 ` [PATCH 016/128] KVM: selftests: Make kvm_ioctl() a wrapper to pretty print ioctl name Sean Christopherson
2022-05-04 22:47 ` [PATCH 017/128] KVM: selftests: Use kvm_ioctl() helpers Sean Christopherson
2022-05-04 22:47 ` [PATCH 018/128] KVM: selftests: Use __KVM_SYSCALL_ERROR() to handle non-KVM syscall errors Sean Christopherson
2022-05-04 22:47 ` [PATCH 019/128] KVM: selftests: Make x86-64's register dump helpers static Sean Christopherson
2022-05-04 22:47 ` [PATCH 020/128] KVM: selftests: Get rid of kvm_util_internal.h Sean Christopherson
2022-05-04 22:47 ` [PATCH 021/128] KVM: selftests: Use KVM_IOCTL_ERROR() for one-off arm64 ioctls Sean Christopherson
2022-05-04 22:47 ` [PATCH 022/128] KVM: selftests: Drop @test param from kvm_create_device() Sean Christopherson
2022-05-04 22:47 ` [PATCH 023/128] KVM: selftests: Move KVM_CREATE_DEVICE_TEST code to separate helper Sean Christopherson
2022-05-04 22:47 ` [PATCH 024/128] KVM: selftests: Multiplex return code and fd in __kvm_create_device() Sean Christopherson
2022-05-04 22:47 ` [PATCH 025/128] KVM: selftests: Rename KVM_HAS_DEVICE_ATTR helpers for consistency Sean Christopherson
2022-05-04 22:47 ` [PATCH 026/128] KVM: selftests: Drop 'int' return from asserting *_has_device_attr() Sean Christopherson
2022-05-04 22:47 ` [PATCH 027/128] KVM: selftests: Split get/set device_attr helpers Sean Christopherson
2022-05-04 22:47 ` [PATCH 028/128] KVM: selftests: Add a VM backpointer to 'struct vcpu' Sean Christopherson
2022-05-04 22:47 ` [PATCH 029/128] KVM: selftests: Add vm_create_*() variants to expose/return " Sean Christopherson
2022-05-04 22:47 ` [PATCH 030/128] KVM: selftests: Push vm_adjust_num_guest_pages() into "w/o vCPUs" helper Sean Christopherson
2022-05-04 22:47 ` [PATCH 031/128] KVM: selftests: Use vm_create_without_vcpus() in set_boot_cpu_id Sean Christopherson
2022-05-04 22:47 ` [PATCH 032/128] KVM: selftests: Use vm_create_without_vcpus() in dirty_log_test Sean Christopherson
2022-05-04 22:47 ` [PATCH 033/128] KVM: selftests: Use vm_create_without_vcpus() in hardware_disable_test Sean Christopherson
2022-05-04 22:47 ` [PATCH 034/128] KVM: selftests: Use vm_create_without_vcpus() in psci_cpu_on_test Sean Christopherson
2022-05-04 22:47 ` [PATCH 035/128] KVM: selftests: Rename vm_create() => vm_create_barebones(), drop param Sean Christopherson
2022-05-04 22:47 ` [PATCH 036/128] KVM: selftests: Rename vm_create_without_vcpus() => vm_create() Sean Christopherson
2022-05-04 22:47 ` [PATCH 037/128] KVM: selftests: Make vm_create() a wrapper that specifies VM_MODE_DEFAULT Sean Christopherson
2022-05-04 22:47 ` [PATCH 038/128] KVM: selftests: Rename xAPIC state test's vcpu struct Sean Christopherson
2022-05-04 22:47 ` [PATCH 039/128] KVM: selftests: Rename vcpu.state => vcpu.run Sean Christopherson
2022-05-04 22:47 ` [PATCH 040/128] KVM: selftests: Rename 'struct vcpu' to 'struct kvm_vcpu' Sean Christopherson
2022-05-04 22:47 ` [PATCH 041/128] KVM: selftests: Return the created vCPU from vm_vcpu_add() Sean Christopherson
2022-05-04 22:47 ` [PATCH 042/128] KVM: selftests: Convert memslot_perf_test away from VCPU_ID Sean Christopherson
2022-05-04 22:47 ` [PATCH 043/128] KVM: selftests: Convert rseq_test " Sean Christopherson
2022-05-04 22:47 ` [PATCH 044/128] KVM: selftests: Convert xss_msr_test " Sean Christopherson
2022-05-04 22:47 ` [PATCH 045/128] KVM: selftests: Convert vmx_preemption_timer_test " Sean Christopherson
2022-05-04 22:47 ` [PATCH 046/128] KVM: selftests: Convert vmx_pmu_msrs_test " Sean Christopherson
2022-05-04 22:47 ` [PATCH 047/128] KVM: selftests: Convert vmx_set_nested_state_test " Sean Christopherson
2022-05-04 22:47 ` [PATCH 048/128] KVM: selftests: Convert vmx_tsc_adjust_test " Sean Christopherson
2022-05-04 22:47 ` [PATCH 049/128] KVM: selftests: Convert mmu_role_test " Sean Christopherson
2022-05-04 22:47 ` [PATCH 050/128] KVM: selftests: Convert pmu_event_filter_test " Sean Christopherson
2022-05-04 22:47 ` [PATCH 051/128] KVM: selftests: Convert smm_test " Sean Christopherson
2022-05-04 22:47 ` [PATCH 052/128] KVM: selftests: Convert state_test " Sean Christopherson
2022-05-04 22:47 ` [PATCH 053/128] KVM: selftests: Convert svm_int_ctl_test " Sean Christopherson
2022-05-04 22:48 ` [PATCH 054/128] KVM: selftests: Convert svm_vmcall_test " Sean Christopherson
2022-05-04 22:48 ` [PATCH 055/128] KVM: selftests: Convert sync_regs_test " Sean Christopherson
2022-05-04 22:48 ` [PATCH 056/128] KVM: selftests: Convert hyperv_cpuid " Sean Christopherson
2022-05-04 22:48 ` [PATCH 057/128] KVM: selftests: Convert kvm_pv_test " Sean Christopherson
2022-05-04 22:48 ` [PATCH 058/128] KVM: selftests: Convert platform_info_test " Sean Christopherson
2022-05-04 22:48 ` [PATCH 059/128] KVM: selftests: Convert vmx_nested_tsc_scaling_test " Sean Christopherson
2022-05-04 22:48 ` [PATCH 060/128] KVM: selftests: Convert set_sregs_test " Sean Christopherson
2022-05-04 22:48 ` [PATCH 061/128] KVM: selftests: Convert vmx_dirty_log_test " Sean Christopherson
2022-05-04 22:48 ` [PATCH 062/128] KVM: selftests: Convert vmx_close_while_nested_test " Sean Christopherson
2022-05-04 22:48 ` [PATCH 063/128] KVM: selftests: Convert vmx_apic_access_test " Sean Christopherson
2022-05-04 22:48 ` [PATCH 064/128] KVM: selftests: Convert userspace_msr_exit_test " Sean Christopherson
2022-05-04 22:48 ` [PATCH 065/128] KVM: selftests: Convert vmx_exception_with_invalid_guest_state " Sean Christopherson
2022-05-04 22:48 ` [PATCH 066/128] KVM: selftests: Convert tsc_msrs_test " Sean Christopherson
2022-05-04 22:48 ` [PATCH 067/128] KVM: selftests: Convert kvm_clock_test " Sean Christopherson
2022-05-04 22:48 ` [PATCH 068/128] KVM: selftests: Convert hyperv_svm_test " Sean Christopherson
2022-05-04 22:48 ` [PATCH 069/128] KVM: selftests: Convert hyperv_features " Sean Christopherson
2022-05-04 22:48 ` [PATCH 070/128] KVM: selftests: Convert hyperv_clock " Sean Christopherson
2022-05-04 22:48 ` [PATCH 071/128] KVM: selftests: Convert evmcs_test " Sean Christopherson
2022-05-04 22:48 ` [PATCH 072/128] KVM: selftests: Convert emulator_error_test " Sean Christopherson
2022-05-04 22:48 ` [PATCH 073/128] KVM: selftests: Convert debug_regs " Sean Christopherson
2022-05-04 22:48 ` [PATCH 074/128] KVM: selftests: Add proper helper for advancing RIP in debug_regs Sean Christopherson
2022-05-04 22:48 ` [PATCH 075/128] KVM: selftests: Convert amx_test away from VCPU_ID Sean Christopherson
2022-05-04 22:48 ` [PATCH 076/128] KVM: selftests: Convert cr4_cpuid_sync_test " Sean Christopherson
2022-05-04 22:48 ` [PATCH 077/128] KVM: selftests: Convert cpuid_test " Sean Christopherson
2022-05-04 22:48 ` [PATCH 078/128] KVM: selftests: Convert userspace_io_test " Sean Christopherson
2022-05-04 22:48 ` [PATCH 079/128] KVM: selftests: Convert vmx_invalid_nested_guest_state " Sean Christopherson
2022-05-04 22:48 ` [PATCH 080/128] KVM: selftests: Convert xen_vmcall_test " Sean Christopherson
2022-05-04 22:48 ` [PATCH 081/128] KVM: selftests: Convert xen_shinfo_test " Sean Christopherson
2022-05-04 22:48 ` [PATCH 082/128] KVM: selftests: Convert dirty_log_test " Sean Christopherson
2022-05-04 22:48 ` [PATCH 083/128] KVM: selftests: Convert set_memory_region_test " Sean Christopherson
2022-05-04 22:48 ` [PATCH 084/128] KVM: selftests: Convert system_counter_offset_test " Sean Christopherson
2022-05-04 22:48 ` [PATCH 085/128] KVM: selftests: Track kvm_vcpu object in tsc_scaling_sync Sean Christopherson
2022-05-04 22:48 ` [PATCH 086/128] KVM: selftests: Convert xapic_state_test away from hardcoded vCPU ID Sean Christopherson
2022-05-04 22:48 ` [PATCH 087/128] KVM: selftests: Convert debug-exceptions away from VCPU_ID Sean Christopherson
2022-05-04 22:48 ` [PATCH 088/128] KVM: selftests: Convert fix_hypercall_test " Sean Christopherson
2022-05-04 22:48 ` [PATCH 089/128] KVM: selftests: Convert vgic_irq " Sean Christopherson
2022-05-04 22:48 ` [PATCH 090/128] KVM: selftests: Make arm64's guest_get_vcpuid() declaration arm64-only Sean Christopherson
2022-05-04 22:48 ` [PATCH 091/128] KVM: selftests: Move vm_is_unrestricted_guest() to x86-64 Sean Christopherson
2022-05-04 22:48 ` [PATCH 092/128] KVM: selftests: Add "arch" to common utils that have arch implementations Sean Christopherson
2022-05-04 22:48 ` [PATCH 093/128] KVM: selftests: Return created vcpu from vm_vcpu_add_default() Sean Christopherson
2022-05-04 22:48 ` [PATCH 094/128] KVM: selftests: Rename vm_vcpu_add* helpers to better show relationships Sean Christopherson
2022-05-04 22:48 ` [PATCH 095/128] KVM: selftests: Convert set_boot_cpu_id away from global VCPU_IDs Sean Christopherson
2022-05-04 22:48 ` [PATCH 096/128] KVM: selftests: Convert psci_cpu_on_test away from VCPU_ID Sean Christopherson
2022-05-04 22:48 ` [PATCH 097/128] KVM: selftests: Convert hardware_disable_test to pass around vCPU objects Sean Christopherson
2022-05-04 22:48 ` [PATCH 098/128] KVM: selftests: Add VM creation helper that "returns" vCPUs Sean Christopherson
2022-05-04 22:48 ` [PATCH 099/128] KVM: selftests: Convert steal_time away from VCPU_ID Sean Christopherson
2022-05-04 22:48 ` [PATCH 100/128] KVM: selftests: Convert arch_timer " Sean Christopherson
2022-05-04 22:48 ` [PATCH 101/128] KVM: selftests: Fix typo in vgic_init test Sean Christopherson
2022-05-04 22:48 ` [PATCH 102/128] KVM: selftests: Convert vgic_init away from vm_create_default_with_vcpus() Sean Christopherson
2022-05-04 22:48 ` [PATCH 103/128] KVM: selftests: Convert xapic_ipi_test away from *_VCPU_ID Sean Christopherson
2022-05-04 22:48 ` [PATCH 104/128] KVM: selftests: Convert sync_regs_test away from VCPU_ID Sean Christopherson
2022-05-04 22:48 ` [PATCH 105/128] KVM: selftests: Convert s390's "resets" test " Sean Christopherson
2022-05-04 22:48 ` [PATCH 106/128] KVM: selftests: Convert memop " Sean Christopherson
2022-05-04 22:48 ` [PATCH 107/128] KVM: selftests: Convert s390x/diag318_test_handler " Sean Christopherson
2022-05-04 22:48 ` [PATCH 108/128] KVM: selftests: Convert tprot " Sean Christopherson
2022-05-04 22:48 ` [PATCH 109/128] KVM: selftests: Use vm_create() in tsc_scaling_sync Sean Christopherson
2022-05-04 22:48 ` [PATCH 110/128] KVM: selftests: Use vm_create_with_vcpus() in max_guest_memory_test Sean Christopherson
2022-05-04 22:48 ` [PATCH 111/128] KVM: selftests: Drop vm_create_default* helpers Sean Christopherson
2022-05-04 22:48 ` [PATCH 112/128] KVM: selftests: Drop @vcpuids param from VM creators Sean Christopherson
2022-05-04 22:48 ` [PATCH 113/128] KVM: selftests: Convert kvm_page_table_test away from reliance on vcpu_id Sean Christopherson
2022-05-04 22:49 ` [PATCH 114/128] KVM: selftests: Convert kvm_binary_stats_test away from vCPU IDs Sean Christopherson
2022-05-04 22:49 ` [PATCH 115/128] KVM: selftests: Convert get-reg-list away from its "VCPU_ID" Sean Christopherson
2022-05-04 22:49 ` [PATCH 116/128] KVM: selftests: Stop hardcoding vCPU IDs in vcpu_width_config Sean Christopherson
2022-05-04 22:49 ` Sean Christopherson [this message]
2022-05-04 22:49 ` [PATCH 118/128] KVM: selftests: Remove vcpu_get() usage from dirty_log_test Sean Christopherson
2022-05-04 22:49 ` [PATCH 119/128] KVM: selftests: Require vCPU output array when creating VM with vCPUs Sean Christopherson
2022-05-04 22:49 ` [PATCH 120/128] KVM: selftests: Purge vm+vcpu_id == vcpu silliness Sean Christopherson
2022-05-04 22:49 ` [PATCH 121/128] KVM: selftests: Drop vcpu_get(), rename vcpu_find() => vcpu_exists() Sean Christopherson
2022-05-04 22:49 ` [PATCH 122/128] KVM: selftests: Remove vcpu_state() helper Sean Christopherson
2022-05-04 22:49 ` [PATCH 123/128] KVM: selftests: Open code and drop 'struct kvm_vm' accessors Sean Christopherson
2022-05-04 22:49 ` [PATCH 124/128] KVM: selftests: Drop @slot0_mem_pages from __vm_create_with_vcpus() Sean Christopherson
2022-05-04 22:49 ` [PATCH 125/128] KVM: selftests: Drop @num_percpu_pages " Sean Christopherson
2022-05-04 22:49 ` [PATCH 126/128] KVM: selftests: Move per-VM/per-vCPU nr pages calculation to __vm_create() Sean Christopherson
2022-05-04 22:49 ` [PATCH 127/128] KVM: selftests: Trust that MAXPHYADDR > memslot0 in vmx_apic_access_test Sean Christopherson
2022-05-04 22:49 ` [PATCH 128/128] KVM: selftests: Drop DEFAULT_GUEST_PHY_PAGES, open code the magic number Sean Christopherson
2022-05-05 14:26 ` [PATCH 000/128] KVM: selftests: Overhaul APIs, purge VCPU_ID Vitaly Kuznetsov
