From: Ben Gardon <bgardon@google.com>
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org,
	linux-kselftest@vger.kernel.org
Cc: Paolo Bonzini <pbonzini@redhat.com>,
	Cannon Matthews <cannonmatthews@google.com>,
	Peter Xu <peterx@redhat.com>, Andrew Jones <drjones@redhat.com>,
	Peter Shier <pshier@google.com>, Oliver Upton <oupton@google.com>,
	Ben Gardon <bgardon@google.com>
Subject: [PATCH v4 08/10] KVM: selftests: Time guest demand paging
Date: Thu, 23 Jan 2020 10:04:34 -0800	[thread overview]
Message-ID: <20200123180436.99487-9-bgardon@google.com> (raw)
In-Reply-To: <20200123180436.99487-1-bgardon@google.com>

In order to quantify demand paging performance, time guest execution
during demand paging. Report the total guest execution time and the
overall demand paging rate in pages per second. Per-vCPU and per-uffd-thread
timings are printed when PRINT_PER_VCPU_UPDATES is defined, and per-page
UFFDIO_COPY timings when PRINT_PER_PAGE_UPDATES is defined.

Signed-off-by: Ben Gardon <bgardon@google.com>
---
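Note for reviewers (illustration only, not part of the patch): the
measurements added below are the usual CLOCK_MONOTONIC bracket around the
region of interest, with the result converted to seconds/nanoseconds for
printing. A minimal, self-contained sketch of that pattern, using a to_ns()
helper that mirrors the one this patch adds:

  #include <stdint.h>
  #include <stdio.h>
  #include <time.h>

  /* Convert a timespec to nanoseconds. */
  static int64_t to_ns(struct timespec ts)
  {
          return (int64_t)ts.tv_nsec + 1000000000LL * (int64_t)ts.tv_sec;
  }

  int main(void)
  {
          struct timespec start, end;

          clock_gettime(CLOCK_MONOTONIC, &start);
          /* ... region being timed, e.g. a vCPU run or a UFFDIO_COPY ... */
          clock_gettime(CLOCK_MONOTONIC, &end);

          printf("elapsed: %lld ns\n",
                 (long long)(to_ns(end) - to_ns(start)));
          return 0;
  }
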
 .../selftests/kvm/demand_paging_test.c        | 68 +++++++++++++++++++
 1 file changed, 68 insertions(+)

diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c
index 2002032df32cc..0dc5d04718678 100644
--- a/tools/testing/selftests/kvm/demand_paging_test.c
+++ b/tools/testing/selftests/kvm/demand_paging_test.c
@@ -32,6 +32,12 @@
 
 #define DEFAULT_GUEST_TEST_MEM_SIZE (1 << 30) /* 1G */
 
+#ifdef PRINT_PER_PAGE_UPDATES
+#define PER_PAGE_DEBUG(...) DEBUG(__VA_ARGS__)
+#else
+#define PER_PAGE_DEBUG(...)
+#endif
+
 #ifdef PRINT_PER_VCPU_UPDATES
 #define PER_VCPU_DEBUG(...) DEBUG(__VA_ARGS__)
 #else
@@ -64,6 +70,26 @@ static uint64_t guest_test_phys_mem;
  */
 static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
 
+int64_t to_ns(struct timespec ts)
+{
+	return (int64_t)ts.tv_nsec + 1000000000LL * (int64_t)ts.tv_sec;
+}
+
+struct timespec diff(struct timespec start, struct timespec end)
+{
+	struct timespec temp;
+
+	if ((end.tv_nsec-start.tv_nsec) < 0) {
+		temp.tv_sec = end.tv_sec - start.tv_sec - 1;
+		temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec;
+	} else {
+		temp.tv_sec = end.tv_sec - start.tv_sec;
+		temp.tv_nsec = end.tv_nsec - start.tv_nsec;
+	}
+
+	return temp;
+}
+
 struct vcpu_args {
 	uint64_t gva;
 	uint64_t pages;
@@ -109,10 +135,14 @@ static void *vcpu_worker(void *data)
 	struct kvm_vm *vm = args->vm;
 	int vcpu_id = args->vcpu_id;
 	struct kvm_run *run;
+	struct timespec start;
+	struct timespec end;
 
 	vcpu_args_set(vm, vcpu_id, 1, vcpu_id);
 	run = vcpu_state(vm, vcpu_id);
 
+	clock_gettime(CLOCK_MONOTONIC, &start);
+
 	/* Let the guest access its memory */
 	ret = _vcpu_run(vm, vcpu_id);
 	TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
@@ -122,6 +152,11 @@ static void *vcpu_worker(void *data)
 			    exit_reason_str(run->exit_reason));
 	}
 
+	clock_gettime(CLOCK_MONOTONIC, &end);
+	PER_VCPU_DEBUG("vCPU %d execution time: %lld.%.9lds\n", vcpu_id,
+		       (long long)(diff(start, end).tv_sec),
+		       diff(start, end).tv_nsec);
+
 	return NULL;
 }
 
@@ -158,6 +193,8 @@ static struct kvm_vm *create_vm(enum vm_guest_mode mode, int vcpus,
 static int handle_uffd_page_request(int uffd, uint64_t addr)
 {
 	pid_t tid;
+	struct timespec start;
+	struct timespec end;
 	struct uffdio_copy copy;
 	int r;
 
@@ -168,6 +205,8 @@ static int handle_uffd_page_request(int uffd, uint64_t addr)
 	copy.len = host_page_size;
 	copy.mode = 0;
 
+	clock_gettime(CLOCK_MONOTONIC, &start);
+
 	r = ioctl(uffd, UFFDIO_COPY, &copy);
 	if (r == -1) {
 		DEBUG("Failed Paged in 0x%lx from thread %d with errno: %d\n",
@@ -175,6 +214,13 @@ static int handle_uffd_page_request(int uffd, uint64_t addr)
 		return r;
 	}
 
+	clock_gettime(CLOCK_MONOTONIC, &end);
+
+	PER_PAGE_DEBUG("UFFDIO_COPY %d \t%lld ns\n", tid,
+		       (long long)to_ns(diff(start, end)));
+	PER_PAGE_DEBUG("Paged in %ld bytes at 0x%lx from thread %d\n",
+		       host_page_size, addr, tid);
+
 	return 0;
 }
 
@@ -193,7 +239,10 @@ static void *uffd_handler_thread_fn(void *arg)
 	int pipefd = uffd_args->pipefd;
 	useconds_t delay = uffd_args->delay;
 	int64_t pages = 0;
+	struct timespec start;
+	struct timespec end;
 
+	clock_gettime(CLOCK_MONOTONIC, &start);
 	while (!quit_uffd_thread) {
 		struct uffd_msg msg;
 		struct pollfd pollfd[2];
@@ -261,6 +310,13 @@ static void *uffd_handler_thread_fn(void *arg)
 		pages++;
 	}
 
+	clock_gettime(CLOCK_MONOTONIC, &end);
+	PER_VCPU_DEBUG("userfaulted %ld pages over %lld.%.9lds. (%f/sec)\n",
+		       pages, (long long)(diff(start, end).tv_sec),
+		       diff(start, end).tv_nsec, pages /
+		       ((double)diff(start, end).tv_sec +
+			(double)diff(start, end).tv_nsec / 1000000000.0));
+
 	return NULL;
 }
 
@@ -325,6 +381,8 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd,
 	uint64_t guest_num_pages;
 	int vcpu_id;
 	int r;
+	struct timespec start;
+	struct timespec end;
 
 	vm = create_vm(mode, vcpus, vcpu_memory_bytes);
 
@@ -449,6 +507,8 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd,
 
 	DEBUG("Finished creating vCPUs and starting uffd threads\n");
 
+	clock_gettime(CLOCK_MONOTONIC, &start);
+
 	for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
 		pthread_create(&vcpu_threads[vcpu_id], NULL, vcpu_worker,
 			       &vcpu_args[vcpu_id]);
@@ -464,6 +524,8 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd,
 
 	DEBUG("All vCPU threads joined\n");
 
+	clock_gettime(CLOCK_MONOTONIC, &end);
+
 	if (use_uffd) {
 		char c;
 
@@ -476,6 +538,12 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd,
 		}
 	}
 
+	DEBUG("Total guest execution time: %lld.%.9lds\n",
+	      (long long)(diff(start, end).tv_sec), diff(start, end).tv_nsec);
+	DEBUG("Overall demand paging rate: %f pgs/sec\n",
+	      guest_num_pages / ((double)diff(start, end).tv_sec +
+	      (double)diff(start, end).tv_nsec / 1000000000.0));
+
 	ucall_uninit(vm);
 	kvm_vm_free(vm);
 
-- 
2.25.0.341.g760bfbb309-goog

