From: Peter Xu <peterx@redhat.com>
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: Sean Christopherson <sean.j.christopherson@intel.com>,
Paolo Bonzini <pbonzini@redhat.com>,
"Dr . David Alan Gilbert" <dgilbert@redhat.com>,
peterx@redhat.com, Vitaly Kuznetsov <vkuznets@redhat.com>
Subject: [PATCH RFC 10/15] KVM: selftests: Use a single binary for dirty/clear log test
Date: Fri, 29 Nov 2019 16:35:00 -0500 [thread overview]
Message-ID: <20191129213505.18472-11-peterx@redhat.com> (raw)
In-Reply-To: <20191129213505.18472-1-peterx@redhat.com>
Remove the clear_dirty_log test, instead merge it into the existing
dirty_log_test. It should be cleaner to use this single binary to do
both tests, and it also prepares for the upcoming dirty ring test.
The default test will still be the dirty_log test. To run the clear
dirty log test, we need to specify "-M clear-log".
Signed-off-by: Peter Xu <peterx@redhat.com>
---
tools/testing/selftests/kvm/Makefile | 2 -
.../selftests/kvm/clear_dirty_log_test.c | 2 -
tools/testing/selftests/kvm/dirty_log_test.c | 131 +++++++++++++++---
3 files changed, 110 insertions(+), 25 deletions(-)
delete mode 100644 tools/testing/selftests/kvm/clear_dirty_log_test.c
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 3138a916574a..130a7b1c7ad6 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -26,11 +26,9 @@ TEST_GEN_PROGS_x86_64 += x86_64/vmx_dirty_log_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
TEST_GEN_PROGS_x86_64 += x86_64/xss_msr_test
-TEST_GEN_PROGS_x86_64 += clear_dirty_log_test
TEST_GEN_PROGS_x86_64 += dirty_log_test
TEST_GEN_PROGS_x86_64 += kvm_create_max_vcpus
-TEST_GEN_PROGS_aarch64 += clear_dirty_log_test
TEST_GEN_PROGS_aarch64 += dirty_log_test
TEST_GEN_PROGS_aarch64 += kvm_create_max_vcpus
diff --git a/tools/testing/selftests/kvm/clear_dirty_log_test.c b/tools/testing/selftests/kvm/clear_dirty_log_test.c
deleted file mode 100644
index 749336937d37..000000000000
--- a/tools/testing/selftests/kvm/clear_dirty_log_test.c
+++ /dev/null
@@ -1,2 +0,0 @@
-#define USE_CLEAR_DIRTY_LOG
-#include "dirty_log_test.c"
diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index 3c0ffd34b3b0..a8ae8c0042a8 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -128,6 +128,66 @@ static uint64_t host_dirty_count;
static uint64_t host_clear_count;
static uint64_t host_track_next_count;
+enum log_mode_t {
+ /* Only use KVM_GET_DIRTY_LOG for logging */
+ LOG_MODE_DIRTY_LOG = 0,
+
+ /* Use both KVM_[GET|CLEAR]_DIRTY_LOG for logging */
+ LOG_MODE_CLEAR_LOG = 1,
+
+ LOG_MODE_NUM,
+};
+
+/* Mode of logging. Default is LOG_MODE_DIRTY_LOG */
+static enum log_mode_t host_log_mode;
+
+static void clear_log_create_vm_done(struct kvm_vm *vm)
+{
+ struct kvm_enable_cap cap = {};
+
+ if (!kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2)) {
+ fprintf(stderr, "KVM_CLEAR_DIRTY_LOG not available, skipping tests\n");
+ exit(KSFT_SKIP);
+ }
+
+ cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;
+ cap.args[0] = 1;
+ vm_enable_cap(vm, &cap);
+}
+
+static void dirty_log_collect_dirty_pages(struct kvm_vm *vm, int slot,
+ void *bitmap, uint32_t num_pages)
+{
+ kvm_vm_get_dirty_log(vm, slot, bitmap);
+}
+
+static void clear_log_collect_dirty_pages(struct kvm_vm *vm, int slot,
+ void *bitmap, uint32_t num_pages)
+{
+ kvm_vm_get_dirty_log(vm, slot, bitmap);
+ kvm_vm_clear_dirty_log(vm, slot, bitmap, 0, num_pages);
+}
+
+struct log_mode {
+ const char *name;
+ /* Hook when the vm creation is done (before vcpu creation) */
+ void (*create_vm_done)(struct kvm_vm *vm);
+ /* Hook to collect the dirty pages into the bitmap provided */
+ void (*collect_dirty_pages) (struct kvm_vm *vm, int slot,
+ void *bitmap, uint32_t num_pages);
+} log_modes[LOG_MODE_NUM] = {
+ {
+ .name = "dirty-log",
+ .create_vm_done = NULL,
+ .collect_dirty_pages = dirty_log_collect_dirty_pages,
+ },
+ {
+ .name = "clear-log",
+ .create_vm_done = clear_log_create_vm_done,
+ .collect_dirty_pages = clear_log_collect_dirty_pages,
+ },
+};
+
/*
* We use this bitmap to track some pages that should have its dirty
* bit set in the _next_ iteration. For example, if we detected the
@@ -137,6 +197,33 @@ static uint64_t host_track_next_count;
*/
static unsigned long *host_bmap_track;
+static void log_modes_dump(void)
+{
+ int i;
+
+ for (i = 0; i < LOG_MODE_NUM; i++)
+ printf("%s, ", log_modes[i].name);
+ puts("\b\b \b\b");
+}
+
+static void log_mode_create_vm_done(struct kvm_vm *vm)
+{
+ struct log_mode *mode = &log_modes[host_log_mode];
+
+ if (mode->create_vm_done)
+ mode->create_vm_done(vm);
+}
+
+static void log_mode_collect_dirty_pages(struct kvm_vm *vm, int slot,
+ void *bitmap, uint32_t num_pages)
+{
+ struct log_mode *mode = &log_modes[host_log_mode];
+
+ TEST_ASSERT(mode->collect_dirty_pages != NULL,
+ "collect_dirty_pages() is required for any log mode!");
+ mode->collect_dirty_pages(vm, slot, bitmap, num_pages);
+}
+
static void generate_random_array(uint64_t *guest_array, uint64_t size)
{
uint64_t i;
@@ -257,6 +344,7 @@ static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid,
#ifdef __x86_64__
vm_create_irqchip(vm);
#endif
+ log_mode_create_vm_done(vm);
vm_vcpu_add_default(vm, vcpuid, guest_code);
return vm;
}
@@ -316,14 +404,6 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
bmap = bitmap_alloc(host_num_pages);
host_bmap_track = bitmap_alloc(host_num_pages);
-#ifdef USE_CLEAR_DIRTY_LOG
- struct kvm_enable_cap cap = {};
-
- cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;
- cap.args[0] = 1;
- vm_enable_cap(vm, &cap);
-#endif
-
/* Add an extra memory slot for testing dirty logging */
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
guest_test_phys_mem,
@@ -364,11 +444,8 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
while (iteration < iterations) {
/* Give the vcpu thread some time to dirty some pages */
usleep(interval * 1000);
- kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
-#ifdef USE_CLEAR_DIRTY_LOG
- kvm_vm_clear_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap, 0,
- host_num_pages);
-#endif
+ log_mode_collect_dirty_pages(vm, TEST_MEM_SLOT_INDEX,
+ bmap, host_num_pages);
vm_dirty_log_verify(bmap);
iteration++;
sync_global_to_guest(vm, iteration);
@@ -413,6 +490,9 @@ static void help(char *name)
TEST_HOST_LOOP_INTERVAL);
printf(" -p: specify guest physical test memory offset\n"
" Warning: a low offset can conflict with the loaded test code.\n");
+ printf(" -M: specify the host logging mode "
+ "(default: dirty-log). Supported modes:\n\t");
+ log_modes_dump();
printf(" -m: specify the guest mode ID to test "
"(default: test all supported modes)\n"
" This option may be used multiple times.\n"
@@ -437,13 +517,6 @@ int main(int argc, char *argv[])
unsigned int host_ipa_limit;
#endif
-#ifdef USE_CLEAR_DIRTY_LOG
- if (!kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2)) {
- fprintf(stderr, "KVM_CLEAR_DIRTY_LOG not available, skipping tests\n");
- exit(KSFT_SKIP);
- }
-#endif
-
#ifdef __x86_64__
vm_guest_mode_params_init(VM_MODE_PXXV48_4K, true, true);
#endif
@@ -463,7 +536,7 @@ int main(int argc, char *argv[])
vm_guest_mode_params_init(VM_MODE_P40V48_4K, true, true);
#endif
- while ((opt = getopt(argc, argv, "hi:I:p:m:")) != -1) {
+ while ((opt = getopt(argc, argv, "hi:I:p:m:M:")) != -1) {
switch (opt) {
case 'i':
iterations = strtol(optarg, NULL, 10);
@@ -485,6 +558,22 @@ int main(int argc, char *argv[])
"Guest mode ID %d too big", mode);
vm_guest_mode_params[mode].enabled = true;
break;
+ case 'M':
+ for (i = 0; i < LOG_MODE_NUM; i++) {
+ if (!strcmp(optarg, log_modes[i].name)) {
+ DEBUG("Setting log mode to: '%s'\n",
+ optarg);
+ host_log_mode = i;
+ break;
+ }
+ }
+ if (i == LOG_MODE_NUM) {
+ printf("Log mode '%s' is invalid. "
+ "Please choose from: ", optarg);
+ log_modes_dump();
+ exit(-1);
+ }
+ break;
case 'h':
default:
help(argv[0]);
--
2.21.0
next prev parent reply other threads:[~2019-11-29 21:36 UTC|newest]
Thread overview: 121+ messages / expand[flat|nested] mbox.gz Atom feed top
2019-11-29 21:34 [PATCH RFC 00/15] KVM: Dirty ring interface Peter Xu
2019-11-29 21:34 ` [PATCH RFC 01/15] KVM: Move running VCPU from ARM to common code Peter Xu
2019-12-03 19:01 ` Sean Christopherson
2019-12-04 9:42 ` Paolo Bonzini
2019-12-09 22:05 ` Peter Xu
2019-11-29 21:34 ` [PATCH RFC 02/15] KVM: Add kvm/vcpu argument to mark_dirty_page_in_slot Peter Xu
2019-12-02 19:32 ` Sean Christopherson
2019-12-02 20:49 ` Peter Xu
2019-11-29 21:34 ` [PATCH RFC 03/15] KVM: Add build-time error check on kvm_run size Peter Xu
2019-12-02 19:30 ` Sean Christopherson
2019-12-02 20:53 ` Peter Xu
2019-12-02 22:19 ` Sean Christopherson
2019-12-02 22:40 ` Peter Xu
2019-12-03 5:50 ` Sean Christopherson
2019-12-03 13:41 ` Paolo Bonzini
2019-12-03 17:04 ` Peter Xu
2019-11-29 21:34 ` [PATCH RFC 04/15] KVM: Implement ring-based dirty memory tracking Peter Xu
2019-12-02 20:10 ` Sean Christopherson
2019-12-02 21:16 ` Peter Xu
2019-12-02 21:50 ` Sean Christopherson
2019-12-02 23:09 ` Peter Xu
2019-12-03 13:48 ` Paolo Bonzini
2019-12-03 18:46 ` Sean Christopherson
2019-12-04 10:05 ` Paolo Bonzini
2019-12-07 0:29 ` Sean Christopherson
2019-12-09 9:37 ` Paolo Bonzini
2019-12-09 21:54 ` Peter Xu
2019-12-10 10:07 ` Paolo Bonzini
2019-12-10 15:52 ` Peter Xu
2019-12-10 17:09 ` Paolo Bonzini
2019-12-15 17:21 ` Peter Xu
2019-12-16 10:08 ` Paolo Bonzini
2019-12-16 18:54 ` Peter Xu
2019-12-17 9:01 ` Paolo Bonzini
2019-12-17 16:24 ` Peter Xu
2019-12-17 16:28 ` Paolo Bonzini
2019-12-18 21:58 ` Peter Xu
2019-12-18 22:24 ` Sean Christopherson
2019-12-18 22:37 ` Paolo Bonzini
2019-12-18 22:49 ` Peter Xu
2019-12-17 2:28 ` Tian, Kevin
2019-12-17 16:18 ` Alex Williamson
2019-12-17 16:30 ` Paolo Bonzini
2019-12-18 0:29 ` Tian, Kevin
[not found] ` <AADFC41AFE54684AB9EE6CBC0274A5D19D645E5F@SHSMSX104.ccr.corp.intel.com>
2019-12-17 5:17 ` Tian, Kevin
2019-12-17 5:25 ` Yan Zhao
2019-12-17 16:24 ` Alex Williamson
2019-12-03 19:13 ` Sean Christopherson
2019-12-04 10:14 ` Paolo Bonzini
2019-12-04 14:33 ` Sean Christopherson
2019-12-04 10:38 ` Jason Wang
2019-12-04 11:04 ` Paolo Bonzini
2019-12-04 19:52 ` Peter Xu
2019-12-05 6:51 ` Jason Wang
2019-12-05 12:08 ` Peter Xu
2019-12-05 13:12 ` Jason Wang
2019-12-10 13:25 ` Michael S. Tsirkin
2019-12-10 13:31 ` Paolo Bonzini
2019-12-10 16:02 ` Peter Xu
2019-12-10 21:53 ` Michael S. Tsirkin
2019-12-11 9:05 ` Paolo Bonzini
2019-12-11 13:04 ` Michael S. Tsirkin
2019-12-11 14:54 ` Peter Xu
2019-12-10 21:48 ` Michael S. Tsirkin
2019-12-11 12:53 ` Michael S. Tsirkin
2019-12-11 14:14 ` Paolo Bonzini
2019-12-11 20:59 ` Peter Xu
2019-12-11 22:57 ` Michael S. Tsirkin
2019-12-12 0:08 ` Paolo Bonzini
2019-12-12 7:36 ` Michael S. Tsirkin
2019-12-12 8:12 ` Paolo Bonzini
2019-12-12 10:38 ` Michael S. Tsirkin
2019-12-15 17:33 ` Peter Xu
2019-12-16 9:47 ` Michael S. Tsirkin
2019-12-16 15:07 ` Peter Xu
2019-12-16 15:33 ` Michael S. Tsirkin
2019-12-16 15:47 ` Peter Xu
2019-12-11 17:24 ` Christophe de Dinechin
2019-12-13 20:23 ` Peter Xu
2019-12-14 7:57 ` Paolo Bonzini
2019-12-14 16:26 ` Peter Xu
2019-12-16 9:29 ` Paolo Bonzini
2019-12-16 15:26 ` Peter Xu
2019-12-16 15:31 ` Paolo Bonzini
2019-12-16 15:43 ` Peter Xu
2019-12-17 12:16 ` Christophe de Dinechin
2019-12-17 12:19 ` Paolo Bonzini
2019-12-17 15:38 ` Peter Xu
2019-12-17 16:31 ` Paolo Bonzini
2019-12-17 16:42 ` Peter Xu
2019-12-17 16:48 ` Paolo Bonzini
2019-12-17 19:41 ` Peter Xu
2019-12-18 0:33 ` Paolo Bonzini
2019-12-18 16:32 ` Peter Xu
2019-12-18 16:41 ` Paolo Bonzini
2019-12-20 18:19 ` Peter Xu
2019-11-29 21:34 ` [PATCH RFC 05/15] KVM: Make dirty ring exclusive to dirty bitmap log Peter Xu
2019-11-29 21:34 ` [PATCH RFC 06/15] KVM: Introduce dirty ring wait queue Peter Xu
2019-11-29 21:34 ` [PATCH RFC 07/15] KVM: X86: Implement ring-based dirty memory tracking Peter Xu
2019-11-29 21:34 ` [PATCH RFC 08/15] KVM: selftests: Always clear dirty bitmap after iteration Peter Xu
2019-11-29 21:34 ` [PATCH RFC 09/15] KVM: selftests: Sync uapi/linux/kvm.h to tools/ Peter Xu
2019-11-29 21:35 ` Peter Xu [this message]
2019-11-29 21:35 ` [PATCH RFC 11/15] KVM: selftests: Introduce after_vcpu_run hook for dirty log test Peter Xu
2019-11-29 21:35 ` [PATCH RFC 12/15] KVM: selftests: Add dirty ring buffer test Peter Xu
2019-11-29 21:35 ` [PATCH RFC 13/15] KVM: selftests: Let dirty_log_test async for dirty ring test Peter Xu
2019-11-29 21:35 ` [PATCH RFC 14/15] KVM: selftests: Add "-c" parameter to dirty log test Peter Xu
2019-11-29 21:35 ` [PATCH RFC 15/15] KVM: selftests: Test dirty ring waitqueue Peter Xu
2019-11-30 8:29 ` [PATCH RFC 00/15] KVM: Dirty ring interface Paolo Bonzini
2019-12-02 2:13 ` Peter Xu
2019-12-03 13:59 ` Paolo Bonzini
2019-12-05 19:30 ` Peter Xu
2019-12-05 19:59 ` Paolo Bonzini
2019-12-05 20:52 ` Peter Xu
2019-12-02 20:21 ` Sean Christopherson
2019-12-02 20:43 ` Peter Xu
2019-12-04 10:39 ` Jason Wang
2019-12-04 19:33 ` Peter Xu
2019-12-05 6:49 ` Jason Wang
2019-12-11 13:41 ` Christophe de Dinechin
2019-12-11 14:16 ` Paolo Bonzini
2019-12-11 17:15 ` Peter Xu
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20191129213505.18472-11-peterx@redhat.com \
--to=peterx@redhat.com \
--cc=dgilbert@redhat.com \
--cc=kvm@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=pbonzini@redhat.com \
--cc=sean.j.christopherson@intel.com \
--cc=vkuznets@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).