KVM Archive on lore.kernel.org
From: Jason Wang <jasowang@redhat.com>
To: mtosatti@redhat.com, avi@redhat.com, kvm@vger.kernel.org
Cc: glommer@redhat.com
Subject: [PATCH kvm-unit-test 6/6] Add a test for kvm-clock
Date: Fri, 27 Aug 2010 13:49:53 +0800
Message-ID: <20100827054953.7409.25948.stgit@FreeLancer> (raw)
In-Reply-To: <20100827054733.7409.63882.stgit@FreeLancer>

This patch implements two tests for kvmclock. The first checks whether
the time of day returned by kvmclock matches the value obtained from
the host. The second checks whether the kvmclock cycle counter grows
monotonically in an SMP guest.

The test accepts three parameters: the number of test loops, the
seconds since 1970-01-01 00:00:00 UTC (easily obtained on the host with
date +%s), and the maximum accepted offset between the guest and host
time of day.
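
For reference, the arguments are passed on the guest kernel command
line via --append, as in the unittests.cfg entry added below. A minimal
sketch of such an entry, with an explicit third argument of 5 seconds
added purely as an illustration (the default threshold is 60 seconds):

  [kvmclock_test]
  file = kvmclock_test.flat
  smp = 2
  extra_params = --append "10000000 `date +%s` 5"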

Signed-off-by: Jason Wang <jasowang@redhat.com>
---
 config-x86-common.mak |    6 ++
 x86/README            |    2 +
 x86/kvmclock_test.c   |  145 +++++++++++++++++++++++++++++++++++++++++++++++++
 x86/unittests.cfg     |    5 ++
 4 files changed, 157 insertions(+), 1 deletions(-)
 create mode 100644 x86/kvmclock_test.c

diff --git a/config-x86-common.mak b/config-x86-common.mak
index b8ca859..b541c1c 100644
--- a/config-x86-common.mak
+++ b/config-x86-common.mak
@@ -26,7 +26,8 @@ FLATLIBS = lib/libcflat.a $(libgcc)
 tests-common = $(TEST_DIR)/vmexit.flat $(TEST_DIR)/tsc.flat \
                $(TEST_DIR)/smptest.flat  $(TEST_DIR)/port80.flat \
                $(TEST_DIR)/realmode.flat $(TEST_DIR)/msr.flat \
-               $(TEST_DIR)/hypercall.flat $(TEST_DIR)/sieve.flat
+               $(TEST_DIR)/hypercall.flat $(TEST_DIR)/sieve.flat \
+               $(TEST_DIR)/kvmclock_test.flat
 
 tests_and_config = $(TEST_DIR)/*.flat $(TEST_DIR)/unittests.cfg
 
@@ -70,6 +71,9 @@ $(TEST_DIR)/rmap_chain.flat: $(cstart.o) $(TEST_DIR)/rmap_chain.o \
 
 $(TEST_DIR)/svm.flat: $(cstart.o) $(TEST_DIR)/vm.o
 
+$(TEST_DIR)/kvmclock_test.flat: $(cstart.o) $(TEST_DIR)/kvmclock.o \
+                                $(TEST_DIR)/kvmclock_test.o
+
 arch_clean:
 	$(RM) $(TEST_DIR)/*.o $(TEST_DIR)/*.flat \
 	$(TEST_DIR)/.*.d $(TEST_DIR)/lib/.*.d $(TEST_DIR)/lib/*.o
diff --git a/x86/README b/x86/README
index ab5a2ae..4b90080 100644
--- a/x86/README
+++ b/x86/README
@@ -12,3 +12,5 @@ sieve: heavy memory access with no paging and with paging static and with paging
 smptest: run smp_id() on every cpu and compares return value to number
 tsc: write to tsc(0) and write to tsc(100000000000) and read it back
 vmexit: long loops for each: cpuid, vmcall, mov_from_cr8, mov_to_cr8, inl_pmtimer, ipi, ipi+halt
+kvmclock_test: monotonic cycle test of kvmclock and a sanity test of
+wallclock
diff --git a/x86/kvmclock_test.c b/x86/kvmclock_test.c
new file mode 100644
index 0000000..cd80915
--- /dev/null
+++ b/x86/kvmclock_test.c
@@ -0,0 +1,145 @@
+#include "libcflat.h"
+#include "smp.h"
+#include "atomic.h"
+#include "string.h"
+#include "kvmclock.h"
+
+#define DEFAULT_TEST_LOOPS 100000000L
+#define DEFAULT_THRESHOLD  60L
+
+struct test_info {
+        struct spinlock lock;
+        long loops;               /* test loops */
+        u64 warps;                /* warp count */
+        long long worst;          /* worst warp */
+        volatile cycle_t last;    /* last cycle seen by test */
+        atomic_t ncpus;           /* number of cpus in the test */
+};
+
+struct test_info ti[2];
+
+static int wallclock_test(long sec, long threshold)
+{
+        int i;
+        long ksec, offset;
+        struct timespec ts, ts_last;
+
+        printf("Wallclock test, threshold %ld\n", threshold);
+        kvm_get_wallclock(&ts_last);
+        ksec = ts_last.sec + ts_last.nsec / NSEC_PER_SEC;
+
+        offset = ksec - sec;
+        printf("Seconds got from host: %ld\n", sec);
+        printf("Seconds got from kvmclock: %ld\n", ksec);
+
+        if (offset > threshold || offset < -threshold) {
+                printf("Offset %ld is larger than threshold %ld!\n", offset, threshold);
+                return 1;
+        }
+
+        for (i=0; i < 100; i++){
+                kvm_get_wallclock(&ts);
+                if (ts.nsec != ts_last.nsec || ts.sec != ts_last.sec){
+                        printf ("Inconsistent wall clock returned!\n");
+                        return 1;
+                }
+        }
+        return 0;
+}
+
+static void kvm_clock_test(void *data)
+{
+        struct test_info *hv_test_info = (struct test_info *)data;
+        int i;
+
+        for (i = 0; i < hv_test_info->loops; i++){
+                cycle_t t0, t1;
+                long long delta;
+
+                spin_lock(&hv_test_info->lock);
+                t1 = kvm_clock_read();
+                t0 = hv_test_info->last;
+                hv_test_info->last = kvm_clock_read();
+                spin_unlock(&hv_test_info->lock);
+
+                delta = t1 - t0;
+                if (delta < 0){
+                        spin_lock(&hv_test_info->lock);
+                        ++hv_test_info->warps;
+                        if (delta < hv_test_info->worst){
+                                hv_test_info->worst = delta;
+                                printf("Worst warp %lld\n", hv_test_info->worst);
+                        }
+                        spin_unlock(&hv_test_info->lock);
+                }
+
+                if (!((unsigned long)i & 31))
+                        asm volatile("rep; nop");
+        }
+
+        atomic_dec(&hv_test_info->ncpus);
+}
+
+static int cycle_test(int ncpus, long loops, struct test_info *ti)
+{
+        int i;
+
+        atomic_set(&ti->ncpus, ncpus);
+        ti->loops = loops;
+        for (i = ncpus - 1; i >= 0; i--)
+                on_cpu_async(i, kvm_clock_test, (void *)ti);
+
+        /* Wait for the other vcpus to finish */
+        while(atomic_read(&ti->ncpus))
+                ;
+
+        printf("Total vcpus: %d\n", ncpus);
+        printf("Test  loops: %ld\n", ti->loops);
+        printf("Total warps: %lld\n", ti->warps);
+        printf("Worst warp:  %lld\n", ti->worst);
+
+        return ti->warps ? 1 : 0;
+}
+
+int main(int ac, char **av)
+{
+        int ncpus = cpu_count();
+        int nerr = 0, i;
+        long loops = DEFAULT_TEST_LOOPS;
+        long sec = 0;
+        long threshold = DEFAULT_THRESHOLD;
+
+        if (ac > 1)
+                loops = atol(av[1]);
+        if (ac > 2)
+                sec = atol(av[2]);
+        if (ac > 3)
+                threshold = atol(av[3]);
+
+        smp_init();
+
+        if (ncpus > MAX_CPU)
+                ncpus = MAX_CPU;
+        for (i = 0; i < ncpus; ++i)
+                on_cpu(i, kvm_clock_init, (void *)0);
+
+        if (ac > 2)
+                nerr += wallclock_test(sec, threshold);
+
+        printf("Check the stability of raw cycle\n");
+        pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT
+                          | PVCLOCK_RAW_CYCLE_BIT);
+        if (cycle_test(ncpus, loops, &ti[1]))
+                printf("Raw cycle is not stable\n");
+        else
+                printf("Raw cycle is stable\n");
+
+        pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
+        printf("Monotonic cycle test:\n");
+        nerr += cycle_test(ncpus, loops, &ti[0]);
+
+        for (i = 0; i < ncpus; ++i)
+                on_cpu(i, kvm_clock_clear, (void *)0);
+
+        return nerr > 0 ? 1 : 0;
+}
diff --git a/x86/unittests.cfg b/x86/unittests.cfg
index 7796e41..a3290cd 100644
--- a/x86/unittests.cfg
+++ b/x86/unittests.cfg
@@ -63,3 +63,8 @@ extra_params = -enable-nesting -cpu qemu64,+svm
 file = svm.flat
 smp = 2
 extra_params = -cpu qemu64,-svm
+
+[kvmclock_test]
+file = kvmclock_test.flat
+smp = 2
+extra_params = --append "10000000 `date +%s`"
\ No newline at end of file


Thread overview: 16+ messages
2010-08-27  5:49 [PATCH kvm-unit-test 0/6] Kvmclock test Jason Wang
2010-08-27  5:49 ` [PATCH kvm-unit-test 1/6] Introduce memory barriers Jason Wang
2010-08-27  5:49 ` [PATCH kvm-unit-test 2/6] Introduce atomic operations Jason Wang
2010-08-27 11:39   ` Glauber Costa
2010-08-29  9:39     ` Avi Kivity
2010-08-27  5:49 ` [PATCH kvm-unit-test 3/6] Export tsc related helpers Jason Wang
2010-08-27  5:49 ` [PATCH kvm-unit-test 4/6] Introduce atol() Jason Wang
2010-08-27  5:49 ` [PATCH kvm-unit-test 5/6] Add a simple kvmclock driver Jason Wang
2010-08-27 11:31   ` Glauber Costa
2010-08-27  5:49 ` Jason Wang [this message]
2010-08-27 11:27   ` [PATCH kvm-unit-test 6/6] Add a test for kvm-clock Glauber Costa
2010-08-30  3:07     ` Jason Wang
2010-08-27 11:34   ` Glauber Costa
2010-08-30  3:27     ` Jason Wang
2010-08-28  1:58   ` Zachary Amsden
     [not found] <18442408.826301283138927321.JavaMail.root@zmail05.collab.prod.int.phx2.redhat.com>
2010-08-30  3:29 ` Jason Wang
