kvm.vger.kernel.org archive mirror
* [PATCH kvm-unit-test 0/6] Kvmclock test
@ 2010-08-27  5:49 Jason Wang
  2010-08-27  5:49 ` [PATCH kvm-unit-test 1/6] Introduce memory barriers Jason Wang
                   ` (5 more replies)
  0 siblings, 6 replies; 16+ messages in thread
From: Jason Wang @ 2010-08-27  5:49 UTC (permalink / raw)
  To: mtosatti, avi, kvm; +Cc: glommer

The following series implements a test of kvmclock.

---

Jason Wang (6):
      Introduce memory barriers.
      Introduce atomic operations
      Export tsc related helpers
      Introduce atol()
      Add a simple kvmclock driver
      Add a test for kvm-clock


 config-x86-common.mak |    7 ++
 lib/string.c          |   31 +++++++++
 lib/x86/atomic.c      |   38 +++++++++++
 lib/x86/atomic.h      |  164 ++++++++++++++++++++++++++++++++++++++++++++++++
 lib/x86/processor.h   |   22 ++++++
 lib/x86/smp.h         |    4 +
 x86/README            |    2 +
 x86/kvmclock.c        |  166 +++++++++++++++++++++++++++++++++++++++++++++++++
 x86/kvmclock.h        |   53 ++++++++++++++++
 x86/kvmclock_test.c   |  145 +++++++++++++++++++++++++++++++++++++++++++
 x86/tsc.c             |   16 -----
 x86/unittests.cfg     |    5 +
 x86/vmexit.c          |   15 ----
 13 files changed, 637 insertions(+), 31 deletions(-)
 create mode 100644 lib/x86/atomic.c
 create mode 100644 lib/x86/atomic.h
 create mode 100644 x86/kvmclock.c
 create mode 100644 x86/kvmclock.h
 create mode 100644 x86/kvmclock_test.c

-- 
Jason Wang

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH kvm-unit-test 1/6] Introduce memory barriers.
  2010-08-27  5:49 [PATCH kvm-unit-test 0/6] Kvmclock test Jason Wang
@ 2010-08-27  5:49 ` Jason Wang
  2010-08-27  5:49 ` [PATCH kvm-unit-test 2/6] Introduce atomic operations Jason Wang
                   ` (4 subsequent siblings)
  5 siblings, 0 replies; 16+ messages in thread
From: Jason Wang @ 2010-08-27  5:49 UTC (permalink / raw)
  To: mtosatti, avi, kvm; +Cc: glommer

Signed-off-by: Jason Wang <jasowang@redhat.com>
---
 lib/x86/smp.h |    4 ++++
 1 files changed, 4 insertions(+), 0 deletions(-)

diff --git a/lib/x86/smp.h b/lib/x86/smp.h
index c2e7350..df5fdba 100644
--- a/lib/x86/smp.h
+++ b/lib/x86/smp.h
@@ -1,6 +1,10 @@
 #ifndef __SMP_H
 #define __SMP_H
 
+#define mb() 	asm volatile("mfence":::"memory")
+#define rmb()	asm volatile("lfence":::"memory")
+#define wmb()	asm volatile("sfence" ::: "memory")
+
 struct spinlock {
     int v;
 };


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH kvm-unit-test 2/6] Introduce atomic operations
  2010-08-27  5:49 [PATCH kvm-unit-test 0/6] Kvmclock test Jason Wang
  2010-08-27  5:49 ` [PATCH kvm-unit-test 1/6] Introduce memory barriers Jason Wang
@ 2010-08-27  5:49 ` Jason Wang
  2010-08-27 11:39   ` Glauber Costa
  2010-08-27  5:49 ` [PATCH kvm-unit-test 3/6] Export tsc related helpers Jason Wang
                   ` (3 subsequent siblings)
  5 siblings, 1 reply; 16+ messages in thread
From: Jason Wang @ 2010-08-27  5:49 UTC (permalink / raw)
  To: mtosatti, avi, kvm; +Cc: glommer

Signed-off-by: Jason Wang <jasowang@redhat.com>
---
 config-x86-common.mak |    1 
 lib/x86/atomic.c      |   38 +++++++++++
 lib/x86/atomic.h      |  164 +++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 203 insertions(+), 0 deletions(-)
 create mode 100644 lib/x86/atomic.c
 create mode 100644 lib/x86/atomic.h

diff --git a/config-x86-common.mak b/config-x86-common.mak
index fdb9e3e..b8ca859 100644
--- a/config-x86-common.mak
+++ b/config-x86-common.mak
@@ -10,6 +10,7 @@ cflatobjs += \
 
 cflatobjs += lib/x86/fwcfg.o
 cflatobjs += lib/x86/apic.o
+cflatobjs += lib/x86/atomic.o
 
 $(libcflat): LDFLAGS += -nostdlib
 $(libcflat): CFLAGS += -ffreestanding -I lib
diff --git a/lib/x86/atomic.c b/lib/x86/atomic.c
new file mode 100644
index 0000000..6697e61
--- /dev/null
+++ b/lib/x86/atomic.c
@@ -0,0 +1,38 @@
+#include <libcflat.h>
+#include "atomic.h"
+
+#ifdef __i386__
+
+u64 atomic64_cmpxchg(atomic64_t *v, u64 old, u64 new)
+{
+        u32 low = new;
+        u32 high = new >> 32;
+
+        asm volatile("lock cmpxchg8b %1\n"
+                     : "+A" (old),
+                       "+m" (*(volatile long long *)&v->counter)
+                     : "b" (low), "c" (high)
+                     : "memory"
+                     );
+
+        return old;
+}
+
+#else
+
+u64 atomic64_cmpxchg(atomic64_t *v, u64 old, u64 new)
+{
+        u64 ret;
+        u64 _old = old;
+        u64 _new = new;
+
+        asm volatile("lock cmpxchgq %1,%2"
+                     : "=a" (ret)
+                     : "r" (_new),
+                       "m" (*(volatile long *)&v->counter), "0"(_old)
+                     : "memory"
+                     );
+        return ret;
+}
+
+#endif
diff --git a/lib/x86/atomic.h b/lib/x86/atomic.h
new file mode 100644
index 0000000..de2f033
--- /dev/null
+++ b/lib/x86/atomic.h
@@ -0,0 +1,164 @@
+#ifndef __ATOMIC_H
+#define __ATOMIC_H
+
+typedef struct {
+	volatile int counter;
+} atomic_t;
+
+#ifdef __i386__
+
+/**
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v.
+ */
+static inline int atomic_read(const atomic_t *v)
+{
+	return v->counter;
+}
+
+/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i.
+ */
+static inline void atomic_set(atomic_t *v, int i)
+{
+	v->counter = i;
+}
+
+/**
+ * atomic_inc - increment atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1.
+ */
+static inline void atomic_inc(atomic_t *v)
+{
+	asm volatile("lock incl %0"
+		     : "+m" (v->counter));
+}
+
+/**
+ * atomic_dec - decrement atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1.
+ */
+static inline void atomic_dec(atomic_t *v)
+{
+	asm volatile("lock decl %0"
+		     : "+m" (v->counter));
+}
+
+typedef struct {
+	u64 __attribute__((aligned(8))) counter;
+} atomic64_t;
+
+#define ATOMIC64_INIT(val)	{ (val) }
+
+/**
+ * atomic64_read - read atomic64 variable
+ * @ptr:      pointer to type atomic64_t
+ *
+ * Atomically reads the value of @ptr and returns it.
+ */
+static inline u64 atomic64_read(atomic64_t *ptr)
+{
+	u64 res;
+
+	/*
+	 * Note, we inline this atomic64_t primitive because
+	 * it only clobbers EAX/EDX and leaves the others
+	 * untouched. We also (somewhat subtly) rely on the
+	 * fact that cmpxchg8b returns the current 64-bit value
+	 * of the memory location we are touching:
+	 */
+	asm volatile("mov %%ebx, %%eax\n\t"
+                     "mov %%ecx, %%edx\n\t"
+                     "lock cmpxchg8b %1\n"
+                     : "=&A" (res)
+                     : "m" (*ptr)
+                     );
+	return res;
+}
+
+u64 atomic64_cmpxchg(atomic64_t *v, u64 old, u64 new);
+
+#elif defined(__x86_64__)
+
+/**
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v.
+ */
+static inline int atomic_read(const atomic_t *v)
+{
+	return v->counter;
+}
+
+/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i.
+ */
+static inline void atomic_set(atomic_t *v, int i)
+{
+	v->counter = i;
+}
+
+/**
+ * atomic_inc - increment atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1.
+ */
+static inline void atomic_inc(atomic_t *v)
+{
+	asm volatile("lock incl %0"
+		     : "=m" (v->counter)
+		     : "m" (v->counter));
+}
+
+/**
+ * atomic_dec - decrement atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1.
+ */
+static inline void atomic_dec(atomic_t *v)
+{
+	asm volatile("lock decl %0"
+		     : "=m" (v->counter)
+		     : "m" (v->counter));
+}
+
+typedef struct {
+	long long counter;
+} atomic64_t;
+
+#define ATOMIC64_INIT(i)	{ (i) }
+
+/**
+ * atomic64_read - read atomic64 variable
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically reads the value of @v.
+ * Doesn't imply a read memory barrier.
+ */
+static inline long atomic64_read(const atomic64_t *v)
+{
+	return v->counter;
+}
+
+u64 atomic64_cmpxchg(atomic64_t *v, u64 old, u64 new);
+
+#endif
+
+#endif


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH kvm-unit-test 3/6] Export tsc related helpers
  2010-08-27  5:49 [PATCH kvm-unit-test 0/6] Kvmclock test Jason Wang
  2010-08-27  5:49 ` [PATCH kvm-unit-test 1/6] Introduce memory barriers Jason Wang
  2010-08-27  5:49 ` [PATCH kvm-unit-test 2/6] Introduce atomic operations Jason Wang
@ 2010-08-27  5:49 ` Jason Wang
  2010-08-27  5:49 ` [PATCH kvm-unit-test 4/6] Introduce atol() Jason Wang
                   ` (2 subsequent siblings)
  5 siblings, 0 replies; 16+ messages in thread
From: Jason Wang @ 2010-08-27  5:49 UTC (permalink / raw)
  To: mtosatti, avi, kvm; +Cc: glommer

Signed-off-by: Jason Wang <jasowang@redhat.com>
---
 lib/x86/processor.h |   22 ++++++++++++++++++++++
 x86/tsc.c           |   16 +---------------
 x86/vmexit.c        |   15 ---------------
 3 files changed, 23 insertions(+), 30 deletions(-)

diff --git a/lib/x86/processor.h b/lib/x86/processor.h
index 67d7ca5..c348808 100644
--- a/lib/x86/processor.h
+++ b/lib/x86/processor.h
@@ -258,4 +258,26 @@ static inline void sti(void)
     asm volatile ("sti");
 }
 
+static inline unsigned long long rdtsc()
+{
+	long long r;
+
+#ifdef __x86_64__
+	unsigned a, d;
+
+	asm volatile ("rdtsc" : "=a"(a), "=d"(d));
+	r = a | ((long long)d << 32);
+#else
+	asm volatile ("rdtsc" : "=A"(r));
+#endif
+	return r;
+}
+
+static inline void wrtsc(u64 tsc)
+{
+	unsigned a = tsc, d = tsc >> 32;
+
+	asm volatile("wrmsr" : : "a"(a), "d"(d), "c"(0x10));
+}
+
 #endif
diff --git a/x86/tsc.c b/x86/tsc.c
index 394a9c6..58f332d 100644
--- a/x86/tsc.c
+++ b/x86/tsc.c
@@ -1,19 +1,5 @@
 #include "libcflat.h"
-
-u64 rdtsc(void)
-{
-	unsigned a, d;
-
-	asm volatile("rdtsc" : "=a"(a), "=d"(d));
-	return a | (u64)d << 32;
-}
-
-void wrtsc(u64 tsc)
-{
-	unsigned a = tsc, d = tsc >> 32;
-
-	asm volatile("wrmsr" : : "a"(a), "d"(d), "c"(0x10));
-}
+#include "processor.h"
 
 void test_wrtsc(u64 t1)
 {
diff --git a/x86/vmexit.c b/x86/vmexit.c
index 84c384d..551083d 100644
--- a/x86/vmexit.c
+++ b/x86/vmexit.c
@@ -3,21 +3,6 @@
 #include "smp.h"
 #include "processor.h"
 
-static inline unsigned long long rdtsc()
-{
-	long long r;
-
-#ifdef __x86_64__
-	unsigned a, d;
-
-	asm volatile ("rdtsc" : "=a"(a), "=d"(d));
-	r = a | ((long long)d << 32);
-#else
-	asm volatile ("rdtsc" : "=A"(r));
-#endif
-	return r;
-}
-
 static unsigned int inl(unsigned short port)
 {
     unsigned int val;


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH kvm-unit-test 4/6] Introduce atol()
  2010-08-27  5:49 [PATCH kvm-unit-test 0/6] Kvmclock test Jason Wang
                   ` (2 preceding siblings ...)
  2010-08-27  5:49 ` [PATCH kvm-unit-test 3/6] Export tsc related helpers Jason Wang
@ 2010-08-27  5:49 ` Jason Wang
  2010-08-27  5:49 ` [PATCH kvm-unit-test 5/6] Add a simple kvmclock driver Jason Wang
  2010-08-27  5:49 ` [PATCH kvm-unit-test 6/6] Add a test for kvm-clock Jason Wang
  5 siblings, 0 replies; 16+ messages in thread
From: Jason Wang @ 2010-08-27  5:49 UTC (permalink / raw)
  To: mtosatti, avi, kvm; +Cc: glommer

Signed-off-by: Jason Wang <jasowang@redhat.com>
---
 lib/string.c |   31 +++++++++++++++++++++++++++++++
 1 files changed, 31 insertions(+), 0 deletions(-)

diff --git a/lib/string.c b/lib/string.c
index acac3c0..1f19f5c 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -30,3 +30,34 @@ void *memset(void *s, int c, size_t n)
 
     return s;
 }
+
+long atol(const char *ptr)
+{
+    long acc = 0;
+    const char *s = ptr;
+    int neg, c;
+
+    while (*s == ' ' || *s == '\t')
+        s++;
+    if (*s == '-'){
+        neg = 1;
+        s++;
+    } else {
+        neg = 0;
+        if (*s == '+')
+            s++;
+    }
+
+    while (*s) {
+        if (*s < '0' || *s > '9')
+            break;
+        c = *s - '0';
+        acc = acc * 10 + c;
+        s++;
+    }
+
+    if (neg)
+        acc = -acc;
+
+    return acc;
+}


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH kvm-unit-test 5/6] Add a simple kvmclock driver
  2010-08-27  5:49 [PATCH kvm-unit-test 0/6] Kvmclock test Jason Wang
                   ` (3 preceding siblings ...)
  2010-08-27  5:49 ` [PATCH kvm-unit-test 4/6] Introduce atol() Jason Wang
@ 2010-08-27  5:49 ` Jason Wang
  2010-08-27 11:31   ` Glauber Costa
  2010-08-27  5:49 ` [PATCH kvm-unit-test 6/6] Add a test for kvm-clock Jason Wang
  5 siblings, 1 reply; 16+ messages in thread
From: Jason Wang @ 2010-08-27  5:49 UTC (permalink / raw)
  To: mtosatti, avi, kvm; +Cc: glommer

Most of the code was borrowed from arch/x86/kernel/kvmclock.c. A
special bit, PVCLOCK_RAW_CYCLE_BIT, is used to notify the driver
to return unadjusted cycles.

Signed-off-by: Jason Wang <jasowang@redhat.com>
---
 x86/kvmclock.c |  166 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 x86/kvmclock.h |   53 ++++++++++++++++++
 2 files changed, 219 insertions(+), 0 deletions(-)
 create mode 100644 x86/kvmclock.c
 create mode 100644 x86/kvmclock.h

diff --git a/x86/kvmclock.c b/x86/kvmclock.c
new file mode 100644
index 0000000..6bfc858
--- /dev/null
+++ b/x86/kvmclock.c
@@ -0,0 +1,166 @@
+#include "libcflat.h"
+#include "smp.h"
+#include "atomic.h"
+#include "processor.h"
+#include "kvmclock.h"
+
+#define unlikely(x)	__builtin_expect(!!(x), 0)
+
+struct pvclock_vcpu_time_info hv_clock[MAX_CPU];
+struct pvclock_wall_clock wall_clock;
+static unsigned char valid_flags = 0;
+static atomic64_t last_value = ATOMIC64_INIT(0);
+
+/*
+ * Scale a 64-bit delta by shifting and multiplying by a 32-bit fraction,
+ * yielding a 64-bit result.
+ */
+static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
+{
+	u64 product;
+#ifdef __i386__
+	u32 tmp1, tmp2;
+#endif
+
+	if (shift < 0)
+		delta >>= -shift;
+	else
+		delta <<= shift;
+
+#ifdef __i386__
+	__asm__ (
+		"mul  %5       ; "
+		"mov  %4,%%eax ; "
+		"mov  %%edx,%4 ; "
+		"mul  %5       ; "
+		"xor  %5,%5    ; "
+		"add  %4,%%eax ; "
+		"adc  %5,%%edx ; "
+		: "=A" (product), "=r" (tmp1), "=r" (tmp2)
+		: "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
+#elif defined(__x86_64__)
+	__asm__ (
+		"mul %%rdx ; shrd $32,%%rdx,%%rax"
+		: "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
+#else
+#error implement me!
+#endif
+
+	return product;
+}
+
+static u64 pvclock_get_nsec_offset(struct pvclock_shadow_time *shadow)
+{
+	u64 delta = rdtsc() - shadow->tsc_timestamp;
+	return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift);
+}
+
+/*
+ * Reads a consistent set of time-base values from hypervisor,
+ * into a shadow data area.
+ */
+static unsigned pvclock_get_time_values(struct pvclock_shadow_time *dst,
+					struct pvclock_vcpu_time_info *src)
+{
+	do {
+		dst->version = src->version;
+		rmb();		/* fetch version before data */
+		dst->tsc_timestamp     = src->tsc_timestamp;
+		dst->system_timestamp  = src->system_time;
+		dst->tsc_to_nsec_mul   = src->tsc_to_system_mul;
+		dst->tsc_shift         = src->tsc_shift;
+		dst->flags             = src->flags;
+		rmb();		/* test version after fetching data */
+	} while ((src->version & 1) || (dst->version != src->version));
+
+	return dst->version;
+}
+
+cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
+{
+	struct pvclock_shadow_time shadow;
+	unsigned version;
+	cycle_t ret, offset;
+	u64 last;
+
+	do {
+		version = pvclock_get_time_values(&shadow, src);
+		barrier();
+		offset = pvclock_get_nsec_offset(&shadow);
+		ret = shadow.system_timestamp + offset;
+		barrier();
+	} while (version != src->version);
+
+	if ((valid_flags & PVCLOCK_RAW_CYCLE_BIT) ||
+            ((valid_flags & PVCLOCK_TSC_STABLE_BIT) &&
+             (shadow.flags & PVCLOCK_TSC_STABLE_BIT)))
+                return ret;
+
+	/*
+	 * Assumption here is that last_value, a global accumulator, always goes
+	 * forward. If we are less than that, we should not be much smaller.
+	 * We assume there is an error marging we're inside, and then the correction
+	 * does not sacrifice accuracy.
+	 *
+	 * For reads: global may have changed between test and return,
+	 * but this means someone else updated the clock at a later time.
+	 * We just need to make sure we are not seeing a backwards event.
+	 *
+	 * For updates: last_value = ret is not enough, since two vcpus could be
+	 * updating at the same time, and one of them could be slightly behind,
+	 * making the assumption that last_value always goes forward fail to hold.
+	 */
+	last = atomic64_read(&last_value);
+	do {
+		if (ret < last)
+			return last;
+		last = atomic64_cmpxchg(&last_value, last, ret);
+	} while (unlikely(last != ret));
+
+	return ret;
+}
+
+cycle_t kvm_clock_read()
+{
+        struct pvclock_vcpu_time_info *src;
+        cycle_t ret;
+        int index = smp_id();
+
+        src = &hv_clock[index];
+        ret = pvclock_clocksource_read(src);
+        return ret;
+}
+
+void kvm_clock_init(void *data)
+{
+        int index = smp_id();
+        struct pvclock_vcpu_time_info *hvc = &hv_clock[index];
+
+        printf("kvm-clock: cpu %d, msr 0x:%lx \n", index, hvc);
+        wrmsr(MSR_KVM_SYSTEM_TIME, (unsigned long)hvc | 1);
+}
+
+void kvm_clock_clear(void *data)
+{
+        wrmsr(MSR_KVM_SYSTEM_TIME, 0LL);
+}
+
+void kvm_get_wallclock(struct timespec *ts)
+{
+        u32 version;
+        wrmsr(MSR_KVM_WALL_CLOCK, (u64)&wall_clock);
+
+        do {
+                version = wall_clock.version;
+                rmb();		/* fetch version before time */
+                ts->sec = wall_clock.sec;
+                ts->nsec = wall_clock.nsec;
+                rmb();		/* fetch time before checking version */
+        } while ((wall_clock.version & 1) || (version != wall_clock.version));
+
+}
+
+void pvclock_set_flags(unsigned char flags)
+{
+        valid_flags = flags;
+}
diff --git a/x86/kvmclock.h b/x86/kvmclock.h
new file mode 100644
index 0000000..e3a1c02
--- /dev/null
+++ b/x86/kvmclock.h
@@ -0,0 +1,53 @@
+#ifndef KVMCLOCK_H
+#define KVMCLOCK_H
+
+#define MSR_KVM_WALL_CLOCK  0x11
+#define MSR_KVM_SYSTEM_TIME 0x12
+
+#define MAX_CPU 4
+
+#define PVCLOCK_TSC_STABLE_BIT (1 << 0)
+#define PVCLOCK_RAW_CYCLE_BIT (1 << 7) /* Get raw cycle */
+
+# define NSEC_PER_SEC			1000000000ULL
+
+typedef u64 cycle_t;
+
+struct pvclock_vcpu_time_info {
+        u32   version;
+        u32   pad0;
+        u64   tsc_timestamp;
+        u64   system_time;
+        u32   tsc_to_system_mul;
+        signed char    tsc_shift;
+        u8    flags;
+        u8    pad[2];
+} __attribute__((__packed__)); /* 32 bytes */
+
+struct pvclock_wall_clock {
+	u32   version;
+	u32   sec;
+	u32   nsec;
+} __attribute__((__packed__));
+
+struct pvclock_shadow_time {
+        u64 tsc_timestamp;     /* TSC at last update of time vals.  */
+        u64 system_timestamp;  /* Time, in nanosecs, since boot.    */
+        u32 tsc_to_nsec_mul;
+        int tsc_shift;
+        u32 version;
+        u8  flags;
+};
+
+struct timespec {
+        u32   sec;
+        u32   nsec;
+};
+
+void pvclock_set_flags(unsigned char flags);
+cycle_t kvm_clock_read();
+void kvm_get_wallclock(struct timespec *ts);
+void kvm_clock_init(void *data);
+void kvm_clock_clear(void *data);
+
+#endif


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH kvm-unit-test 6/6] Add a test for kvm-clock
  2010-08-27  5:49 [PATCH kvm-unit-test 0/6] Kvmclock test Jason Wang
                   ` (4 preceding siblings ...)
  2010-08-27  5:49 ` [PATCH kvm-unit-test 5/6] Add a simple kvmclock driver Jason Wang
@ 2010-08-27  5:49 ` Jason Wang
  2010-08-27 11:27   ` Glauber Costa
                     ` (2 more replies)
  5 siblings, 3 replies; 16+ messages in thread
From: Jason Wang @ 2010-08-27  5:49 UTC (permalink / raw)
  To: mtosatti, avi, kvm; +Cc: glommer

This patch implements two tests for kvmclock. The first one checks whether
the time of day returned by kvmclock matches the value obtained from the
host. The second one checks whether the cycle of kvmclock grows
monotonically in an smp guest.

Three parameters are accepted by the test: the test loops, the seconds
since 1970-01-01 00:00:00 UTC (which can easily be obtained through date
+%s) and the max accepted offset value between the tod of guest and
host.

Signed-off-by: Jason Wang <jasowang@redhat.com>
---
 config-x86-common.mak |    6 ++
 x86/README            |    2 +
 x86/kvmclock_test.c   |  145 +++++++++++++++++++++++++++++++++++++++++++++++++
 x86/unittests.cfg     |    5 ++
 4 files changed, 157 insertions(+), 1 deletions(-)
 create mode 100644 x86/kvmclock_test.c

diff --git a/config-x86-common.mak b/config-x86-common.mak
index b8ca859..b541c1c 100644
--- a/config-x86-common.mak
+++ b/config-x86-common.mak
@@ -26,7 +26,8 @@ FLATLIBS = lib/libcflat.a $(libgcc)
 tests-common = $(TEST_DIR)/vmexit.flat $(TEST_DIR)/tsc.flat \
                $(TEST_DIR)/smptest.flat  $(TEST_DIR)/port80.flat \
                $(TEST_DIR)/realmode.flat $(TEST_DIR)/msr.flat \
-               $(TEST_DIR)/hypercall.flat $(TEST_DIR)/sieve.flat
+               $(TEST_DIR)/hypercall.flat $(TEST_DIR)/sieve.flat \
+               $(TEST_DIR)/kvmclock_test.flat
 
 tests_and_config = $(TEST_DIR)/*.flat $(TEST_DIR)/unittests.cfg
 
@@ -70,6 +71,9 @@ $(TEST_DIR)/rmap_chain.flat: $(cstart.o) $(TEST_DIR)/rmap_chain.o \
 
 $(TEST_DIR)/svm.flat: $(cstart.o) $(TEST_DIR)/vm.o
 
+$(TEST_DIR)/kvmclock_test.flat: $(cstart.o) $(TEST_DIR)/kvmclock.o \
+                                $(TEST_DIR)/kvmclock_test.o
+
 arch_clean:
 	$(RM) $(TEST_DIR)/*.o $(TEST_DIR)/*.flat \
 	$(TEST_DIR)/.*.d $(TEST_DIR)/lib/.*.d $(TEST_DIR)/lib/*.o
diff --git a/x86/README b/x86/README
index ab5a2ae..4b90080 100644
--- a/x86/README
+++ b/x86/README
@@ -12,3 +12,5 @@ sieve: heavy memory access with no paging and with paging static and with paging
 smptest: run smp_id() on every cpu and compares return value to number
 tsc: write to tsc(0) and write to tsc(100000000000) and read it back
 vmexit: long loops for each: cpuid, vmcall, mov_from_cr8, mov_to_cr8, inl_pmtimer, ipi, ipi+halt
+kvmclock_test: monotonic cycle test of kvmclock and a sanity test of
+wallclock
diff --git a/x86/kvmclock_test.c b/x86/kvmclock_test.c
new file mode 100644
index 0000000..cd80915
--- /dev/null
+++ b/x86/kvmclock_test.c
@@ -0,0 +1,145 @@
+#include "libcflat.h"
+#include "smp.h"
+#include "atomic.h"
+#include "string.h"
+#include "kvmclock.h"
+
+#define DEFAULT_TEST_LOOPS 100000000L
+#define DEFAULT_THRESHOLD  60L
+
+struct test_info {
+        struct spinlock lock;
+        long loops;               /* test loops */
+        u64 warps;                /* warp count */
+        long long worst;          /* worst warp */
+        volatile cycle_t last;    /* last cycle seen by test */
+        atomic_t ncpus;           /* number of cpu in the test*/
+};
+
+struct test_info ti[2];
+
+static int wallclock_test(long sec, long threshold)
+{
+        int i;
+        long ksec, offset;
+        struct timespec ts, ts_last;
+
+        printf("Wallclock test, threshold %ld\n", threshold);
+        kvm_get_wallclock(&ts_last);
+        ksec = ts_last.sec + ts_last.nsec / NSEC_PER_SEC;
+
+        offset = ksec - sec;
+        printf("Seconds get from host: %ld\n", sec);
+        printf("Seconds get from kvmclock: %ld\n", ksec);
+
+        if (offset > threshold || offset < -threshold) {
+                printf("Seconds get from kvmclock: %ld\n", ksec);
+                return 1;
+        }
+
+        for (i=0; i < 100; i++){
+                kvm_get_wallclock(&ts);
+                if (ts.nsec != ts_last.nsec || ts.sec != ts_last.sec){
+                        printf ("Inconsistent wall clock returned!\n");
+                        return 1;
+                }
+        }
+        return 0;
+}
+
+static void kvm_clock_test(void *data)
+{
+        struct test_info *hv_test_info = (struct test_info *)data;
+        int i;
+
+        for (i = 0; i < hv_test_info->loops; i++){
+                cycle_t t0, t1;
+                long long delta;
+
+                spin_lock(&hv_test_info->lock);
+                t1 = kvm_clock_read();
+                t0 = hv_test_info->last;
+                hv_test_info->last = kvm_clock_read();
+                spin_unlock(&hv_test_info->lock);
+
+                delta = t1 - t0;
+                if (delta < 0){
+                        spin_lock(&hv_test_info->lock);
+                        ++hv_test_info->warps;
+                        if (delta < hv_test_info->worst){
+                                hv_test_info->worst = delta;
+                                printf("Worst warp %lld %\n", hv_test_info->worst);
+                        }
+                        spin_unlock(&hv_test_info->lock);
+                }
+
+                if (!((unsigned long)i & 31))
+                        asm volatile("rep; nop");
+        }
+
+        atomic_dec(&hv_test_info->ncpus);
+}
+
+static int cycle_test(int ncpus, long loops, struct test_info *ti)
+{
+        int i;
+
+        atomic_set(&ti->ncpus, ncpus);
+        ti->loops = loops;
+        for (i = ncpus - 1; i >= 0; i--)
+                on_cpu_async(i, kvm_clock_test, (void *)ti);
+
+        /* Wait for the end of other vcpu */
+        while(atomic_read(&ti->ncpus))
+                ;
+
+        printf("Total vcpus: %d\n", ncpus);
+        printf("Test  loops: %ld\n", ti->loops);
+        printf("Total warps: %lld\n", ti->warps);
+        printf("Worst warp:  %lld\n", ti->worst);
+
+        return ti->warps ? 1 : 0;
+}
+
+int main(int ac, char **av)
+{
+        int ncpus = cpu_count();
+        int nerr = 0, i;
+        long loops = DEFAULT_TEST_LOOPS;
+        long sec = 0;
+        long threshold = DEFAULT_THRESHOLD;
+
+        if (ac > 1)
+                loops = atol(av[1]);
+        if (ac > 2)
+                sec = atol(av[2]);
+        if (ac > 3)
+                threshold = atol(av[3]);
+
+        smp_init();
+
+        if (ncpus > MAX_CPU)
+                ncpus = MAX_CPU;
+        for (i = 0; i < ncpus; ++i)
+                on_cpu(i, kvm_clock_init, (void *)0);
+
+        if (ac > 2)
+                nerr += wallclock_test(sec, threshold);
+
+        printf("Check the stability of raw cycle\n");
+        pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT
+                          | PVCLOCK_RAW_CYCLE_BIT);
+        if (cycle_test(ncpus, loops, &ti[1]))
+                printf("Raw cycle is not stable\n");
+        else
+                printf("Raw cycle is stable\n");
+
+        pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
+        printf("Monotonic cycle test:\n");
+        nerr += cycle_test(ncpus, loops, &ti[0]);
+
+        for (i = 0; i < ncpus; ++i)
+                on_cpu(i, kvm_clock_clear, (void *)0);
+
+        return nerr > 0 ? 1 : 0;
+}
diff --git a/x86/unittests.cfg b/x86/unittests.cfg
index 7796e41..a3290cd 100644
--- a/x86/unittests.cfg
+++ b/x86/unittests.cfg
@@ -63,3 +63,8 @@ extra_params = -enable-nesting -cpu qemu64,+svm
 file = svm.flat
 smp = 2
 extra_params = -cpu qemu64,-svm
+
+[kvmclock_test]
+file = kvmclock_test.flat
+smp = 2
+extra_params = --append "10000000 `date +%s`"
\ No newline at end of file


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* Re: [PATCH kvm-unit-test 6/6] Add a test for kvm-clock
  2010-08-27  5:49 ` [PATCH kvm-unit-test 6/6] Add a test for kvm-clock Jason Wang
@ 2010-08-27 11:27   ` Glauber Costa
  2010-08-30  3:07     ` Jason Wang
  2010-08-27 11:34   ` Glauber Costa
  2010-08-28  1:58   ` Zachary Amsden
  2 siblings, 1 reply; 16+ messages in thread
From: Glauber Costa @ 2010-08-27 11:27 UTC (permalink / raw)
  To: Jason Wang; +Cc: mtosatti, avi, kvm

On Fri, Aug 27, 2010 at 01:49:53PM +0800, Jason Wang wrote:
> This patch implements two tests for kvmclock. The first one checks whether
> the time of day returned by kvmclock matches the value obtained from the
> host. The second one checks whether the cycle of kvmclock grows
> monotonically in an smp guest.
> 
> Three parameters are accepted by the test: the test loops, the seconds
> since 1970-01-01 00:00:00 UTC (which can easily be obtained through date
> +%s) and the max accepted offset value between the tod of guest and
> host.
Good.

I liked the flag usage. It might help us in the future, when we apply
zach's series plus a couple of ideas we have, to see if the
problem indeed goes away.

A minor tip, no strong feelings towards this, would be to extract
some information from the host cpu and print it too.
It would make it easier when we're analyzing these reports in the future.

tsc-based mechanisms are very sensitive to:
 - vendor
 - # of cpus
 - # of sockets
 - tsc flags

Sure we can get all this information from /proc/cpuinfo, but having it
in your final report automatically would be convenient, I think.
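
A sketch of the guest-visible part (the vendor string via cpuid; socket
counts and the host-side tsc flags would still have to come from the host;
print_cpu_vendor is just an illustrative helper name):

/* Illustrative only: print the vendor string the guest sees.
 * CPUID leaf 0 returns the vendor in EBX, EDX, ECX. */
static void print_cpu_vendor(void)
{
        unsigned int eax = 0, regs[4];

        asm volatile("cpuid"
                     : "+a"(eax), "=b"(regs[0]), "=c"(regs[2]), "=d"(regs[1]));
        ((char *)regs)[12] = '\0';
        printf("cpu vendor: %s\n", (char *)regs);
}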


^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH kvm-unit-test 5/6] Add a simple kvmclock driver
  2010-08-27  5:49 ` [PATCH kvm-unit-test 5/6] Add a simple kvmclock driver Jason Wang
@ 2010-08-27 11:31   ` Glauber Costa
  0 siblings, 0 replies; 16+ messages in thread
From: Glauber Costa @ 2010-08-27 11:31 UTC (permalink / raw)
  To: Jason Wang; +Cc: mtosatti, avi, kvm

On Fri, Aug 27, 2010 at 01:49:45PM +0800, Jason Wang wrote:
> +#define unlikely(x)	__builtin_expect(!!(x), 0)
> +
> +struct pvclock_vcpu_time_info hv_clock[MAX_CPU];
this structure has to be 4-byte aligned. Let the compiler
help you guarantee it here.
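Something along these lines would do it (just an illustration):

/* Let the compiler guarantee the alignment of each per-cpu entry. */
struct pvclock_vcpu_time_info hv_clock[MAX_CPU]
        __attribute__((aligned(4)));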

> +#define MAX_CPU 4
> +
Any particular reason for the number 4? I'd like
to see these tests running in really big boxes,
where things get really interesting.

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH kvm-unit-test 6/6] Add a test for kvm-clock
  2010-08-27  5:49 ` [PATCH kvm-unit-test 6/6] Add a test for kvm-clock Jason Wang
  2010-08-27 11:27   ` Glauber Costa
@ 2010-08-27 11:34   ` Glauber Costa
  2010-08-30  3:27     ` Jason Wang
  2010-08-28  1:58   ` Zachary Amsden
  2 siblings, 1 reply; 16+ messages in thread
From: Glauber Costa @ 2010-08-27 11:34 UTC (permalink / raw)
  To: Jason Wang; +Cc: mtosatti, avi, kvm

On Fri, Aug 27, 2010 at 01:49:53PM +0800, Jason Wang wrote:
> This patch implements two tests for kvmclock. The first one checks whether
> the time of day returned by kvmclock matches the value obtained from the
> host. The second one checks whether the cycle of kvmclock grows
> monotonically in an smp guest.
> 
> Three parameters are accepted by the test: the test loops, the seconds
> since 1970-01-01 00:00:00 UTC (which can easily be obtained through date
> +%s) and the max accepted offset value between the tod of guest and
> host.
> 
> Signed-off-by: Jason Wang <jasowang@redhat.com>
> ---
>  config-x86-common.mak |    6 ++
>  x86/README            |    2 +
>  x86/kvmclock_test.c   |  145 +++++++++++++++++++++++++++++++++++++++++++++++++
>  x86/unittests.cfg     |    5 ++
>  4 files changed, 157 insertions(+), 1 deletions(-)
>  create mode 100644 x86/kvmclock_test.c
> 
> diff --git a/config-x86-common.mak b/config-x86-common.mak
> index b8ca859..b541c1c 100644
> --- a/config-x86-common.mak
> +++ b/config-x86-common.mak
> @@ -26,7 +26,8 @@ FLATLIBS = lib/libcflat.a $(libgcc)
>  tests-common = $(TEST_DIR)/vmexit.flat $(TEST_DIR)/tsc.flat \
>                 $(TEST_DIR)/smptest.flat  $(TEST_DIR)/port80.flat \
>                 $(TEST_DIR)/realmode.flat $(TEST_DIR)/msr.flat \
> -               $(TEST_DIR)/hypercall.flat $(TEST_DIR)/sieve.flat
> +               $(TEST_DIR)/hypercall.flat $(TEST_DIR)/sieve.flat \
> +               $(TEST_DIR)/kvmclock_test.flat
>  
>  tests_and_config = $(TEST_DIR)/*.flat $(TEST_DIR)/unittests.cfg
>  
> @@ -70,6 +71,9 @@ $(TEST_DIR)/rmap_chain.flat: $(cstart.o) $(TEST_DIR)/rmap_chain.o \
>  
>  $(TEST_DIR)/svm.flat: $(cstart.o) $(TEST_DIR)/vm.o
>  
> +$(TEST_DIR)/kvmclock_test.flat: $(cstart.o) $(TEST_DIR)/kvmclock.o \
> +                                $(TEST_DIR)/kvmclock_test.o
> +
>  arch_clean:
>  	$(RM) $(TEST_DIR)/*.o $(TEST_DIR)/*.flat \
>  	$(TEST_DIR)/.*.d $(TEST_DIR)/lib/.*.d $(TEST_DIR)/lib/*.o
> diff --git a/x86/README b/x86/README
> index ab5a2ae..4b90080 100644
> --- a/x86/README
> +++ b/x86/README
> @@ -12,3 +12,5 @@ sieve: heavy memory access with no paging and with paging static and with paging
>  smptest: run smp_id() on every cpu and compares return value to number
>  tsc: write to tsc(0) and write to tsc(100000000000) and read it back
>  vmexit: long loops for each: cpuid, vmcall, mov_from_cr8, mov_to_cr8, inl_pmtimer, ipi, ipi+halt
> +kvmclock_test: monotonic cycle test of kvmclock and a sanity test of
> +wallclock
> diff --git a/x86/kvmclock_test.c b/x86/kvmclock_test.c
> new file mode 100644
> index 0000000..cd80915
> --- /dev/null
> +++ b/x86/kvmclock_test.c
> @@ -0,0 +1,145 @@
> +#include "libcflat.h"
> +#include "smp.h"
> +#include "atomic.h"
> +#include "string.h"
> +#include "kvmclock.h"
> +
> +#define DEFAULT_TEST_LOOPS 100000000L
> +#define DEFAULT_THRESHOLD  60L
> +
> +        printf("Check the stability of raw cycle\n");
> +        pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT
> +                          | PVCLOCK_RAW_CYCLE_BIT);
> +        if (cycle_test(ncpus, loops, &ti[1]))
> +                printf("Raw cycle is not stable\n");
> +        else
> +                printf("Raw cycle is stable\n");
> +
> +        pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
> +        printf("Monotonic cycle test:\n");
> +        nerr += cycle_test(ncpus, loops, &ti[0]);
> +
> +        for (i = 0; i < ncpus; ++i)
> +                on_cpu(i, kvm_clock_clear, (void *)0);
> +
> +        return nerr > 0 ? 1 : 0;

Another interesting bit of information is the total time taken by
the first cycle_test, compared to the second (they do the same number
of loops anyway, so no need for further math). We are all pretty sure
the lack of a stable bit will influence kvm clock performance, but
nobody has measured by how much yet (in big, big boxes).
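
A sketch of that measurement, reusing the names from the posted test
(t_start/t_stop are new locals; it assumes kvm_clock_read() is fine to use
for this and that its unit is nanoseconds):

        cycle_t t_start, t_stop;

        t_start = kvm_clock_read();
        nerr += cycle_test(ncpus, loops, &ti[0]);
        t_stop = kvm_clock_read();
        printf("Total time: %lld ns\n", (long long)(t_stop - t_start));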

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH kvm-unit-test 2/6] Introduce atomic operations
  2010-08-27  5:49 ` [PATCH kvm-unit-test 2/6] Introduce atomic operations Jason Wang
@ 2010-08-27 11:39   ` Glauber Costa
  2010-08-29  9:39     ` Avi Kivity
  0 siblings, 1 reply; 16+ messages in thread
From: Glauber Costa @ 2010-08-27 11:39 UTC (permalink / raw)
  To: Jason Wang; +Cc: mtosatti, avi, kvm

On Fri, Aug 27, 2010 at 01:49:20PM +0800, Jason Wang wrote:
> +u64 atomic64_cmpxchg(atomic64_t *v, u64 old, u64 new)
> +{
> +        u64 ret;
> +        u64 _old = old;
> +        u64 _new = new;
> +
> +        asm volatile("lock cmpxchgq %1,%2"
> +                     : "=a" (ret)
> +                     : "r" (_new),
> +                       "m" (*(volatile long *)&v->counter), "0"(_old)
> +                     : "memory"
> +                     );
> +        return ret;
> +}
> +
This is wrong.
See http://git.kernel.org/?p=linux/kernel/git/x86/linux-2.6-tip.git;a=commitdiff;h=113fc5a6e8c2288619ff7e8187a6f556b7e0d372

You need to explicitly mark that you are changing the memory here.

Btw, this is mostly header copying, and can miss bug fixes like the one above. Isn't there
a way to just specify in the test that they are needed, and then grab them from linux automatically?
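
The fix there is essentially to make the memory operand an in/out operand so
gcc knows it is modified. With the names used in this patch, roughly (a
sketch, not tested here):

        asm volatile("lock cmpxchgq %2,%1"
                     : "=a" (ret), "+m" (v->counter)   /* memory is now an output */
                     : "r" (_new), "0" (_old)
                     : "memory");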



^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH kvm-unit-test 6/6] Add a test for kvm-clock
  2010-08-27  5:49 ` [PATCH kvm-unit-test 6/6] Add a test for kvm-clock Jason Wang
  2010-08-27 11:27   ` Glauber Costa
  2010-08-27 11:34   ` Glauber Costa
@ 2010-08-28  1:58   ` Zachary Amsden
  2 siblings, 0 replies; 16+ messages in thread
From: Zachary Amsden @ 2010-08-28  1:58 UTC (permalink / raw)
  To: Jason Wang; +Cc: mtosatti, avi, kvm, glommer

On 08/26/2010 07:49 PM, Jason Wang wrote:
> This patch implements two tests for kvmclock. The first one checks whether
> the time of day returned by kvmclock matches the value obtained from the
> host. The second one checks whether the cycle of kvmclock grows
> monotonically in an smp guest.
>    

Technically, it's not monotonic, it's non-decreasing.

> Three parameters are accepted by the test: the test loops, the seconds
> since 1970-01-01 00:00:00 UTC (which can easily be obtained through date
> +%s) and the max accepted offset value between the tod of guest and
> host.
>    

This in general looks awesome.

> Signed-off-by: Jason Wang<jasowang@redhat.com>
> ---
>   config-x86-common.mak |    6 ++
>   x86/README            |    2 +
>   x86/kvmclock_test.c   |  145 +++++++++++++++++++++++++++++++++++++++++++++++++
>   x86/unittests.cfg     |    5 ++
>   4 files changed, 157 insertions(+), 1 deletions(-)
>   create mode 100644 x86/kvmclock_test.c
>
> diff --git a/config-x86-common.mak b/config-x86-common.mak
> index b8ca859..b541c1c 100644
> --- a/config-x86-common.mak
> +++ b/config-x86-common.mak
> @@ -26,7 +26,8 @@ FLATLIBS = lib/libcflat.a $(libgcc)
>   tests-common = $(TEST_DIR)/vmexit.flat $(TEST_DIR)/tsc.flat \
>                  $(TEST_DIR)/smptest.flat  $(TEST_DIR)/port80.flat \
>                  $(TEST_DIR)/realmode.flat $(TEST_DIR)/msr.flat \
> -               $(TEST_DIR)/hypercall.flat $(TEST_DIR)/sieve.flat
> +               $(TEST_DIR)/hypercall.flat $(TEST_DIR)/sieve.flat \
> +               $(TEST_DIR)/kvmclock_test.flat
>
>   tests_and_config = $(TEST_DIR)/*.flat $(TEST_DIR)/unittests.cfg
>
> @@ -70,6 +71,9 @@ $(TEST_DIR)/rmap_chain.flat: $(cstart.o) $(TEST_DIR)/rmap_chain.o \
>
>   $(TEST_DIR)/svm.flat: $(cstart.o) $(TEST_DIR)/vm.o
>
> +$(TEST_DIR)/kvmclock_test.flat: $(cstart.o) $(TEST_DIR)/kvmclock.o \
> +                                $(TEST_DIR)/kvmclock_test.o
> +
>   arch_clean:
>   	$(RM) $(TEST_DIR)/*.o $(TEST_DIR)/*.flat \
>   	$(TEST_DIR)/.*.d $(TEST_DIR)/lib/.*.d $(TEST_DIR)/lib/*.o
> diff --git a/x86/README b/x86/README
> index ab5a2ae..4b90080 100644
> --- a/x86/README
> +++ b/x86/README
> @@ -12,3 +12,5 @@ sieve: heavy memory access with no paging and with paging static and with paging
>   smptest: run smp_id() on every cpu and compares return value to number
>   tsc: write to tsc(0) and write to tsc(100000000000) and read it back
>   vmexit: long loops for each: cpuid, vmcall, mov_from_cr8, mov_to_cr8, inl_pmtimer, ipi, ipi+halt
> +kvmclock_test: monotonic cycle test of kvmclock and a sanity test of
> +wallclock
> diff --git a/x86/kvmclock_test.c b/x86/kvmclock_test.c
> new file mode 100644
> index 0000000..cd80915
> --- /dev/null
> +++ b/x86/kvmclock_test.c
> @@ -0,0 +1,145 @@
> +#include "libcflat.h"
> +#include "smp.h"
> +#include "atomic.h"
> +#include "string.h"
> +#include "kvmclock.h"
> +
> +#define DEFAULT_TEST_LOOPS 100000000L
> +#define DEFAULT_THRESHOLD  60L
> +
> +struct test_info {
> +        struct spinlock lock;
> +        long loops;               /* test loops */
> +        u64 warps;                /* warp count */
> +        long long worst;          /* worst warp */
> +        volatile cycle_t last;    /* last cycle seen by test */
> +        atomic_t ncpus;           /* number of cpu in the test*/
> +};
> +
> +struct test_info ti[2];
> +
> +static int wallclock_test(long sec, long threshold)
> +{
> +        int i;
> +        long ksec, offset;
> +        struct timespec ts, ts_last;
> +
> +        printf("Wallclock test, threshold %ld\n", threshold);
> +        kvm_get_wallclock(&ts_last);
> +        ksec = ts_last.sec + ts_last.nsec / NSEC_PER_SEC;
> +
> +        offset = ksec - sec;
> +        printf("Seconds get from host: %ld\n", sec);
> +        printf("Seconds get from kvmclock: %ld\n", ksec);
> +
> +        if (offset>  threshold || offset<  -threshold) {
> +                printf("Seconds get from kvmclock: %ld\n", ksec);
> +                return 1;
> +        }
> +
> +        for (i=0; i<  100; i++){
> +                kvm_get_wallclock(&ts);
> +                if (ts.nsec != ts_last.nsec || ts.sec != ts_last.sec){
> +                        printf ("Inconsistent wall clock returned!\n");
> +                        return 1;
> +                }
> +        }
> +        return 0;
> +}
> +
> +static void kvm_clock_test(void *data)
> +{
> +        struct test_info *hv_test_info = (struct test_info *)data;
> +        int i;
> +
> +        for (i = 0; i<  hv_test_info->loops; i++){
> +                cycle_t t0, t1;
> +                long long delta;
> +
> +                spin_lock(&hv_test_info->lock);
> +                t1 = kvm_clock_read();
> +                t0 = hv_test_info->last;
> +                hv_test_info->last = kvm_clock_read();
> +                spin_unlock(&hv_test_info->lock);
> +
> +                delta = t1 - t0;
> +                if (delta<  0){
> +                        spin_lock(&hv_test_info->lock);
> +                        ++hv_test_info->warps;
> +                        if (delta<  hv_test_info->worst){
> +                                hv_test_info->worst = delta;
> +                                printf("Worst warp %lld %\n", hv_test_info->worst);
> +                        }
> +                        spin_unlock(&hv_test_info->lock);
> +                }
> +
> +                if (!((unsigned long)i&  31))
> +                        asm volatile("rep; nop");
> +        }
> +
> +        atomic_dec(&hv_test_info->ncpus);
> +}
> +
> +static int cycle_test(int ncpus, long loops, struct test_info *ti)
> +{
> +        int i;
> +
> +        atomic_set(&ti->ncpus, ncpus);
> +        ti->loops = loops;
> +        for (i = ncpus - 1; i>= 0; i--)
> +                on_cpu_async(i, kvm_clock_test, (void *)ti);
> +
> +        /* Wait for the end of other vcpu */
> +        while(atomic_read(&ti->ncpus))
> +                ;
> +
> +        printf("Total vcpus: %d\n", ncpus);
> +        printf("Test  loops: %ld\n", ti->loops);
> +        printf("Total warps: %lld\n", ti->warps);
> +        printf("Worst warp:  %lld\n", ti->worst);
> +
> +        return ti->warps ? 1 : 0;
> +}
> +
> +int main(int ac, char **av)
> +{
> +        int ncpus = cpu_count();
> +        int nerr = 0, i;
> +        long loops = DEFAULT_TEST_LOOPS;
> +        long sec = 0;
> +        long threshold = DEFAULT_THRESHOLD;
> +
> +        if (ac>  1)
> +                loops = atol(av[1]);
> +        if (ac>  2)
> +                sec = atol(av[2]);
> +        if (ac>  3)
> +                threshold = atol(av[3]);
> +
> +        smp_init();
> +
> +        if (ncpus>  MAX_CPU)
> +                ncpus = MAX_CPU;
> +        for (i = 0; i<  ncpus; ++i)
> +                on_cpu(i, kvm_clock_init, (void *)0);
> +
> +        if (ac>  2)
> +                nerr += wallclock_test(sec, threshold);
> +
> +        printf("Check the stability of raw cycle\n");
> +        pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT
> +                          | PVCLOCK_RAW_CYCLE_BIT);
>    

What is this RAW_CYCLE_BIT ?  Did I miss something?



^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH kvm-unit-test 2/6] Introduce atomic operations
  2010-08-27 11:39   ` Glauber Costa
@ 2010-08-29  9:39     ` Avi Kivity
  0 siblings, 0 replies; 16+ messages in thread
From: Avi Kivity @ 2010-08-29  9:39 UTC (permalink / raw)
  To: Glauber Costa; +Cc: Jason Wang, mtosatti, kvm

  On 08/27/2010 02:39 PM, Glauber Costa wrote:
> On Fri, Aug 27, 2010 at 01:49:20PM +0800, Jason Wang wrote:
>> +u64 atomic64_cmpxchg(atomic64_t *v, u64 old, u64 new)
>> +{
>> +        u64 ret;
>> +        u64 _old = old;
>> +        u64 _new = new;
>> +
>> +        asm volatile("lock cmpxchgq %1,%2"
>> +                     : "=a" (ret)
>> +                     : "r" (_new),
>> +                       "m" (*(volatile long *)&v->counter), "0"(_old)
>> +                     : "memory"
>> +                     );
>> +        return ret;
>> +}
>> +
> This is wrong.
> See http://git.kernel.org/?p=linux/kernel/git/x86/linux-2.6-tip.git;a=commitdiff;h=113fc5a6e8c2288619ff7e8187a6f556b7e0d372
>
> You need to explicitly mark that you are changing the memory here.
>
> Btw, this is mostly header copying, and can miss bug fixes like the one above. Isn't there
> a way to just specify in the test that they are needed, and then grab them from linux automatically?

That may pull in dependencies.  I'd prefer using builtins like 
__sync_val_compare_and_swap() (but many atomics are not available).  Is 
there some library we can use for this?
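
For the 64-bit case here, a builtin-based version would look roughly like
this (a sketch; it assumes the compiler can expand the 8-byte builtin inline
on the i386 build, i.e. that cmpxchg8b is available):

u64 atomic64_cmpxchg(atomic64_t *v, u64 old, u64 new)
{
        /* Sketch only: returns the previous value, like the asm version. */
        return __sync_val_compare_and_swap(&v->counter, old, new);
}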

-- 
error compiling committee.c: too many arguments to function


^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH kvm-unit-test 6/6] Add a test for kvm-clock
  2010-08-27 11:27   ` Glauber Costa
@ 2010-08-30  3:07     ` Jason Wang
  0 siblings, 0 replies; 16+ messages in thread
From: Jason Wang @ 2010-08-30  3:07 UTC (permalink / raw)
  To: Glauber Costa; +Cc: mtosatti, avi, kvm


----- "Glauber Costa" <glommer@redhat.com> wrote:

> On Fri, Aug 27, 2010 at 01:49:53PM +0800, Jason Wang wrote:
> > This patch implements two tests for kvmclock. The first one checks
> > whether the time of day returned by kvmclock matches the value
> > obtained from the host. The second one checks whether the cycle of
> > kvmclock grows monotonically in an smp guest.
> > 
> > Three parameters are accepted by the test: the test loops, the
> > seconds since 1970-01-01 00:00:00 UTC (which can easily be obtained
> > through date +%s) and the max accepted offset value between the tod
> > of guest and host.
> Good.
> 
> I liked the flag usage. It might help us in the future, when we apply
> zach's series plus a couple of ideas we have, to see if the
> problem indeed goes away.
> 
> A minor tip, no strong feelings towards this, would be to extract
> some information from the host cpu and print it too.
> It would make it easier when we're analyzing these reports in the future.
> 
> tsc-based mechanisms are very sensitive to:
>  - vendor
>  - # of cpus
>  - # of sockets
>  - tsc flags
> 
> Sure we can get all this information from /proc/cpuinfo, but having
> it
> in your final report automatically would be convenient, I think.

Yes, they are useful. But since the unit tests are running as a guest, it
would be hard to gather host information by itself. So maybe we
could do this through autotest or some other kind of test launcher.


^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH kvm-unit-test 6/6] Add a test for kvm-clock
  2010-08-27 11:34   ` Glauber Costa
@ 2010-08-30  3:27     ` Jason Wang
  0 siblings, 0 replies; 16+ messages in thread
From: Jason Wang @ 2010-08-30  3:27 UTC (permalink / raw)
  To: Glauber Costa; +Cc: mtosatti, avi, kvm


----- "Glauber Costa" <glommer@redhat.com> wrote:

> On Fri, Aug 27, 2010 at 01:49:53PM +0800, Jason Wang wrote:
> > This patch implements two tests for kvmclock. The first one checks
> > whether the time of day returned by kvmclock matches the value
> > obtained from the host. The second one checks whether the cycle of
> > kvmclock grows monotonically in an smp guest.
> > 
> > Three parameters are accepted by the test: the test loops, the
> > seconds since 1970-01-01 00:00:00 UTC (which can easily be obtained
> > through date +%s) and the max accepted offset value between the tod
> > of guest and host.
> > 
> > Signed-off-by: Jason Wang <jasowang@redhat.com>
> > ---
> >  config-x86-common.mak |    6 ++
> >  x86/README            |    2 +
> >  x86/kvmclock_test.c   |  145
> +++++++++++++++++++++++++++++++++++++++++++++++++
> >  x86/unittests.cfg     |    5 ++
> >  4 files changed, 157 insertions(+), 1 deletions(-)
> >  create mode 100644 x86/kvmclock_test.c
> > 
> > diff --git a/config-x86-common.mak b/config-x86-common.mak
> > index b8ca859..b541c1c 100644
> > --- a/config-x86-common.mak
> > +++ b/config-x86-common.mak
> > @@ -26,7 +26,8 @@ FLATLIBS = lib/libcflat.a $(libgcc)
> >  tests-common = $(TEST_DIR)/vmexit.flat $(TEST_DIR)/tsc.flat \
> >                 $(TEST_DIR)/smptest.flat  $(TEST_DIR)/port80.flat \
> >                 $(TEST_DIR)/realmode.flat $(TEST_DIR)/msr.flat \
> > -               $(TEST_DIR)/hypercall.flat $(TEST_DIR)/sieve.flat
> > +               $(TEST_DIR)/hypercall.flat $(TEST_DIR)/sieve.flat \
> > +               $(TEST_DIR)/kvmclock_test.flat
> >  
> >  tests_and_config = $(TEST_DIR)/*.flat $(TEST_DIR)/unittests.cfg
> >  
> > @@ -70,6 +71,9 @@ $(TEST_DIR)/rmap_chain.flat: $(cstart.o)
> $(TEST_DIR)/rmap_chain.o \
> >  
> >  $(TEST_DIR)/svm.flat: $(cstart.o) $(TEST_DIR)/vm.o
> >  
> > +$(TEST_DIR)/kvmclock_test.flat: $(cstart.o) $(TEST_DIR)/kvmclock.o
> \
> > +                                $(TEST_DIR)/kvmclock_test.o
> > +
> >  arch_clean:
> >  	$(RM) $(TEST_DIR)/*.o $(TEST_DIR)/*.flat \
> >  	$(TEST_DIR)/.*.d $(TEST_DIR)/lib/.*.d $(TEST_DIR)/lib/*.o
> > diff --git a/x86/README b/x86/README
> > index ab5a2ae..4b90080 100644
> > --- a/x86/README
> > +++ b/x86/README
> > @@ -12,3 +12,5 @@ sieve: heavy memory access with no paging and with
> paging static and with paging
> >  smptest: run smp_id() on every cpu and compares return value to
> number
> >  tsc: write to tsc(0) and write to tsc(100000000000) and read it
> back
> >  vmexit: long loops for each: cpuid, vmcall, mov_from_cr8,
> mov_to_cr8, inl_pmtimer, ipi, ipi+halt
> > +kvmclock_test: monotonic cycle test of kvmclock and a sanity test
> of
> > +wallclock
> > diff --git a/x86/kvmclock_test.c b/x86/kvmclock_test.c
> > new file mode 100644
> > index 0000000..cd80915
> > --- /dev/null
> > +++ b/x86/kvmclock_test.c
> > @@ -0,0 +1,145 @@
> > +#include "libcflat.h"
> > +#include "smp.h"
> > +#include "atomic.h"
> > +#include "string.h"
> > +#include "kvmclock.h"
> > +
> > +#define DEFAULT_TEST_LOOPS 100000000L
> > +#define DEFAULT_THRESHOLD  60L
> > +
> > +        printf("Check the stability of raw cycle\n");
> > +        pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT
> > +                          | PVCLOCK_RAW_CYCLE_BIT);
> > +        if (cycle_test(ncpus, loops, &ti[1]))
> > +                printf("Raw cycle is not stable\n");
> > +        else
> > +                printf("Raw cycle is stable\n");
> > +
> > +        pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
> > +        printf("Monotonic cycle test:\n");
> > +        nerr += cycle_test(ncpus, loops, &ti[0]);
> > +
> > +        for (i = 0; i < ncpus; ++i)
> > +                on_cpu(i, kvm_clock_clear, (void *)0);
> > +
> > +        return nerr > 0 ? 1 : 0;
> 
> Another interesting bit of information is the total time taken by
> the first cycle_test, compared to the second (they do the same number
> of loops anyway, so no need for further math). We are all pretty sure
> the lack of a stable bit will influence kvm clock performance, but
> nobody has measured by how much yet (in big, big boxes).

I will add it in the next version.

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH kvm-unit-test 6/6] Add a test for kvm-clock
       [not found] <18442408.826301283138927321.JavaMail.root@zmail05.collab.prod.int.phx2.redhat.com>
@ 2010-08-30  3:29 ` Jason Wang
  0 siblings, 0 replies; 16+ messages in thread
From: Jason Wang @ 2010-08-30  3:29 UTC (permalink / raw)
  To: Zachary Amsden; +Cc: mtosatti, avi, kvm, glommer


----- "Zachary Amsden" <zamsden@redhat.com> wrote:

> On 08/26/2010 07:49 PM, Jason Wang wrote:
> > This patch implements two tests for kvmclock. The first one checks
> > whether the time of day returned by kvmclock matches the value
> > obtained from the host. The second one checks whether the cycle of
> > kvmclock grows monotonically in an smp guest.
> >    
> 
> Technically, it's not monotonic, it's non-decreasing.
> 
> > Three parameters are accepted by the test: the test loops, the
> > seconds since 1970-01-01 00:00:00 UTC (which can easily be obtained
> > through date +%s) and the max accepted offset value between the tod
> > of guest and host.
> >    
> 
> This in general looks awesome.
> 
> > Signed-off-by: Jason Wang<jasowang@redhat.com>
> > ---
> >   config-x86-common.mak |    6 ++
> >   x86/README            |    2 +
> >   x86/kvmclock_test.c   |  145
> +++++++++++++++++++++++++++++++++++++++++++++++++
> >   x86/unittests.cfg     |    5 ++
> >   4 files changed, 157 insertions(+), 1 deletions(-)
> >   create mode 100644 x86/kvmclock_test.c
> >
> > diff --git a/config-x86-common.mak b/config-x86-common.mak
> > index b8ca859..b541c1c 100644
> > --- a/config-x86-common.mak
> > +++ b/config-x86-common.mak
> > @@ -26,7 +26,8 @@ FLATLIBS = lib/libcflat.a $(libgcc)
> >   tests-common = $(TEST_DIR)/vmexit.flat $(TEST_DIR)/tsc.flat \
> >                  $(TEST_DIR)/smptest.flat  $(TEST_DIR)/port80.flat
> \
> >                  $(TEST_DIR)/realmode.flat $(TEST_DIR)/msr.flat \
> > -               $(TEST_DIR)/hypercall.flat $(TEST_DIR)/sieve.flat
> > +               $(TEST_DIR)/hypercall.flat $(TEST_DIR)/sieve.flat \
> > +               $(TEST_DIR)/kvmclock_test.flat
> >
> >   tests_and_config = $(TEST_DIR)/*.flat $(TEST_DIR)/unittests.cfg
> >
> > @@ -70,6 +71,9 @@ $(TEST_DIR)/rmap_chain.flat: $(cstart.o)
> $(TEST_DIR)/rmap_chain.o \
> >
> >   $(TEST_DIR)/svm.flat: $(cstart.o) $(TEST_DIR)/vm.o
> >
> > +$(TEST_DIR)/kvmclock_test.flat: $(cstart.o) $(TEST_DIR)/kvmclock.o
> \
> > +                                $(TEST_DIR)/kvmclock_test.o
> > +
> >   arch_clean:
> >   	$(RM) $(TEST_DIR)/*.o $(TEST_DIR)/*.flat \
> >   	$(TEST_DIR)/.*.d $(TEST_DIR)/lib/.*.d $(TEST_DIR)/lib/*.o
> > diff --git a/x86/README b/x86/README
> > index ab5a2ae..4b90080 100644
> > --- a/x86/README
> > +++ b/x86/README
> > @@ -12,3 +12,5 @@ sieve: heavy memory access with no paging and with
> paging static and with paging
> >   smptest: run smp_id() on every cpu and compares return value to
> number
> >   tsc: write to tsc(0) and write to tsc(100000000000) and read it
> back
> >   vmexit: long loops for each: cpuid, vmcall, mov_from_cr8,
> mov_to_cr8, inl_pmtimer, ipi, ipi+halt
> > +kvmclock_test: monotonic cycle test of kvmclock and a sanity test
> of
> > +wallclock
> > diff --git a/x86/kvmclock_test.c b/x86/kvmclock_test.c
> > new file mode 100644
> > index 0000000..cd80915
> > --- /dev/null
> > +++ b/x86/kvmclock_test.c
> > @@ -0,0 +1,145 @@
> > +#include "libcflat.h"
> > +#include "smp.h"
> > +#include "atomic.h"
> > +#include "string.h"
> > +#include "kvmclock.h"
> > +
> > +#define DEFAULT_TEST_LOOPS 100000000L
> > +#define DEFAULT_THRESHOLD  60L
> > +
> > +struct test_info {
> > +        struct spinlock lock;
> > +        long loops;               /* test loops */
> > +        u64 warps;                /* warp count */
> > +        long long worst;          /* worst warp */
> > +        volatile cycle_t last;    /* last cycle seen by test */
> > +        atomic_t ncpus;           /* number of cpu in the test*/
> > +};
> > +
> > +struct test_info ti[2];
> > +
> > +static int wallclock_test(long sec, long threshold)
> > +{
> > +        int i;
> > +        long ksec, offset;
> > +        struct timespec ts, ts_last;
> > +
> > +        printf("Wallclock test, threshold %ld\n", threshold);
> > +        kvm_get_wallclock(&ts_last);
> > +        ksec = ts_last.sec + ts_last.nsec / NSEC_PER_SEC;
> > +
> > +        offset = ksec - sec;
> > +        printf("Seconds get from host: %ld\n", sec);
> > +        printf("Seconds get from kvmclock: %ld\n", ksec);
> > +
> > +        if (offset>  threshold || offset<  -threshold) {
> > +                printf("Seconds get from kvmclock: %ld\n", ksec);
> > +                return 1;
> > +        }
> > +
> > +        for (i=0; i<  100; i++){
> > +                kvm_get_wallclock(&ts);
> > +                if (ts.nsec != ts_last.nsec || ts.sec !=
> ts_last.sec){
> > +                        printf ("Inconsistent wall clock
> returned!\n");
> > +                        return 1;
> > +                }
> > +        }
> > +        return 0;
> > +}
> > +
> > +static void kvm_clock_test(void *data)
> > +{
> > +        struct test_info *hv_test_info = (struct test_info *)data;
> > +        int i;
> > +
> > +        for (i = 0; i<  hv_test_info->loops; i++){
> > +                cycle_t t0, t1;
> > +                long long delta;
> > +
> > +                spin_lock(&hv_test_info->lock);
> > +                t1 = kvm_clock_read();
> > +                t0 = hv_test_info->last;
> > +                hv_test_info->last = kvm_clock_read();
> > +                spin_unlock(&hv_test_info->lock);
> > +
> > +                delta = t1 - t0;
> > +                if (delta<  0){
> > +                        spin_lock(&hv_test_info->lock);
> > +                        ++hv_test_info->warps;
> > +                        if (delta<  hv_test_info->worst){
> > +                                hv_test_info->worst = delta;
> > +                                printf("Worst warp %lld %\n",
> hv_test_info->worst);
> > +                        }
> > +                        spin_unlock(&hv_test_info->lock);
> > +                }
> > +
> > +                if (!((unsigned long)i&  31))
> > +                        asm volatile("rep; nop");
> > +        }
> > +
> > +        atomic_dec(&hv_test_info->ncpus);
> > +}
> > +
> > +static int cycle_test(int ncpus, long loops, struct test_info *ti)
> > +{
> > +        int i;
> > +
> > +        atomic_set(&ti->ncpus, ncpus);
> > +        ti->loops = loops;
> > +        for (i = ncpus - 1; i>= 0; i--)
> > +                on_cpu_async(i, kvm_clock_test, (void *)ti);
> > +
> > +        /* Wait for the end of other vcpu */
> > +        while(atomic_read(&ti->ncpus))
> > +                ;
> > +
> > +        printf("Total vcpus: %d\n", ncpus);
> > +        printf("Test  loops: %ld\n", ti->loops);
> > +        printf("Total warps: %lld\n", ti->warps);
> > +        printf("Worst warp:  %lld\n", ti->worst);
> > +
> > +        return ti->warps ? 1 : 0;
> > +}
> > +
> > +int main(int ac, char **av)
> > +{
> > +        int ncpus = cpu_count();
> > +        int nerr = 0, i;
> > +        long loops = DEFAULT_TEST_LOOPS;
> > +        long sec = 0;
> > +        long threshold = DEFAULT_THRESHOLD;
> > +
> > +        if (ac>  1)
> > +                loops = atol(av[1]);
> > +        if (ac>  2)
> > +                sec = atol(av[2]);
> > +        if (ac>  3)
> > +                threshold = atol(av[3]);
> > +
> > +        smp_init();
> > +
> > +        if (ncpus>  MAX_CPU)
> > +                ncpus = MAX_CPU;
> > +        for (i = 0; i<  ncpus; ++i)
> > +                on_cpu(i, kvm_clock_init, (void *)0);
> > +
> > +        if (ac>  2)
> > +                nerr += wallclock_test(sec, threshold);
> > +
> > +        printf("Check the stability of raw cycle\n");
> > +        pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT
> > +                          | PVCLOCK_RAW_CYCLE_BIT);
> >    
> 
> What is this RAW_CYCLE_BIT ?  Did I miss something?

RAW_CYCLE_BIT is used to tell the driver to return the unadjusted cycle
value, so it can be used to test whether the host can supply a stable cycle.

> 
> 
> --
> To unsubscribe from this list: send the line "unsubscribe kvm" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 16+ messages in thread

end of thread, other threads:[~2010-08-30  3:29 UTC | newest]

Thread overview: 16+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2010-08-27  5:49 [PATCH kvm-unit-test 0/6] Kvmclock test Jason Wang
2010-08-27  5:49 ` [PATCH kvm-unit-test 1/6] Introduce memory barriers Jason Wang
2010-08-27  5:49 ` [PATCH kvm-unit-test 2/6] Introduce atomic operations Jason Wang
2010-08-27 11:39   ` Glauber Costa
2010-08-29  9:39     ` Avi Kivity
2010-08-27  5:49 ` [PATCH kvm-unit-test 3/6] Export tsc related helpers Jason Wang
2010-08-27  5:49 ` [PATCH kvm-unit-test 4/6] Introduce atol() Jason Wang
2010-08-27  5:49 ` [PATCH kvm-unit-test 5/6] Add a simple kvmclock driver Jason Wang
2010-08-27 11:31   ` Glauber Costa
2010-08-27  5:49 ` [PATCH kvm-unit-test 6/6] Add a test for kvm-clock Jason Wang
2010-08-27 11:27   ` Glauber Costa
2010-08-30  3:07     ` Jason Wang
2010-08-27 11:34   ` Glauber Costa
2010-08-30  3:27     ` Jason Wang
2010-08-28  1:58   ` Zachary Amsden
     [not found] <18442408.826301283138927321.JavaMail.root@zmail05.collab.prod.int.phx2.redhat.com>
2010-08-30  3:29 ` Jason Wang
