kvm.vger.kernel.org archive mirror
* Re: [PATCH kvm-unit-test 6/6] Add a test for kvm-clock
       [not found] <18442408.826301283138927321.JavaMail.root@zmail05.collab.prod.int.phx2.redhat.com>
@ 2010-08-30  3:29 ` Jason Wang
  0 siblings, 0 replies; 7+ messages in thread
From: Jason Wang @ 2010-08-30  3:29 UTC (permalink / raw)
  To: Zachary Amsden; +Cc: mtosatti, avi, kvm, glommer


----- "Zachary Amsden" <zamsden@redhat.com> wrote:

> On 08/26/2010 07:49 PM, Jason Wang wrote:
> > This patch implements two tests for kvmclock. The first checks whether
> > the time of day returned by kvmclock matches the value obtained from the
> > host. The second checks whether the kvmclock cycle count grows
> > monotonically in an SMP guest.
> 
> Technically, it's not monotonic, it's non-decreasing.
> 
> > The test accepts three parameters: the number of test loops, the seconds
> > since 1970-01-01 00:00:00 UTC (easily obtained with date +%s), and the
> > maximum accepted offset between the guest and host time of day.
> 
> This in general looks awesome.
> 
> > Signed-off-by: Jason Wang <jasowang@redhat.com>
> > ---
> >   config-x86-common.mak |    6 ++
> >   x86/README            |    2 +
> >   x86/kvmclock_test.c   |  145 +++++++++++++++++++++++++++++++++++++++++++++++++
> >   x86/unittests.cfg     |    5 ++
> >   4 files changed, 157 insertions(+), 1 deletions(-)
> >   create mode 100644 x86/kvmclock_test.c
> >
> > diff --git a/config-x86-common.mak b/config-x86-common.mak
> > index b8ca859..b541c1c 100644
> > --- a/config-x86-common.mak
> > +++ b/config-x86-common.mak
> > @@ -26,7 +26,8 @@ FLATLIBS = lib/libcflat.a $(libgcc)
> >   tests-common = $(TEST_DIR)/vmexit.flat $(TEST_DIR)/tsc.flat \
> >                  $(TEST_DIR)/smptest.flat  $(TEST_DIR)/port80.flat \
> >                  $(TEST_DIR)/realmode.flat $(TEST_DIR)/msr.flat \
> > -               $(TEST_DIR)/hypercall.flat $(TEST_DIR)/sieve.flat
> > +               $(TEST_DIR)/hypercall.flat $(TEST_DIR)/sieve.flat \
> > +               $(TEST_DIR)/kvmclock_test.flat
> >
> >   tests_and_config = $(TEST_DIR)/*.flat $(TEST_DIR)/unittests.cfg
> >
> > @@ -70,6 +71,9 @@ $(TEST_DIR)/rmap_chain.flat: $(cstart.o) $(TEST_DIR)/rmap_chain.o \
> >
> >   $(TEST_DIR)/svm.flat: $(cstart.o) $(TEST_DIR)/vm.o
> >
> > +$(TEST_DIR)/kvmclock_test.flat: $(cstart.o) $(TEST_DIR)/kvmclock.o \
> > +                                $(TEST_DIR)/kvmclock_test.o
> > +
> >   arch_clean:
> >   	$(RM) $(TEST_DIR)/*.o $(TEST_DIR)/*.flat \
> >   	$(TEST_DIR)/.*.d $(TEST_DIR)/lib/.*.d $(TEST_DIR)/lib/*.o
> > diff --git a/x86/README b/x86/README
> > index ab5a2ae..4b90080 100644
> > --- a/x86/README
> > +++ b/x86/README
> > @@ -12,3 +12,5 @@ sieve: heavy memory access with no paging and with paging static and with paging
> >   smptest: run smp_id() on every cpu and compares return value to number
> >   tsc: write to tsc(0) and write to tsc(100000000000) and read it back
> >   vmexit: long loops for each: cpuid, vmcall, mov_from_cr8, mov_to_cr8, inl_pmtimer, ipi, ipi+halt
> > +kvmclock_test: monotonic cycle test of kvmclock and a sanity test of
> > +wallclock
> > diff --git a/x86/kvmclock_test.c b/x86/kvmclock_test.c
> > new file mode 100644
> > index 0000000..cd80915
> > --- /dev/null
> > +++ b/x86/kvmclock_test.c
> > @@ -0,0 +1,145 @@
> > +#include "libcflat.h"
> > +#include "smp.h"
> > +#include "atomic.h"
> > +#include "string.h"
> > +#include "kvmclock.h"
> > +
> > +#define DEFAULT_TEST_LOOPS 100000000L
> > +#define DEFAULT_THRESHOLD  60L
> > +
> > +struct test_info {
> > +        struct spinlock lock;
> > +        long loops;               /* test loops */
> > +        u64 warps;                /* warp count */
> > +        long long worst;          /* worst warp */
> > +        volatile cycle_t last;    /* last cycle seen by test */
> > +        atomic_t ncpus;           /* number of cpus in the test */
> > +};
> > +
> > +struct test_info ti[2];
> > +
> > +static int wallclock_test(long sec, long threshold)
> > +{
> > +        int i;
> > +        long ksec, offset;
> > +        struct timespec ts, ts_last;
> > +
> > +        printf("Wallclock test, threshold %ld\n", threshold);
> > +        kvm_get_wallclock(&ts_last);
> > +        ksec = ts_last.sec + ts_last.nsec / NSEC_PER_SEC;
> > +
> > +        offset = ksec - sec;
> > +        printf("Seconds get from host: %ld\n", sec);
> > +        printf("Seconds get from kvmclock: %ld\n", ksec);
> > +
> > +        if (offset > threshold || offset < -threshold) {
> > +                printf("Seconds get from kvmclock: %ld\n", ksec);
> > +                return 1;
> > +        }
> > +
> > +        for (i=0; i < 100; i++){
> > +                kvm_get_wallclock(&ts);
> > +                if (ts.nsec != ts_last.nsec || ts.sec != ts_last.sec){
> > +                        printf ("Inconsistent wall clock returned!\n");
> > +                        return 1;
> > +                }
> > +        }
> > +        return 0;
> > +}
> > +
> > +static void kvm_clock_test(void *data)
> > +{
> > +        struct test_info *hv_test_info = (struct test_info *)data;
> > +        int i;
> > +
> > +        for (i = 0; i < hv_test_info->loops; i++){
> > +                cycle_t t0, t1;
> > +                long long delta;
> > +
> > +                spin_lock(&hv_test_info->lock);
> > +                t1 = kvm_clock_read();
> > +                t0 = hv_test_info->last;
> > +                hv_test_info->last = kvm_clock_read();
> > +                spin_unlock(&hv_test_info->lock);
> > +
> > +                delta = t1 - t0;
> > +                if (delta < 0){
> > +                        spin_lock(&hv_test_info->lock);
> > +                        ++hv_test_info->warps;
> > +                        if (delta < hv_test_info->worst){
> > +                                hv_test_info->worst = delta;
> > +                                printf("Worst warp %lld\n", hv_test_info->worst);
> > +                        }
> > +                        spin_unlock(&hv_test_info->lock);
> > +                }
> > +
> > +                if (!((unsigned long)i & 31))
> > +                        asm volatile("rep; nop");
> > +        }
> > +
> > +        atomic_dec(&hv_test_info->ncpus);
> > +}
> > +
> > +static int cycle_test(int ncpus, long loops, struct test_info *ti)
> > +{
> > +        int i;
> > +
> > +        atomic_set(&ti->ncpus, ncpus);
> > +        ti->loops = loops;
> > +        for (i = ncpus - 1; i >= 0; i--)
> > +                on_cpu_async(i, kvm_clock_test, (void *)ti);
> > +
> > +        /* Wait for the other vcpus to finish */
> > +        while(atomic_read(&ti->ncpus))
> > +                ;
> > +
> > +        printf("Total vcpus: %d\n", ncpus);
> > +        printf("Test  loops: %ld\n", ti->loops);
> > +        printf("Total warps: %lld\n", ti->warps);
> > +        printf("Worst warp:  %lld\n", ti->worst);
> > +
> > +        return ti->warps ? 1 : 0;
> > +}
> > +
> > +int main(int ac, char **av)
> > +{
> > +        int ncpus = cpu_count();
> > +        int nerr = 0, i;
> > +        long loops = DEFAULT_TEST_LOOPS;
> > +        long sec = 0;
> > +        long threshold = DEFAULT_THRESHOLD;
> > +
> > +        if (ac > 1)
> > +                loops = atol(av[1]);
> > +        if (ac > 2)
> > +                sec = atol(av[2]);
> > +        if (ac > 3)
> > +                threshold = atol(av[3]);
> > +
> > +        smp_init();
> > +
> > +        if (ncpus > MAX_CPU)
> > +                ncpus = MAX_CPU;
> > +        for (i = 0; i < ncpus; ++i)
> > +                on_cpu(i, kvm_clock_init, (void *)0);
> > +
> > +        if (ac > 2)
> > +                nerr += wallclock_test(sec, threshold);
> > +
> > +        printf("Check the stability of raw cycle\n");
> > +        pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT
> > +                          | PVCLOCK_RAW_CYCLE_BIT);
> 
> What is this RAW_CYCLE_BIT?  Did I miss something?

RAW_CYCLE_BIT tells the driver to return the unadjusted cycle value, which
can be used to test whether the host supplies a stable cycle.
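
A minimal sketch of how such a flag might be honoured in the guest-side
read path; the helper names (pvclock_raw_cycles, last_value) and the flag
value below are illustrative assumptions, not taken from the kvmclock.c in
this series:

#include <stdint.h>

#define PVCLOCK_TSC_STABLE_BIT  (1 << 0)
#define PVCLOCK_RAW_CYCLE_BIT   (1 << 1)   /* assumed, test-local value */

typedef uint64_t cycle_t;

/* pvclock_flags would be set through pvclock_set_flags(); last_value is
 * the highest cycle count handed out to any caller so far. */
static unsigned char pvclock_flags;
static cycle_t last_value;

/* Stand-in for the per-cpu pvclock conversion (system_time plus the
 * scaled TSC delta); a plain rdtsc is enough for illustration. */
static cycle_t pvclock_raw_cycles(void)
{
        unsigned int lo, hi;

        asm volatile("rdtsc" : "=a"(lo), "=d"(hi));
        return ((cycle_t)hi << 32) | lo;
}

static cycle_t kvm_clock_read_sketch(void)
{
        cycle_t ret = pvclock_raw_cycles();
        cycle_t last;

        /* Raw mode: hand back the unadjusted value so any backwards step
         * stays visible to the warp test. */
        if (pvclock_flags & PVCLOCK_RAW_CYCLE_BIT)
                return ret;

        /* Adjusted mode: clamp against a global high-water mark so the
         * value returned to callers is non-decreasing across cpus. */
        do {
                last = __sync_fetch_and_add(&last_value, 0);  /* atomic read */
                if (ret <= last)
                        return last;
        } while (__sync_val_compare_and_swap(&last_value, last, ret) != last);

        return ret;
}

The only difference between the two modes is whether the global clamp is
applied before the value is returned; with the clamp in place a warp on one
vcpu is silently hidden behind the last value seen by another.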



* Re: [PATCH kvm-unit-test 6/6] Add a test for kvm-clock
  2010-08-27 11:34   ` Glauber Costa
@ 2010-08-30  3:27     ` Jason Wang
  0 siblings, 0 replies; 7+ messages in thread
From: Jason Wang @ 2010-08-30  3:27 UTC (permalink / raw)
  To: Glauber Costa; +Cc: mtosatti, avi, kvm


----- "Glauber Costa" <glommer@redhat.com> wrote:

> On Fri, Aug 27, 2010 at 01:49:53PM +0800, Jason Wang wrote:
> > This patch implements two tests for kvmclock. The first checks whether
> > the time of day returned by kvmclock matches the value obtained from the
> > host. The second checks whether the kvmclock cycle count grows
> > monotonically in an SMP guest.
> > 
> > The test accepts three parameters: the number of test loops, the seconds
> > since 1970-01-01 00:00:00 UTC (easily obtained with date +%s), and the
> > maximum accepted offset between the guest and host time of day.
> > 
> > Signed-off-by: Jason Wang <jasowang@redhat.com>
> > ---
> >  config-x86-common.mak |    6 ++
> >  x86/README            |    2 +
> >  x86/kvmclock_test.c   |  145 +++++++++++++++++++++++++++++++++++++++++++++++++
> >  x86/unittests.cfg     |    5 ++
> >  4 files changed, 157 insertions(+), 1 deletions(-)
> >  create mode 100644 x86/kvmclock_test.c
> > 
> > diff --git a/config-x86-common.mak b/config-x86-common.mak
> > index b8ca859..b541c1c 100644
> > --- a/config-x86-common.mak
> > +++ b/config-x86-common.mak
> > @@ -26,7 +26,8 @@ FLATLIBS = lib/libcflat.a $(libgcc)
> >  tests-common = $(TEST_DIR)/vmexit.flat $(TEST_DIR)/tsc.flat \
> >                 $(TEST_DIR)/smptest.flat  $(TEST_DIR)/port80.flat \
> >                 $(TEST_DIR)/realmode.flat $(TEST_DIR)/msr.flat \
> > -               $(TEST_DIR)/hypercall.flat $(TEST_DIR)/sieve.flat
> > +               $(TEST_DIR)/hypercall.flat $(TEST_DIR)/sieve.flat \
> > +               $(TEST_DIR)/kvmclock_test.flat
> >  
> >  tests_and_config = $(TEST_DIR)/*.flat $(TEST_DIR)/unittests.cfg
> >  
> > @@ -70,6 +71,9 @@ $(TEST_DIR)/rmap_chain.flat: $(cstart.o) $(TEST_DIR)/rmap_chain.o \
> >  
> >  $(TEST_DIR)/svm.flat: $(cstart.o) $(TEST_DIR)/vm.o
> >  
> > +$(TEST_DIR)/kvmclock_test.flat: $(cstart.o) $(TEST_DIR)/kvmclock.o \
> > +                                $(TEST_DIR)/kvmclock_test.o
> > +
> >  arch_clean:
> >  	$(RM) $(TEST_DIR)/*.o $(TEST_DIR)/*.flat \
> >  	$(TEST_DIR)/.*.d $(TEST_DIR)/lib/.*.d $(TEST_DIR)/lib/*.o
> > diff --git a/x86/README b/x86/README
> > index ab5a2ae..4b90080 100644
> > --- a/x86/README
> > +++ b/x86/README
> > @@ -12,3 +12,5 @@ sieve: heavy memory access with no paging and with paging static and with paging
> >  smptest: run smp_id() on every cpu and compares return value to number
> >  tsc: write to tsc(0) and write to tsc(100000000000) and read it back
> >  vmexit: long loops for each: cpuid, vmcall, mov_from_cr8, mov_to_cr8, inl_pmtimer, ipi, ipi+halt
> > +kvmclock_test: monotonic cycle test of kvmclock and a sanity test of
> > +wallclock
> > diff --git a/x86/kvmclock_test.c b/x86/kvmclock_test.c
> > new file mode 100644
> > index 0000000..cd80915
> > --- /dev/null
> > +++ b/x86/kvmclock_test.c
> > @@ -0,0 +1,145 @@
> > +#include "libcflat.h"
> > +#include "smp.h"
> > +#include "atomic.h"
> > +#include "string.h"
> > +#include "kvmclock.h"
> > +
> > +#define DEFAULT_TEST_LOOPS 100000000L
> > +#define DEFAULT_THRESHOLD  60L
> > +
> > +        printf("Check the stability of raw cycle\n");
> > +        pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT
> > +                          | PVCLOCK_RAW_CYCLE_BIT);
> > +        if (cycle_test(ncpus, loops, &ti[1]))
> > +                printf("Raw cycle is not stable\n");
> > +        else
> > +                printf("Raw cycle is stable\n");
> > +
> > +        pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
> > +        printf("Monotonic cycle test:\n");
> > +        nerr += cycle_test(ncpus, loops, &ti[0]);
> > +
> > +        for (i = 0; i < ncpus; ++i)
> > +                on_cpu(i, kvm_clock_clear, (void *)0);
> > +
> > +        return nerr > 0 ? 1 : 0;
> 
> Another interesting bit of information is the total time taken by
> the first cycle_test, compared to the second (they do the same number
> of loops anyway, so no further math is needed). We are all pretty sure
> the lack of a stable bit will influence kvmclock performance, but
> nobody has measured by how much yet (on big, big boxes).

I will add it in the next version.
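
A rough sketch of what that measurement could look like, as a hypothetical
wrapper around the cycle_test() from this patch; it assumes kvm_clock_read()
returns nanoseconds, as the monotonic test already relies on:

static int timed_cycle_test(int ncpus, long loops, struct test_info *ti,
                            const char *name)
{
        cycle_t start, end;
        int ret;

        start = kvm_clock_read();
        ret = cycle_test(ncpus, loops, ti);
        end = kvm_clock_read();

        /* Elapsed guest time for this pass, so the raw-cycle run and the
         * stable-bit run can be compared directly. */
        printf("%s: %lld ns for %ld loops\n",
               name, (long long)(end - start), loops);

        return ret;
}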


* Re: [PATCH kvm-unit-test 6/6] Add a test for kvm-clock
  2010-08-27 11:27   ` Glauber Costa
@ 2010-08-30  3:07     ` Jason Wang
  0 siblings, 0 replies; 7+ messages in thread
From: Jason Wang @ 2010-08-30  3:07 UTC (permalink / raw)
  To: Glauber Costa; +Cc: mtosatti, avi, kvm


----- "Glauber Costa" <glommer@redhat.com> wrote:

> On Fri, Aug 27, 2010 at 01:49:53PM +0800, Jason Wang wrote:
> > This patch implements two tests for kvmclock. The first checks whether
> > the time of day returned by kvmclock matches the value obtained from the
> > host. The second checks whether the kvmclock cycle count grows
> > monotonically in an SMP guest.
> > 
> > The test accepts three parameters: the number of test loops, the seconds
> > since 1970-01-01 00:00:00 UTC (easily obtained with date +%s), and the
> > maximum accepted offset between the guest and host time of day.
> Good.
> 
> I liked the flag usage. It might help us in the future, when we apply
> Zach's series plus a couple of ideas we have, to see if the
> problem indeed goes away.
> 
> A minor tip, no strong feelings towards this, would be to extract
> some information from the host cpu and print it too.
> It would make it easier when we're analyzing these reports in the future.
> 
> tsc-based mechanisms are very sensitive to:
>  - vendor
>  - # of cpus
>  - # of sockets
>  - tsc flags
> 
> Sure, we can get all this information from /proc/cpuinfo, but having it
> in your final report automatically would be convenient, I think.

Yes, they are useful. But since the unit tests run as a guest, it would be
hard for them to gather host information by themselves. So maybe we could
do this through autotest or some other kind of test launcher.
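
One possible shape for that, sketched as a hypothetical helper: the launcher
reads /proc/cpuinfo on the host, appends the interesting fields to the guest
command line after the existing three parameters, and the test just echoes
them into its report. The argument layout past av[3] is an assumption, not
part of the posted patch.

static void print_host_info(int ac, char **av)
{
        int i;

        /* av[1]..av[3] keep their meaning from the patch (loops, seconds,
         * threshold); anything after that is treated as opaque host info
         * strings supplied by the launcher, e.g. vendor, socket count or
         * tsc flags pulled from /proc/cpuinfo on the host side. */
        for (i = 4; i < ac; i++)
                printf("Host info: %s\n", av[i]);
}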



* Re: [PATCH kvm-unit-test 6/6] Add a test for kvm-clock
  2010-08-27  5:49 ` [PATCH kvm-unit-test 6/6] Add a test for kvm-clock Jason Wang
  2010-08-27 11:27   ` Glauber Costa
  2010-08-27 11:34   ` Glauber Costa
@ 2010-08-28  1:58   ` Zachary Amsden
  2 siblings, 0 replies; 7+ messages in thread
From: Zachary Amsden @ 2010-08-28  1:58 UTC (permalink / raw)
  To: Jason Wang; +Cc: mtosatti, avi, kvm, glommer

On 08/26/2010 07:49 PM, Jason Wang wrote:
> This patch implements two tests for kvmclock. The first checks whether
> the time of day returned by kvmclock matches the value obtained from the
> host. The second checks whether the kvmclock cycle count grows
> monotonically in an SMP guest.
>    

Technically, it's not monotonic, it's non-decreasing.

> The test accepts three parameters: the number of test loops, the seconds
> since 1970-01-01 00:00:00 UTC (easily obtained with date +%s), and the
> maximum accepted offset between the guest and host time of day.
>    

This in general looks awesome.

> Signed-off-by: Jason Wang <jasowang@redhat.com>
> ---
>   config-x86-common.mak |    6 ++
>   x86/README            |    2 +
>   x86/kvmclock_test.c   |  145 +++++++++++++++++++++++++++++++++++++++++++++++++
>   x86/unittests.cfg     |    5 ++
>   4 files changed, 157 insertions(+), 1 deletions(-)
>   create mode 100644 x86/kvmclock_test.c
>
> diff --git a/config-x86-common.mak b/config-x86-common.mak
> index b8ca859..b541c1c 100644
> --- a/config-x86-common.mak
> +++ b/config-x86-common.mak
> @@ -26,7 +26,8 @@ FLATLIBS = lib/libcflat.a $(libgcc)
>   tests-common = $(TEST_DIR)/vmexit.flat $(TEST_DIR)/tsc.flat \
>                  $(TEST_DIR)/smptest.flat  $(TEST_DIR)/port80.flat \
>                  $(TEST_DIR)/realmode.flat $(TEST_DIR)/msr.flat \
> -               $(TEST_DIR)/hypercall.flat $(TEST_DIR)/sieve.flat
> +               $(TEST_DIR)/hypercall.flat $(TEST_DIR)/sieve.flat \
> +               $(TEST_DIR)/kvmclock_test.flat
>
>   tests_and_config = $(TEST_DIR)/*.flat $(TEST_DIR)/unittests.cfg
>
> @@ -70,6 +71,9 @@ $(TEST_DIR)/rmap_chain.flat: $(cstart.o) $(TEST_DIR)/rmap_chain.o \
>
>   $(TEST_DIR)/svm.flat: $(cstart.o) $(TEST_DIR)/vm.o
>
> +$(TEST_DIR)/kvmclock_test.flat: $(cstart.o) $(TEST_DIR)/kvmclock.o \
> +                                $(TEST_DIR)/kvmclock_test.o
> +
>   arch_clean:
>   	$(RM) $(TEST_DIR)/*.o $(TEST_DIR)/*.flat \
>   	$(TEST_DIR)/.*.d $(TEST_DIR)/lib/.*.d $(TEST_DIR)/lib/*.o
> diff --git a/x86/README b/x86/README
> index ab5a2ae..4b90080 100644
> --- a/x86/README
> +++ b/x86/README
> @@ -12,3 +12,5 @@ sieve: heavy memory access with no paging and with paging static and with paging
>   smptest: run smp_id() on every cpu and compares return value to number
>   tsc: write to tsc(0) and write to tsc(100000000000) and read it back
>   vmexit: long loops for each: cpuid, vmcall, mov_from_cr8, mov_to_cr8, inl_pmtimer, ipi, ipi+halt
> +kvmclock_test: monotonic cycle test of kvmclock and a sanity test of
> +wallclock
> diff --git a/x86/kvmclock_test.c b/x86/kvmclock_test.c
> new file mode 100644
> index 0000000..cd80915
> --- /dev/null
> +++ b/x86/kvmclock_test.c
> @@ -0,0 +1,145 @@
> +#include "libcflat.h"
> +#include "smp.h"
> +#include "atomic.h"
> +#include "string.h"
> +#include "kvmclock.h"
> +
> +#define DEFAULT_TEST_LOOPS 100000000L
> +#define DEFAULT_THRESHOLD  60L
> +
> +struct test_info {
> +        struct spinlock lock;
> +        long loops;               /* test loops */
> +        u64 warps;                /* warp count */
> +        long long worst;          /* worst warp */
> +        volatile cycle_t last;    /* last cycle seen by test */
> +        atomic_t ncpus;           /* number of cpus in the test */
> +};
> +
> +struct test_info ti[2];
> +
> +static int wallclock_test(long sec, long threshold)
> +{
> +        int i;
> +        long ksec, offset;
> +        struct timespec ts, ts_last;
> +
> +        printf("Wallclock test, threshold %ld\n", threshold);
> +        kvm_get_wallclock(&ts_last);
> +        ksec = ts_last.sec + ts_last.nsec / NSEC_PER_SEC;
> +
> +        offset = ksec - sec;
> +        printf("Seconds get from host: %ld\n", sec);
> +        printf("Seconds get from kvmclock: %ld\n", ksec);
> +
> +        if (offset > threshold || offset < -threshold) {
> +                printf("Seconds get from kvmclock: %ld\n", ksec);
> +                return 1;
> +        }
> +
> +        for (i=0; i < 100; i++){
> +                kvm_get_wallclock(&ts);
> +                if (ts.nsec != ts_last.nsec || ts.sec != ts_last.sec){
> +                        printf ("Inconsistent wall clock returned!\n");
> +                        return 1;
> +                }
> +        }
> +        return 0;
> +}
> +
> +static void kvm_clock_test(void *data)
> +{
> +        struct test_info *hv_test_info = (struct test_info *)data;
> +        int i;
> +
> +        for (i = 0; i < hv_test_info->loops; i++){
> +                cycle_t t0, t1;
> +                long long delta;
> +
> +                spin_lock(&hv_test_info->lock);
> +                t1 = kvm_clock_read();
> +                t0 = hv_test_info->last;
> +                hv_test_info->last = kvm_clock_read();
> +                spin_unlock(&hv_test_info->lock);
> +
> +                delta = t1 - t0;
> +                if (delta < 0){
> +                        spin_lock(&hv_test_info->lock);
> +                        ++hv_test_info->warps;
> +                        if (delta < hv_test_info->worst){
> +                                hv_test_info->worst = delta;
> +                                printf("Worst warp %lld\n", hv_test_info->worst);
> +                        }
> +                        spin_unlock(&hv_test_info->lock);
> +                }
> +
> +                if (!((unsigned long)i & 31))
> +                        asm volatile("rep; nop");
> +        }
> +
> +        atomic_dec(&hv_test_info->ncpus);
> +}
> +
> +static int cycle_test(int ncpus, long loops, struct test_info *ti)
> +{
> +        int i;
> +
> +        atomic_set(&ti->ncpus, ncpus);
> +        ti->loops = loops;
> +        for (i = ncpus - 1; i >= 0; i--)
> +                on_cpu_async(i, kvm_clock_test, (void *)ti);
> +
> +        /* Wait for the other vcpus to finish */
> +        while(atomic_read(&ti->ncpus))
> +                ;
> +
> +        printf("Total vcpus: %d\n", ncpus);
> +        printf("Test  loops: %ld\n", ti->loops);
> +        printf("Total warps: %lld\n", ti->warps);
> +        printf("Worst warp:  %lld\n", ti->worst);
> +
> +        return ti->warps ? 1 : 0;
> +}
> +
> +int main(int ac, char **av)
> +{
> +        int ncpus = cpu_count();
> +        int nerr = 0, i;
> +        long loops = DEFAULT_TEST_LOOPS;
> +        long sec = 0;
> +        long threshold = DEFAULT_THRESHOLD;
> +
> +        if (ac > 1)
> +                loops = atol(av[1]);
> +        if (ac > 2)
> +                sec = atol(av[2]);
> +        if (ac > 3)
> +                threshold = atol(av[3]);
> +
> +        smp_init();
> +
> +        if (ncpus > MAX_CPU)
> +                ncpus = MAX_CPU;
> +        for (i = 0; i < ncpus; ++i)
> +                on_cpu(i, kvm_clock_init, (void *)0);
> +
> +        if (ac > 2)
> +                nerr += wallclock_test(sec, threshold);
> +
> +        printf("Check the stability of raw cycle\n");
> +        pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT
> +                          | PVCLOCK_RAW_CYCLE_BIT);
>    

What is this RAW_CYCLE_BIT?  Did I miss something?




* Re: [PATCH kvm-unit-test 6/6] Add a test for kvm-clock
  2010-08-27  5:49 ` [PATCH kvm-unit-test 6/6] Add a test for kvm-clock Jason Wang
  2010-08-27 11:27   ` Glauber Costa
@ 2010-08-27 11:34   ` Glauber Costa
  2010-08-30  3:27     ` Jason Wang
  2010-08-28  1:58   ` Zachary Amsden
  2 siblings, 1 reply; 7+ messages in thread
From: Glauber Costa @ 2010-08-27 11:34 UTC (permalink / raw)
  To: Jason Wang; +Cc: mtosatti, avi, kvm

On Fri, Aug 27, 2010 at 01:49:53PM +0800, Jason Wang wrote:
> This patch implements two tests for kvmclock. The first checks whether
> the time of day returned by kvmclock matches the value obtained from the
> host. The second checks whether the kvmclock cycle count grows
> monotonically in an SMP guest.
> 
> The test accepts three parameters: the number of test loops, the seconds
> since 1970-01-01 00:00:00 UTC (easily obtained with date +%s), and the
> maximum accepted offset between the guest and host time of day.
> 
> Signed-off-by: Jason Wang <jasowang@redhat.com>
> ---
>  config-x86-common.mak |    6 ++
>  x86/README            |    2 +
>  x86/kvmclock_test.c   |  145 +++++++++++++++++++++++++++++++++++++++++++++++++
>  x86/unittests.cfg     |    5 ++
>  4 files changed, 157 insertions(+), 1 deletions(-)
>  create mode 100644 x86/kvmclock_test.c
> 
> diff --git a/config-x86-common.mak b/config-x86-common.mak
> index b8ca859..b541c1c 100644
> --- a/config-x86-common.mak
> +++ b/config-x86-common.mak
> @@ -26,7 +26,8 @@ FLATLIBS = lib/libcflat.a $(libgcc)
>  tests-common = $(TEST_DIR)/vmexit.flat $(TEST_DIR)/tsc.flat \
>                 $(TEST_DIR)/smptest.flat  $(TEST_DIR)/port80.flat \
>                 $(TEST_DIR)/realmode.flat $(TEST_DIR)/msr.flat \
> -               $(TEST_DIR)/hypercall.flat $(TEST_DIR)/sieve.flat
> +               $(TEST_DIR)/hypercall.flat $(TEST_DIR)/sieve.flat \
> +               $(TEST_DIR)/kvmclock_test.flat
>  
>  tests_and_config = $(TEST_DIR)/*.flat $(TEST_DIR)/unittests.cfg
>  
> @@ -70,6 +71,9 @@ $(TEST_DIR)/rmap_chain.flat: $(cstart.o) $(TEST_DIR)/rmap_chain.o \
>  
>  $(TEST_DIR)/svm.flat: $(cstart.o) $(TEST_DIR)/vm.o
>  
> +$(TEST_DIR)/kvmclock_test.flat: $(cstart.o) $(TEST_DIR)/kvmclock.o \
> +                                $(TEST_DIR)/kvmclock_test.o
> +
>  arch_clean:
>  	$(RM) $(TEST_DIR)/*.o $(TEST_DIR)/*.flat \
>  	$(TEST_DIR)/.*.d $(TEST_DIR)/lib/.*.d $(TEST_DIR)/lib/*.o
> diff --git a/x86/README b/x86/README
> index ab5a2ae..4b90080 100644
> --- a/x86/README
> +++ b/x86/README
> @@ -12,3 +12,5 @@ sieve: heavy memory access with no paging and with paging static and with paging
>  smptest: run smp_id() on every cpu and compares return value to number
>  tsc: write to tsc(0) and write to tsc(100000000000) and read it back
>  vmexit: long loops for each: cpuid, vmcall, mov_from_cr8, mov_to_cr8, inl_pmtimer, ipi, ipi+halt
> +kvmclock_test: monotonic cycle test of kvmclock and a sanity test of
> +wallclock
> diff --git a/x86/kvmclock_test.c b/x86/kvmclock_test.c
> new file mode 100644
> index 0000000..cd80915
> --- /dev/null
> +++ b/x86/kvmclock_test.c
> @@ -0,0 +1,145 @@
> +#include "libcflat.h"
> +#include "smp.h"
> +#include "atomic.h"
> +#include "string.h"
> +#include "kvmclock.h"
> +
> +#define DEFAULT_TEST_LOOPS 100000000L
> +#define DEFAULT_THRESHOLD  60L
> +
> +        printf("Check the stability of raw cycle\n");
> +        pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT
> +                          | PVCLOCK_RAW_CYCLE_BIT);
> +        if (cycle_test(ncpus, loops, &ti[1]))
> +                printf("Raw cycle is not stable\n");
> +        else
> +                printf("Raw cycle is stable\n");
> +
> +        pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
> +        printf("Monotonic cycle test:\n");
> +        nerr += cycle_test(ncpus, loops, &ti[0]);
> +
> +        for (i = 0; i < ncpus; ++i)
> +                on_cpu(i, kvm_clock_clear, (void *)0);
> +
> +        return nerr > 0 ? 1 : 0;

Another interesting bit of information is the total time taken by
the first cycle_test, compared to the second (they do the same number
of loops anyway, so no further math is needed). We are all pretty sure
the lack of a stable bit will influence kvmclock performance, but
nobody has measured by how much yet (on big, big boxes).


* Re: [PATCH kvm-unit-test 6/6] Add a test for kvm-clock
  2010-08-27  5:49 ` [PATCH kvm-unit-test 6/6] Add a test for kvm-clock Jason Wang
@ 2010-08-27 11:27   ` Glauber Costa
  2010-08-30  3:07     ` Jason Wang
  2010-08-27 11:34   ` Glauber Costa
  2010-08-28  1:58   ` Zachary Amsden
  2 siblings, 1 reply; 7+ messages in thread
From: Glauber Costa @ 2010-08-27 11:27 UTC (permalink / raw)
  To: Jason Wang; +Cc: mtosatti, avi, kvm

On Fri, Aug 27, 2010 at 01:49:53PM +0800, Jason Wang wrote:
> This patch implements two tests for kvmclock. The first checks whether
> the time of day returned by kvmclock matches the value obtained from the
> host. The second checks whether the kvmclock cycle count grows
> monotonically in an SMP guest.
> 
> The test accepts three parameters: the number of test loops, the seconds
> since 1970-01-01 00:00:00 UTC (easily obtained with date +%s), and the
> maximum accepted offset between the guest and host time of day.
Good.

I liked the flag usage. It might help us in the future, when we apply
Zach's series plus a couple of ideas we have, to see if the
problem indeed goes away.

A minor tip, no strong feelings towards this, would be to extract
some information from the host cpu and print it too.
It would make it easier when we're analyzing these reports in the future.

tsc-based mechanisms are very sensitive to:
 - vendor
 - # of cpus
 - # of sockets
 - tsc flags

Sure, we can get all this information from /proc/cpuinfo, but having it
in your final report automatically would be convenient, I think.



* [PATCH kvm-unit-test 6/6] Add a test for kvm-clock
  2010-08-27  5:49 [PATCH kvm-unit-test 0/6] Kvmclock test Jason Wang
@ 2010-08-27  5:49 ` Jason Wang
  2010-08-27 11:27   ` Glauber Costa
                     ` (2 more replies)
  0 siblings, 3 replies; 7+ messages in thread
From: Jason Wang @ 2010-08-27  5:49 UTC (permalink / raw)
  To: mtosatti, avi, kvm; +Cc: glommer

This patch implements two tests for kvmclock. The first checks whether
the time of day returned by kvmclock matches the value obtained from the
host. The second checks whether the kvmclock cycle count grows
monotonically in an SMP guest.

The test accepts three parameters: the number of test loops, the seconds
since 1970-01-01 00:00:00 UTC (easily obtained with date +%s), and the
maximum accepted offset between the guest and host time of day.

Signed-off-by: Jason Wang <jasowang@redhat.com>
---
 config-x86-common.mak |    6 ++
 x86/README            |    2 +
 x86/kvmclock_test.c   |  145 +++++++++++++++++++++++++++++++++++++++++++++++++
 x86/unittests.cfg     |    5 ++
 4 files changed, 157 insertions(+), 1 deletions(-)
 create mode 100644 x86/kvmclock_test.c

diff --git a/config-x86-common.mak b/config-x86-common.mak
index b8ca859..b541c1c 100644
--- a/config-x86-common.mak
+++ b/config-x86-common.mak
@@ -26,7 +26,8 @@ FLATLIBS = lib/libcflat.a $(libgcc)
 tests-common = $(TEST_DIR)/vmexit.flat $(TEST_DIR)/tsc.flat \
                $(TEST_DIR)/smptest.flat  $(TEST_DIR)/port80.flat \
                $(TEST_DIR)/realmode.flat $(TEST_DIR)/msr.flat \
-               $(TEST_DIR)/hypercall.flat $(TEST_DIR)/sieve.flat
+               $(TEST_DIR)/hypercall.flat $(TEST_DIR)/sieve.flat \
+               $(TEST_DIR)/kvmclock_test.flat
 
 tests_and_config = $(TEST_DIR)/*.flat $(TEST_DIR)/unittests.cfg
 
@@ -70,6 +71,9 @@ $(TEST_DIR)/rmap_chain.flat: $(cstart.o) $(TEST_DIR)/rmap_chain.o \
 
 $(TEST_DIR)/svm.flat: $(cstart.o) $(TEST_DIR)/vm.o
 
+$(TEST_DIR)/kvmclock_test.flat: $(cstart.o) $(TEST_DIR)/kvmclock.o \
+                                $(TEST_DIR)/kvmclock_test.o
+
 arch_clean:
 	$(RM) $(TEST_DIR)/*.o $(TEST_DIR)/*.flat \
 	$(TEST_DIR)/.*.d $(TEST_DIR)/lib/.*.d $(TEST_DIR)/lib/*.o
diff --git a/x86/README b/x86/README
index ab5a2ae..4b90080 100644
--- a/x86/README
+++ b/x86/README
@@ -12,3 +12,5 @@ sieve: heavy memory access with no paging and with paging static and with paging
 smptest: run smp_id() on every cpu and compares return value to number
 tsc: write to tsc(0) and write to tsc(100000000000) and read it back
 vmexit: long loops for each: cpuid, vmcall, mov_from_cr8, mov_to_cr8, inl_pmtimer, ipi, ipi+halt
+kvmclock_test: monotonic cycle test of kvmclock and a sanity test of
+wallclock
diff --git a/x86/kvmclock_test.c b/x86/kvmclock_test.c
new file mode 100644
index 0000000..cd80915
--- /dev/null
+++ b/x86/kvmclock_test.c
@@ -0,0 +1,145 @@
+#include "libcflat.h"
+#include "smp.h"
+#include "atomic.h"
+#include "string.h"
+#include "kvmclock.h"
+
+#define DEFAULT_TEST_LOOPS 100000000L
+#define DEFAULT_THRESHOLD  60L
+
+struct test_info {
+        struct spinlock lock;
+        long loops;               /* test loops */
+        u64 warps;                /* warp count */
+        long long worst;          /* worst warp */
+        volatile cycle_t last;    /* last cycle seen by test */
+        atomic_t ncpus;           /* number of cpus in the test */
+};
+
+struct test_info ti[2];
+
+static int wallclock_test(long sec, long threshold)
+{
+        int i;
+        long ksec, offset;
+        struct timespec ts, ts_last;
+
+        printf("Wallclock test, threshold %ld\n", threshold);
+        kvm_get_wallclock(&ts_last);
+        ksec = ts_last.sec + ts_last.nsec / NSEC_PER_SEC;
+
+        offset = ksec - sec;
+        printf("Seconds get from host: %ld\n", sec);
+        printf("Seconds get from kvmclock: %ld\n", ksec);
+
+        if (offset > threshold || offset < -threshold) {
+                printf("Seconds get from kvmclock: %ld\n", ksec);
+                return 1;
+        }
+
+        for (i=0; i < 100; i++){
+                kvm_get_wallclock(&ts);
+                if (ts.nsec != ts_last.nsec || ts.sec != ts_last.sec){
+                        printf ("Inconsistent wall clock returned!\n");
+                        return 1;
+                }
+        }
+        return 0;
+}
+
+static void kvm_clock_test(void *data)
+{
+        struct test_info *hv_test_info = (struct test_info *)data;
+        int i;
+
+        for (i = 0; i < hv_test_info->loops; i++){
+                cycle_t t0, t1;
+                long long delta;
+
+                spin_lock(&hv_test_info->lock);
+                t1 = kvm_clock_read();
+                t0 = hv_test_info->last;
+                hv_test_info->last = kvm_clock_read();
+                spin_unlock(&hv_test_info->lock);
+
+                delta = t1 - t0;
+                if (delta < 0){
+                        spin_lock(&hv_test_info->lock);
+                        ++hv_test_info->warps;
+                        if (delta < hv_test_info->worst){
+                                hv_test_info->worst = delta;
+                                printf("Worst warp %lld %\n", hv_test_info->worst);
+                        }
+                        spin_unlock(&hv_test_info->lock);
+                }
+
+                if (!((unsigned long)i & 31))
+                        asm volatile("rep; nop");
+        }
+
+        atomic_dec(&hv_test_info->ncpus);
+}
+
+static int cycle_test(int ncpus, long loops, struct test_info *ti)
+{
+        int i;
+
+        atomic_set(&ti->ncpus, ncpus);
+        ti->loops = loops;
+        for (i = ncpus - 1; i >= 0; i--)
+                on_cpu_async(i, kvm_clock_test, (void *)ti);
+
+        /* Wait for the other vcpus to finish */
+        while(atomic_read(&ti->ncpus))
+                ;
+
+        printf("Total vcpus: %d\n", ncpus);
+        printf("Test  loops: %ld\n", ti->loops);
+        printf("Total warps: %lld\n", ti->warps);
+        printf("Worst warp:  %lld\n", ti->worst);
+
+        return ti->warps ? 1 : 0;
+}
+
+int main(int ac, char **av)
+{
+        int ncpus = cpu_count();
+        int nerr = 0, i;
+        long loops = DEFAULT_TEST_LOOPS;
+        long sec = 0;
+        long threshold = DEFAULT_THRESHOLD;
+
+        if (ac > 1)
+                loops = atol(av[1]);
+        if (ac > 2)
+                sec = atol(av[2]);
+        if (ac > 3)
+                threshold = atol(av[3]);
+
+        smp_init();
+
+        if (ncpus > MAX_CPU)
+                ncpus = MAX_CPU;
+        for (i = 0; i < ncpus; ++i)
+                on_cpu(i, kvm_clock_init, (void *)0);
+
+        if (ac > 2)
+                nerr += wallclock_test(sec, threshold);
+
+        printf("Check the stability of raw cycle\n");
+        pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT
+                          | PVCLOCK_RAW_CYCLE_BIT);
+        if (cycle_test(ncpus, loops, &ti[1]))
+                printf("Raw cycle is not stable\n");
+        else
+                printf("Raw cycle is stable\n");
+
+        pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
+        printf("Monotonic cycle test:\n");
+        nerr += cycle_test(ncpus, loops, &ti[0]);
+
+        for (i = 0; i < ncpus; ++i)
+                on_cpu(i, kvm_clock_clear, (void *)0);
+
+        return nerr > 0 ? 1 : 0;
+}
diff --git a/x86/unittests.cfg b/x86/unittests.cfg
index 7796e41..a3290cd 100644
--- a/x86/unittests.cfg
+++ b/x86/unittests.cfg
@@ -63,3 +63,8 @@ extra_params = -enable-nesting -cpu qemu64,+svm
 file = svm.flat
 smp = 2
 extra_params = -cpu qemu64,-svm
+
+[kvmclock_test]
+file = kvmclock_test.flat
+smp = 2
+extra_params = --append "10000000 `date +%s`"
\ No newline at end of file



Thread overview: 7+ messages
     [not found] <18442408.826301283138927321.JavaMail.root@zmail05.collab.prod.int.phx2.redhat.com>
2010-08-30  3:29 ` [PATCH kvm-unit-test 6/6] Add a test for kvm-clock Jason Wang
2010-08-27  5:49 [PATCH kvm-unit-test 0/6] Kvmclock test Jason Wang
2010-08-27  5:49 ` [PATCH kvm-unit-test 6/6] Add a test for kvm-clock Jason Wang
2010-08-27 11:27   ` Glauber Costa
2010-08-30  3:07     ` Jason Wang
2010-08-27 11:34   ` Glauber Costa
2010-08-30  3:27     ` Jason Wang
2010-08-28  1:58   ` Zachary Amsden
