From mboxrd@z Thu Jan 1 00:00:00 1970
From: Glauber Costa
Subject: Re: [PATCH kvm-unit-test 6/6] Add a test for kvm-clock
Date: Fri, 27 Aug 2010 08:34:56 -0300
Message-ID: <20100827113456.GM2985@mothafucka.localdomain>
References: <20100827054733.7409.63882.stgit@FreeLancer>
 <20100827054953.7409.25948.stgit@FreeLancer>
Mime-Version: 1.0
Content-Type: text/plain; charset=us-ascii
Cc: mtosatti@redhat.com, avi@redhat.com, kvm@vger.kernel.org
To: Jason Wang
Content-Disposition: inline
In-Reply-To: <20100827054953.7409.25948.stgit@FreeLancer>
Sender: kvm-owner@vger.kernel.org

On Fri, Aug 27, 2010 at 01:49:53PM +0800, Jason Wang wrote:
> This patch implements two tests for kvmclock. First one check whether
> the date of time returned by kvmclock matches the value got from
> host. Second one check whether the cycle of kvmclock grows
> monotonically in smp guest.
>
> Three parameters were accepted by the test: test loops, seconds
> since 1970-01-01 00:00:00 UTC which could be easily get through date
> +%s and the max accepted offset value between the tod of guest and
> host.
>
> Signed-off-by: Jason Wang 
> ---
>  config-x86-common.mak |    6 ++
>  x86/README            |    2 +
>  x86/kvmclock_test.c   |  145 +++++++++++++++++++++++++++++++++++++++++++++++++
>  x86/unittests.cfg     |    5 ++
>  4 files changed, 157 insertions(+), 1 deletions(-)
>  create mode 100644 x86/kvmclock_test.c
>
> diff --git a/config-x86-common.mak b/config-x86-common.mak
> index b8ca859..b541c1c 100644
> --- a/config-x86-common.mak
> +++ b/config-x86-common.mak
> @@ -26,7 +26,8 @@ FLATLIBS = lib/libcflat.a $(libgcc)
>  tests-common = $(TEST_DIR)/vmexit.flat $(TEST_DIR)/tsc.flat \
>                 $(TEST_DIR)/smptest.flat $(TEST_DIR)/port80.flat \
>                 $(TEST_DIR)/realmode.flat $(TEST_DIR)/msr.flat \
> -               $(TEST_DIR)/hypercall.flat $(TEST_DIR)/sieve.flat
> +               $(TEST_DIR)/hypercall.flat $(TEST_DIR)/sieve.flat \
> +               $(TEST_DIR)/kvmclock_test.flat
>
>  tests_and_config = $(TEST_DIR)/*.flat $(TEST_DIR)/unittests.cfg
>
> @@ -70,6 +71,9 @@ $(TEST_DIR)/rmap_chain.flat: $(cstart.o) $(TEST_DIR)/rmap_chain.o \
>
>  $(TEST_DIR)/svm.flat: $(cstart.o) $(TEST_DIR)/vm.o
>
> +$(TEST_DIR)/kvmclock_test.flat: $(cstart.o) $(TEST_DIR)/kvmclock.o \
> +                                $(TEST_DIR)/kvmclock_test.o
> +
>  arch_clean:
> 	$(RM) $(TEST_DIR)/*.o $(TEST_DIR)/*.flat \
> 	$(TEST_DIR)/.*.d $(TEST_DIR)/lib/.*.d $(TEST_DIR)/lib/*.o
> diff --git a/x86/README b/x86/README
> index ab5a2ae..4b90080 100644
> --- a/x86/README
> +++ b/x86/README
> @@ -12,3 +12,5 @@ sieve: heavy memory access with no paging and with paging static and with paging
>  smptest: run smp_id() on every cpu and compares return value to number
>  tsc: write to tsc(0) and write to tsc(100000000000) and read it back
>  vmexit: long loops for each: cpuid, vmcall, mov_from_cr8, mov_to_cr8, inl_pmtimer, ipi, ipi+halt
> +kvmclock_test: monotonic cycle test of kvmclock and a sanity test of
> +wallclock
> diff --git a/x86/kvmclock_test.c b/x86/kvmclock_test.c
> new file mode 100644
> index 0000000..cd80915
> --- /dev/null
> +++ b/x86/kvmclock_test.c
> @@ -0,0 +1,145 @@
> +#include "libcflat.h"
> +#include "smp.h"
> +#include "atomic.h"
> +#include "string.h"
> +#include "kvmclock.h"
> +
> +#define DEFAULT_TEST_LOOPS 100000000L
> +#define DEFAULT_THRESHOLD  60L
> +
> +        printf("Check the stability of raw cycle\n");
> +        pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT
> +                          | PVCLOCK_RAW_CYCLE_BIT);
> +        if (cycle_test(ncpus, loops, &ti[1]))
> +                printf("Raw cycle is not stable\n");
> +        else
> +                printf("Raw cycle is stable\n");
> +
> +        pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
> +        printf("Monotonic cycle test:\n");
> +        nerr += cycle_test(ncpus, loops, &ti[0]);
> +
> +        for (i = 0; i < ncpus; ++i)
> +                on_cpu(i, kvm_clock_clear, (void *)0);
> +
> +        return nerr > 0 ? 1 : 0;

Another interesting bit of information would be the total time taken by
the first cycle_test compared to the second (they do the same number of
loops anyway, so no further math is needed). We are all pretty sure that
the lack of a stable bit will influence kvmclock performance, but nobody
has measured by how much yet (on big, big boxes).