From: "Jan Beulich" <JBeulich@suse.com>
To: xen-devel <xen-devel@lists.xenproject.org>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
	Dario Faggioli <dario.faggioli@citrix.com>,
	Joao Martins <joao.m.martins@oracle.com>
Subject: [PATCH 3/8] x86/time: introduce and use rdtsc_ordered()
Date: Wed, 15 Jun 2016 04:27:23 -0600
Message-ID: <576149AB02000078000F539D@prv-mh.provo.novell.com>
In-Reply-To: <576140F302000078000F52FE@prv-mh.provo.novell.com>

Matching Linux commit 03b9730b76 ("x86/asm/tsc: Add rdtsc_ordered() and
use it in trivial call sites") and the earlier ones it builds upon, let's
make sure timing loops don't have their rdtsc() invocations re-ordered,
as that would harm the precision of the result (values were observed to
be several hundred clocks off without this adjustment).

Signed-off-by: Jan Beulich <jbeulich@suse.com>
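
For illustration, a minimal standalone sketch (hypothetical example, not
part of the patch) of the hazard being addressed: the CPU may execute an
unfenced RDTSC ahead of preceding loads, so a start/end pair of plain
reads can under-measure the interval being timed.

  #include <stdint.h>

  static inline uint64_t rdtsc_plain(void)
  {
      uint32_t lo, hi;

      /* Unordered read: may be speculated past earlier loads. */
      asm volatile ( "rdtsc" : "=a" (lo), "=d" (hi) );
      return ((uint64_t)hi << 32) | lo;
  }

  static inline uint64_t rdtsc_fenced(void)
  {
      /*
       * LFENCE completes only after all preceding instructions have
       * completed locally (Intel semantics; AMD wants MFENCE instead,
       * hence the synthetic feature bit introduced below).
       */
      asm volatile ( "lfence" ::: "memory" );
      return rdtsc_plain();
  }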

--- a/xen/arch/x86/apic.c
+++ b/xen/arch/x86/apic.c
@@ -1137,7 +1137,7 @@ static int __init calibrate_APIC_clock(v
     /*
      * We wrapped around just now. Let's start:
      */
-    t1 = rdtsc();
+    t1 = rdtsc_ordered();
     tt1 = apic_read(APIC_TMCCT);
 
     /*
@@ -1147,7 +1147,7 @@ static int __init calibrate_APIC_clock(v
         wait_8254_wraparound();
 
     tt2 = apic_read(APIC_TMCCT);
-    t2 = rdtsc();
+    t2 = rdtsc_ordered();
 
     /*
      * The APIC bus clock counter is 32 bits only, it
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -541,6 +541,9 @@ static void init_amd(struct cpuinfo_x86
 			wrmsr_amd_safe(0xc001100d, l, h & ~1);
 	}
 
+	/* MFENCE stops RDTSC speculation */
+	__set_bit(X86_FEATURE_MFENCE_RDTSC, c->x86_capability);
+
 	switch(c->x86)
 	{
 	case 0xf ... 0x17:
--- a/xen/arch/x86/delay.c
+++ b/xen/arch/x86/delay.c
@@ -21,10 +21,10 @@ void __udelay(unsigned long usecs)
     unsigned long ticks = usecs * (cpu_khz / 1000);
     unsigned long s, e;
 
-    s = rdtsc();
+    s = rdtsc_ordered();
     do
     {
         rep_nop();
-        e = rdtsc();
+        e = rdtsc_ordered();
     } while ((e-s) < ticks);
 }
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -123,7 +123,7 @@ static void synchronize_tsc_master(unsig
 
     for ( i = 1; i <= 5; i++ )
     {
-        tsc_value = rdtsc();
+        tsc_value = rdtsc_ordered();
         wmb();
         atomic_inc(&tsc_count);
         while ( atomic_read(&tsc_count) != (i<<1) )
--- a/xen/arch/x86/time.c
+++ b/xen/arch/x86/time.c
@@ -257,10 +257,10 @@ static u64 init_pit_and_calibrate_tsc(vo
     outb(CALIBRATE_LATCH & 0xff, PIT_CH2); /* LSB of count */
     outb(CALIBRATE_LATCH >> 8, PIT_CH2);   /* MSB of count */
 
-    start = rdtsc();
+    start = rdtsc_ordered();
     for ( count = 0; (inb(0x61) & 0x20) == 0; count++ )
         continue;
-    end = rdtsc();
+    end = rdtsc_ordered();
 
     /* Error if the CTC doesn't behave itself. */
     if ( count == 0 )
@@ -760,7 +760,7 @@ s_time_t get_s_time_fixed(u64 at_tsc)
     if ( at_tsc )
         tsc = at_tsc;
     else
-        tsc = rdtsc();
+        tsc = rdtsc_ordered();
     delta = tsc - t->local_tsc_stamp;
     now = t->stime_local_stamp + scale_delta(delta, &t->tsc_scale);
 
@@ -933,7 +933,7 @@ int cpu_frequency_change(u64 freq)
     /* TSC-extrapolated time may be bogus after frequency change. */
     /*t->stime_local_stamp = get_s_time();*/
     t->stime_local_stamp = t->stime_master_stamp;
-    curr_tsc = rdtsc();
+    curr_tsc = rdtsc_ordered();
     t->local_tsc_stamp = curr_tsc;
     set_time_scale(&t->tsc_scale, freq);
     local_irq_enable();
@@ -1248,7 +1248,7 @@ static void time_calibration_tsc_rendezv
             if ( r->master_stime == 0 )
             {
                 r->master_stime = read_platform_stime();
-                r->master_tsc_stamp = rdtsc();
+                r->master_tsc_stamp = rdtsc_ordered();
             }
             atomic_inc(&r->semaphore);
 
@@ -1274,7 +1274,7 @@ static void time_calibration_tsc_rendezv
         }
     }
 
-    c->local_tsc_stamp = rdtsc();
+    c->local_tsc_stamp = rdtsc_ordered();
     c->stime_local_stamp = get_s_time_fixed(c->local_tsc_stamp);
     c->stime_master_stamp = r->master_stime;
 
@@ -1304,7 +1304,7 @@ static void time_calibration_std_rendezv
         mb(); /* receive signal /then/ read r->master_stime */
     }
 
-    c->local_tsc_stamp = rdtsc();
+    c->local_tsc_stamp = rdtsc_ordered();
     c->stime_local_stamp = get_s_time_fixed(c->local_tsc_stamp);
     c->stime_master_stamp = r->master_stime;
 
@@ -1338,7 +1338,7 @@ void time_latch_stamps(void) {
 
     local_irq_save(flags);
     ap_bringup_ref.master_stime = read_platform_stime();
-    tsc = rdtsc();
+    tsc = rdtsc_ordered();
     local_irq_restore(flags);
 
     ap_bringup_ref.local_stime = get_s_time_fixed(tsc);
@@ -1356,7 +1356,7 @@ void init_percpu_time(void)
 
     local_irq_save(flags);
     now = read_platform_stime();
-    tsc = rdtsc();
+    tsc = rdtsc_ordered();
     local_irq_restore(flags);
 
     t->stime_master_stamp = now;
--- a/xen/include/asm-x86/cpufeature.h
+++ b/xen/include/asm-x86/cpufeature.h
@@ -16,6 +16,7 @@ XEN_CPUFEATURE(XTOPOLOGY,       (FSCAPIN
 XEN_CPUFEATURE(CPUID_FAULTING,  (FSCAPINTS+0)*32+ 6) /* cpuid faulting */
 XEN_CPUFEATURE(CLFLUSH_MONITOR, (FSCAPINTS+0)*32+ 7) /* clflush reqd with monitor */
 XEN_CPUFEATURE(APERFMPERF,      (FSCAPINTS+0)*32+ 8) /* APERFMPERF */
+XEN_CPUFEATURE(MFENCE_RDTSC,    (FSCAPINTS+0)*32+ 9) /* MFENCE synchronizes RDTSC */
 
 #define NCAPINTS (FSCAPINTS + 1) /* N 32-bit words worth of info */
 
--- a/xen/include/asm-x86/msr.h
+++ b/xen/include/asm-x86/msr.h
@@ -80,6 +80,22 @@ static inline uint64_t rdtsc(void)
     return ((uint64_t)high << 32) | low;
 }
 
+static inline uint64_t rdtsc_ordered(void)
+{
+    /*
+     * The RDTSC instruction is not ordered relative to memory access.
+     * The Intel SDM and the AMD APM are both vague on this point, but
+     * empirically an RDTSC instruction can be speculatively executed
+     * before prior loads.  An RDTSC immediately after an appropriate
+     * barrier appears to be ordered as a normal load, that is, it
+     * provides the same ordering guarantees as reading from a global
+     * memory location that some other imaginary CPU is updating
+     * continuously with a time stamp.
+     */
+    alternative("lfence", "mfence", X86_FEATURE_MFENCE_RDTSC);
+    return rdtsc();
+}
+
 #define __write_tsc(val) wrmsrl(MSR_IA32_TSC, val)
 #define write_tsc(val) ({                                       \
     /* Reliable TSCs are in lockstep across all CPUs. We should \
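
The alternative() used above emits "lfence" into the instruction stream
by default and live-patches it to "mfence" at boot on CPUs exposing the
named feature: here the synthetic MFENCE_RDTSC bit, set unconditionally
for AMD CPUs in the init_amd() hunk above, since LFENCE is not
architecturally guaranteed to stop RDTSC speculation there. As a usage
sketch (hypothetical helper, not part of the patch), the timing loops
converted above all reduce to this pattern:

  /*
   * Measure fn()'s cost in TSC ticks. With both reads fenced, the
   * delta cannot be shortened by a speculated second RDTSC.
   */
  static uint64_t measure_ticks(void (*fn)(void))
  {
      uint64_t start = rdtsc_ordered();

      fn();
      return rdtsc_ordered() - start;
  }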




Thread overview: 39+ messages
2016-06-15  9:50 [PATCH 0/8] x86/time: improve cross-CPU clock monotonicity (and more) Jan Beulich
2016-06-15 10:26 ` [PATCH 1/8] " Jan Beulich
2016-06-15 10:32   ` Jan Beulich
2016-06-15 22:51   ` Joao Martins
2016-06-16  8:27     ` Jan Beulich
2016-06-16 20:27       ` Joao Martins
2016-06-17  7:32         ` Jan Beulich
2016-06-21 12:05           ` Joao Martins
2016-06-21 12:28             ` Jan Beulich
2016-06-21 13:57               ` Joao Martins
2016-08-02 19:30   ` Andrew Cooper
2016-06-15 10:26 ` [PATCH 2/8] x86: also generate assembler usable equates for synthesized features Jan Beulich
2016-06-20 12:50   ` Andrew Cooper
2016-06-15 10:27 ` Jan Beulich [this message]
2016-06-20 12:59   ` [PATCH 3/8] x86/time: introduce and use rdtsc_ordered() Andrew Cooper
2016-06-20 13:06     ` Jan Beulich
2016-06-20 13:07       ` Andrew Cooper
2016-07-11 11:39     ` Dario Faggioli
2016-06-15 10:28 ` [PATCH 4/8] x86/time: calibrate TSC against platform timer Jan Beulich
2016-06-20 14:20   ` Andrew Cooper
2016-06-20 15:19     ` Jan Beulich
2016-08-02 19:25       ` Andrew Cooper
2016-08-03  9:32         ` Jan Beulich
2016-06-15 10:28 ` [PATCH 5/8] x86/time: correctly honor late clearing of TSC related feature flags Jan Beulich
2016-06-20 14:32   ` Andrew Cooper
2016-06-20 15:20     ` Jan Beulich
2016-07-04 15:39       ` Andrew Cooper
2016-07-04 15:53         ` Jan Beulich
2016-08-02 19:08           ` Andrew Cooper
2016-08-03  9:43             ` Jan Beulich
2016-08-31 13:42               ` Andrew Cooper
2016-08-31 14:03                 ` Jan Beulich
2016-06-15 10:29 ` [PATCH 6/8] x86/time: support 32-bit wide ACPI PM timer Jan Beulich
2016-07-04 15:40   ` Andrew Cooper
2016-06-15 10:30 ` [PATCH 7/8] x86/time: fold recurring code Jan Beulich
2016-07-04 15:43   ` Andrew Cooper
2016-06-15 10:30 ` [PATCH 8/8] x86/time: group time stamps into a structure Jan Beulich
2016-07-04 15:57   ` Andrew Cooper
2016-07-01  7:44 ` Ping: [PATCH 0/8] x86/time: improve cross-CPU clock monotonicity (and more) Jan Beulich
