From: Andy Lutomirski <luto@kernel.org>
To: x86@kernel.org
Cc: Borislav Petkov <bp@suse.de>,
	Peter Zijlstra <peterz@infradead.org>,
	John Stultz <john.stultz@linaro.org>,
	linux-kernel@vger.kernel.org, Len Brown <lenb@kernel.org>,
	Huang Rui <ray.huang@amd.com>,
	Denys Vlasenko <dvlasenk@redhat.com>,
	Andy Lutomirski <luto@kernel.org>
Subject: [PATCH 13/17] x86/tsc: Rename native_read_tsc() to rdtsc_unordered()
Date: Fri, 12 Jun 2015 16:41:47 -0700
Message-ID: <c6cb6f1ea95bfcd543c3e633ccdf0ec616a217a7.1434152192.git.luto@kernel.org>
In-Reply-To: <cover.1434152192.git.luto@kernel.org>

Now that there is no paravirt TSC, the "native_" prefix is no longer
appropriate.  The fact that RDTSC is not ordered with respect to
surrounding instructions can catch people by surprise, so call the
helper rdtsc_unordered().
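
As a minimal sketch (illustration only, not part of this patch),
ordering-sensitive callers pair the renamed helper with an explicit
barrier, exactly as vread_tsc() and delay_tsc() do in the hunks below.
The wrapper name example_ordered_rdtsc() is made up for this example,
and the header locations are assumed:

  #include <linux/types.h>	/* u64 */
  #include <asm/barrier.h>	/* rdtsc_barrier(), assumed location */
  #include <asm/msr.h>		/* rdtsc_unordered(), added by this patch */

  /*
   * Illustrative sketch only -- not part of this patch.
   * rdtsc_unordered() supplies no ordering beyond "asm volatile", so a
   * caller that must not see the TSC read speculated ahead of earlier
   * instructions issues rdtsc_barrier() first.
   */
  static __always_inline u64 example_ordered_rdtsc(void)
  {
  	rdtsc_barrier();		/* MFENCE or LFENCE, per CPU feature */
  	return rdtsc_unordered();	/* plain RDTSC, no ordering of its own */
  }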

Signed-off-by: Andy Lutomirski <luto@kernel.org>
---
 arch/x86/boot/compressed/aslr.c                      |  2 +-
 arch/x86/entry/vdso/vclock_gettime.c                 |  2 +-
 arch/x86/include/asm/msr.h                           | 11 ++++++++++-
 arch/x86/include/asm/pvclock.h                       |  2 +-
 arch/x86/include/asm/stackprotector.h                |  2 +-
 arch/x86/include/asm/tsc.h                           |  2 +-
 arch/x86/kernel/apb_timer.c                          |  8 ++++----
 arch/x86/kernel/apic/apic.c                          |  8 ++++----
 arch/x86/kernel/cpu/amd.c                            |  4 ++--
 arch/x86/kernel/cpu/mcheck/mce.c                     |  4 ++--
 arch/x86/kernel/espfix_64.c                          |  2 +-
 arch/x86/kernel/hpet.c                               |  4 ++--
 arch/x86/kernel/trace_clock.c                        |  2 +-
 arch/x86/kernel/tsc.c                                |  4 ++--
 arch/x86/kvm/lapic.c                                 |  4 ++--
 arch/x86/kvm/svm.c                                   |  4 ++--
 arch/x86/kvm/vmx.c                                   |  4 ++--
 arch/x86/kvm/x86.c                                   | 12 ++++++------
 arch/x86/lib/delay.c                                 |  8 ++++----
 drivers/input/gameport/gameport.c                    |  4 ++--
 drivers/input/joystick/analog.c                      |  4 ++--
 drivers/net/hamradio/baycom_epp.c                    |  2 +-
 drivers/thermal/intel_powerclamp.c                   |  4 ++--
 tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c |  4 ++--
 24 files changed, 58 insertions(+), 49 deletions(-)

diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c
index ea33236190b1..487a8a980da3 100644
--- a/arch/x86/boot/compressed/aslr.c
+++ b/arch/x86/boot/compressed/aslr.c
@@ -82,7 +82,7 @@ static unsigned long get_random_long(void)
 
 	if (has_cpuflag(X86_FEATURE_TSC)) {
 		debug_putstr(" RDTSC");
-		raw = native_read_tsc();
+		raw = rdtsc_unordered();
 
 		random ^= raw;
 		use_i8254 = false;
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index 972b488ac16a..f9a0429875a7 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -186,7 +186,7 @@ notrace static cycle_t vread_tsc(void)
 	 * but no one has ever seen it happen.
 	 */
 	rdtsc_barrier();
-	ret = (cycle_t)native_read_tsc();
+	ret = (cycle_t)rdtsc_unordered();
 
 	last = gtod->cycle_last;
 
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index c89ed6ceed02..e04f36f65c95 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -109,7 +109,16 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
 extern int rdmsr_safe_regs(u32 regs[8]);
 extern int wrmsr_safe_regs(u32 regs[8]);
 
-static __always_inline unsigned long long native_read_tsc(void)
+/**
+ * rdtsc_unordered() - returns the current TSC without ordering constraints
+ *
+ * rdtsc_unordered() returns the result of RDTSC as a 64-bit integer.  The
+ * only ordering constraint it supplies is the ordering implied by
+ * "asm volatile": it will put the RDTSC in the place you expect.  The
+ * CPU can and will speculatively execute that RDTSC, though, so the
+ * results can be non-monotonic if compared on different CPUs.
+ */
+static __always_inline unsigned long long rdtsc_unordered(void)
 {
 	DECLARE_ARGS(val, low, high);
 
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index 71bd485c2986..cfd4e89c3acf 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -62,7 +62,7 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
 static __always_inline
 u64 pvclock_get_nsec_offset(const struct pvclock_vcpu_time_info *src)
 {
-	u64 delta = native_read_tsc() - src->tsc_timestamp;
+	u64 delta = rdtsc_unordered() - src->tsc_timestamp;
 	return pvclock_scale_delta(delta, src->tsc_to_system_mul,
 				   src->tsc_shift);
 }
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index bc5fa2af112e..11422aa255e6 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -72,7 +72,7 @@ static __always_inline void boot_init_stack_canary(void)
 	 * on during the bootup the random pool has true entropy too.
 	 */
 	get_random_bytes(&canary, sizeof(canary));
-	tsc = native_read_tsc();
+	tsc = rdtsc_unordered();
 	canary += tsc + (tsc << 32UL);
 
 	current->stack_canary = canary;
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index b4883902948b..6e10f8b2252e 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -26,7 +26,7 @@ static inline cycles_t get_cycles(void)
 		return 0;
 #endif
 
-	return native_read_tsc();
+	return rdtsc_unordered();
 }
 
 extern void tsc_init(void);
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 25efa534c4e4..125d96b12141 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -263,7 +263,7 @@ static int apbt_clocksource_register(void)
 
 	/* Verify whether apbt counter works */
 	t1 = dw_apb_clocksource_read(clocksource_apbt);
-	start = native_read_tsc();
+	start = rdtsc_unordered();
 
 	/*
 	 * We don't know the TSC frequency yet, but waiting for
@@ -273,7 +273,7 @@ static int apbt_clocksource_register(void)
 	 */
 	do {
 		rep_nop();
-		now = native_read_tsc();
+		now = rdtsc_unordered();
 	} while ((now - start) < 200000UL);
 
 	/* APBT is the only always on clocksource, it has to work! */
@@ -390,13 +390,13 @@ unsigned long apbt_quick_calibrate(void)
 	old = dw_apb_clocksource_read(clocksource_apbt);
 	old += loop;
 
-	t1 = native_read_tsc();
+	t1 = rdtsc_unordered();
 
 	do {
 		new = dw_apb_clocksource_read(clocksource_apbt);
 	} while (new < old);
 
-	t2 = native_read_tsc();
+	t2 = rdtsc_unordered();
 
 	shift = 5;
 	if (unlikely(loop >> shift == 0)) {
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 51af1ed1ae2e..b9a46bf64e24 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -457,7 +457,7 @@ static int lapic_next_deadline(unsigned long delta,
 {
 	u64 tsc;
 
-	tsc = native_read_tsc();
+	tsc = rdtsc_unordered();
 	wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
 	return 0;
 }
@@ -592,7 +592,7 @@ static void __init lapic_cal_handler(struct clock_event_device *dev)
 	unsigned long pm = acpi_pm_read_early();
 
 	if (cpu_has_tsc)
-		tsc = native_read_tsc();
+		tsc = rdtsc_unordered();
 
 	switch (lapic_cal_loops++) {
 	case 0:
@@ -1209,7 +1209,7 @@ void setup_local_APIC(void)
 	long long max_loops = cpu_khz ? cpu_khz : 1000000;
 
 	if (cpu_has_tsc)
-		tsc = native_read_tsc();
+		tsc = rdtsc_unordered();
 
 	if (disable_apic) {
 		disable_ioapic_support();
@@ -1293,7 +1293,7 @@ void setup_local_APIC(void)
 		}
 		if (queued) {
 			if (cpu_has_tsc && cpu_khz) {
-				ntsc = native_read_tsc();
+				ntsc = rdtsc_unordered();
 				max_loops = (cpu_khz << 10) - (ntsc - tsc);
 			} else
 				max_loops--;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index c5ceec532799..196f2131f44d 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -118,10 +118,10 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
 
 		n = K6_BUG_LOOP;
 		f_vide = vide;
-		d = native_read_tsc();
+		d = rdtsc_unordered();
 		while (n--)
 			f_vide();
-		d2 = native_read_tsc();
+		d2 = rdtsc_unordered();
 		d = d2-d;
 
 		if (d > 20*K6_BUG_LOOP)
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index a5283d2d0094..e2c9311bf098 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -125,7 +125,7 @@ void mce_setup(struct mce *m)
 {
 	memset(m, 0, sizeof(struct mce));
 	m->cpu = m->extcpu = smp_processor_id();
-	m->tsc = native_read_tsc();
+	m->tsc = rdtsc_unordered();
 	/* We hope get_seconds stays lockless */
 	m->time = get_seconds();
 	m->cpuvendor = boot_cpu_data.x86_vendor;
@@ -1784,7 +1784,7 @@ static void collect_tscs(void *data)
 {
 	unsigned long *cpu_tsc = (unsigned long *)data;
 
-	cpu_tsc[smp_processor_id()] = native_read_tsc();
+	cpu_tsc[smp_processor_id()] = rdtsc_unordered();
 }
 
 static int mce_apei_read_done;
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index 334a2a9c034d..e1d2f6839f49 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -110,7 +110,7 @@ static void init_espfix_random(void)
 	 */
 	if (!arch_get_random_long(&rand)) {
 		/* The constant is an arbitrary large prime */
-		rand = native_read_tsc();
+		rand = rdtsc_unordered();
 		rand *= 0xc345c6b72fd16123UL;
 	}
 
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index ccf677cd9adc..141bba987f9b 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -734,7 +734,7 @@ static int hpet_clocksource_register(void)
 
 	/* Verify whether hpet counter works */
 	t1 = hpet_readl(HPET_COUNTER);
-	start = native_read_tsc();
+	start = rdtsc_unordered();
 
 	/*
 	 * We don't know the TSC frequency yet, but waiting for
@@ -744,7 +744,7 @@ static int hpet_clocksource_register(void)
 	 */
 	do {
 		rep_nop();
-		now = native_read_tsc();
+		now = rdtsc_unordered();
 	} while ((now - start) < 200000UL);
 
 	if (t1 == hpet_readl(HPET_COUNTER)) {
diff --git a/arch/x86/kernel/trace_clock.c b/arch/x86/kernel/trace_clock.c
index bd8f4d41bd56..c0ab0bed02ae 100644
--- a/arch/x86/kernel/trace_clock.c
+++ b/arch/x86/kernel/trace_clock.c
@@ -15,7 +15,7 @@ u64 notrace trace_clock_x86_tsc(void)
 	u64 ret;
 
 	rdtsc_barrier();
-	ret = native_read_tsc();
+	ret = rdtsc_unordered();
 
 	return ret;
 }
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index e66f5dcaeb63..5eb4d91ce9db 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -248,7 +248,7 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
 
 	data = cyc2ns_write_begin(cpu);
 
-	tsc_now = native_read_tsc();
+	tsc_now = rdtsc_unordered();
 	ns_now = cycles_2_ns(tsc_now);
 
 	/*
@@ -290,7 +290,7 @@ u64 native_sched_clock(void)
 	}
 
 	/* read the Time Stamp Counter: */
-	tsc_now = native_read_tsc();
+	tsc_now = rdtsc_unordered();
 
 	/* return the value in ns */
 	return cycles_2_ns(tsc_now);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 629af0f1c5c4..5b13eeea578a 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1148,7 +1148,7 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu)
 
 	tsc_deadline = apic->lapic_timer.expired_tscdeadline;
 	apic->lapic_timer.expired_tscdeadline = 0;
-	guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc());
+	guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, rdtsc_unordered());
 	trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);
 
 	/* __delay is delay_tsc whenever the hardware has TSC, thus always.  */
@@ -1216,7 +1216,7 @@ static void start_apic_timer(struct kvm_lapic *apic)
 		local_irq_save(flags);
 
 		now = apic->lapic_timer.timer.base->get_time();
-		guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc());
+		guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, rdtsc_unordered());
 		if (likely(tscdeadline > guest_tsc)) {
 			ns = (tscdeadline - guest_tsc) * 1000000ULL;
 			do_div(ns, this_tsc_khz);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 9afa233b5482..ec26eb38a768 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1077,7 +1077,7 @@ static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
 {
 	u64 tsc;
 
-	tsc = svm_scale_tsc(vcpu, native_read_tsc());
+	tsc = svm_scale_tsc(vcpu, rdtsc_unordered());
 
 	return target_tsc - tsc;
 }
@@ -3074,7 +3074,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
 	switch (ecx) {
 	case MSR_IA32_TSC: {
 		*data = svm->vmcb->control.tsc_offset +
-			svm_scale_tsc(vcpu, native_read_tsc());
+			svm_scale_tsc(vcpu, rdtsc_unordered());
 
 		break;
 	}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index fcff42100948..b220331bc575 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2236,7 +2236,7 @@ static u64 guest_read_tsc(void)
 {
 	u64 host_tsc, tsc_offset;
 
-	host_tsc = native_read_tsc();
+	host_tsc = rdtsc_unordered();
 	tsc_offset = vmcs_read64(TSC_OFFSET);
 	return host_tsc + tsc_offset;
 }
@@ -2317,7 +2317,7 @@ static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool ho
 
 static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
 {
-	return target_tsc - native_read_tsc();
+	return target_tsc - rdtsc_unordered();
 }
 
 static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c26faf408bce..a9a3f31311e1 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1430,7 +1430,7 @@ static cycle_t read_tsc(void)
 	 * but no one has ever seen it happen.
 	 */
 	rdtsc_barrier();
-	ret = (cycle_t)native_read_tsc();
+	ret = (cycle_t)rdtsc_unordered();
 
 	last = pvclock_gtod_data.clock.cycle_last;
 
@@ -1621,7 +1621,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 		return 1;
 	}
 	if (!use_master_clock) {
-		host_tsc = native_read_tsc();
+		host_tsc = rdtsc_unordered();
 		kernel_ns = get_kernel_ns();
 	}
 
@@ -2945,7 +2945,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 	if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
 		s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
-				native_read_tsc() - vcpu->arch.last_host_tsc;
+				rdtsc_unordered() - vcpu->arch.last_host_tsc;
 		if (tsc_delta < 0)
 			mark_tsc_unstable("KVM discovered backwards TSC");
 		if (check_tsc_unstable()) {
@@ -2973,7 +2973,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	kvm_x86_ops->vcpu_put(vcpu);
 	kvm_put_guest_fpu(vcpu);
-	vcpu->arch.last_host_tsc = native_read_tsc();
+	vcpu->arch.last_host_tsc = rdtsc_unordered();
 }
 
 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
@@ -6388,7 +6388,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		hw_breakpoint_restore();
 
 	vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu,
-							   native_read_tsc());
+							   rdtsc_unordered());
 
 	vcpu->mode = OUTSIDE_GUEST_MODE;
 	smp_wmb();
@@ -7186,7 +7186,7 @@ int kvm_arch_hardware_enable(void)
 	if (ret != 0)
 		return ret;
 
-	local_tsc = native_read_tsc();
+	local_tsc = rdtsc_unordered();
 	stable = !check_tsc_unstable();
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		kvm_for_each_vcpu(i, vcpu, kvm) {
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index 35115f3786a9..a524708fa165 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -55,10 +55,10 @@ static void delay_tsc(unsigned long __loops)
 	preempt_disable();
 	cpu = smp_processor_id();
 	rdtsc_barrier();
-	bclock = native_read_tsc();
+	bclock = rdtsc_unordered();
 	for (;;) {
 		rdtsc_barrier();
-		now = native_read_tsc();
+		now = rdtsc_unordered();
 		if ((now - bclock) >= loops)
 			break;
 
@@ -80,7 +80,7 @@ static void delay_tsc(unsigned long __loops)
 			loops -= (now - bclock);
 			cpu = smp_processor_id();
 			rdtsc_barrier();
-			bclock = native_read_tsc();
+			bclock = rdtsc_unordered();
 		}
 	}
 	preempt_enable();
@@ -100,7 +100,7 @@ void use_tsc_delay(void)
 int read_current_timer(unsigned long *timer_val)
 {
 	if (delay_fn == delay_tsc) {
-		*timer_val = native_read_tsc();
+		*timer_val = rdtsc_unordered();
 		return 0;
 	}
 	return -1;
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index abc0cb22e750..5671c3e35fb6 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -149,9 +149,9 @@ static int old_gameport_measure_speed(struct gameport *gameport)
 
 	for(i = 0; i < 50; i++) {
 		local_irq_save(flags);
-		t1 = native_read_tsc();
+		t1 = rdtsc_unordered();
 		for (t = 0; t < 50; t++) gameport_read(gameport);
-		t2 = native_read_tsc();
+		t2 = rdtsc_unordered();
 		local_irq_restore(flags);
 		udelay(i * 10);
 		if (t2 - t1 < tx) tx = t2 - t1;
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index f871b4f00056..3b251daa2092 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -143,7 +143,7 @@ struct analog_port {
 
 #include <linux/i8253.h>
 
-#define GET_TIME(x)	do { if (cpu_has_tsc) x = (unsigned int)native_read_tsc(); else x = get_time_pit(); } while (0)
+#define GET_TIME(x)	do { if (cpu_has_tsc) x = (unsigned int)rdtsc_unordered(); else x = get_time_pit(); } while (0)
 #define DELTA(x,y)	(cpu_has_tsc ? ((y) - (x)) : ((x) - (y) + ((x) < (y) ? PIT_TICK_RATE / HZ : 0)))
 #define TIME_NAME	(cpu_has_tsc?"TSC":"PIT")
 static unsigned int get_time_pit(void)
@@ -160,7 +160,7 @@ static unsigned int get_time_pit(void)
         return count;
 }
 #elif defined(__x86_64__)
-#define GET_TIME(x)	do { x = (unsigned int)native_read_tsc(); } while (0)
+#define GET_TIME(x)	do { x = (unsigned int)rdtsc_unordered(); } while (0)
 #define DELTA(x,y)	((y)-(x))
 #define TIME_NAME	"TSC"
 #elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_TILE)
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 44e5c3b5e0af..8302a54bb583 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -638,7 +638,7 @@ static int receive(struct net_device *dev, int cnt)
 #define GETTICK(x)                                                \
 ({                                                                \
 	if (cpu_has_tsc)                                          \
-		x = (unsigned int)native_read_tsc();		  \
+		x = (unsigned int)rdtsc_unordered();		  \
 })
 #else /* __i386__ */
 #define GETTICK(x)
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index 933c5e599d1d..ea6fd07739fd 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -340,7 +340,7 @@ static bool powerclamp_adjust_controls(unsigned int target_ratio,
 
 	/* check result for the last window */
 	msr_now = pkg_state_counter();
-	tsc_now = native_read_tsc();
+	tsc_now = rdtsc_unordered();
 
 	/* calculate pkg cstate vs tsc ratio */
 	if (!msr_last || !tsc_last)
@@ -482,7 +482,7 @@ static void poll_pkg_cstate(struct work_struct *dummy)
 	u64 val64;
 
 	msr_now = pkg_state_counter();
-	tsc_now = native_read_tsc();
+	tsc_now = rdtsc_unordered();
 	jiffies_now = jiffies;
 
 	/* calculate pkg cstate vs tsc ratio */
diff --git a/tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c b/tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c
index f02b0c0bff9b..511b7cb3a9c8 100644
--- a/tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c
+++ b/tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c
@@ -81,11 +81,11 @@ static int __init cpufreq_test_tsc(void)
 
 	printk(KERN_DEBUG "start--> \n");
 	then = read_pmtmr();
-	then_tsc = native_read_tsc();
+	then_tsc = rdtsc_unordered();
 	for (i=0;i<20;i++) {
 		mdelay(100);
 		now = read_pmtmr();
-		now_tsc = native_read_tsc();
+		now_tsc = rdtsc_unordered();
 		diff = (now - then) & 0xFFFFFF;
 		diff_tsc = now_tsc - then_tsc;
 		printk(KERN_DEBUG "t1: %08u t2: %08u diff_pmtmr: %08u diff_tsc: %016llu\n", then, now, diff, diff_tsc);
-- 
2.4.2


Thread overview: 22+ messages
2015-06-12 23:41 [PATCH 00/17] x86/tsc: Clean up rdtsc helpers Andy Lutomirski
2015-06-12 23:41 ` [PATCH 01/17] x86/tsc: Inline native_read_tsc and remove __native_read_tsc Andy Lutomirski
2015-06-12 23:41 ` [PATCH 02/17] x86/msr/kvm: Remove vget_cycles() Andy Lutomirski
2015-06-12 23:41 ` [PATCH 03/17] x86/tsc/paravirt: Remove the read_tsc and read_tscp paravirt hooks Andy Lutomirski
2015-06-12 23:41 ` [PATCH 04/17] x86/tsc: Replace rdtscll with native_read_tsc Andy Lutomirski
     [not found] ` <cover.1434087075.git.luto@kernel.org>
2015-06-12 23:41   ` [PATCH 03/17] " Andy Lutomirski
2015-06-12 23:41   ` [PATCH 04/17] x86/tsc: Remove the rdtscp and rdtscpll macros Andy Lutomirski
2015-06-12 23:41   ` [PATCH 05/17] x86/tsc/paravirt: Remove the read_tsc and read_tscp paravirt hooks Andy Lutomirski
2015-06-12 23:41   ` [PATCH 16/17] x86/tsc: Use rdtsc_unordered() in check_tsc_warp() Andy Lutomirski
2015-06-12 23:41 ` [PATCH 05/17] x86/tsc: Remove the rdtscp and rdtscpll macros Andy Lutomirski
2015-06-12 23:41 ` [PATCH 06/17] x86/tsc: Use the full 64-bit tsc in tsc_delay Andy Lutomirski
2015-06-12 23:41 ` [PATCH 07/17] x86/cpu/amd: Use the full 64-bit TSC to detect the 2.6.2 bug Andy Lutomirski
2015-06-12 23:41 ` [PATCH 08/17] baycom_epp: Replace rdtscl() with native_read_tsc() Andy Lutomirski
2015-06-12 23:41 ` [PATCH 09/17] staging/lirc_serial: Remove TSC-based timing Andy Lutomirski
2015-06-12 23:41 ` [PATCH 10/17] input/joystick/analog: Switch from rdtscl() to native_read_tsc() Andy Lutomirski
2015-06-12 23:41 ` [PATCH 11/17] drivers/input/gameport: Replace rdtscl() with native_read_tsc() Andy Lutomirski
2015-06-12 23:41 ` [PATCH 12/17] x86/tsc: Remove rdtscl() Andy Lutomirski
2015-06-12 23:41 ` Andy Lutomirski [this message]
2015-06-12 23:41 ` [PATCH 14/17] x86/tsc: Move rdtsc_barrier() and rename it to barrier_before_rdtsc() Andy Lutomirski
2015-06-12 23:41 ` [PATCH 15/17] x86: Add rdtsc_ordered() and use it in trivial call sites Andy Lutomirski
2015-06-12 23:41 ` [PATCH 16/17] x86/tsc: Use rdtsc_ordered() in check_tsc_warp() and drop extra barriers Andy Lutomirski
2015-06-12 23:41 ` [PATCH 17/17] x86/tsc: In read_tsc, use rdtsc_ordered() instead of get_cycles() Andy Lutomirski
