From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1752880AbbJROUS (ORCPT ); Sun, 18 Oct 2015 10:20:18 -0400 Received: from mx2.suse.de ([195.135.220.15]:47018 "EHLO mx2.suse.de" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1751900AbbJROUO (ORCPT ); Sun, 18 Oct 2015 10:20:14 -0400 Date: Sun, 18 Oct 2015 16:20:07 +0200 From: Borislav Petkov To: x86-ml Cc: Peter Zijlstra , Andy Lutomirski , Steven Rostedt , lkml Subject: [RFC PATCH] x86: Kill notsc Message-ID: <20151018142007.GA11294@pd.tnic> MIME-Version: 1.0 Content-Type: text/plain; charset=utf-8 Content-Disposition: inline User-Agent: Mutt/1.5.23 (2014-03-12) Sender: linux-kernel-owner@vger.kernel.org List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Ok, let's try this and see where it takes us. Patch has been only lightly tested in kvm - I'll hammer on it for real once we agree about the general form. Anyway, this patch is something Peter and I have been talking about on IRC a couple of times already. I finally found some free time to poke at it, here's the result. Thoughts? --- Kill "notsc" cmdline option and all the glue around it. The two boxes worldwide which don't have a TSC should disable X86_TSC. Thus, make native_sched_clock() use TSC unconditionally, even if the TSC is unstable because that's fine there. This gets rid of the static key too and makes the function even simpler and faster, which is a Good Thing(tm). The jiffies-fallback is for the !X86_TSC case. 
Signed-off-by: Borislav Petkov --- Documentation/kernel-parameters.txt | 2 - Documentation/x86/x86_64/boot-options.txt | 5 --- arch/x86/include/asm/tsc.h | 3 +- arch/x86/kernel/apic/apic.c | 2 +- arch/x86/kernel/tsc.c | 74 ++++++++----------------------- 5 files changed, 21 insertions(+), 65 deletions(-) diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 50fc09b623f6..2589559b7520 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -2549,8 +2549,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted. nosync [HW,M68K] Disables sync negotiation for all devices. - notsc [BUGS=X86-32] Disable Time Stamp Counter - nousb [USB] Disable the USB subsystem nowatchdog [KNL] Disable both lockup detectors, i.e. diff --git a/Documentation/x86/x86_64/boot-options.txt b/Documentation/x86/x86_64/boot-options.txt index 68ed3114c363..0e43d94d9567 100644 --- a/Documentation/x86/x86_64/boot-options.txt +++ b/Documentation/x86/x86_64/boot-options.txt @@ -88,11 +88,6 @@ APICs Timing - notsc - Don't use the CPU time stamp counter to read the wall time. - This can be used to work around timing problems on multiprocessor systems - with not properly synchronized CPUs. - nohpet Don't use the HPET timer. 
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h index 6d7c5479bcea..aa628d0f4bb0 100644 --- a/arch/x86/include/asm/tsc.h +++ b/arch/x86/include/asm/tsc.h @@ -33,7 +33,6 @@ extern void tsc_init(void); extern void mark_tsc_unstable(char *reason); extern int unsynchronized_tsc(void); extern int check_tsc_unstable(void); -extern int check_tsc_disabled(void); extern unsigned long native_calibrate_tsc(void); extern unsigned long long native_sched_clock_from_tsc(u64 tsc); @@ -46,7 +45,7 @@ extern int tsc_clocksource_reliable; extern void check_tsc_sync_source(int cpu); extern void check_tsc_sync_target(void); -extern int notsc_setup(char *); +extern int notsc_setup(void); extern void tsc_save_sched_clock_state(void); extern void tsc_restore_sched_clock_state(void); diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 2f69e3b184f6..09bf96f48227 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -152,7 +152,7 @@ static int apic_calibrate_pmtmr __initdata; static __init int setup_apicpmtimer(char *s) { apic_calibrate_pmtmr = 1; - notsc_setup(NULL); + notsc_setup(); return 0; } __setup("apicpmtimer", setup_apicpmtimer); diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 69b84a26ea17..6c4bc8dc1a62 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -34,13 +34,6 @@ EXPORT_SYMBOL(tsc_khz); */ static int __read_mostly tsc_unstable; -/* native_sched_clock() is called before tsc_init(), so - we must start with the TSC soft disabled to prevent - erroneous rdtsc usage on !cpu_has_tsc processors */ -static int __read_mostly tsc_disabled = -1; - -static DEFINE_STATIC_KEY_FALSE(__use_tsc); - int tsc_clocksource_reliable; /* @@ -273,24 +266,20 @@ done: */ u64 native_sched_clock(void) { - if (static_branch_likely(&__use_tsc)) { - u64 tsc_now = rdtsc(); - - /* return the value in ns */ - return cycles_2_ns(tsc_now); - } - +#ifdef CONFIG_X86_TSC + /* return the value in ns */ + return 
cycles_2_ns(rdtsc()); +#else /* - * Fall back to jiffies if there's no TSC available: - * ( But note that we still use it if the TSC is marked - * unstable. We do this because unlike Time Of Day, - * the scheduler clock tolerates small errors and it's - * very important for it to be as fast as the platform - * can achieve it. ) + * Fall back to jiffies if there's no TSC available: ( But note that we + * still use it if the TSC is marked unstable. We do this because unlike + * Time Of Day, the scheduler clock tolerates small errors and it's very + * important for it to be as fast as the platform can achieve it. ) + * + * No locking - a rare wrong value is not a big deal: */ - - /* No locking but a rare wrong value is not a big deal: */ return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ); +#endif } /* @@ -319,32 +308,15 @@ int check_tsc_unstable(void) } EXPORT_SYMBOL_GPL(check_tsc_unstable); -int check_tsc_disabled(void) -{ - return tsc_disabled; -} -EXPORT_SYMBOL_GPL(check_tsc_disabled); - -#ifdef CONFIG_X86_TSC -int __init notsc_setup(char *str) -{ - pr_warn("Kernel compiled with CONFIG_X86_TSC, cannot disable TSC completely\n"); - tsc_disabled = 1; - return 1; -} -#else -/* - * disable flag for tsc. Takes effect by clearing the TSC cpu flag - * in cpu/common.c - */ -int __init notsc_setup(char *str) +/* Disable the TSC feature flag to avoid further TSC use. 
*/ +int __init notsc_setup(void) { +#ifndef CONFIG_X86_TSC setup_clear_cpu_cap(X86_FEATURE_TSC); return 1; -} #endif - -__setup("notsc", notsc_setup); + return 0; +} static int no_sched_irq_time; @@ -1137,7 +1109,7 @@ out: static int __init init_tsc_clocksource(void) { - if (!cpu_has_tsc || tsc_disabled > 0 || !tsc_khz) + if (!cpu_has_tsc || !tsc_khz) return 0; if (tsc_clocksource_reliable) @@ -1176,7 +1148,7 @@ void __init tsc_init(void) x86_init.timers.tsc_pre_init(); - if (!cpu_has_tsc) { + if (notsc_setup()) { setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER); return; } @@ -1205,14 +1177,6 @@ void __init tsc_init(void) set_cyc2ns_scale(cpu_khz, cpu); } - if (tsc_disabled > 0) - return; - - /* now allow native_sched_clock() to use rdtsc */ - - tsc_disabled = 0; - static_branch_enable(&__use_tsc); - if (!no_sched_irq_time) enable_sched_clock_irqtime(); @@ -1239,7 +1203,7 @@ unsigned long calibrate_delay_is_known(void) { int i, cpu = smp_processor_id(); - if (!tsc_disabled && !cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC)) + if (!cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC)) return 0; for_each_online_cpu(i) -- 2.3.5 -- Regards/Gruss, Boris. ECO tip #101: Trim your mails when you reply.