From: Borislav Petkov <bp@suse.de>
To: "H. Peter Anvin" <hpa@zytor.com>,
	Andy Lutomirski <luto@amacapital.net>,
	Brian Gerst <brgerst@gmail.com>
Cc: the arch/x86 maintainers <x86@kernel.org>,
	Linux Kernel Mailing List <linux-kernel@vger.kernel.org>,
	Ingo Molnar <mingo@kernel.org>,
	Denys Vlasenko <dvlasenk@redhat.com>,
	Linus Torvalds <torvalds@linux-foundation.org>
Subject: Re: [PATCH] x86: static_cpu_has_safe: discard dynamic check after init
Date: Wed, 20 Jan 2016 00:10:21 +0100	[thread overview]
Message-ID: <20160119231021.GG15071@pd.tnic> (raw)
In-Reply-To: <20160119135714.GD15071@pd.tnic>

On Tue, Jan 19, 2016 at 02:57:14PM +0100, Borislav Petkov wrote:
> Patch removing it below.

Andy has a point: I should simply drop the old static_cpu_has() and kill the
"_safe" suffix of the remaining variant. Updated patch below.
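
As a quick illustration of how callers look after the rename (hypothetical
call sites, not part of the patch; the rule from the comment in cpufeature.h
still applies - static_cpu_has() in fast paths, boot_cpu_has() everywhere
else):

	/* hot path: patched by alternatives at boot, no runtime bit test */
	if (static_cpu_has(X86_FEATURE_XSAVE))
		do_xsave_path();	/* hypothetical helper */
	else
		do_fxsave_path();	/* hypothetical helper */

	/* init/slow path: a plain bit test in boot_cpu_data is fine */
	if (boot_cpu_has(X86_FEATURE_XSAVE))
		setup_xsave_stuff();	/* hypothetical helper */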

---
 arch/x86/Kconfig.debug               | 10 ----
 arch/x86/include/asm/cpufeature.h    | 99 +++---------------------------------
 arch/x86/include/asm/fpu/internal.h  | 14 ++---
 arch/x86/kernel/apic/apic_numachip.c |  4 +-
 arch/x86/kernel/cpu/common.c         |  4 +-
 arch/x86/kernel/vm86_32.c            |  2 +-
 drivers/cpufreq/intel_pstate.c       |  2 +-
 fs/btrfs/disk-io.c                   |  2 +-
 8 files changed, 21 insertions(+), 116 deletions(-)

diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 9b18ed9..68a2d1f 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -350,16 +350,6 @@ config DEBUG_IMR_SELFTEST
 
 	  If unsure say N here.
 
-config X86_DEBUG_STATIC_CPU_HAS
-	bool "Debug alternatives"
-	depends on DEBUG_KERNEL
-	---help---
-	  This option causes additional code to be generated which
-	  fails if static_cpu_has() is used before alternatives have
-	  run.
-
-	  If unsure, say N.
-
 config X86_DEBUG_FPU
 	bool "Debug the x86 FPU code"
 	depends on DEBUG_KERNEL
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 7ad8c94..9219f00 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -406,103 +406,20 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
 #define cpu_has_osxsave		boot_cpu_has(X86_FEATURE_OSXSAVE)
 #define cpu_has_hypervisor	boot_cpu_has(X86_FEATURE_HYPERVISOR)
 /*
- * Do not add any more of those clumsy macros - use static_cpu_has_safe() for
+ * Do not add any more of those clumsy macros - use static_cpu_has() for
  * fast paths and boot_cpu_has() otherwise!
  */
 
 #if __GNUC__ >= 4 && defined(CONFIG_X86_FAST_FEATURE_TESTS)
 extern void warn_pre_alternatives(void);
-extern bool __static_cpu_has_safe(u16 bit);
+extern bool __static_cpu_has(u16 bit);
 
 /*
  * Static testing of CPU features.  Used the same as boot_cpu_has().
  * These are only valid after alternatives have run, but will statically
  * patch the target code for additional performance.
  */
-static __always_inline __pure bool __static_cpu_has(u16 bit)
-{
-#ifdef CC_HAVE_ASM_GOTO
-
-#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
-
-		/*
-		 * Catch too early usage of this before alternatives
-		 * have run.
-		 */
-		asm_volatile_goto("1: jmp %l[t_warn]\n"
-			 "2:\n"
-			 ".section .altinstructions,\"a\"\n"
-			 " .long 1b - .\n"
-			 " .long 0\n"		/* no replacement */
-			 " .word %P0\n"		/* 1: do replace */
-			 " .byte 2b - 1b\n"	/* source len */
-			 " .byte 0\n"		/* replacement len */
-			 " .byte 0\n"		/* pad len */
-			 ".previous\n"
-			 /* skipping size check since replacement size = 0 */
-			 : : "i" (X86_FEATURE_ALWAYS) : : t_warn);
-
-#endif
-
-		asm_volatile_goto("1: jmp %l[t_no]\n"
-			 "2:\n"
-			 ".section .altinstructions,\"a\"\n"
-			 " .long 1b - .\n"
-			 " .long 0\n"		/* no replacement */
-			 " .word %P0\n"		/* feature bit */
-			 " .byte 2b - 1b\n"	/* source len */
-			 " .byte 0\n"		/* replacement len */
-			 " .byte 0\n"		/* pad len */
-			 ".previous\n"
-			 /* skipping size check since replacement size = 0 */
-			 : : "i" (bit) : : t_no);
-		return true;
-	t_no:
-		return false;
-
-#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
-	t_warn:
-		warn_pre_alternatives();
-		return false;
-#endif
-
-#else /* CC_HAVE_ASM_GOTO */
-
-		u8 flag;
-		/* Open-coded due to __stringify() in ALTERNATIVE() */
-		asm volatile("1: movb $0,%0\n"
-			     "2:\n"
-			     ".section .altinstructions,\"a\"\n"
-			     " .long 1b - .\n"
-			     " .long 3f - .\n"
-			     " .word %P1\n"		/* feature bit */
-			     " .byte 2b - 1b\n"		/* source len */
-			     " .byte 4f - 3f\n"		/* replacement len */
-			     " .byte 0\n"		/* pad len */
-			     ".previous\n"
-			     ".section .discard,\"aw\",@progbits\n"
-			     " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
-			     ".previous\n"
-			     ".section .altinstr_replacement,\"ax\"\n"
-			     "3: movb $1,%0\n"
-			     "4:\n"
-			     ".previous\n"
-			     : "=qm" (flag) : "i" (bit));
-		return flag;
-
-#endif /* CC_HAVE_ASM_GOTO */
-}
-
-#define static_cpu_has(bit)					\
-(								\
-	__builtin_constant_p(boot_cpu_has(bit)) ?		\
-		boot_cpu_has(bit) :				\
-	__builtin_constant_p(bit) ?				\
-		__static_cpu_has(bit) :				\
-		boot_cpu_has(bit)				\
-)
-
-static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
+static __always_inline __pure bool _static_cpu_has(u16 bit)
 {
 #ifdef CC_HAVE_ASM_GOTO
 		asm_volatile_goto("1: jmp %l[t_dynamic]\n"
@@ -536,7 +453,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
 	t_no:
 		return false;
 	t_dynamic:
-		return __static_cpu_has_safe(bit);
+		return __static_cpu_has(bit);
 #else
 		u8 flag;
 		/* Open-coded due to __stringify() in ALTERNATIVE() */
@@ -574,22 +491,21 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
 			     ".previous\n"
 			     : "=qm" (flag)
 			     : "i" (bit), "i" (X86_FEATURE_ALWAYS));
-		return (flag == 2 ? __static_cpu_has_safe(bit) : flag);
+		return (flag == 2 ? __static_cpu_has(bit) : flag);
 #endif /* CC_HAVE_ASM_GOTO */
 }
 
-#define static_cpu_has_safe(bit)				\
+#define static_cpu_has(bit)					\
 (								\
 	__builtin_constant_p(boot_cpu_has(bit)) ?		\
 		boot_cpu_has(bit) :				\
-		_static_cpu_has_safe(bit)			\
+		_static_cpu_has(bit)				\
 )
 #else
 /*
  * gcc 3.x is too stupid to do the static test; fall back to dynamic.
  */
 #define static_cpu_has(bit)		boot_cpu_has(bit)
-#define static_cpu_has_safe(bit)	boot_cpu_has(bit)
 #endif
 
 #define cpu_has_bug(c, bit)		cpu_has(c, (bit))
@@ -597,7 +513,6 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
 #define clear_cpu_bug(c, bit)		clear_cpu_cap(c, (bit))
 
 #define static_cpu_has_bug(bit)		static_cpu_has((bit))
-#define static_cpu_has_bug_safe(bit)	static_cpu_has_safe((bit))
 #define boot_cpu_has_bug(bit)		cpu_has_bug(&boot_cpu_data, (bit))
 
 #define MAX_CPU_FEATURES		(NCAPINTS * 32)
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 0fd440d..97022dd 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -58,22 +58,22 @@ extern u64 fpu__get_supported_xfeatures_mask(void);
  */
 static __always_inline __pure bool use_eager_fpu(void)
 {
-	return static_cpu_has_safe(X86_FEATURE_EAGER_FPU);
+	return static_cpu_has(X86_FEATURE_EAGER_FPU);
 }
 
 static __always_inline __pure bool use_xsaveopt(void)
 {
-	return static_cpu_has_safe(X86_FEATURE_XSAVEOPT);
+	return static_cpu_has(X86_FEATURE_XSAVEOPT);
 }
 
 static __always_inline __pure bool use_xsave(void)
 {
-	return static_cpu_has_safe(X86_FEATURE_XSAVE);
+	return static_cpu_has(X86_FEATURE_XSAVE);
 }
 
 static __always_inline __pure bool use_fxsr(void)
 {
-	return static_cpu_has_safe(X86_FEATURE_FXSR);
+	return static_cpu_has(X86_FEATURE_FXSR);
 }
 
 /*
@@ -300,7 +300,7 @@ static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
 
 	WARN_ON(system_state != SYSTEM_BOOTING);
 
-	if (static_cpu_has_safe(X86_FEATURE_XSAVES))
+	if (static_cpu_has(X86_FEATURE_XSAVES))
 		XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
 	else
 		XSTATE_OP(XSAVE, xstate, lmask, hmask, err);
@@ -322,7 +322,7 @@ static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
 
 	WARN_ON(system_state != SYSTEM_BOOTING);
 
-	if (static_cpu_has_safe(X86_FEATURE_XSAVES))
+	if (static_cpu_has(X86_FEATURE_XSAVES))
 		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
 	else
 		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
@@ -460,7 +460,7 @@ static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
 	 * pending. Clear the x87 state here by setting it to fixed values.
 	 * "m" is a random variable that should be in L1.
 	 */
-	if (unlikely(static_cpu_has_bug_safe(X86_BUG_FXSAVE_LEAK))) {
+	if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) {
 		asm volatile(
 			"fnclex\n\t"
 			"emms\n\t"
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index c80c02c..ab5c2c6 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -30,7 +30,7 @@ static unsigned int numachip1_get_apic_id(unsigned long x)
 	unsigned long value;
 	unsigned int id = (x >> 24) & 0xff;
 
-	if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) {
+	if (static_cpu_has(X86_FEATURE_NODEID_MSR)) {
 		rdmsrl(MSR_FAM10H_NODE_ID, value);
 		id |= (value << 2) & 0xff00;
 	}
@@ -178,7 +178,7 @@ static void fixup_cpu_id(struct cpuinfo_x86 *c, int node)
 	this_cpu_write(cpu_llc_id, node);
 
 	/* Account for nodes per socket in multi-core-module processors */
-	if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) {
+	if (static_cpu_has(X86_FEATURE_NODEID_MSR)) {
 		rdmsrl(MSR_FAM10H_NODE_ID, val);
 		nodes = ((val >> 3) & 7) + 1;
 	}
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 37830de..a57ec0d 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1483,11 +1483,11 @@ void warn_pre_alternatives(void)
 EXPORT_SYMBOL_GPL(warn_pre_alternatives);
 #endif
 
-inline bool __static_cpu_has_safe(u16 bit)
+inline bool __static_cpu_has(u16 bit)
 {
 	return boot_cpu_has(bit);
 }
-EXPORT_SYMBOL_GPL(__static_cpu_has_safe);
+EXPORT_SYMBOL_GPL(__static_cpu_has);
 
 static void bsp_resume(void)
 {
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index e574b85..3dce1ca 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -362,7 +362,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
 	/* make room for real-mode segments */
 	tsk->thread.sp0 += 16;
 
-	if (static_cpu_has_safe(X86_FEATURE_SEP))
+	if (static_cpu_has(X86_FEATURE_SEP))
 		tsk->thread.sysenter_cs = 0;
 
 	load_sp0(tss, &tsk->thread);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index cd83d47..3a4b39a 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1431,7 +1431,7 @@ static int __init intel_pstate_init(void)
 	if (!all_cpu_data)
 		return -ENOMEM;
 
-	if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp) {
+	if (static_cpu_has(X86_FEATURE_HWP) && !no_hwp) {
 		pr_info("intel_pstate: HWP enabled\n");
 		hwp_active++;
 	}
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index e99ccd6..87ce612 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -924,7 +924,7 @@ static int check_async_write(struct inode *inode, unsigned long bio_flags)
 	if (bio_flags & EXTENT_BIO_TREE_LOG)
 		return 0;
 #ifdef CONFIG_X86
-	if (static_cpu_has_safe(X86_FEATURE_XMM4_2))
+	if (static_cpu_has(X86_FEATURE_XMM4_2))
 		return 0;
 #endif
 	return 1;
-- 
1.8.5.6

SUSE Linux GmbH, GF: Felix Imendörffer, Jane Smithard, Graham Norton, HRB 21284 (AG Nürnberg)