linux-kernel.vger.kernel.org archive mirror
* [PATCH 00/10] tip-queue 2016-01-26, rest
@ 2016-01-26 21:12 Borislav Petkov
  2016-01-26 21:12 ` [PATCH 01/10] x86/asm: Add condition codes clobber to memory barrier macros Borislav Petkov
                   ` (9 more replies)
  0 siblings, 10 replies; 66+ messages in thread
From: Borislav Petkov @ 2016-01-26 21:12 UTC (permalink / raw)
  To: Ingo Molnar; +Cc: LKML

From: Borislav Petkov <bp@suse.de>

Hi,

this is the rest of today's series. The main part is the cpufeature
cleanup. I merged the AVIC patch from tip:x86/cpu so that the rest
applies cleanly. The remaining patches are simplifications/cleanups.

Please apply,
thanks.

Alexander Kuleshov (1):
  x86/head_64: Simplify kernel load address alignment check

Borislav Petkov (5):
  x86/cpufeature: Carve out X86_FEATURE_*
  x86/cpufeature: Replace the old static_cpu_has() with safe variant
  x86/cpufeature: Get rid of the non-asm goto variant
  x86/alternatives: Add an auxilary section
  x86/vdso: Use static_cpu_has()

Brian Gerst (1):
  x86/alternatives: Discard dynamic check after init

Michael S. Tsirkin (3):
  x86/asm: Add condition codes clobber to memory barrier macros
  x86/asm: Drop a comment left over from X86_OOSTORE
  x86/asm: Tweak the comment about wmb() use for IO

* [PATCH] x86/head_64.S: do not use temporary register to check alignment
@ 2016-01-23  6:50 Alexander Kuleshov
  2016-01-26  9:31 ` Borislav Petkov
  0 siblings, 1 reply; 66+ messages in thread
From: Alexander Kuleshov @ 2016-01-23  6:50 UTC (permalink / raw)
  To: Thomas Gleixner
  Cc: Ingo Molnar, H. Peter Anvin, Andy Lutomirski, Borislav Petkov,
	Denys Vlasenko, Andrey Ryabinin, x86, linux-kernel,
	Alexander Kuleshov

We are using a temporary %rax register while checking the kernel load
address alignment. We can get rid of it since the testl instruction only
sets the flags and does not change the value of the %rbp register.

Signed-off-by: Alexander Kuleshov <kuleshovmail@gmail.com>
Suggested-by: Brian Gerst <brgerst@gmail.com>
---
 arch/x86/kernel/head_64.S | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index ffdc0e8..7c21029 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -76,9 +76,7 @@ startup_64:
 	subq	$_text - __START_KERNEL_map, %rbp
 
 	/* Is the address not 2M aligned? */
-	movq	%rbp, %rax
-	andl	$~PMD_PAGE_MASK, %eax
-	testl	%eax, %eax
+	testl	$~PMD_PAGE_MASK, %ebp
 	jnz	bad_address
 
 	/*
-- 
2.7.0.25.gfc10eb5
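
For readers who want to check the transformation in isolation, here is a
minimal user-space C sketch, not kernel code, of the before/after logic. The
misaligned_old()/misaligned_new() helpers are made-up names for illustration;
they mirror the old movq/andl/testl sequence and the new single testl, which
leaves the source register untouched because it only sets the flags. The 2M
value is the x86-64 PMD page size assumed by head_64.S.

#include <stdint.h>
#include <stdio.h>

#define PMD_PAGE_SIZE	(1UL << 21)		/* 2M PMD page size on x86-64 */
#define PMD_PAGE_MASK	(~(PMD_PAGE_SIZE - 1))

/* Old sequence: copy the address, mask the copy, test the copy.
 * Corresponds to: movq %rbp,%rax; andl $~PMD_PAGE_MASK,%eax; testl %eax,%eax */
static int misaligned_old(uint64_t addr)
{
	uint64_t tmp = addr;
	tmp &= ~PMD_PAGE_MASK;
	return tmp != 0;
}

/* New sequence: test the low bits in place.  testl only updates the flags
 * and discards the AND result, so %rbp keeps its value in head_64.S.
 * Corresponds to: testl $~PMD_PAGE_MASK,%ebp */
static int misaligned_new(uint64_t addr)
{
	return (addr & ~PMD_PAGE_MASK) != 0;
}

int main(void)
{
	printf("%d %d\n", misaligned_old(0x1200000), misaligned_new(0x1200000));	/* 0 0 */
	printf("%d %d\n", misaligned_old(0x1234567), misaligned_new(0x1234567));	/* 1 1 */
	return 0;
}

Both helpers agree for every address, which is the whole point: dropping the
scratch register changes no behaviour, it only shrinks the code.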

* [PATCH] x86: static_cpu_has_safe: discard dynamic check after init
@ 2016-01-16 19:22 Brian Gerst
  2016-01-16 19:36 ` Borislav Petkov
  0 siblings, 1 reply; 66+ messages in thread
From: Brian Gerst @ 2016-01-16 19:22 UTC (permalink / raw)
  To: x86, linux-kernel
  Cc: Ingo Molnar, H. Peter Anvin, Denys Vlasenko, Andy Lutomirski,
	Linus Torvalds, Borislav Petkov

Move the code that does the dynamic check into the init text section so that
it is discarded after alternatives have run and a static branch has been
chosen.

A new section is defined to avoid modpost warnings about references from main
text to init text, which in this case are legitimate.  All such references
are patched out before init memory is discarded.

Signed-off-by: Brian Gerst <brgerst@gmail.com>
---
 arch/x86/include/asm/cpufeature.h | 33 ++++++++++++++++++++++-----------
 arch/x86/kernel/cpu/common.c      |  6 ------
 arch/x86/kernel/vmlinux.lds.S     |  6 ++++++
 3 files changed, 28 insertions(+), 17 deletions(-)

diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 7ad8c94..2efbd83 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -412,7 +412,6 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
 
 #if __GNUC__ >= 4 && defined(CONFIG_X86_FAST_FEATURE_TESTS)
 extern void warn_pre_alternatives(void);
-extern bool __static_cpu_has_safe(u16 bit);
 
 /*
  * Static testing of CPU features.  Used the same as boot_cpu_has().
@@ -502,10 +501,10 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
 		boot_cpu_has(bit)				\
 )
 
-static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
+static __always_inline __pure bool _static_cpu_has_safe(u16 bit, __u32 *caps)
 {
 #ifdef CC_HAVE_ASM_GOTO
-		asm_volatile_goto("1: jmp %l[t_dynamic]\n"
+		asm_volatile_goto("1: jmp 6f\n"
 			 "2:\n"
 			 ".skip -(((5f-4f) - (2b-1b)) > 0) * "
 			         "((5f-4f) - (2b-1b)),0x90\n"
@@ -530,17 +529,22 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
 			 " .byte 0\n"			/* repl len */
 			 " .byte 0\n"			/* pad len */
 			 ".previous\n"
-			 : : "i" (bit), "i" (X86_FEATURE_ALWAYS)
-			 : : t_dynamic, t_no);
+			 ".section .static_cpu_has,\"ax\"\n"
+			 "6: testl %2,%3\n"
+			 "   jnz %l[t_yes]\n"
+			 "   jmp %l[t_no]\n"
+			 ".previous\n"
+			 : : "i" (bit), "i" (X86_FEATURE_ALWAYS),
+			     "i" (1 << (bit & 31)), "m" (caps[bit/32])
+			 : : t_yes, t_no);
+	t_yes:
 		return true;
 	t_no:
 		return false;
-	t_dynamic:
-		return __static_cpu_has_safe(bit);
 #else
 		u8 flag;
 		/* Open-coded due to __stringify() in ALTERNATIVE() */
-		asm volatile("1: movb $2,%0\n"
+		asm volatile("1: jmp 7f\n"
 			     "2:\n"
 			     ".section .altinstructions,\"a\"\n"
 			     " .long 1b - .\n"		/* src offset */
@@ -572,9 +576,15 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
 			     "5: movb $1,%0\n"
 			     "6:\n"
 			     ".previous\n"
+			     ".section .static_cpu_has,\"ax\"\n"
+			     "7: testl %3,%4\n"
+			     "   setnz %0\n"
+			     "   jmp 2b\n"
+			     ".previous\n"
 			     : "=qm" (flag)
-			     : "i" (bit), "i" (X86_FEATURE_ALWAYS));
-		return (flag == 2 ? __static_cpu_has_safe(bit) : flag);
+			     : "i" (bit), "i" (X86_FEATURE_ALWAYS),
+			       "i" (1 << (bit & 31)), "m" (caps[bit/32]));
+		return (flag != 0);
 #endif /* CC_HAVE_ASM_GOTO */
 }
 
@@ -582,7 +592,8 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
 (								\
 	__builtin_constant_p(boot_cpu_has(bit)) ?		\
 		boot_cpu_has(bit) :				\
-		_static_cpu_has_safe(bit)			\
+		_static_cpu_has_safe(bit,			\
+			 &boot_cpu_data.x86_capability[0])	\
 )
 #else
 /*
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 37830de..897c65b 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1483,12 +1483,6 @@ void warn_pre_alternatives(void)
 EXPORT_SYMBOL_GPL(warn_pre_alternatives);
 #endif
 
-inline bool __static_cpu_has_safe(u16 bit)
-{
-	return boot_cpu_has(bit);
-}
-EXPORT_SYMBOL_GPL(__static_cpu_has_safe);
-
 static void bsp_resume(void)
 {
 	if (this_cpu->c_bsp_resume)
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 4f19942..4df1467 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -195,6 +195,12 @@ SECTIONS
 	:init
 #endif
 
+	.static_cpu_has : AT(ADDR(.static_cpu_has) - LOAD_OFFSET) {
+		__static_cpu_has_start = .;
+		*(.static_cpu_has)
+		__static_cpu_has_end = .;
+	}
+
 	INIT_DATA_SECTION(16)
 
 	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
-- 
2.5.0
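
As a plain-C illustration (not the kernel code) of what the out-of-line
fallback at labels 6:/7: computes until alternatives patch the entry jump
away: test bit 'bit' of the boot CPU's capability bitmap, i.e. word bit/32
against the mask 1 << (bit & 31), which is what the "i" (1 << (bit & 31)) and
"m" (caps[bit/32]) asm operands encode. The caps[] array, its size and the
dynamic_cpu_has() helper below are made up for this sketch; in the kernel the
data is boot_cpu_data.x86_capability[].

#include <stdint.h>
#include <stdio.h>

#define NCAPWORDS 16			/* illustrative word count only */
static uint32_t caps[NCAPWORDS];	/* stand-in for boot_cpu_data.x86_capability[] */

/* The dynamic feature test: pick the 32-bit word, check the bit within it. */
static int dynamic_cpu_has(uint16_t bit)
{
	return (caps[bit / 32] & (1u << (bit & 31))) != 0;
}

int main(void)
{
	caps[1] = 1u << 3;	/* pretend feature bit 35 (word 1, bit 3) is set */
	printf("bit 35: %d, bit 36: %d\n", dynamic_cpu_has(35), dynamic_cpu_has(36));
	return 0;
}

Once apply_alternatives() has chosen the static branch, the jump into the new
.static_cpu_has section is never taken again, which is why, per the changelog
above, that code can be thrown away together with the rest of init memory.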


end of thread, other threads:[~2016-01-30 13:22 UTC | newest]

Thread overview: 66+ messages
-- links below jump to the message on this page --
2016-01-26 21:12 [PATCH 00/10] tip-queue 2016-01-26, rest Borislav Petkov
2016-01-26 21:12 ` [PATCH 01/10] x86/asm: Add condition codes clobber to memory barrier macros Borislav Petkov
2016-01-26 21:12 ` [PATCH 02/10] x86/asm: Drop a comment left over from X86_OOSTORE Borislav Petkov
2016-01-26 21:12 ` [PATCH 03/10] x86/asm: Tweak the comment about wmb() use for IO Borislav Petkov
2016-01-26 21:12 ` [PATCH 04/10] x86/cpufeature: Carve out X86_FEATURE_* Borislav Petkov
2016-01-30 13:18   ` [tip:x86/asm] " tip-bot for Borislav Petkov
2016-01-26 21:12 ` [PATCH 05/10] x86/cpufeature: Replace the old static_cpu_has() with safe variant Borislav Petkov
2016-01-30 13:19   ` [tip:x86/asm] " tip-bot for Borislav Petkov
2016-01-26 21:12 ` [PATCH 06/10] x86/cpufeature: Get rid of the non-asm goto variant Borislav Petkov
2016-01-27  3:36   ` Brian Gerst
2016-01-27  8:41     ` Borislav Petkov
2016-01-27  8:43       ` [PATCH -v1.1 " Borislav Petkov
2016-01-30 13:19         ` [tip:x86/asm] " tip-bot for Borislav Petkov
2016-01-27  8:45       ` [PATCH -v1.1 8/10] x86/alternatives: Discard dynamic check after init Borislav Petkov
2016-01-30 13:20         ` [tip:x86/asm] " tip-bot for Brian Gerst
2016-01-26 21:12 ` [PATCH 07/10] x86/alternatives: Add an auxilary section Borislav Petkov
2016-01-30 13:19   ` [tip:x86/asm] " tip-bot for Borislav Petkov
2016-01-26 21:12 ` [PATCH 08/10] x86/alternatives: Discard dynamic check after init Borislav Petkov
2016-01-26 21:12 ` [PATCH 09/10] x86/vdso: Use static_cpu_has() Borislav Petkov
2016-01-30 13:20   ` [tip:x86/asm] " tip-bot for Borislav Petkov
2016-01-26 21:12 ` [PATCH 10/10] x86/head_64: Simplify kernel load address alignment check Borislav Petkov
2016-01-30 13:20   ` [tip:x86/boot] x86/boot: " tip-bot for Alexander Kuleshov
  -- strict thread matches above, loose matches on Subject: below --
2016-01-23  6:50 [PATCH] x86/head_64.S: do not use temporary register to check alignment Alexander Kuleshov
2016-01-26  9:31 ` Borislav Petkov
2016-01-16 19:22 [PATCH] x86: static_cpu_has_safe: discard dynamic check after init Brian Gerst
2016-01-16 19:36 ` Borislav Petkov
2016-01-16 19:58   ` Brian Gerst
2016-01-17 10:33     ` Borislav Petkov
2016-01-18 16:52       ` Brian Gerst
2016-01-18 17:49         ` Andy Lutomirski
2016-01-18 18:14         ` Borislav Petkov
2016-01-18 18:29           ` Andy Lutomirski
2016-01-18 18:39             ` Borislav Petkov
2016-01-18 19:45               ` H. Peter Anvin
2016-01-18 23:05                 ` Borislav Petkov
2016-01-18 23:13                   ` H. Peter Anvin
2016-01-18 23:25                     ` Borislav Petkov
2016-01-19 13:57                       ` Borislav Petkov
2016-01-19 16:23                         ` Borislav Petkov
2016-01-19 23:10                         ` Borislav Petkov
2016-01-19 23:26                           ` Andy Lutomirski
2016-01-19 23:49                             ` Boris Petkov
2016-01-20  4:03                         ` H. Peter Anvin
2016-01-20 10:33                           ` Borislav Petkov
2016-01-20 10:41                             ` H. Peter Anvin
2016-01-21 22:14                               ` Borislav Petkov
2016-01-21 22:22                                 ` H. Peter Anvin
2016-01-21 22:56                                   ` Borislav Petkov
2016-01-21 23:36                                     ` H. Peter Anvin
2016-01-21 23:37                                     ` H. Peter Anvin
2016-01-22 10:32                                       ` Borislav Petkov
2016-01-18 18:51           ` Borislav Petkov
2016-01-19  1:10             ` Borislav Petkov
2016-01-19  1:33               ` H. Peter Anvin
2016-01-19  9:22                 ` Borislav Petkov
2016-01-20  4:02                   ` H. Peter Anvin
2016-01-20  4:39                     ` Brian Gerst
2016-01-20  4:42                       ` H. Peter Anvin
2016-01-20 10:50                         ` Borislav Petkov
2016-01-20 10:55                           ` H. Peter Anvin
2016-01-20 11:05                             ` Borislav Petkov
2016-01-20 14:48                               ` H. Peter Anvin
2016-01-20 15:01                     ` Borislav Petkov
2016-01-20 15:09                       ` H. Peter Anvin
2016-01-20 16:04                         ` Borislav Petkov
2016-01-20 16:16                           ` H. Peter Anvin
