* [for-next][PATCH 0/5] tracing: Final updates for this merge window
@ 2019-05-15  2:27 Steven Rostedt
  2019-05-15  2:27 ` [for-next][PATCH 1/5] tracing: Simplify "if" macro code Steven Rostedt
                   ` (4 more replies)
  0 siblings, 5 replies; 6+ messages in thread
From: Steven Rostedt @ 2019-05-15  2:27 UTC (permalink / raw)
  To: linux-kernel; +Cc: Ingo Molnar, Andrew Morton

  git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace.git
for-next

Head SHA1: 693713cbdb3a4bda5a8a678c31f06560bbb14657


Jiri Kosina (1):
      livepatch: Remove klp_check_compiler_support()

Linus Torvalds (1):
      tracing: Simplify "if" macro code

Steven Rostedt (VMware) (3):
      ftrace/x86_32: Remove support for non DYNAMIC_FTRACE
      ftrace/x86: Remove mcount support
      x86: Hide the int3_emulate_call/jmp functions from UML

----
 arch/powerpc/include/asm/livepatch.h |  5 ---
 arch/s390/include/asm/livepatch.h    |  5 ---
 arch/x86/Kconfig                     | 11 ++++++
 arch/x86/include/asm/ftrace.h        |  8 ++--
 arch/x86/include/asm/livepatch.h     |  8 ----
 arch/x86/include/asm/text-patching.h |  4 +-
 arch/x86/kernel/ftrace_32.S          | 75 +++---------------------------------
 arch/x86/kernel/ftrace_64.S          | 28 +-------------
 include/linux/compiler.h             | 35 +++++++++--------
 kernel/livepatch/core.c              |  8 ----
 10 files changed, 41 insertions(+), 146 deletions(-)


* [for-next][PATCH 1/5] tracing: Simplify "if" macro code
  2019-05-15  2:27 [for-next][PATCH 0/5] tracing: Final updates for this merge window Steven Rostedt
@ 2019-05-15  2:27 ` Steven Rostedt
  2019-05-15  2:27 ` [for-next][PATCH 2/5] ftrace/x86_32: Remove support for non DYNAMIC_FTRACE Steven Rostedt
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 6+ messages in thread
From: Steven Rostedt @ 2019-05-15  2:27 UTC (permalink / raw)
  To: linux-kernel
  Cc: Ingo Molnar, Andrew Morton, Peter Zijlstra (Intel),
	Josh Poimboeuf, Linus Torvalds

From: Linus Torvalds <torvalds@linux-foundation.org>

Peter Zijlstra noticed that with CONFIG_PROFILE_ALL_BRANCHES, the "if"
macro converts the conditional to an array index.  This can cause GCC
to create horrible code.  When there are nested ifs, the generated code
uses register values to encode branching decisions.

Josh Poimboeuf found that replacing the "if" macro's use of the
condition as an array index with an internal if statement that
increments the branch statistics reduced the asm complexity and
shrank the generated code quite a bit.

But this can be simplified even further by replacing the internal if
statement with a ternary operator.
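
For illustration, here is a minimal userspace sketch of the resulting
shape (hypothetical names, GNU C statement expressions, and no
__VA_ARGS__ handling; the real macro is in include/linux/compiler.h
behind CONFIG_PROFILE_ALL_BRANCHES):

  /* Hedged sketch, not the kernel macro: each use of if() gets its
   * own static counter pair, bumped by a ternary instead of an
   * internal if statement. */
  #include <stdio.h>

  struct branch_data { unsigned long miss_hit[2]; };

  #define if(cond) if (__builtin_constant_p(!!(cond)) ? !!(cond) : \
	({ \
		static struct branch_data __b; \
		!!(cond) ? (__b.miss_hit[1]++, 1) \
			 : (__b.miss_hit[0]++, 0); \
	}))

  int main(void)
  {
	for (int i = 0; i < 10; i++)
		if (i & 1)
			printf("odd: %d\n", i);
	return 0;
  }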

Link: https://lkml.kernel.org/r/20190307174802.46fmpysxyo35hh43@treble
Link: http://lkml.kernel.org/r/CAHk-=wiALN3jRuzARpwThN62iKd476Xj-uom+YnLZ4=eqcz7xQ@mail.gmail.com

Reported-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reported-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
---
 include/linux/compiler.h | 35 ++++++++++++++++++-----------------
 1 file changed, 18 insertions(+), 17 deletions(-)

diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 445348facea9..8aaf7cd026b0 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -53,23 +53,24 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
  * "Define 'is'", Bill Clinton
  * "Define 'if'", Steven Rostedt
  */
-#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
-#define __trace_if(cond) \
-	if (__builtin_constant_p(!!(cond)) ? !!(cond) :			\
-	({								\
-		int ______r;						\
-		static struct ftrace_branch_data			\
-			__aligned(4)					\
-			__section("_ftrace_branch")			\
-			______f = {					\
-				.func = __func__,			\
-				.file = __FILE__,			\
-				.line = __LINE__,			\
-			};						\
-		______r = !!(cond);					\
-		______f.miss_hit[______r]++;					\
-		______r;						\
-	}))
+#define if(cond, ...) if ( __trace_if_var( !!(cond , ## __VA_ARGS__) ) )
+
+#define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond))
+
+#define __trace_if_value(cond) ({			\
+	static struct ftrace_branch_data		\
+		__aligned(4)				\
+		__section("_ftrace_branch")		\
+		__if_trace = {				\
+			.func = __func__,		\
+			.file = __FILE__,		\
+			.line = __LINE__,		\
+		};					\
+	(cond) ?					\
+		(__if_trace.miss_hit[1]++,1) :		\
+		(__if_trace.miss_hit[0]++,0);		\
+})
+
 #endif /* CONFIG_PROFILE_ALL_BRANCHES */
 
 #else
-- 
2.20.1




* [for-next][PATCH 2/5] ftrace/x86_32: Remove support for non DYNAMIC_FTRACE
  2019-05-15  2:27 [for-next][PATCH 0/5] tracing: Final updates for this merge window Steven Rostedt
  2019-05-15  2:27 ` [for-next][PATCH 1/5] tracing: Simplify "if" macro code Steven Rostedt
@ 2019-05-15  2:27 ` Steven Rostedt
  2019-05-15  2:27 ` [for-next][PATCH 3/5] ftrace/x86: Remove mcount support Steven Rostedt
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 6+ messages in thread
From: Steven Rostedt @ 2019-05-15  2:27 UTC (permalink / raw)
  To: linux-kernel; +Cc: Ingo Molnar, Andrew Morton

From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>

When DYNAMIC_FTRACE is enabled in the kernel, all the functions that can be
traced by the function tracer have a "nop" placeholder at the start of the
function. When function tracing is enabled, the nop is converted into a call
to the tracing infrastructure, where the functions get traced. This
also allows individual functions to be selected for tracing, and a lot
of infrastructure is built on top of this.

When DYNAMIC_FTRACE is not enabled, all the functions have a call to the
ftrace trampoline. There, a check is made to see whether the trace
function pointer is ftrace_stub, and if it is not, the function pointer
is called to trace the code. This adds over 10% overhead to the kernel
even when tracing is disabled.
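
Schematically, the static scheme amounts to the following (a minimal C
sketch with illustrative names, not the actual kernel code):

  /* Sketch of the !DYNAMIC_FTRACE scheme: every traceable function
   * funnels through a hook that compares a function pointer against
   * a stub, so the check is paid on every call even when tracing is
   * disabled. */
  typedef void (*trace_fn)(unsigned long ip, unsigned long parent_ip);

  static void trace_stub(unsigned long ip, unsigned long parent_ip) { }

  static trace_fn trace_function = trace_stub;

  void trace_hook(unsigned long ip, unsigned long parent_ip)
  {
	if (trace_function != trace_stub)
		trace_function(ip, parent_ip);
  }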

When an architecture supports DYNAMIC_FTRACE, there really is no reason to
use the static tracing. I have kept non DYNAMIC_FTRACE available in x86 so
that the generic code for non DYNAMIC_FTRACE can be tested. There is no
reason to support non DYNAMIC_FTRACE for both x86_64 and x86_32. As the non
DYNAMIC_FTRACE for x86_32 does not even support fentry, and we want to
remove mcount completely, there's no reason to keep non DYNAMIC_FTRACE
around for x86_32.

Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
---
 arch/x86/Kconfig            | 11 +++++++++++
 arch/x86/kernel/ftrace_32.S | 39 -------------------------------------
 2 files changed, 11 insertions(+), 39 deletions(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 5ad92419be19..0544041ae3a2 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -31,6 +31,17 @@ config X86_64
 	select X86_DEV_DMA_OPS
 	select ARCH_HAS_SYSCALL_WRAPPER
 
+config FORCE_DYNAMIC_FTRACE
+	def_bool y
+	depends on X86_32
+	depends on FUNCTION_TRACER
+	select DYNAMIC_FTRACE
+	help
+	 We keep the static function tracing (!DYNAMIC_FTRACE) around
+	 in order to test the non static function tracing in the
+	 generic code, as other architectures still use it. But we
+	 only need to keep it around for x86_64. No need to keep it
+	 for x86_32. For x86_32, force DYNAMIC_FTRACE. 
 #
 # Arch settings
 #
diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S
index 4c8440de3355..459e6b4a19bc 100644
--- a/arch/x86/kernel/ftrace_32.S
+++ b/arch/x86/kernel/ftrace_32.S
@@ -18,8 +18,6 @@ EXPORT_SYMBOL(__fentry__)
 EXPORT_SYMBOL(mcount)
 #endif
 
-#ifdef CONFIG_DYNAMIC_FTRACE
-
 /* mcount uses a frame pointer even if CONFIG_FRAME_POINTER is not set */
 #if !defined(CC_USING_FENTRY) || defined(CONFIG_FRAME_POINTER)
 # define USING_FRAME_POINTER
@@ -170,43 +168,6 @@ GLOBAL(ftrace_regs_call)
 	lea	3*4(%esp), %esp			/* Skip orig_ax, ip and cs */
 
 	jmp	.Lftrace_ret
-#else /* ! CONFIG_DYNAMIC_FTRACE */
-
-ENTRY(function_hook)
-	cmpl	$__PAGE_OFFSET, %esp
-	jb	ftrace_stub			/* Paging not enabled yet? */
-
-	cmpl	$ftrace_stub, ftrace_trace_function
-	jnz	.Ltrace
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	cmpl	$ftrace_stub, ftrace_graph_return
-	jnz	ftrace_graph_caller
-
-	cmpl	$ftrace_graph_entry_stub, ftrace_graph_entry
-	jnz	ftrace_graph_caller
-#endif
-.globl ftrace_stub
-ftrace_stub:
-	ret
-
-	/* taken from glibc */
-.Ltrace:
-	pushl	%eax
-	pushl	%ecx
-	pushl	%edx
-	movl	0xc(%esp), %eax
-	movl	0x4(%ebp), %edx
-	subl	$MCOUNT_INSN_SIZE, %eax
-
-	movl	ftrace_trace_function, %ecx
-	CALL_NOSPEC %ecx
-
-	popl	%edx
-	popl	%ecx
-	popl	%eax
-	jmp	ftrace_stub
-END(function_hook)
-#endif /* CONFIG_DYNAMIC_FTRACE */
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 ENTRY(ftrace_graph_caller)
-- 
2.20.1




* [for-next][PATCH 3/5] ftrace/x86: Remove mcount support
  2019-05-15  2:27 [for-next][PATCH 0/5] tracing: Final updates for this merge window Steven Rostedt
  2019-05-15  2:27 ` [for-next][PATCH 1/5] tracing: Simplify "if" macro code Steven Rostedt
  2019-05-15  2:27 ` [for-next][PATCH 2/5] ftrace/x86_32: Remove support for non DYNAMIC_FTRACE Steven Rostedt
@ 2019-05-15  2:27 ` Steven Rostedt
  2019-05-15  2:27 ` [for-next][PATCH 4/5] livepatch: Remove klp_check_compiler_support() Steven Rostedt
  2019-05-15  2:27 ` [for-next][PATCH 5/5] x86: Hide the int3_emulate_call/jmp functions from UML Steven Rostedt
  4 siblings, 0 replies; 6+ messages in thread
From: Steven Rostedt @ 2019-05-15  2:27 UTC (permalink / raw)
  To: linux-kernel
  Cc: Ingo Molnar, Andrew Morton, Peter Zijlstra (Intel),
	Jiri Kosina, Josh Poimboeuf

From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>

There are two methods of enabling function tracing in Linux on x86. One is
with just "gcc -pg" and the other is "gcc -pg -mfentry". The former will use
calls to a special function "mcount" after the frame is set up in all C
functions. The latter will add calls to a special function called "fentry"
as the very first instruction of all C functions.
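
Schematically, the two call sites differ as below (a rough sketch of
typical 32-bit output, not verbatim compiler output):

  # gcc -pg (mcount): hook runs after the frame is set up
  func:
	push	%ebp
	mov	%esp, %ebp
	call	mcount
	...

  # gcc -pg -mfentry: hook is the very first instruction
  func:
	call	__fentry__
	push	%ebp
	mov	%esp, %ebp
	...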

At compile time, there is a check to see if gcc supports -mfentry, and
if it does, it will be used, because it is more versatile and less
error prone for function tracing.
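
Roughly, the probe follows the usual kbuild cc-option idiom (a sketch
of the Makefile logic, not the exact lines in the tree):

  # If the compiler accepts -mfentry, use it and let the source know
  # via CC_USING_FENTRY; otherwise fall back to plain -pg/mcount.
  CC_USING_FENTRY := $(call cc-option, -mfentry -DCC_USING_FENTRY)
  KBUILD_CFLAGS += -pg $(CC_USING_FENTRY)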

Starting with v4.19, the minimum gcc version supported to build the
Linux kernel was raised to 4.6. That also happens to be the first gcc
version to support -mfentry. Since gcc 4.6 and beyond will
unconditionally enable -mfentry on x86, mcount will no longer be used
as the method for inserting calls into the C functions of the kernel.
This means that there is no point in continuing to maintain mcount on
x86.

Remove support for using mcount. This makes the code less complex, and will
also allow it to be simplified in the future.

Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Jiri Kosina <jkosina@suse.cz>
Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
---
 arch/x86/include/asm/ftrace.h    |  8 +++----
 arch/x86/include/asm/livepatch.h |  3 ---
 arch/x86/kernel/ftrace_32.S      | 36 +++++---------------------------
 arch/x86/kernel/ftrace_64.S      | 28 +------------------------
 4 files changed, 9 insertions(+), 66 deletions(-)

diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index cf350639e76d..287f1f7b2e52 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -3,12 +3,10 @@
 #define _ASM_X86_FTRACE_H
 
 #ifdef CONFIG_FUNCTION_TRACER
-#ifdef CC_USING_FENTRY
-# define MCOUNT_ADDR		((unsigned long)(__fentry__))
-#else
-# define MCOUNT_ADDR		((unsigned long)(mcount))
-# define HAVE_FUNCTION_GRAPH_FP_TEST
+#ifndef CC_USING_FENTRY
+# error Compiler does not support fentry?
 #endif
+# define MCOUNT_ADDR		((unsigned long)(__fentry__))
 #define MCOUNT_INSN_SIZE	5 /* sizeof mcount call */
 
 #ifdef CONFIG_DYNAMIC_FTRACE
diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h
index ed80003ce3e2..2f2bdf0662f8 100644
--- a/arch/x86/include/asm/livepatch.h
+++ b/arch/x86/include/asm/livepatch.h
@@ -26,9 +26,6 @@
 
 static inline int klp_check_compiler_support(void)
 {
-#ifndef CC_USING_FENTRY
-	return 1;
-#endif
 	return 0;
 }
 
diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S
index 459e6b4a19bc..2ba914a34b06 100644
--- a/arch/x86/kernel/ftrace_32.S
+++ b/arch/x86/kernel/ftrace_32.S
@@ -10,20 +10,10 @@
 #include <asm/ftrace.h>
 #include <asm/nospec-branch.h>
 
-#ifdef CC_USING_FENTRY
 # define function_hook	__fentry__
 EXPORT_SYMBOL(__fentry__)
-#else
-# define function_hook	mcount
-EXPORT_SYMBOL(mcount)
-#endif
-
-/* mcount uses a frame pointer even if CONFIG_FRAME_POINTER is not set */
-#if !defined(CC_USING_FENTRY) || defined(CONFIG_FRAME_POINTER)
-# define USING_FRAME_POINTER
-#endif
 
-#ifdef USING_FRAME_POINTER
+#ifdef CONFIG_FRAME_POINTER
 # define MCOUNT_FRAME			1	/* using frame = true  */
 #else
 # define MCOUNT_FRAME			0	/* using frame = false */
@@ -35,8 +25,7 @@ END(function_hook)
 
 ENTRY(ftrace_caller)
 
-#ifdef USING_FRAME_POINTER
-# ifdef CC_USING_FENTRY
+#ifdef CONFIG_FRAME_POINTER
 	/*
 	 * Frame pointers are of ip followed by bp.
 	 * Since fentry is an immediate jump, we are left with
@@ -47,7 +36,7 @@ ENTRY(ftrace_caller)
 	pushl	%ebp
 	movl	%esp, %ebp
 	pushl	2*4(%esp)			/* function ip */
-# endif
+
 	/* For mcount, the function ip is directly above */
 	pushl	%ebp
 	movl	%esp, %ebp
@@ -57,7 +46,7 @@ ENTRY(ftrace_caller)
 	pushl	%edx
 	pushl	$0				/* Pass NULL as regs pointer */
 
-#ifdef USING_FRAME_POINTER
+#ifdef CONFIG_FRAME_POINTER
 	/* Load parent ebp into edx */
 	movl	4*4(%esp), %edx
 #else
@@ -80,13 +69,11 @@ ftrace_call:
 	popl	%edx
 	popl	%ecx
 	popl	%eax
-#ifdef USING_FRAME_POINTER
+#ifdef CONFIG_FRAME_POINTER
 	popl	%ebp
-# ifdef CC_USING_FENTRY
 	addl	$4,%esp				/* skip function ip */
 	popl	%ebp				/* this is the orig bp */
 	addl	$4, %esp			/* skip parent ip */
-# endif
 #endif
 .Lftrace_ret:
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -131,11 +118,7 @@ ENTRY(ftrace_regs_caller)
 
 	movl	12*4(%esp), %eax		/* Load ip (1st parameter) */
 	subl	$MCOUNT_INSN_SIZE, %eax		/* Adjust ip */
-#ifdef CC_USING_FENTRY
 	movl	15*4(%esp), %edx		/* Load parent ip (2nd parameter) */
-#else
-	movl	0x4(%ebp), %edx			/* Load parent ip (2nd parameter) */
-#endif
 	movl	function_trace_op, %ecx		/* Save ftrace_pos in 3rd parameter */
 	pushl	%esp				/* Save pt_regs as 4th parameter */
 
@@ -176,13 +159,8 @@ ENTRY(ftrace_graph_caller)
 	pushl	%edx
 	movl	3*4(%esp), %eax
 	/* Even with frame pointers, fentry doesn't have one here */
-#ifdef CC_USING_FENTRY
 	lea	4*4(%esp), %edx
 	movl	$0, %ecx
-#else
-	lea	0x4(%ebp), %edx
-	movl	(%ebp), %ecx
-#endif
 	subl	$MCOUNT_INSN_SIZE, %eax
 	call	prepare_ftrace_return
 	popl	%edx
@@ -195,11 +173,7 @@ END(ftrace_graph_caller)
 return_to_handler:
 	pushl	%eax
 	pushl	%edx
-#ifdef CC_USING_FENTRY
 	movl	$0, %eax
-#else
-	movl	%ebp, %eax
-#endif
 	call	ftrace_return_to_handler
 	movl	%eax, %ecx
 	popl	%edx
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index 75f2b36b41a6..10eb2760ef2c 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -13,22 +13,12 @@
 	.code64
 	.section .entry.text, "ax"
 
-#ifdef CC_USING_FENTRY
 # define function_hook	__fentry__
 EXPORT_SYMBOL(__fentry__)
-#else
-# define function_hook	mcount
-EXPORT_SYMBOL(mcount)
-#endif
 
 #ifdef CONFIG_FRAME_POINTER
-# ifdef CC_USING_FENTRY
 /* Save parent and function stack frames (rip and rbp) */
 #  define MCOUNT_FRAME_SIZE	(8+16*2)
-# else
-/* Save just function stack frame (rip and rbp) */
-#  define MCOUNT_FRAME_SIZE	(8+16)
-# endif
 #else
 /* No need to save a stack frame */
 # define MCOUNT_FRAME_SIZE	0
@@ -75,17 +65,13 @@ EXPORT_SYMBOL(mcount)
 	 * fentry is called before the stack frame is set up, where as mcount
 	 * is called afterward.
 	 */
-#ifdef CC_USING_FENTRY
+
 	/* Save the parent pointer (skip orig rbp and our return address) */
 	pushq \added+8*2(%rsp)
 	pushq %rbp
 	movq %rsp, %rbp
 	/* Save the return address (now skip orig rbp, rbp and parent) */
 	pushq \added+8*3(%rsp)
-#else
-	/* Can't assume that rip is before this (unless added was zero) */
-	pushq \added+8(%rsp)
-#endif
 	pushq %rbp
 	movq %rsp, %rbp
 #endif /* CONFIG_FRAME_POINTER */
@@ -113,12 +99,7 @@ EXPORT_SYMBOL(mcount)
 	movq %rdx, RBP(%rsp)
 
 	/* Copy the parent address into %rsi (second parameter) */
-#ifdef CC_USING_FENTRY
 	movq MCOUNT_REG_SIZE+8+\added(%rsp), %rsi
-#else
-	/* %rdx contains original %rbp */
-	movq 8(%rdx), %rsi
-#endif
 
 	 /* Move RIP to its proper location */
 	movq MCOUNT_REG_SIZE+\added(%rsp), %rdi
@@ -303,15 +284,8 @@ ENTRY(ftrace_graph_caller)
 	/* Saves rbp into %rdx and fills first parameter  */
 	save_mcount_regs
 
-#ifdef CC_USING_FENTRY
 	leaq MCOUNT_REG_SIZE+8(%rsp), %rsi
 	movq $0, %rdx	/* No framepointers needed */
-#else
-	/* Save address of the return address of traced function */
-	leaq 8(%rdx), %rsi
-	/* ftrace does sanity checks against frame pointers */
-	movq (%rdx), %rdx
-#endif
 	call	prepare_ftrace_return
 
 	restore_mcount_regs
-- 
2.20.1




* [for-next][PATCH 4/5] livepatch: Remove klp_check_compiler_support()
  2019-05-15  2:27 [for-next][PATCH 0/5] tracing: Final updates for this merge window Steven Rostedt
                   ` (2 preceding siblings ...)
  2019-05-15  2:27 ` [for-next][PATCH 3/5] ftrace/x86: Remove mcount support Steven Rostedt
@ 2019-05-15  2:27 ` Steven Rostedt
  2019-05-15  2:27 ` [for-next][PATCH 5/5] x86: Hide the int3_emulate_call/jmp functions from UML Steven Rostedt
  4 siblings, 0 replies; 6+ messages in thread
From: Steven Rostedt @ 2019-05-15  2:27 UTC (permalink / raw)
  To: linux-kernel; +Cc: Ingo Molnar, Andrew Morton, Linus Torvalds, Jiri Kosina

From: Jiri Kosina <jkosina@suse.cz>

The only purpose of klp_check_compiler_support() is to make sure that
we are not using ftrace on x86 via mcount (because mcount is executed
only after the function prologue has already run, and that is too late
for livepatching purposes).

Now that mcount is not supported by ftrace any more, there is no need for
klp_check_compiler_support() either.
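
To see why fentry matters here: livepatch redirects execution from its
ftrace handler by rewriting the saved instruction pointer, which only
works if the hook runs before the old function's prologue. A hedged
sketch of that pattern (modeled loosely on klp_ftrace_handler(), with
a hypothetical lookup_replacement() helper):

  #include <linux/ftrace.h>

  static void notrace my_klp_handler(unsigned long ip,
				     unsigned long parent_ip,
				     struct ftrace_ops *ops,
				     struct pt_regs *regs)
  {
	unsigned long new_func = lookup_replacement(ip); /* hypothetical */

	if (new_func)
		klp_arch_set_pc(regs, new_func); /* regs->ip = new_func on x86 */
  }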

Link: http://lkml.kernel.org/r/nycvar.YFH.7.76.1905102346100.17054@cbobk.fhfr.pm

Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
---
 arch/powerpc/include/asm/livepatch.h | 5 -----
 arch/s390/include/asm/livepatch.h    | 5 -----
 arch/x86/include/asm/livepatch.h     | 5 -----
 kernel/livepatch/core.c              | 8 --------
 4 files changed, 23 deletions(-)

diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h
index 5070df19d463..c005aee5ea43 100644
--- a/arch/powerpc/include/asm/livepatch.h
+++ b/arch/powerpc/include/asm/livepatch.h
@@ -24,11 +24,6 @@
 #include <linux/sched/task_stack.h>
 
 #ifdef CONFIG_LIVEPATCH
-static inline int klp_check_compiler_support(void)
-{
-	return 0;
-}
-
 static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
 {
 	regs->nip = ip;
diff --git a/arch/s390/include/asm/livepatch.h b/arch/s390/include/asm/livepatch.h
index 672f95b12d40..818612b784cd 100644
--- a/arch/s390/include/asm/livepatch.h
+++ b/arch/s390/include/asm/livepatch.h
@@ -13,11 +13,6 @@
 
 #include <asm/ptrace.h>
 
-static inline int klp_check_compiler_support(void)
-{
-	return 0;
-}
-
 static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
 {
 	regs->psw.addr = ip;
diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h
index 2f2bdf0662f8..a66f6706c2de 100644
--- a/arch/x86/include/asm/livepatch.h
+++ b/arch/x86/include/asm/livepatch.h
@@ -24,11 +24,6 @@
 #include <asm/setup.h>
 #include <linux/ftrace.h>
 
-static inline int klp_check_compiler_support(void)
-{
-	return 0;
-}
-
 static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
 {
 	regs->ip = ip;
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index eb0ee10a1981..112a36ed4a09 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -1220,14 +1220,6 @@ void klp_module_going(struct module *mod)
 
 static int __init klp_init(void)
 {
-	int ret;
-
-	ret = klp_check_compiler_support();
-	if (ret) {
-		pr_info("Your compiler is too old; turning off.\n");
-		return -EINVAL;
-	}
-
 	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
 	if (!klp_root_kobj)
 		return -ENOMEM;
-- 
2.20.1




* [for-next][PATCH 5/5] x86: Hide the int3_emulate_call/jmp functions from UML
  2019-05-15  2:27 [for-next][PATCH 0/5] tracing: Final updates for this merge window Steven Rostedt
                   ` (3 preceding siblings ...)
  2019-05-15  2:27 ` [for-next][PATCH 4/5] livepatch: Remove klp_check_compiler_support() Steven Rostedt
@ 2019-05-15  2:27 ` Steven Rostedt
  4 siblings, 0 replies; 6+ messages in thread
From: Steven Rostedt @ 2019-05-15  2:27 UTC (permalink / raw)
  To: linux-kernel; +Cc: Ingo Molnar, Andrew Morton, kbuild test robot

From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>

User Mode Linux does not have access to the ip or sp fields of the pt_regs,
and accessing them causes UML to fail to build. Hide the int3_emulate_jmp()
and int3_emulate_call() functions from UML, as it doesn't need them
anyway.
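
For context, these helpers only manipulate the ip and sp fields of
pt_regs; a sketch of all three (the push helper is not in this hunk
and is shown schematically):

  static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
  {
	regs->ip = ip;
  }

  static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val)
  {
	/* The int3 entry code leaves a gap so the stack can be extended */
	regs->sp -= sizeof(unsigned long);
	*(unsigned long *)regs->sp = val;
  }

  static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func)
  {
	int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
	int3_emulate_jmp(regs, func);
  }

UML's pt_regs wraps the host's register state and does not expose ip
or sp members directly, hence the build failure.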

Reported-by: kbuild test robot <lkp@intel.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
---
 arch/x86/include/asm/text-patching.h | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
index 05861cc08787..0bbb07eaed6b 100644
--- a/arch/x86/include/asm/text-patching.h
+++ b/arch/x86/include/asm/text-patching.h
@@ -39,6 +39,7 @@ extern int poke_int3_handler(struct pt_regs *regs);
 extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
 extern int after_bootmem;
 
+#ifndef CONFIG_UML_X86
 static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
 {
 	regs->ip = ip;
@@ -65,6 +66,7 @@ static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func)
 	int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
 	int3_emulate_jmp(regs, func);
 }
-#endif
+#endif /* CONFIG_X86_64 */
+#endif /* !CONFIG_UML_X86 */
 
 #endif /* _ASM_X86_TEXT_PATCHING_H */
-- 
2.20.1



