linux-kernel.vger.kernel.org archive mirror
* [RFC][PATCH 0/9 v2] ftrace/x86: Clean up of mcount.S code
@ 2014-11-25 11:50 Steven Rostedt
  2014-11-25 11:50 ` [RFC][PATCH 1/9 v2] ftrace/x86: Have static tracing also use ftrace_caller_setup Steven Rostedt
                   ` (11 more replies)
  0 siblings, 12 replies; 14+ messages in thread
From: Steven Rostedt @ 2014-11-25 11:50 UTC (permalink / raw)
  To: linux-kernel
  Cc: Linus Torvalds, Ingo Molnar, Andrew Morton, H. Peter Anvin,
	Thomas Gleixner, Masami Hiramatsu


Linus,


I ran these through my test suite and they passed. Are these changes
fine for you? If so, I'll include them in my 3.19 queue.

I still think the "band-aid patch" should go into 3.18 now, as it does
fix a real bug, and is small and less likely to cause any regressions than
this patch series might, which also makes it a candidate for stable.

The rolled-up patch is below as well, and you can get the full code
from my git tree.

Thanks,

-- Steve

  git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace.git
rfc/mcount-update

Head SHA1: dcdf7dedfa5d4efd22306b0463469ef35fac8d27


Steven Rostedt (Red Hat) (9):
      ftrace/x86: Have static tracing also use ftrace_caller_setup
      ftrace/x86: Move MCOUNT_SAVE_FRAME out of header file
      ftrace/x86: Rename MCOUNT_SAVE_FRAME and add more detailed comments
      ftrace/x86: Have save_mcount_regs store RIP in %rdi for first parameter
      ftrace/x86: Simplify save_mcount_regs on getting RIP
      ftrace/x86: Add macro MCOUNT_REG_SIZE for amount of stack used to save mcount regs
      ftrace/x86: Have save_mcount_regs macro also save stack frames if needed
      ftrace/x86: Get rid of ftrace_caller_setup
      ftrace/fgraph/x86: Have prepare_ftrace_return() take ip as first parameter

----
 arch/x86/include/asm/ftrace.h |  33 ------
 arch/x86/kernel/ftrace.c      |   4 +-
 arch/x86/kernel/mcount_64.S   | 233 ++++++++++++++++++++++++++----------------
 3 files changed, 149 insertions(+), 121 deletions(-)
---------------------------
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index e1f7fecaa7d6..f45acad3c4b6 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -1,39 +1,6 @@
 #ifndef _ASM_X86_FTRACE_H
 #define _ASM_X86_FTRACE_H
 
-#ifdef __ASSEMBLY__
-
-	/* skip is set if the stack was already partially adjusted */
-	.macro MCOUNT_SAVE_FRAME skip=0
-	 /*
-	  * We add enough stack to save all regs.
-	  */
-	subq $(SS+8-\skip), %rsp
-	movq %rax, RAX(%rsp)
-	movq %rcx, RCX(%rsp)
-	movq %rdx, RDX(%rsp)
-	movq %rsi, RSI(%rsp)
-	movq %rdi, RDI(%rsp)
-	movq %r8, R8(%rsp)
-	movq %r9, R9(%rsp)
-	 /* Move RIP to its proper location */
-	movq SS+8(%rsp), %rdx
-	movq %rdx, RIP(%rsp)
-	.endm
-
-	.macro MCOUNT_RESTORE_FRAME skip=0
-	movq R9(%rsp), %r9
-	movq R8(%rsp), %r8
-	movq RDI(%rsp), %rdi
-	movq RSI(%rsp), %rsi
-	movq RDX(%rsp), %rdx
-	movq RCX(%rsp), %rcx
-	movq RAX(%rsp), %rax
-	addq $(SS+8-\skip), %rsp
-	.endm
-
-#endif
-
 #ifdef CONFIG_FUNCTION_TRACER
 #ifdef CC_USING_FENTRY
 # define MCOUNT_ADDR		((long)(__fentry__))
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 60881d919432..2142376dc8c6 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -871,7 +871,7 @@ static void *addr_from_call(void *ptr)
 	return ptr + MCOUNT_INSN_SIZE + calc.offset;
 }
 
-void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
 			   unsigned long frame_pointer);
 
 /*
@@ -964,7 +964,7 @@ int ftrace_disable_ftrace_graph_caller(void)
  * Hook the return address and push it in the stack of return addrs
  * in current thread info.
  */
-void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
 			   unsigned long frame_pointer)
 {
 	unsigned long old;
diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
index 6dc134b8dc70..94ea120fa21f 100644
--- a/arch/x86/kernel/mcount_64.S
+++ b/arch/x86/kernel/mcount_64.S
@@ -21,78 +21,151 @@
 # define function_hook	mcount
 #endif
 
-#ifdef CONFIG_DYNAMIC_FTRACE
+/* All cases save the original rbp (8 bytes) */
+#ifdef CONFIG_FRAME_POINTER
+# ifdef CC_USING_FENTRY
+/* Save parent and function stack frames (rip and rbp) */
+#  define MCOUNT_FRAME_SIZE	(8+16*2)
+# else
+/* Save just function stack frame (rip and rbp) */
+#  define MCOUNT_FRAME_SIZE	(8+16)
+# endif
+#else
+/* No need to save a stack frame */
+# define MCOUNT_FRAME_SIZE	8
+#endif /* CONFIG_FRAME_POINTER */
 
-ENTRY(function_hook)
-	retq
-END(function_hook)
+/* Size of stack used to save mcount regs in save_mcount_regs */
+#define MCOUNT_REG_SIZE		(SS+8 + MCOUNT_FRAME_SIZE)
 
-/* skip is set if stack has been adjusted */
-.macro ftrace_caller_setup trace_label skip=0
-	MCOUNT_SAVE_FRAME \skip
+/*
+ * gcc -pg option adds a call to 'mcount' in most functions.
+ * When -mfentry is used, the call is to 'fentry' and not 'mcount'
+ * and is done before the function's stack frame is set up.
+ * They both require a set of regs to be saved before calling
+ * any C code and restored before returning back to the function.
+ *
+ * On boot up, all these calls are converted into nops. When tracing
+ * is enabled, the call can jump to either ftrace_caller or
+ * ftrace_regs_caller. Callbacks (tracing functions) that require
+ * ftrace_regs_caller (like kprobes) need to have pt_regs passed to
+ * it. For this reason, the size of the pt_regs structure will be
+ * allocated on the stack and the required mcount registers will
+ * be saved in the locations that pt_regs has them in.
+ */
 
-	/* Save this location */
-GLOBAL(\trace_label)
-	/* Load the ftrace_ops into the 3rd parameter */
-	movq function_trace_op(%rip), %rdx
+/*
+ * @added: the amount of stack added before calling this
+ *
+ * After this is called, the following registers contain:
+ *
+ *  %rdi - holds the address that called the trampoline
+ *  %rsi - holds the parent function (traced function's return address)
+ *  %rdx - holds the original %rbp
+ */
+.macro save_mcount_regs added=0
 
-	/* Load ip into the first parameter */
-	movq RIP(%rsp), %rdi
-	subq $MCOUNT_INSN_SIZE, %rdi
-	/* Load the parent_ip into the second parameter */
-#ifdef CC_USING_FENTRY
-	movq SS+16(%rsp), %rsi
-#else
-	movq 8(%rbp), %rsi
-#endif
-.endm
+	/* Always save the original rbp */
+	pushq %rbp
 
 #ifdef CONFIG_FRAME_POINTER
-/*
- * Stack traces will stop at the ftrace trampoline if the frame pointer
- * is not set up properly. If fentry is used, we need to save a frame
- * pointer for the parent as well as the function traced, because the
- * fentry is called before the stack frame is set up, where as mcount
- * is called afterward.
- */
-.macro create_frame parent rip
+	/*
+	 * Stack traces will stop at the ftrace trampoline if the frame pointer
+	 * is not set up properly. If fentry is used, we need to save a frame
+	 * pointer for the parent as well as the function traced, because the
+	 * fentry is called before the stack frame is set up, where as mcount
+	 * is called afterward.
+	 */
 #ifdef CC_USING_FENTRY
-	pushq \parent
+	/* Save the parent pointer (skip orig rbp and our return address) */
+	pushq \added+8*2(%rsp)
 	pushq %rbp
 	movq %rsp, %rbp
+	/* Save the return address (now skip orig rbp, rbp and parent) */
+	pushq \added+8*3(%rsp)
+#else
+	/* Can't assume that rip is before this (unless added was zero) */
+	pushq \added+8(%rsp)
 #endif
-	pushq \rip
 	pushq %rbp
 	movq %rsp, %rbp
-.endm
+#endif /* CONFIG_FRAME_POINTER */
 
-.macro restore_frame
+	/*
+	 * We add enough stack to save all regs.
+	 */
+	subq $(MCOUNT_REG_SIZE - MCOUNT_FRAME_SIZE), %rsp
+	movq %rax, RAX(%rsp)
+	movq %rcx, RCX(%rsp)
+	movq %rdx, RDX(%rsp)
+	movq %rsi, RSI(%rsp)
+	movq %rdi, RDI(%rsp)
+	movq %r8, R8(%rsp)
+	movq %r9, R9(%rsp)
+	/*
+	 * Save the original RBP. Even though the mcount ABI does not
+	 * require this, it helps out callers.
+	 */
+	movq MCOUNT_REG_SIZE-8(%rsp), %rdx
+	movq %rdx, RBP(%rsp)
+
+	/* Copy the parent address into %rsi (second parameter) */
 #ifdef CC_USING_FENTRY
-	addq $16, %rsp
-#endif
-	popq %rbp
-	addq $8, %rsp
-.endm
+	movq MCOUNT_REG_SIZE+8+\added(%rsp), %rsi
 #else
-.macro create_frame parent rip
-.endm
-.macro restore_frame
-.endm
-#endif /* CONFIG_FRAME_POINTER */
+	/* %rdx contains original %rbp */
+	movq 8(%rdx), %rsi
+#endif
+
+	 /* Move RIP to its proper location */
+	movq MCOUNT_REG_SIZE+\added(%rsp), %rdi
+	movq %rdi, RIP(%rsp)
+
+	/*
+	 * Now %rdi (the first parameter) has the return address of
+	 * where ftrace_call returns. But the callbacks expect the
+	 * address of the call itself.
+	 */
+	subq $MCOUNT_INSN_SIZE, %rdi
+	.endm
+
+.macro restore_mcount_regs
+	movq R9(%rsp), %r9
+	movq R8(%rsp), %r8
+	movq RDI(%rsp), %rdi
+	movq RSI(%rsp), %rsi
+	movq RDX(%rsp), %rdx
+	movq RCX(%rsp), %rcx
+	movq RAX(%rsp), %rax
+
+	/* ftrace_regs_caller can modify %rbp */
+	movq RBP(%rsp), %rbp
+
+	addq $MCOUNT_REG_SIZE, %rsp
+
+	.endm
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+ENTRY(function_hook)
+	retq
+END(function_hook)
 
 ENTRY(ftrace_caller)
-	ftrace_caller_setup ftrace_caller_op_ptr
+	/* save_mcount_regs fills in first two parameters */
+	save_mcount_regs
+
+GLOBAL(ftrace_caller_op_ptr)
+	/* Load the ftrace_ops into the 3rd parameter */
+	movq function_trace_op(%rip), %rdx
+
 	/* regs go into 4th parameter (but make it NULL) */
 	movq $0, %rcx
 
-	create_frame %rsi, %rdi
-
 GLOBAL(ftrace_call)
 	call ftrace_stub
 
-	restore_frame
-
-	MCOUNT_RESTORE_FRAME
+	restore_mcount_regs
 
 	/*
 	 * The copied trampoline must call ftrace_return as it
@@ -112,11 +185,16 @@ GLOBAL(ftrace_stub)
 END(ftrace_caller)
 
 ENTRY(ftrace_regs_caller)
-	/* Save the current flags before compare (in SS location)*/
+	/* Save the current flags before any operations that can change them */
 	pushfq
 
-	/* skip=8 to skip flags saved in SS */
-	ftrace_caller_setup ftrace_regs_caller_op_ptr 8
+	/* added 8 bytes to save flags */
+	save_mcount_regs 8
+	/* save_mcount_regs fills in first two parameters */
+
+GLOBAL(ftrace_regs_caller_op_ptr)
+	/* Load the ftrace_ops into the 3rd parameter */
+	movq function_trace_op(%rip), %rdx
 
 	/* Save the rest of pt_regs */
 	movq %r15, R15(%rsp)
@@ -125,37 +203,32 @@ ENTRY(ftrace_regs_caller)
 	movq %r12, R12(%rsp)
 	movq %r11, R11(%rsp)
 	movq %r10, R10(%rsp)
-	movq %rbp, RBP(%rsp)
 	movq %rbx, RBX(%rsp)
 	/* Copy saved flags */
-	movq SS(%rsp), %rcx
+	movq MCOUNT_REG_SIZE(%rsp), %rcx
 	movq %rcx, EFLAGS(%rsp)
 	/* Kernel segments */
 	movq $__KERNEL_DS, %rcx
 	movq %rcx, SS(%rsp)
 	movq $__KERNEL_CS, %rcx
 	movq %rcx, CS(%rsp)
-	/* Stack - skipping return address */
-	leaq SS+16(%rsp), %rcx
+	/* Stack - skipping return address and flags */
+	leaq MCOUNT_REG_SIZE+8*2(%rsp), %rcx
 	movq %rcx, RSP(%rsp)
 
 	/* regs go into 4th parameter */
 	leaq (%rsp), %rcx
 
-	create_frame %rsi, %rdi
-
 GLOBAL(ftrace_regs_call)
 	call ftrace_stub
 
-	restore_frame
-
 	/* Copy flags back to SS, to restore them */
 	movq EFLAGS(%rsp), %rax
-	movq %rax, SS(%rsp)
+	movq %rax, MCOUNT_REG_SIZE(%rsp)
 
 	/* Handlers can change the RIP */
 	movq RIP(%rsp), %rax
-	movq %rax, SS+8(%rsp)
+	movq %rax, MCOUNT_REG_SIZE+8(%rsp)
 
 	/* restore the rest of pt_regs */
 	movq R15(%rsp), %r15
@@ -163,11 +236,9 @@ GLOBAL(ftrace_regs_call)
 	movq R13(%rsp), %r13
 	movq R12(%rsp), %r12
 	movq R10(%rsp), %r10
-	movq RBP(%rsp), %rbp
 	movq RBX(%rsp), %rbx
 
-	/* skip=8 to skip flags saved in SS */
-	MCOUNT_RESTORE_FRAME 8
+	restore_mcount_regs
 
 	/* Restore flags */
 	popfq
@@ -182,9 +253,6 @@ GLOBAL(ftrace_regs_caller_end)
 
 	jmp ftrace_return
 
-	popfq
-	jmp  ftrace_stub
-
 END(ftrace_regs_caller)
 
 
@@ -207,19 +275,12 @@ GLOBAL(ftrace_stub)
 	retq
 
 trace:
-	MCOUNT_SAVE_FRAME
-
-	movq RIP(%rsp), %rdi
-#ifdef CC_USING_FENTRY
-	movq SS+16(%rsp), %rsi
-#else
-	movq 8(%rbp), %rsi
-#endif
-	subq $MCOUNT_INSN_SIZE, %rdi
+	/* save_mcount_regs fills in first two parameters */
+	save_mcount_regs
 
 	call   *ftrace_trace_function
 
-	MCOUNT_RESTORE_FRAME
+	restore_mcount_regs
 
 	jmp fgraph_trace
 END(function_hook)
@@ -228,21 +289,21 @@ END(function_hook)
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 ENTRY(ftrace_graph_caller)
-	MCOUNT_SAVE_FRAME
+	/* Saves rbp into %rdx and fills first parameter  */
+	save_mcount_regs
 
 #ifdef CC_USING_FENTRY
-	leaq SS+16(%rsp), %rdi
+	leaq MCOUNT_REG_SIZE+8(%rsp), %rsi
 	movq $0, %rdx	/* No framepointers needed */
 #else
-	leaq 8(%rbp), %rdi
-	movq (%rbp), %rdx
+	/* Save address of the return address of traced function */
+	leaq 8(%rdx), %rsi
+	/* ftrace does sanity checks against frame pointers */
+	movq (%rdx), %rdx
 #endif
-	movq RIP(%rsp), %rsi
-	subq $MCOUNT_INSN_SIZE, %rsi
-
 	call	prepare_ftrace_return
 
-	MCOUNT_RESTORE_FRAME
+	restore_mcount_regs
 
 	retq
 END(ftrace_graph_caller)


* [RFC][PATCH 1/9 v2] ftrace/x86: Have static tracing also use ftrace_caller_setup
  2014-11-25 11:50 [RFC][PATCH 0/9 v2] ftrace/x86: Clean up of mcount.S code Steven Rostedt
@ 2014-11-25 11:50 ` Steven Rostedt
  2014-11-25 11:50 ` [RFC][PATCH 2/9 v2] ftrace/x86: Move MCOUNT_SAVE_FRAME out of header file Steven Rostedt
                   ` (10 subsequent siblings)
  11 siblings, 0 replies; 14+ messages in thread
From: Steven Rostedt @ 2014-11-25 11:50 UTC (permalink / raw)
  To: linux-kernel
  Cc: Linus Torvalds, Ingo Molnar, Andrew Morton, H. Peter Anvin,
	Thomas Gleixner, Masami Hiramatsu

[-- Attachment #1: 0001-ftrace-x86-Have-static-tracing-also-use-ftrace_calle.patch --]
[-- Type: text/plain, Size: 2029 bytes --]

From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>

Linus pointed out that there were locations that did a hard-coded
update of the parent and rip parameters. One of them was the static tracer,
which could also use ftrace_caller_setup to do that work. In fact,
because it did not use it, it is prone to bugs, and since the static
tracer is hardly ever used (who wants function tracing code always being
called?) it doesn't get tested very often. I only run a few "does it still
work" tests on it, but I do not run stress tests on that code. Although,
since it is never turned off, just having it on should be stressful enough
(especially for the performance folks).

There's no reason that the static tracer can't also use ftrace_caller_setup.
Have it do so.

Link: http://lkml.kernel.org/r/CA+55aFwF+qCGSKdGaEgW4p6N65GZ5_XTV=1NbtWDvxnd5yYLiw@mail.gmail.com

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
 arch/x86/kernel/mcount_64.S | 22 +++++++---------------
 1 file changed, 7 insertions(+), 15 deletions(-)

diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
index 6dc134b8dc70..24842c701660 100644
--- a/arch/x86/kernel/mcount_64.S
+++ b/arch/x86/kernel/mcount_64.S
@@ -21,12 +21,6 @@
 # define function_hook	mcount
 #endif
 
-#ifdef CONFIG_DYNAMIC_FTRACE
-
-ENTRY(function_hook)
-	retq
-END(function_hook)
-
 /* skip is set if stack has been adjusted */
 .macro ftrace_caller_setup trace_label skip=0
 	MCOUNT_SAVE_FRAME \skip
@@ -47,6 +41,12 @@ GLOBAL(\trace_label)
 #endif
 .endm
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+ENTRY(function_hook)
+	retq
+END(function_hook)
+
 #ifdef CONFIG_FRAME_POINTER
 /*
  * Stack traces will stop at the ftrace trampoline if the frame pointer
@@ -207,15 +207,7 @@ GLOBAL(ftrace_stub)
 	retq
 
 trace:
-	MCOUNT_SAVE_FRAME
-
-	movq RIP(%rsp), %rdi
-#ifdef CC_USING_FENTRY
-	movq SS+16(%rsp), %rsi
-#else
-	movq 8(%rbp), %rsi
-#endif
-	subq $MCOUNT_INSN_SIZE, %rdi
+	ftrace_caller_setup ftrace_caller_op_ptr
 
 	call   *ftrace_trace_function
 
-- 
2.1.1




* [RFC][PATCH 2/9 v2] ftrace/x86: Move MCOUNT_SAVE_FRAME out of header file
  2014-11-25 11:50 [RFC][PATCH 0/9 v2] ftrace/x86: Clean up of mcount.S code Steven Rostedt
  2014-11-25 11:50 ` [RFC][PATCH 1/9 v2] ftrace/x86: Have static tracing also use ftrace_caller_setup Steven Rostedt
@ 2014-11-25 11:50 ` Steven Rostedt
  2014-11-25 11:50 ` [RFC][PATCH 3/9 v2] ftrace/x86: Rename MCOUNT_SAVE_FRAME and add more detailed comments Steven Rostedt
                   ` (9 subsequent siblings)
  11 siblings, 0 replies; 14+ messages in thread
From: Steven Rostedt @ 2014-11-25 11:50 UTC (permalink / raw)
  To: linux-kernel
  Cc: Linus Torvalds, Ingo Molnar, Andrew Morton, H. Peter Anvin,
	Thomas Gleixner, Masami Hiramatsu

[-- Attachment #1: 0002-ftrace-x86-Move-MCOUNT_SAVE_FRAME-out-of-header-file.patch --]
[-- Type: text/plain, Size: 2671 bytes --]

From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>

Linus pointed out that MCOUNT_SAVE_FRAME is used in only a single file
and that there's no reason that it should be in a header file.
Move the macro to the code that uses it.

Link: http://lkml.kernel.org/r/CA+55aFwF+qCGSKdGaEgW4p6N65GZ5_XTV=1NbtWDvxnd5yYLiw@mail.gmail.com

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
 arch/x86/include/asm/ftrace.h | 33 ---------------------------------
 arch/x86/kernel/mcount_64.S   | 29 +++++++++++++++++++++++++++++
 2 files changed, 29 insertions(+), 33 deletions(-)

diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index e1f7fecaa7d6..f45acad3c4b6 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -1,39 +1,6 @@
 #ifndef _ASM_X86_FTRACE_H
 #define _ASM_X86_FTRACE_H
 
-#ifdef __ASSEMBLY__
-
-	/* skip is set if the stack was already partially adjusted */
-	.macro MCOUNT_SAVE_FRAME skip=0
-	 /*
-	  * We add enough stack to save all regs.
-	  */
-	subq $(SS+8-\skip), %rsp
-	movq %rax, RAX(%rsp)
-	movq %rcx, RCX(%rsp)
-	movq %rdx, RDX(%rsp)
-	movq %rsi, RSI(%rsp)
-	movq %rdi, RDI(%rsp)
-	movq %r8, R8(%rsp)
-	movq %r9, R9(%rsp)
-	 /* Move RIP to its proper location */
-	movq SS+8(%rsp), %rdx
-	movq %rdx, RIP(%rsp)
-	.endm
-
-	.macro MCOUNT_RESTORE_FRAME skip=0
-	movq R9(%rsp), %r9
-	movq R8(%rsp), %r8
-	movq RDI(%rsp), %rdi
-	movq RSI(%rsp), %rsi
-	movq RDX(%rsp), %rdx
-	movq RCX(%rsp), %rcx
-	movq RAX(%rsp), %rax
-	addq $(SS+8-\skip), %rsp
-	.endm
-
-#endif
-
 #ifdef CONFIG_FUNCTION_TRACER
 #ifdef CC_USING_FENTRY
 # define MCOUNT_ADDR		((long)(__fentry__))
diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
index 24842c701660..94fe46725fe0 100644
--- a/arch/x86/kernel/mcount_64.S
+++ b/arch/x86/kernel/mcount_64.S
@@ -21,6 +21,35 @@
 # define function_hook	mcount
 #endif
 
+/* skip is set if the stack was already partially adjusted */
+.macro MCOUNT_SAVE_FRAME skip=0
+	 /*
+	  * We add enough stack to save all regs.
+	  */
+	subq $(SS+8-\skip), %rsp
+	movq %rax, RAX(%rsp)
+	movq %rcx, RCX(%rsp)
+	movq %rdx, RDX(%rsp)
+	movq %rsi, RSI(%rsp)
+	movq %rdi, RDI(%rsp)
+	movq %r8, R8(%rsp)
+	movq %r9, R9(%rsp)
+	 /* Move RIP to its proper location */
+	movq SS+8(%rsp), %rdx
+	movq %rdx, RIP(%rsp)
+	.endm
+
+.macro MCOUNT_RESTORE_FRAME skip=0
+	movq R9(%rsp), %r9
+	movq R8(%rsp), %r8
+	movq RDI(%rsp), %rdi
+	movq RSI(%rsp), %rsi
+	movq RDX(%rsp), %rdx
+	movq RCX(%rsp), %rcx
+	movq RAX(%rsp), %rax
+	addq $(SS+8-\skip), %rsp
+	.endm
+
 /* skip is set if stack has been adjusted */
 .macro ftrace_caller_setup trace_label skip=0
 	MCOUNT_SAVE_FRAME \skip
-- 
2.1.1




* [RFC][PATCH 3/9 v2] ftrace/x86: Rename MCOUNT_SAVE_FRAME and add more detailed comments
  2014-11-25 11:50 [RFC][PATCH 0/9 v2] ftrace/x86: Clean up of mcount.S code Steven Rostedt
  2014-11-25 11:50 ` [RFC][PATCH 1/9 v2] ftrace/x86: Have static tracing also use ftrace_caller_setup Steven Rostedt
  2014-11-25 11:50 ` [RFC][PATCH 2/9 v2] ftrace/x86: Move MCOUNT_SAVE_FRAME out of header file Steven Rostedt
@ 2014-11-25 11:50 ` Steven Rostedt
  2014-11-25 11:50 ` [RFC][PATCH 4/9 v2] ftrace/x86: Have save_mcount_regs store RIP in %rdi for first parameter Steven Rostedt
                   ` (8 subsequent siblings)
  11 siblings, 0 replies; 14+ messages in thread
From: Steven Rostedt @ 2014-11-25 11:50 UTC (permalink / raw)
  To: linux-kernel
  Cc: Linus Torvalds, Ingo Molnar, Andrew Morton, H. Peter Anvin,
	Thomas Gleixner, Masami Hiramatsu

[-- Attachment #1: 0003-ftrace-x86-Rename-MCOUNT_SAVE_FRAME-and-add-more-det.patch --]
[-- Type: text/plain, Size: 3273 bytes --]

From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>

The name MCOUNT_SAVE_FRAME is rather confusing, as it isn't really a
function frame that is saved, but just the required mcount registers
that need to be saved before C code may be called. The word
"frame" suggests a function frame, which it is not.

Rename MCOUNT_SAVE_FRAME and MCOUNT_RESTORE_FRAME to save_mcount_regs
and restore_mcount_regs respectively. Note the lower case, which
keeps them from screaming at the reviewers.

Link: http://lkml.kernel.org/r/CA+55aFwF+qCGSKdGaEgW4p6N65GZ5_XTV=1NbtWDvxnd5yYLiw@mail.gmail.com

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
 arch/x86/kernel/mcount_64.S | 32 ++++++++++++++++++++++++--------
 1 file changed, 24 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
index 94fe46725fe0..0a693d011980 100644
--- a/arch/x86/kernel/mcount_64.S
+++ b/arch/x86/kernel/mcount_64.S
@@ -21,8 +21,24 @@
 # define function_hook	mcount
 #endif
 
+/*
+ * gcc -pg option adds a call to 'mcount' in most functions.
+ * When -mfentry is used, the call is to 'fentry' and not 'mcount'
+ * and is done before the function's stack frame is set up.
+ * They both require a set of regs to be saved before calling
+ * any C code and restored before returning back to the function.
+ *
+ * On boot up, all these calls are converted into nops. When tracing
+ * is enabled, the call can jump to either ftrace_caller or
+ * ftrace_regs_caller. Callbacks (tracing functions) that require
+ * ftrace_regs_caller (like kprobes) need to have pt_regs passed to
+ * it. For this reason, the size of the pt_regs structure will be
+ * allocated on the stack and the required mcount registers will
+ * be saved in the locations that pt_regs has them in.
+ */
+
 /* skip is set if the stack was already partially adjusted */
-.macro MCOUNT_SAVE_FRAME skip=0
+.macro save_mcount_regs skip=0
 	 /*
 	  * We add enough stack to save all regs.
 	  */
@@ -39,7 +55,7 @@
 	movq %rdx, RIP(%rsp)
 	.endm
 
-.macro MCOUNT_RESTORE_FRAME skip=0
+.macro restore_mcount_regs skip=0
 	movq R9(%rsp), %r9
 	movq R8(%rsp), %r8
 	movq RDI(%rsp), %rdi
@@ -52,7 +68,7 @@
 
 /* skip is set if stack has been adjusted */
 .macro ftrace_caller_setup trace_label skip=0
-	MCOUNT_SAVE_FRAME \skip
+	save_mcount_regs \skip
 
 	/* Save this location */
 GLOBAL(\trace_label)
@@ -121,7 +137,7 @@ GLOBAL(ftrace_call)
 
 	restore_frame
 
-	MCOUNT_RESTORE_FRAME
+	restore_mcount_regs
 
 	/*
 	 * The copied trampoline must call ftrace_return as it
@@ -196,7 +212,7 @@ GLOBAL(ftrace_regs_call)
 	movq RBX(%rsp), %rbx
 
 	/* skip=8 to skip flags saved in SS */
-	MCOUNT_RESTORE_FRAME 8
+	restore_mcount_regs 8
 
 	/* Restore flags */
 	popfq
@@ -240,7 +256,7 @@ trace:
 
 	call   *ftrace_trace_function
 
-	MCOUNT_RESTORE_FRAME
+	restore_mcount_regs
 
 	jmp fgraph_trace
 END(function_hook)
@@ -249,7 +265,7 @@ END(function_hook)
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 ENTRY(ftrace_graph_caller)
-	MCOUNT_SAVE_FRAME
+	save_mcount_regs
 
 #ifdef CC_USING_FENTRY
 	leaq SS+16(%rsp), %rdi
@@ -263,7 +279,7 @@ ENTRY(ftrace_graph_caller)
 
 	call	prepare_ftrace_return
 
-	MCOUNT_RESTORE_FRAME
+	restore_mcount_regs
 
 	retq
 END(ftrace_graph_caller)
-- 
2.1.1




* [RFC][PATCH 4/9 v2] ftrace/x86: Have save_mcount_regs store RIP in %rdi for first parameter
  2014-11-25 11:50 [RFC][PATCH 0/9 v2] ftrace/x86: Clean up of mcount.S code Steven Rostedt
                   ` (2 preceding siblings ...)
  2014-11-25 11:50 ` [RFC][PATCH 3/9 v2] ftrace/x86: Rename MCOUNT_SAVE_FRAME and add more detailed comments Steven Rostedt
@ 2014-11-25 11:50 ` Steven Rostedt
  2014-11-25 11:50 ` [RFC][PATCH 5/9 v2] ftrace/x86: Simplify save_mcount_regs on getting RIP Steven Rostedt
                   ` (7 subsequent siblings)
  11 siblings, 0 replies; 14+ messages in thread
From: Steven Rostedt @ 2014-11-25 11:50 UTC (permalink / raw)
  To: linux-kernel
  Cc: Linus Torvalds, Ingo Molnar, Andrew Morton, H. Peter Anvin,
	Thomas Gleixner, Masami Hiramatsu

[-- Attachment #1: 0004-ftrace-x86-Have-save_mcount_regs-store-RIP-in-rdi-fo.patch --]
[-- Type: text/plain, Size: 1412 bytes --]

From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>

Instead of having save_mcount_regs store the RIP in %rdx as a temp register
just to place it in the proper location of the pt_regs on the stack, use the
%rdi register as the temp register. This lets us remove the extra load of
RIP in the ftrace_caller_setup macro.
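
For illustration, the end result is roughly the following (a sketch distilled
from the hunk below): %rdi doubles as the scratch register and, per the
x86-64 SysV calling convention, the first function parameter, so the value is
already where the later call needs it.

	/* in save_mcount_regs: use %rdi as the temp, leaving RIP in it */
	movq SS+8(%rsp), %rdi
	movq %rdi, RIP(%rsp)

	/* in ftrace_caller_setup: %rdi already holds RIP, just adjust it */
	subq $MCOUNT_INSN_SIZE, %rdi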

Link: http://lkml.kernel.org/r/CA+55aFwF+qCGSKdGaEgW4p6N65GZ5_XTV=1NbtWDvxnd5yYLiw@mail.gmail.com

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
 arch/x86/kernel/mcount_64.S | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
index 0a693d011980..4f1b27642495 100644
--- a/arch/x86/kernel/mcount_64.S
+++ b/arch/x86/kernel/mcount_64.S
@@ -51,8 +51,8 @@
 	movq %r8, R8(%rsp)
 	movq %r9, R9(%rsp)
 	 /* Move RIP to its proper location */
-	movq SS+8(%rsp), %rdx
-	movq %rdx, RIP(%rsp)
+	movq SS+8(%rsp), %rdi
+	movq %rdi, RIP(%rsp)
 	.endm
 
 .macro restore_mcount_regs skip=0
@@ -75,8 +75,7 @@ GLOBAL(\trace_label)
 	/* Load the ftrace_ops into the 3rd parameter */
 	movq function_trace_op(%rip), %rdx
 
-	/* Load ip into the first parameter */
-	movq RIP(%rsp), %rdi
+	/* %rdi already has %rip from the save_mcount_regs macro */
 	subq $MCOUNT_INSN_SIZE, %rdi
 	/* Load the parent_ip into the second parameter */
 #ifdef CC_USING_FENTRY
-- 
2.1.1




* [RFC][PATCH 5/9 v2] ftrace/x86: Simplify save_mcount_regs on getting RIP
  2014-11-25 11:50 [RFC][PATCH 0/9 v2] ftrace/x86: Clean up of mcount.S code Steven Rostedt
                   ` (3 preceding siblings ...)
  2014-11-25 11:50 ` [RFC][PATCH 4/9 v2] ftrace/x86: Have save_mcount_regs store RIP in %rdi for first parameter Steven Rostedt
@ 2014-11-25 11:50 ` Steven Rostedt
  2014-11-25 11:50 ` [RFC][PATCH 6/9 v2] ftrace/x86: Add macro MCOUNT_REG_SIZE for amount of stack used to save mcount regs Steven Rostedt
                   ` (6 subsequent siblings)
  11 siblings, 0 replies; 14+ messages in thread
From: Steven Rostedt @ 2014-11-25 11:50 UTC (permalink / raw)
  To: linux-kernel
  Cc: Linus Torvalds, Ingo Molnar, Andrew Morton, H. Peter Anvin,
	Thomas Gleixner, Masami Hiramatsu

[-- Attachment #1: 0005-ftrace-x86-Simplify-save_mcount_regs-on-getting-RIP.patch --]
[-- Type: text/plain, Size: 3993 bytes --]

From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>

Currently save_mcount_regs is passed a "skip" parameter that tells it how much
the stack was already adjusted before the pt_regs is saved, as it tries to keep
the saved pt_regs in the same location for all users. This is rather stupid,
especially since the part stored on the pt_regs has nothing to do with what is
supposed to be in that location.

Instead of doing that, just pass in an "added" parameter that lets the
macro know how much stack was added before it was called, so that it
can still get to the RIP. The difference is that the pt_regs will now be
offset by that "added" count, and the caller needs to take care of
that offset itself.

This will make it easier to simplify the code later.
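
As a usage sketch (mirroring the ftrace_regs_caller hunk below): a caller
that pushes extra state before invoking the macro now reports that amount,
instead of having the macro shrink its own allocation.

	pushfq					/* caller adds 8 bytes of stack */
	ftrace_caller_setup ftrace_regs_caller_op_ptr 8

	/* inside the macro, the return address is now found at
	 * SS+8+\added(%rsp), i.e. SS+16(%rsp) in this case */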

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
 arch/x86/kernel/mcount_64.S | 37 ++++++++++++++++++-------------------
 1 file changed, 18 insertions(+), 19 deletions(-)

diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
index 4f1b27642495..596ac330c1db 100644
--- a/arch/x86/kernel/mcount_64.S
+++ b/arch/x86/kernel/mcount_64.S
@@ -37,12 +37,12 @@
  * be saved in the locations that pt_regs has them in.
  */
 
-/* skip is set if the stack was already partially adjusted */
-.macro save_mcount_regs skip=0
+/* @added: the amount of stack added before calling this */
+.macro save_mcount_regs added=0
 	 /*
 	  * We add enough stack to save all regs.
 	  */
-	subq $(SS+8-\skip), %rsp
+	subq $(SS+8), %rsp
 	movq %rax, RAX(%rsp)
 	movq %rcx, RCX(%rsp)
 	movq %rdx, RDX(%rsp)
@@ -51,11 +51,11 @@
 	movq %r8, R8(%rsp)
 	movq %r9, R9(%rsp)
 	 /* Move RIP to its proper location */
-	movq SS+8(%rsp), %rdi
+	movq SS+8+\added(%rsp), %rdi
 	movq %rdi, RIP(%rsp)
 	.endm
 
-.macro restore_mcount_regs skip=0
+.macro restore_mcount_regs
 	movq R9(%rsp), %r9
 	movq R8(%rsp), %r8
 	movq RDI(%rsp), %rdi
@@ -63,12 +63,12 @@
 	movq RDX(%rsp), %rdx
 	movq RCX(%rsp), %rcx
 	movq RAX(%rsp), %rax
-	addq $(SS+8-\skip), %rsp
+	addq $(SS+8), %rsp
 	.endm
 
 /* skip is set if stack has been adjusted */
-.macro ftrace_caller_setup trace_label skip=0
-	save_mcount_regs \skip
+.macro ftrace_caller_setup trace_label added=0
+	save_mcount_regs \added
 
 	/* Save this location */
 GLOBAL(\trace_label)
@@ -79,9 +79,9 @@ GLOBAL(\trace_label)
 	subq $MCOUNT_INSN_SIZE, %rdi
 	/* Load the parent_ip into the second parameter */
 #ifdef CC_USING_FENTRY
-	movq SS+16(%rsp), %rsi
+	movq SS+16+\added(%rsp), %rsi
 #else
-	movq 8(%rbp), %rsi
+	movq 8+\added(%rbp), %rsi
 #endif
 .endm
 
@@ -156,10 +156,10 @@ GLOBAL(ftrace_stub)
 END(ftrace_caller)
 
 ENTRY(ftrace_regs_caller)
-	/* Save the current flags before compare (in SS location)*/
+	/* Save the current flags before any operations that can change them */
 	pushfq
 
-	/* skip=8 to skip flags saved in SS */
+	/* added 8 bytes to save flags */
 	ftrace_caller_setup ftrace_regs_caller_op_ptr 8
 
 	/* Save the rest of pt_regs */
@@ -172,15 +172,15 @@ ENTRY(ftrace_regs_caller)
 	movq %rbp, RBP(%rsp)
 	movq %rbx, RBX(%rsp)
 	/* Copy saved flags */
-	movq SS(%rsp), %rcx
+	movq SS+8(%rsp), %rcx
 	movq %rcx, EFLAGS(%rsp)
 	/* Kernel segments */
 	movq $__KERNEL_DS, %rcx
 	movq %rcx, SS(%rsp)
 	movq $__KERNEL_CS, %rcx
 	movq %rcx, CS(%rsp)
-	/* Stack - skipping return address */
-	leaq SS+16(%rsp), %rcx
+	/* Stack - skipping return address and flags */
+	leaq SS+8*3(%rsp), %rcx
 	movq %rcx, RSP(%rsp)
 
 	/* regs go into 4th parameter */
@@ -195,11 +195,11 @@ GLOBAL(ftrace_regs_call)
 
 	/* Copy flags back to SS, to restore them */
 	movq EFLAGS(%rsp), %rax
-	movq %rax, SS(%rsp)
+	movq %rax, SS+8(%rsp)
 
 	/* Handlers can change the RIP */
 	movq RIP(%rsp), %rax
-	movq %rax, SS+8(%rsp)
+	movq %rax, SS+8*2(%rsp)
 
 	/* restore the rest of pt_regs */
 	movq R15(%rsp), %r15
@@ -210,8 +210,7 @@ GLOBAL(ftrace_regs_call)
 	movq RBP(%rsp), %rbp
 	movq RBX(%rsp), %rbx
 
-	/* skip=8 to skip flags saved in SS */
-	restore_mcount_regs 8
+	restore_mcount_regs
 
 	/* Restore flags */
 	popfq
-- 
2.1.1




* [RFC][PATCH 6/9 v2] ftrace/x86: Add macro MCOUNT_REG_SIZE for amount of stack used to save mcount regs
  2014-11-25 11:50 [RFC][PATCH 0/9 v2] ftrace/x86: Clean up of mcount.S code Steven Rostedt
                   ` (4 preceding siblings ...)
  2014-11-25 11:50 ` [RFC][PATCH 5/9 v2] ftrace/x86: Simplify save_mcount_regs on getting RIP Steven Rostedt
@ 2014-11-25 11:50 ` Steven Rostedt
  2014-11-25 11:50 ` [RFC][PATCH 7/9 v2] ftrace/x86: Have save_mcount_regs macro also save stack frames if needed Steven Rostedt
                   ` (5 subsequent siblings)
  11 siblings, 0 replies; 14+ messages in thread
From: Steven Rostedt @ 2014-11-25 11:50 UTC (permalink / raw)
  To: linux-kernel
  Cc: Linus Torvalds, Ingo Molnar, Andrew Morton, H. Peter Anvin,
	Thomas Gleixner, Masami Hiramatsu

[-- Attachment #1: 0006-ftrace-x86-Add-macro-MCOUNT_REG_SIZE-for-amount-of-s.patch --]
[-- Type: text/plain, Size: 3273 bytes --]

From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>

The macro save_mcount_regs saves regs onto the stack. But to uncouple the
amount of stack used in that macro from the users of the macro, we need
to have a define that tells all the users how much stack is used by that
macro. This way we can change the amount of stack the macro uses without
breaking its users.

Also remove some dead code that was left over from commit fdc841b58cf5
"ftrace: x86: Remove check of obsolete variable function_trace_stop".

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
 arch/x86/kernel/mcount_64.S | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
index 596ac330c1db..a0f6f942183a 100644
--- a/arch/x86/kernel/mcount_64.S
+++ b/arch/x86/kernel/mcount_64.S
@@ -21,6 +21,9 @@
 # define function_hook	mcount
 #endif
 
+/* Size of stack used to save mcount regs in save_mcount_regs */
+#define MCOUNT_REG_SIZE		(SS+8)
+
 /*
  * gcc -pg option adds a call to 'mcount' in most functions.
  * When -mfentry is used, the call is to 'fentry' and not 'mcount'
@@ -42,7 +45,7 @@
 	 /*
 	  * We add enough stack to save all regs.
 	  */
-	subq $(SS+8), %rsp
+	subq $MCOUNT_REG_SIZE, %rsp
 	movq %rax, RAX(%rsp)
 	movq %rcx, RCX(%rsp)
 	movq %rdx, RDX(%rsp)
@@ -51,7 +54,7 @@
 	movq %r8, R8(%rsp)
 	movq %r9, R9(%rsp)
 	 /* Move RIP to its proper location */
-	movq SS+8+\added(%rsp), %rdi
+	movq MCOUNT_REG_SIZE+\added(%rsp), %rdi
 	movq %rdi, RIP(%rsp)
 	.endm
 
@@ -63,7 +66,7 @@
 	movq RDX(%rsp), %rdx
 	movq RCX(%rsp), %rcx
 	movq RAX(%rsp), %rax
-	addq $(SS+8), %rsp
+	addq $MCOUNT_REG_SIZE, %rsp
 	.endm
 
 /* skip is set if stack has been adjusted */
@@ -79,7 +82,7 @@ GLOBAL(\trace_label)
 	subq $MCOUNT_INSN_SIZE, %rdi
 	/* Load the parent_ip into the second parameter */
 #ifdef CC_USING_FENTRY
-	movq SS+16+\added(%rsp), %rsi
+	movq MCOUNT_REG_SIZE+8+\added(%rsp), %rsi
 #else
 	movq 8+\added(%rbp), %rsi
 #endif
@@ -172,7 +175,7 @@ ENTRY(ftrace_regs_caller)
 	movq %rbp, RBP(%rsp)
 	movq %rbx, RBX(%rsp)
 	/* Copy saved flags */
-	movq SS+8(%rsp), %rcx
+	movq MCOUNT_REG_SIZE(%rsp), %rcx
 	movq %rcx, EFLAGS(%rsp)
 	/* Kernel segments */
 	movq $__KERNEL_DS, %rcx
@@ -180,7 +183,7 @@ ENTRY(ftrace_regs_caller)
 	movq $__KERNEL_CS, %rcx
 	movq %rcx, CS(%rsp)
 	/* Stack - skipping return address and flags */
-	leaq SS+8*3(%rsp), %rcx
+	leaq MCOUNT_REG_SIZE+8*2(%rsp), %rcx
 	movq %rcx, RSP(%rsp)
 
 	/* regs go into 4th parameter */
@@ -195,11 +198,11 @@ GLOBAL(ftrace_regs_call)
 
 	/* Copy flags back to SS, to restore them */
 	movq EFLAGS(%rsp), %rax
-	movq %rax, SS+8(%rsp)
+	movq %rax, MCOUNT_REG_SIZE(%rsp)
 
 	/* Handlers can change the RIP */
 	movq RIP(%rsp), %rax
-	movq %rax, SS+8*2(%rsp)
+	movq %rax, MCOUNT_REG_SIZE+8(%rsp)
 
 	/* restore the rest of pt_regs */
 	movq R15(%rsp), %r15
@@ -225,9 +228,6 @@ GLOBAL(ftrace_regs_caller_end)
 
 	jmp ftrace_return
 
-	popfq
-	jmp  ftrace_stub
-
 END(ftrace_regs_caller)
 
 
@@ -266,7 +266,7 @@ ENTRY(ftrace_graph_caller)
 	save_mcount_regs
 
 #ifdef CC_USING_FENTRY
-	leaq SS+16(%rsp), %rdi
+	leaq MCOUNT_REG_SIZE+8(%rsp), %rdi
 	movq $0, %rdx	/* No framepointers needed */
 #else
 	leaq 8(%rbp), %rdi
-- 
2.1.1




* [RFC][PATCH 7/9 v2] ftrace/x86: Have save_mcount_regs macro also save stack frames if needed
  2014-11-25 11:50 [RFC][PATCH 0/9 v2] ftrace/x86: Clean up of mcount.S code Steven Rostedt
                   ` (5 preceding siblings ...)
  2014-11-25 11:50 ` [RFC][PATCH 6/9 v2] ftrace/x86: Add macro MCOUNT_REG_SIZE for amount of stack used to save mcount regs Steven Rostedt
@ 2014-11-25 11:50 ` Steven Rostedt
  2014-11-25 11:50 ` [RFC][PATCH 8/9 v2] ftrace/x86: Get rid of ftrace_caller_setup Steven Rostedt
                   ` (4 subsequent siblings)
  11 siblings, 0 replies; 14+ messages in thread
From: Steven Rostedt @ 2014-11-25 11:50 UTC (permalink / raw)
  To: linux-kernel
  Cc: Linus Torvalds, Ingo Molnar, Andrew Morton, H. Peter Anvin,
	Thomas Gleixner, Masami Hiramatsu

[-- Attachment #1: 0007-ftrace-x86-Have-save_mcount_regs-macro-also-save-sta.patch --]
[-- Type: text/plain, Size: 6328 bytes --]

From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>

The save_mcount_regs macro saves and restores the required mcount regs that
need to be saved before calling C code. It is done for all the function hook
utilities (static tracing, dynamic tracing, regs, function graph).

When frame pointers are enabled, the ftrace trampolines need to set up
frames and pointers such that a back trace (dump stack) can continue past
them. Currently, a separate macro (create_frame) is used to do this, but
it's only done for the ftrace_caller and ftrace_regs_caller functions. It
is not done for the static tracer or function graph tracing.

Instead of having a separate macro do the recording of the frames,
have save_mcount_regs perform this task. This also has all tracers
saving the frame pointers when needed.
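
For the CC_USING_FENTRY plus CONFIG_FRAME_POINTER case, the stack built by
save_mcount_regs then looks roughly like this (a sketch derived from the
hunks below; higher addresses on top):

	/*
	 *  parent return address           <- already on the stack at fentry time
	 *  return address into function    <- pushed by the call to the trampoline
	 *  [\added bytes, e.g. saved flags]
	 *  original %rbp                    <- first "pushq %rbp"
	 *  copy of parent return address    \  fake frame so backtraces can
	 *  saved %rbp (frame #1)            /  walk to the parent
	 *  copy of function return address  \  fake frame for the traced
	 *  saved %rbp (frame #2)            /  function itself
	 *  pt_regs area (MCOUNT_REG_SIZE - MCOUNT_FRAME_SIZE bytes)  <- %rsp
	 */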

Link: http://lkml.kernel.org/r/CA+55aFwF+qCGSKdGaEgW4p6N65GZ5_XTV=1NbtWDvxnd5yYLiw@mail.gmail.com

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
 arch/x86/kernel/mcount_64.S | 118 +++++++++++++++++++++++++-------------------
 1 file changed, 67 insertions(+), 51 deletions(-)

diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
index a0f6f942183a..003b22df1d87 100644
--- a/arch/x86/kernel/mcount_64.S
+++ b/arch/x86/kernel/mcount_64.S
@@ -21,8 +21,22 @@
 # define function_hook	mcount
 #endif
 
+/* All cases save the original rbp (8 bytes) */
+#ifdef CONFIG_FRAME_POINTER
+# ifdef CC_USING_FENTRY
+/* Save parent and function stack frames (rip and rbp) */
+#  define MCOUNT_FRAME_SIZE	(8+16*2)
+# else
+/* Save just function stack frame (rip and rbp) */
+#  define MCOUNT_FRAME_SIZE	(8+16)
+# endif
+#else
+/* No need to save a stack frame */
+# define MCOUNT_FRAME_SIZE	8
+#endif /* CONFIG_FRAME_POINTER */
+
 /* Size of stack used to save mcount regs in save_mcount_regs */
-#define MCOUNT_REG_SIZE		(SS+8)
+#define MCOUNT_REG_SIZE		(SS+8 + MCOUNT_FRAME_SIZE)
 
 /*
  * gcc -pg option adds a call to 'mcount' in most functions.
@@ -42,10 +56,37 @@
 
 /* @added: the amount of stack added before calling this */
 .macro save_mcount_regs added=0
-	 /*
-	  * We add enough stack to save all regs.
-	  */
-	subq $MCOUNT_REG_SIZE, %rsp
+
+	/* Always save the original rbp */
+	pushq %rbp
+
+#ifdef CONFIG_FRAME_POINTER
+	/*
+	 * Stack traces will stop at the ftrace trampoline if the frame pointer
+	 * is not set up properly. If fentry is used, we need to save a frame
+	 * pointer for the parent as well as the function traced, because the
+	 * fentry is called before the stack frame is set up, where as mcount
+	 * is called afterward.
+	 */
+#ifdef CC_USING_FENTRY
+	/* Save the parent pointer (skip orig rbp and our return address) */
+	pushq \added+8*2(%rsp)
+	pushq %rbp
+	movq %rsp, %rbp
+	/* Save the return address (now skip orig rbp, rbp and parent) */
+	pushq \added+8*3(%rsp)
+#else
+	/* Can't assume that rip is before this (unless added was zero) */
+	pushq \added+8(%rsp)
+#endif
+	pushq %rbp
+	movq %rsp, %rbp
+#endif /* CONFIG_FRAME_POINTER */
+
+	/*
+	 * We add enough stack to save all regs.
+	 */
+	subq $(MCOUNT_REG_SIZE - MCOUNT_FRAME_SIZE), %rsp
 	movq %rax, RAX(%rsp)
 	movq %rcx, RCX(%rsp)
 	movq %rdx, RDX(%rsp)
@@ -53,6 +94,13 @@
 	movq %rdi, RDI(%rsp)
 	movq %r8, R8(%rsp)
 	movq %r9, R9(%rsp)
+	/*
+	 * Save the original RBP. Even though the mcount ABI does not
+	 * require this, it helps out callers.
+	 */
+	movq MCOUNT_REG_SIZE-8(%rsp), %rdx
+	movq %rdx, RBP(%rsp)
+
 	 /* Move RIP to its proper location */
 	movq MCOUNT_REG_SIZE+\added(%rsp), %rdi
 	movq %rdi, RIP(%rsp)
@@ -66,7 +114,12 @@
 	movq RDX(%rsp), %rdx
 	movq RCX(%rsp), %rcx
 	movq RAX(%rsp), %rax
+
+	/* ftrace_regs_caller can modify %rbp */
+	movq RBP(%rsp), %rbp
+
 	addq $MCOUNT_REG_SIZE, %rsp
+
 	.endm
 
 /* skip is set if stack has been adjusted */
@@ -84,7 +137,10 @@ GLOBAL(\trace_label)
 #ifdef CC_USING_FENTRY
 	movq MCOUNT_REG_SIZE+8+\added(%rsp), %rsi
 #else
-	movq 8+\added(%rbp), %rsi
+	/* Need to grab the original %rbp */
+	movq RBP(%rsp), %rsi
+	/* Now parent address is 8 above original %rbp */
+	movq 8(%rsi), %rsi
 #endif
 .endm
 
@@ -94,51 +150,14 @@ ENTRY(function_hook)
 	retq
 END(function_hook)
 
-#ifdef CONFIG_FRAME_POINTER
-/*
- * Stack traces will stop at the ftrace trampoline if the frame pointer
- * is not set up properly. If fentry is used, we need to save a frame
- * pointer for the parent as well as the function traced, because the
- * fentry is called before the stack frame is set up, where as mcount
- * is called afterward.
- */
-.macro create_frame parent rip
-#ifdef CC_USING_FENTRY
-	pushq \parent
-	pushq %rbp
-	movq %rsp, %rbp
-#endif
-	pushq \rip
-	pushq %rbp
-	movq %rsp, %rbp
-.endm
-
-.macro restore_frame
-#ifdef CC_USING_FENTRY
-	addq $16, %rsp
-#endif
-	popq %rbp
-	addq $8, %rsp
-.endm
-#else
-.macro create_frame parent rip
-.endm
-.macro restore_frame
-.endm
-#endif /* CONFIG_FRAME_POINTER */
-
 ENTRY(ftrace_caller)
 	ftrace_caller_setup ftrace_caller_op_ptr
 	/* regs go into 4th parameter (but make it NULL) */
 	movq $0, %rcx
 
-	create_frame %rsi, %rdi
-
 GLOBAL(ftrace_call)
 	call ftrace_stub
 
-	restore_frame
-
 	restore_mcount_regs
 
 	/*
@@ -172,7 +191,6 @@ ENTRY(ftrace_regs_caller)
 	movq %r12, R12(%rsp)
 	movq %r11, R11(%rsp)
 	movq %r10, R10(%rsp)
-	movq %rbp, RBP(%rsp)
 	movq %rbx, RBX(%rsp)
 	/* Copy saved flags */
 	movq MCOUNT_REG_SIZE(%rsp), %rcx
@@ -189,13 +207,9 @@ ENTRY(ftrace_regs_caller)
 	/* regs go into 4th parameter */
 	leaq (%rsp), %rcx
 
-	create_frame %rsi, %rdi
-
 GLOBAL(ftrace_regs_call)
 	call ftrace_stub
 
-	restore_frame
-
 	/* Copy flags back to SS, to restore them */
 	movq EFLAGS(%rsp), %rax
 	movq %rax, MCOUNT_REG_SIZE(%rsp)
@@ -210,7 +224,6 @@ GLOBAL(ftrace_regs_call)
 	movq R13(%rsp), %r13
 	movq R12(%rsp), %r12
 	movq R10(%rsp), %r10
-	movq RBP(%rsp), %rbp
 	movq RBX(%rsp), %rbx
 
 	restore_mcount_regs
@@ -269,8 +282,11 @@ ENTRY(ftrace_graph_caller)
 	leaq MCOUNT_REG_SIZE+8(%rsp), %rdi
 	movq $0, %rdx	/* No framepointers needed */
 #else
-	leaq 8(%rbp), %rdi
-	movq (%rbp), %rdx
+	/* Need to grab the original %rbp */
+	movq RBP(%rsp), %rdx
+	/* Now parent address is 8 above original %rbp */
+	leaq 8(%rdx), %rdi
+	movq (%rdx), %rdx
 #endif
 	movq RIP(%rsp), %rsi
 	subq $MCOUNT_INSN_SIZE, %rsi
-- 
2.1.1




* [RFC][PATCH 8/9 v2] ftrace/x86: Get rid of ftrace_caller_setup
  2014-11-25 11:50 [RFC][PATCH 0/9 v2] ftrace/x86: Clean up of mcount.S code Steven Rostedt
                   ` (6 preceding siblings ...)
  2014-11-25 11:50 ` [RFC][PATCH 7/9 v2] ftrace/x86: Have save_mcount_regs macro also save stack frames if needed Steven Rostedt
@ 2014-11-25 11:50 ` Steven Rostedt
  2014-11-25 11:50 ` [RFC][PATCH 9/9 v2] ftrace/fgraph/x86: Have prepare_ftrace_return() take ip as first parameter Steven Rostedt
                   ` (3 subsequent siblings)
  11 siblings, 0 replies; 14+ messages in thread
From: Steven Rostedt @ 2014-11-25 11:50 UTC (permalink / raw)
  To: linux-kernel
  Cc: Linus Torvalds, Ingo Molnar, Andrew Morton, H. Peter Anvin,
	Thomas Gleixner, Masami Hiramatsu

[-- Attachment #1: 0008-ftrace-x86-Get-rid-of-ftrace_caller_setup.patch --]
[-- Type: text/plain, Size: 4178 bytes --]

From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>

Move all the work from ftrace_caller_setup into save_mcount_regs. This
simplifies the code and makes it easier to understand.
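
After the move, the per-caller setup shrinks to roughly the following (a
sketch of the dynamic ftrace_caller path from the hunks below; the comments
describe the parameter roles, with save_mcount_regs providing the first two):

	save_mcount_regs			/* %rdi = ip, %rsi = parent ip */
	movq function_trace_op(%rip), %rdx	/* 3rd parameter: ftrace_ops */
	movq $0, %rcx				/* 4th parameter: pt_regs (NULL here) */
	call ftrace_stub			/* site patched to the registered callback */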

Link: http://lkml.kernel.org/r/CA+55aFxUTUbdxpjVMW8X9c=o8sui7OB_MYPfcbJuDyfUWtNrNg@mail.gmail.com

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
 arch/x86/kernel/mcount_64.S | 71 +++++++++++++++++++++++++++------------------
 1 file changed, 42 insertions(+), 29 deletions(-)

diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
index 003b22df1d87..ddc766efa1f1 100644
--- a/arch/x86/kernel/mcount_64.S
+++ b/arch/x86/kernel/mcount_64.S
@@ -54,7 +54,15 @@
  * be saved in the locations that pt_regs has them in.
  */
 
-/* @added: the amount of stack added before calling this */
+/*
+ * @added: the amount of stack added before calling this
+ *
+ * After this is called, the following registers contain:
+ *
+ *  %rdi - holds the address that called the trampoline
+ *  %rsi - holds the parent function (traced function's return address)
+ *  %rdx - holds the original %rbp
+ */
 .macro save_mcount_regs added=0
 
 	/* Always save the original rbp */
@@ -101,9 +109,24 @@
 	movq MCOUNT_REG_SIZE-8(%rsp), %rdx
 	movq %rdx, RBP(%rsp)
 
+	/* Copy the parent address into %rsi (second parameter) */
+#ifdef CC_USING_FENTRY
+	movq MCOUNT_REG_SIZE+8+\added(%rsp), %rsi
+#else
+	/* %rdx contains original %rbp */
+	movq 8(%rdx), %rsi
+#endif
+
 	 /* Move RIP to its proper location */
 	movq MCOUNT_REG_SIZE+\added(%rsp), %rdi
 	movq %rdi, RIP(%rsp)
+
+	/*
+	 * Now %rdi (the first parameter) has the return address of
+	 * where ftrace_call returns. But the callbacks expect the
+	 * the address of the call itself.
+	 */
+	subq $MCOUNT_INSN_SIZE, %rdi
 	.endm
 
 .macro restore_mcount_regs
@@ -122,28 +145,6 @@
 
 	.endm
 
-/* skip is set if stack has been adjusted */
-.macro ftrace_caller_setup trace_label added=0
-	save_mcount_regs \added
-
-	/* Save this location */
-GLOBAL(\trace_label)
-	/* Load the ftrace_ops into the 3rd parameter */
-	movq function_trace_op(%rip), %rdx
-
-	/* %rdi already has %rip from the save_mcount_regs macro */
-	subq $MCOUNT_INSN_SIZE, %rdi
-	/* Load the parent_ip into the second parameter */
-#ifdef CC_USING_FENTRY
-	movq MCOUNT_REG_SIZE+8+\added(%rsp), %rsi
-#else
-	/* Need to grab the original %rbp */
-	movq RBP(%rsp), %rsi
-	/* Now parent address is 8 above original %rbp */
-	movq 8(%rsi), %rsi
-#endif
-.endm
-
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 ENTRY(function_hook)
@@ -151,7 +152,13 @@ ENTRY(function_hook)
 END(function_hook)
 
 ENTRY(ftrace_caller)
-	ftrace_caller_setup ftrace_caller_op_ptr
+	/* save_mcount_regs fills in first two parameters */
+	save_mcount_regs
+
+GLOBAL(ftrace_caller_op_ptr)
+	/* Load the ftrace_ops into the 3rd parameter */
+	movq function_trace_op(%rip), %rdx
+
 	/* regs go into 4th parameter (but make it NULL) */
 	movq $0, %rcx
 
@@ -182,7 +189,12 @@ ENTRY(ftrace_regs_caller)
 	pushfq
 
 	/* added 8 bytes to save flags */
-	ftrace_caller_setup ftrace_regs_caller_op_ptr 8
+	save_mcount_regs 8
+	/* save_mcount_regs fills in first two parameters */
+
+GLOBAL(ftrace_regs_caller_op_ptr)
+	/* Load the ftrace_ops into the 3rd parameter */
+	movq function_trace_op(%rip), %rdx
 
 	/* Save the rest of pt_regs */
 	movq %r15, R15(%rsp)
@@ -263,7 +275,8 @@ GLOBAL(ftrace_stub)
 	retq
 
 trace:
-	ftrace_caller_setup ftrace_caller_op_ptr
+	/* save_mcount_regs fills in first two parameters */
+	save_mcount_regs
 
 	call   *ftrace_trace_function
 
@@ -276,16 +289,16 @@ END(function_hook)
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 ENTRY(ftrace_graph_caller)
+	/* Saves rbp into %rdx */
 	save_mcount_regs
 
 #ifdef CC_USING_FENTRY
 	leaq MCOUNT_REG_SIZE+8(%rsp), %rdi
 	movq $0, %rdx	/* No framepointers needed */
 #else
-	/* Need to grab the original %rbp */
-	movq RBP(%rsp), %rdx
-	/* Now parent address is 8 above original %rbp */
+	/* Save address of the return address of traced function */
 	leaq 8(%rdx), %rdi
+	/* ftrace does sanity checks against frame pointers */
 	movq (%rdx), %rdx
 #endif
 	movq RIP(%rsp), %rsi
-- 
2.1.1




* [RFC][PATCH 9/9 v2] ftrace/fgraph/x86: Have prepare_ftrace_return() take ip as first parameter
  2014-11-25 11:50 [RFC][PATCH 0/9 v2] ftrace/x86: Clean up of mcount.S code Steven Rostedt
                   ` (7 preceding siblings ...)
  2014-11-25 11:50 ` [RFC][PATCH 8/9 v2] ftrace/x86: Get rid of ftrace_caller_setup Steven Rostedt
@ 2014-11-25 11:50 ` Steven Rostedt
  2014-11-25 17:36 ` [RFC][PATCH 0/9 v2] ftrace/x86: Clean up of mcount.S code Linus Torvalds
                   ` (2 subsequent siblings)
  11 siblings, 0 replies; 14+ messages in thread
From: Steven Rostedt @ 2014-11-25 11:50 UTC (permalink / raw)
  To: linux-kernel
  Cc: Linus Torvalds, Ingo Molnar, Andrew Morton, H. Peter Anvin,
	Thomas Gleixner, Masami Hiramatsu

[-- Attachment #1: 0009-ftrace-fgraph-x86-Have-prepare_ftrace_return-take-ip.patch --]
[-- Type: text/plain, Size: 2534 bytes --]

From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>

The function graph helper function prepare_ftrace_return(), which does the work
to hijack the parent pointer, takes that parent pointer as its first parameter.
Instead, make it the second parameter and have ip be the first parameter
(self_addr); then it can use the %rdi that save_mcount_regs already loads.
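
A sketch of the resulting argument setup in ftrace_graph_caller (the fentry
branch, distilled from the hunk below; register roles follow the x86-64 SysV
calling convention):

	save_mcount_regs			/* %rdi = self_addr, %rdx = original %rbp */
	leaq MCOUNT_REG_SIZE+8(%rsp), %rsi	/* %rsi = &parent return address */
	movq $0, %rdx				/* no frame pointer needed with fentry */
	call prepare_ftrace_return		/* (self_addr, parent, frame_pointer) */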

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
 arch/x86/kernel/ftrace.c    |  4 ++--
 arch/x86/kernel/mcount_64.S | 11 ++++-------
 2 files changed, 6 insertions(+), 9 deletions(-)

diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 60881d919432..2142376dc8c6 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -871,7 +871,7 @@ static void *addr_from_call(void *ptr)
 	return ptr + MCOUNT_INSN_SIZE + calc.offset;
 }
 
-void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
 			   unsigned long frame_pointer);
 
 /*
@@ -964,7 +964,7 @@ int ftrace_disable_ftrace_graph_caller(void)
  * Hook the return address and push it in the stack of return addrs
  * in current thread info.
  */
-void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
 			   unsigned long frame_pointer)
 {
 	unsigned long old;
diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
index ddc766efa1f1..94ea120fa21f 100644
--- a/arch/x86/kernel/mcount_64.S
+++ b/arch/x86/kernel/mcount_64.S
@@ -124,7 +124,7 @@
 	/*
 	 * Now %rdi (the first parameter) has the return address of
 	 * where ftrace_call returns. But the callbacks expect the
-	 * the address of the call itself.
+	 * address of the call itself.
 	 */
 	subq $MCOUNT_INSN_SIZE, %rdi
 	.endm
@@ -289,21 +289,18 @@ END(function_hook)
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 ENTRY(ftrace_graph_caller)
-	/* Saves rbp into %rdx */
+	/* Saves rbp into %rdx and fills first parameter  */
 	save_mcount_regs
 
 #ifdef CC_USING_FENTRY
-	leaq MCOUNT_REG_SIZE+8(%rsp), %rdi
+	leaq MCOUNT_REG_SIZE+8(%rsp), %rsi
 	movq $0, %rdx	/* No framepointers needed */
 #else
 	/* Save address of the return address of traced function */
-	leaq 8(%rdx), %rdi
+	leaq 8(%rdx), %rsi
 	/* ftrace does sanity checks against frame pointers */
 	movq (%rdx), %rdx
 #endif
-	movq RIP(%rsp), %rsi
-	subq $MCOUNT_INSN_SIZE, %rsi
-
 	call	prepare_ftrace_return
 
 	restore_mcount_regs
-- 
2.1.1




* Re: [RFC][PATCH 0/9 v2] ftrace/x86: Clean up of mcount.S code
  2014-11-25 11:50 [RFC][PATCH 0/9 v2] ftrace/x86: Clean up of mcount.S code Steven Rostedt
                   ` (8 preceding siblings ...)
  2014-11-25 11:50 ` [RFC][PATCH 9/9 v2] ftrace/fgraph/x86: Have prepare_ftrace_return() take ip as first parameter Steven Rostedt
@ 2014-11-25 17:36 ` Linus Torvalds
  2014-11-26 22:03 ` Thomas Gleixner
  2014-11-26 22:05 ` Thomas Gleixner
  11 siblings, 0 replies; 14+ messages in thread
From: Linus Torvalds @ 2014-11-25 17:36 UTC (permalink / raw)
  To: Steven Rostedt
  Cc: Linux Kernel Mailing List, Ingo Molnar, Andrew Morton,
	H. Peter Anvin, Thomas Gleixner, Masami Hiramatsu

On Tue, Nov 25, 2014 at 3:50 AM, Steven Rostedt <rostedt@goodmis.org> wrote:
>
> I ran these through my test suite and they passed. Are these changes
> fine for you? If so, I'll include them in my 3.19 queue.

Yeah, the end result here looks much more understandable.

Or maybe it's just that I've now seen so many versions that it doesn't
look strange to me any more.

Either way, this looks ok. Maybe some other x86 asm person can look at
it, but if you get the right backtraces it can't be too broken.

                     Linus


* Re: [RFC][PATCH 0/9 v2] ftrace/x86: Clean up of mcount.S code
  2014-11-25 11:50 [RFC][PATCH 0/9 v2] ftrace/x86: Clean up of mcount.S code Steven Rostedt
                   ` (9 preceding siblings ...)
  2014-11-25 17:36 ` [RFC][PATCH 0/9 v2] ftrace/x86: Clean up of mcount.S code Linus Torvalds
@ 2014-11-26 22:03 ` Thomas Gleixner
  2014-11-26 22:05 ` Thomas Gleixner
  11 siblings, 0 replies; 14+ messages in thread
From: Thomas Gleixner @ 2014-11-26 22:03 UTC (permalink / raw)
  To: Steven Rostedt
  Cc: linux-kernel, Linus Torvalds, Ingo Molnar, Andrew Morton,
	H. Peter Anvin, Masami Hiramatsu

On Tue, 25 Nov 2014, Steven Rostedt wrote:
> I ran these through my test suite and they passed. Are these changes
> fine for you? If so, I'll include them in my 3.19 queue.
> 
> I still think the "band-aid patch" should go into 3.18 now, as it does
> fix a real bug, and is small and less likely to cause any regressions that
> this patch series might do. Which also makes it a candidate for stable.

Bah. It confused the hell out of me to figure out what that band aid
patch would be in the series.

So you are referring to the patch which I acked already, and which
caused Linus to stick his nose into that code.

Right, we really should (ignoring the ugliness) push that back into
stable. Having debug infrastructure which provides half-baked
information is a real pain. I wasted days in the past just to figure
out that the tracer was giving me incomplete information or refusing
to stop when I wanted it to. I'd rather have a tool be 'slow' than
dysfunctional.

Thanks,

	tglx


* Re: [RFC][PATCH 0/9 v2] ftrace/x86: Clean up of mcount.S code
  2014-11-25 11:50 [RFC][PATCH 0/9 v2] ftrace/x86: Clean up of mcount.S code Steven Rostedt
                   ` (10 preceding siblings ...)
  2014-11-26 22:03 ` Thomas Gleixner
@ 2014-11-26 22:05 ` Thomas Gleixner
  2014-11-26 23:13   ` Steven Rostedt
  11 siblings, 1 reply; 14+ messages in thread
From: Thomas Gleixner @ 2014-11-26 22:05 UTC (permalink / raw)
  To: Steven Rostedt
  Cc: linux-kernel, Linus Torvalds, Ingo Molnar, Andrew Morton,
	H. Peter Anvin, Masami Hiramatsu

On Tue, 25 Nov 2014, Steven Rostedt wrote:
> Steven Rostedt (Red Hat) (9):
>       ftrace/x86: Have static tracing also use ftrace_caller_setup
>       ftrace/x86: Move MCOUNT_SAVE_FRAME out of header file
>       ftrace/x86: Rename MCOUNT_SAVE_FRAME and add more detailed comments
>       ftrace/x86: Have save_mcount_regs store RIP in %rdi for first parameter
>       ftrace/x86: Simplify save_mcount_regs on getting RIP
>       ftrace/x86: Add macro MCOUNT_REG_SIZE for amount of stack used to save mcount regs
>       ftrace/x86: Have save_mcount_regs macro also save stack frames if needed
>       ftrace/x86: Get rid of ftrace_caller_setup
>       ftrace/fgraph/x86: Have prepare_ftrace_return() take ip as first parameter

I took the last remaining cells of my review-damaged brain and went
through this with a fine-tooth comb. So far I could not find anything,
but I might have missed the obvious.

Reviewed-by: Thomas Gleixner <tglx@linutronix.de>


* Re: [RFC][PATCH 0/9 v2] ftrace/x86: Clean up of mcount.S code
  2014-11-26 22:05 ` Thomas Gleixner
@ 2014-11-26 23:13   ` Steven Rostedt
  0 siblings, 0 replies; 14+ messages in thread
From: Steven Rostedt @ 2014-11-26 23:13 UTC (permalink / raw)
  To: Thomas Gleixner
  Cc: linux-kernel, Linus Torvalds, Ingo Molnar, Andrew Morton,
	H. Peter Anvin, Masami Hiramatsu

On Wed, 26 Nov 2014 23:05:48 +0100 (CET)
Thomas Gleixner <tglx@linutronix.de> wrote:

> On Tue, 25 Nov 2014, Steven Rostedt wrote:
> > Steven Rostedt (Red Hat) (9):
> >       ftrace/x86: Have static tracing also use ftrace_caller_setup
> >       ftrace/x86: Move MCOUNT_SAVE_FRAME out of header file
> >       ftrace/x86: Rename MCOUNT_SAVE_FRAME and add more detailed comments
> >       ftrace/x86: Have save_mcount_regs store RIP in %rdi for first parameter
> >       ftrace/x86: Simplify save_mcount_regs on getting RIP
> >       ftrace/x86: Add macro MCOUNT_REG_SIZE for amount of stack used to save mcount regs
> >       ftrace/x86: Have save_mcount_regs macro also save stack frames if needed
> >       ftrace/x86: Get rid of ftrace_caller_setup
> >       ftrace/fgraph/x86: Have prepare_ftrace_return() take ip as first parameter
> 
> I took the last remaining cells of my review damaged brain together to
> go through this with a fine comb. So far I could not find something,
> but I might have missed the obvious.
> 
> Reviewed-by: Thomas Gleixner <tglx@linutronix.de>

Thanks!

-- Steve

