* [PATCH v7 01/10] ppc64 (le): prepare for -mprofile-kernel
2016-02-04 14:29 [PATCH v7 00/10] ftrace with regs + live patching for ppc64 LE (ABI v2) Torsten Duwe
@ 2016-01-25 15:26 ` Torsten Duwe
2016-01-25 15:27 ` [PATCH v7 02/10] ppc64le FTRACE_WITH_REGS implementation Torsten Duwe
` (8 subsequent siblings)
9 siblings, 0 replies; 26+ messages in thread
From: Torsten Duwe @ 2016-01-25 15:26 UTC (permalink / raw)
To: Michael Ellerman
Cc: Jiri Kosina, Miroslav Benes, Petr Mladek, Jessica Yu,
Steven Rostedt, linuxppc-dev, linux-kernel, live-patching
The gcc switch -mprofile-kernel, available for ppc64 on gcc > 4.8.5,
allows calling _mcount very early in the function, which low-level
ASM code and code patching functions need to consider.
Especially the link register and the parameter registers are still
alive and not yet saved into a new stack frame.
* arch/powerpc/kernel/entry_64.S:
- modify the default _mcount to be prepared for such call sites
- have the ftrace_graph_caller save function arguments before
calling its C helper prepare_ftrace_return
* arch/powerpc/include/asm/code-patching.h:
- define some common macros to make things readable.
- pull the R2 stack location definition from
arch/powerpc/kernel/module_64.c
* arch/powerpc/kernel/module_64.c:
- enhance binary code examination to handle the new patterns.
Signed-off-by: Torsten Duwe <duwe@suse.de>
---
arch/powerpc/include/asm/code-patching.h | 24 ++++++++++++++++
arch/powerpc/kernel/entry_64.S | 48 +++++++++++++++++++++++++++++++-
arch/powerpc/kernel/ftrace.c | 44 ++++++++++++++++++++++-------
arch/powerpc/kernel/module_64.c | 31 +++++++++++++++++++--
4 files changed, 133 insertions(+), 14 deletions(-)
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index 840a550..7820b32 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -99,4 +99,28 @@ static inline unsigned long ppc_global_function_entry(void *func)
#endif
}
+#ifdef CONFIG_PPC64
+/* Some instruction encodings commonly used in dynamic ftracing
+ * and function live patching:
+ */
+
+/* This must match the definition of STK_GOT in <asm/ppc_asm.h> */
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define R2_STACK_OFFSET 24
+#else
+#define R2_STACK_OFFSET 40
+#endif
+
+/* load / store the TOC from / into the stack frame */
+#define PPC_INST_LD_TOC (PPC_INST_LD | ___PPC_RT(__REG_R2) | \
+ ___PPC_RA(__REG_R1) | R2_STACK_OFFSET)
+#define PPC_INST_STD_TOC (PPC_INST_STD | ___PPC_RS(__REG_R2) | \
+ ___PPC_RA(__REG_R1) | R2_STACK_OFFSET)
+
+/* usually preceded by a mflr r0 */
+#define PPC_INST_STD_LR (PPC_INST_STD | ___PPC_RS(__REG_R0) | \
+ ___PPC_RA(__REG_R1) | PPC_LR_STKOFF)
+
+#endif /* CONFIG_PPC64 */
+
#endif /* _ASM_POWERPC_CODE_PATCHING_H */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 0d525ce..2a7313c 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -1143,7 +1143,10 @@ _GLOBAL(enter_prom)
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
- blr
+ mflr r12
+ mtctr r12
+ mtlr r0
+ bctr
_GLOBAL_TOC(ftrace_caller)
/* Taken from output of objdump from lib64/glibc */
@@ -1198,6 +1201,7 @@ _GLOBAL(ftrace_stub)
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#ifndef CC_USING_MPROFILE_KERNEL
_GLOBAL(ftrace_graph_caller)
/* load r4 with local address */
ld r4, 128(r1)
@@ -1222,6 +1226,48 @@ _GLOBAL(ftrace_graph_caller)
addi r1, r1, 112
blr
+#else /* CC_USING_MPROFILE_KERNEL */
+_GLOBAL(ftrace_graph_caller)
+ /* with -mprofile-kernel, parameter regs are still alive at _mcount */
+ std r10, 104(r1)
+ std r9, 96(r1)
+ std r8, 88(r1)
+ std r7, 80(r1)
+ std r6, 72(r1)
+ std r5, 64(r1)
+ std r4, 56(r1)
+ std r3, 48(r1)
+ mfctr r4 /* ftrace_caller has moved local addr here */
+ std r4, 40(r1)
+ mflr r3 /* ftrace_caller has restored LR from stack */
+ subi r4, r4, MCOUNT_INSN_SIZE
+
+ bl prepare_ftrace_return
+ nop
+
+ /*
+ * prepare_ftrace_return gives us the address we divert to.
+ * Change the LR to this.
+ */
+ mtlr r3
+
+ ld r0, 40(r1)
+ mtctr r0
+ ld r10, 104(r1)
+ ld r9, 96(r1)
+ ld r8, 88(r1)
+ ld r7, 80(r1)
+ ld r6, 72(r1)
+ ld r5, 64(r1)
+ ld r4, 56(r1)
+ ld r3, 48(r1)
+
+ addi r1, r1, 112
+ mflr r0
+ std r0, LRSAVE(r1)
+ bctr
+#endif /* CC_USING_MPROFILE_KERNEL */
+
_GLOBAL(return_to_handler)
/* need to save return values */
std r4, -32(r1)
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index 44d4d8e..861af90 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -287,16 +287,14 @@ int ftrace_make_nop(struct module *mod,
#ifdef CONFIG_MODULES
#ifdef CONFIG_PPC64
+/* Examine the existing instructions for __ftrace_make_call.
+ * They should effectively be a NOP, and follow formal constraints,
+ * depending on the ABI. Return false if they don't.
+ */
+#ifndef CC_USING_MPROFILE_KERNEL
static int
-__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1)
{
- unsigned int op[2];
- void *ip = (void *)rec->ip;
-
- /* read where this goes */
- if (probe_kernel_read(op, ip, sizeof(op)))
- return -EFAULT;
-
/*
* We expect to see:
*
@@ -306,8 +304,34 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
* The load offset is different depending on the ABI. For simplicity
* just mask it out when doing the compare.
*/
- if ((op[0] != 0x48000008) || ((op[1] & 0xffff0000) != 0xe8410000)) {
- pr_err("Unexpected call sequence: %x %x\n", op[0], op[1]);
+ if ((op0 != 0x48000008) || ((op1 & 0xffff0000) != 0xe8410000))
+ return 0;
+ return 1;
+}
+#else
+static int
+expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1)
+{
+ /* look for patched "NOP" on ppc64 with -mprofile-kernel */
+ if (op0 != PPC_INST_NOP)
+ return 0;
+ return 1;
+}
+#endif
+
+static int
+__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned int op[2];
+ void *ip = (void *)rec->ip;
+
+ /* read where this goes */
+ if (probe_kernel_read(op, ip, sizeof(op)))
+ return -EFAULT;
+
+ if (!expected_nop_sequence(ip, op[0], op[1])) {
+ pr_err("Unexpected call sequence at %p: %x %x\n",
+ ip, op[0], op[1]);
return -EINVAL;
}
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 59663af..d75a25f 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -41,7 +41,6 @@
--RR. */
#if defined(_CALL_ELF) && _CALL_ELF == 2
-#define R2_STACK_OFFSET 24
/* An address is simply the address of the function. */
typedef unsigned long func_desc_t;
@@ -73,7 +72,6 @@ static unsigned int local_entry_offset(const Elf64_Sym *sym)
return PPC64_LOCAL_ENTRY_OFFSET(sym->st_other);
}
#else
-#define R2_STACK_OFFSET 40
/* An address is address of the OPD entry, which contains address of fn. */
typedef struct ppc64_opd_entry func_desc_t;
@@ -470,17 +468,44 @@ static unsigned long stub_for_addr(Elf64_Shdr *sechdrs,
return (unsigned long)&stubs[i];
}
+#ifdef CC_USING_MPROFILE_KERNEL
+static int is_early_mcount_callsite(u32 *instruction)
+{
+ /* -mprofile-kernel sequence starting with
+ * mflr r0 and maybe std r0, LRSAVE(r1).
+ */
+ if ((instruction[-3] == PPC_INST_MFLR &&
+ instruction[-2] == PPC_INST_STD_LR) ||
+ instruction[-2] == PPC_INST_MFLR) {
+ /* Nothing to be done here, it's an _mcount
+ * call location and r2 will have to be
+ * restored in the _mcount function.
+ */
+ return 1;
+ }
+ return 0;
+}
+#else
+/* without -mprofile-kernel, mcount calls are never early */
+static int is_early_mcount_callsite(u32 *instruction)
+{
+ return 0;
+}
+#endif
+
/* We expect a noop next: if it is, replace it with instruction to
restore r2. */
static int restore_r2(u32 *instruction, struct module *me)
{
if (*instruction != PPC_INST_NOP) {
+ if (is_early_mcount_callsite(instruction))
+ return 1;
pr_err("%s: Expect noop after relocate, got %08x\n",
me->name, *instruction);
return 0;
}
/* ld r2,R2_STACK_OFFSET(r1) */
- *instruction = 0xe8410000 | R2_STACK_OFFSET;
+ *instruction = PPC_INST_LD_TOC;
return 1;
}
--
1.8.5.6
^ permalink raw reply related [flat|nested] 26+ messages in thread
* [PATCH v7 02/10] ppc64le FTRACE_WITH_REGS implementation
2016-02-04 14:29 [PATCH v7 00/10] ftrace with regs + live patching for ppc64 LE (ABI v2) Torsten Duwe
2016-01-25 15:26 ` [PATCH v7 01/10] ppc64 (le): prepare for -mprofile-kernel Torsten Duwe
@ 2016-01-25 15:27 ` Torsten Duwe
2016-01-25 15:29 ` [PATCH v7 03/10] ppc use ftrace_modify_all_code default Torsten Duwe
` (7 subsequent siblings)
9 siblings, 0 replies; 26+ messages in thread
From: Torsten Duwe @ 2016-01-25 15:27 UTC (permalink / raw)
To: Michael Ellerman
Cc: Jiri Kosina, Miroslav Benes, Petr Mladek, Jessica Yu,
Steven Rostedt, linuxppc-dev, linux-kernel, live-patching
Implement FTRACE_WITH_REGS for powerpc64, on ELF ABI v2.
Initial work started by Vojtech Pavlik, used with permission.
* arch/powerpc/kernel/entry_64.S:
- Implement an effective ftrace_caller that works from
within the kernel binary as well as from modules.
* arch/powerpc/kernel/ftrace.c:
- be prepared to deal with ppc64 ELF ABI v2, especially
calls to _mcount that result from gcc -mprofile-kernel
- a little more error verbosity
* arch/powerpc/kernel/module_64.c:
- do not save the TOC pointer on the trampoline when the
destination is ftrace_caller. This trampoline jump happens from
a function prologue before a new stack frame is set up, so bad
things may happen otherwise...
- relax is_module_trampoline() to recognise the modified
trampoline.
Signed-off-by: Torsten Duwe <duwe@suse.de>
---
arch/powerpc/include/asm/ftrace.h | 5 +++
arch/powerpc/kernel/entry_64.S | 78 +++++++++++++++++++++++++++++++++++++++
arch/powerpc/kernel/ftrace.c | 64 +++++++++++++++++++++++++++++---
arch/powerpc/kernel/module_64.c | 25 ++++++++++++-
4 files changed, 165 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h
index ef89b14..50ca758 100644
--- a/arch/powerpc/include/asm/ftrace.h
+++ b/arch/powerpc/include/asm/ftrace.h
@@ -46,6 +46,8 @@
extern void _mcount(void);
#ifdef CONFIG_DYNAMIC_FTRACE
+# define FTRACE_ADDR ((unsigned long)ftrace_caller)
+# define FTRACE_REGS_ADDR FTRACE_ADDR
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
/* reloction of mcount call site is the same as the address */
@@ -58,6 +60,9 @@ struct dyn_arch_ftrace {
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* __ASSEMBLY__ */
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+#endif
#endif
#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64) && !defined(__ASSEMBLY__)
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 2a7313c..c063564 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -1148,6 +1148,7 @@ _GLOBAL(_mcount)
mtlr r0
bctr
+#ifndef CC_USING_MPROFILE_KERNEL
_GLOBAL_TOC(ftrace_caller)
/* Taken from output of objdump from lib64/glibc */
mflr r3
@@ -1169,6 +1170,83 @@ _GLOBAL(ftrace_graph_stub)
ld r0, 128(r1)
mtlr r0
addi r1, r1, 112
+#else
+_GLOBAL(ftrace_caller)
+ std r0,LRSAVE(r1)
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+ mflr r0
+ bl 2f
+2: mflr r12
+ mtlr r0
+ mr r0,r2 /* save callee's TOC */
+ addis r2,r12,(.TOC.-ftrace_caller-12)@ha
+ addi r2,r2,(.TOC.-ftrace_caller-12)@l
+#else
+ mr r0,r2
+#endif
+ ld r12,LRSAVE(r1) /* get caller's address */
+
+ stdu r1,-SWITCH_FRAME_SIZE(r1)
+
+ std r12, _LINK(r1)
+ SAVE_8GPRS(0,r1)
+ std r0, 24(r1) /* save TOC */
+ SAVE_8GPRS(8,r1)
+ SAVE_8GPRS(16,r1)
+ SAVE_8GPRS(24,r1)
+
+ addis r3,r2,function_trace_op@toc@ha
+ addi r3,r3,function_trace_op@toc@l
+ ld r5,0(r3)
+
+ mflr r3
+ std r3, _NIP(r1)
+ std r3, 16(r1)
+ subi r3, r3, MCOUNT_INSN_SIZE
+ mfmsr r4
+ std r4, _MSR(r1)
+ mfctr r4
+ std r4, _CTR(r1)
+ mfxer r4
+ std r4, _XER(r1)
+ mr r4, r12
+ addi r6, r1 ,STACK_FRAME_OVERHEAD
+
+.globl ftrace_call
+ftrace_call:
+ bl ftrace_stub
+ nop
+
+ ld r3, _NIP(r1)
+ mtlr r3
+
+ REST_8GPRS(0,r1)
+ REST_8GPRS(8,r1)
+ REST_8GPRS(16,r1)
+ REST_8GPRS(24,r1)
+
+ addi r1, r1, SWITCH_FRAME_SIZE
+
+ ld r12, LRSAVE(r1) /* get caller's address */
+ mtlr r12
+ mr r2,r0 /* restore callee's TOC */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ stdu r1, -112(r1)
+.globl ftrace_graph_call
+ftrace_graph_call:
+ b ftrace_graph_stub
+_GLOBAL(ftrace_graph_stub)
+ addi r1, r1, 112
+#endif
+
+ mflr r0 /* move this LR to CTR */
+ mtctr r0
+
+ ld r0,LRSAVE(r1) /* restore callee's lr at _mcount site */
+ mtlr r0
+ bctr /* jump after _mcount site */
+#endif /* CC_USING_MPROFILE_KERNEL */
_GLOBAL(ftrace_stub)
blr
#else
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index 861af90..1fad1b3 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -61,8 +61,11 @@ ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
return -EFAULT;
/* Make sure it is what we expect it to be */
- if (replaced != old)
+ if (replaced != old) {
+ pr_err("%p: replaced (%#x) != old (%#x)",
+ (void *)ip, replaced, old);
return -EINVAL;
+ }
/* replace the text with the new text */
if (patch_instruction((unsigned int *)ip, new))
@@ -106,14 +109,16 @@ static int
__ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr)
{
- unsigned int op;
+ unsigned int op, op0, op1, pop;
unsigned long entry, ptr;
unsigned long ip = rec->ip;
void *tramp;
/* read where this goes */
- if (probe_kernel_read(&op, (void *)ip, sizeof(int)))
+ if (probe_kernel_read(&op, (void *)ip, sizeof(int))) {
+ pr_err("Fetching opcode failed.\n");
return -EFAULT;
+ }
/* Make sure that that this is still a 24bit jump */
if (!is_bl_op(op)) {
@@ -158,10 +163,49 @@ __ftrace_make_nop(struct module *mod,
*
* Use a b +8 to jump over the load.
*/
- op = 0x48000008; /* b +8 */
- if (patch_instruction((unsigned int *)ip, op))
+ pop = PPC_INST_BRANCH | 8; /* b +8 */
+
+ /*
+ * Check what is in the next instruction. We can see ld r2,40(r1), but
+ * on first pass after boot we will see mflr r0.
+ */
+ if (probe_kernel_read(&op, (void *)(ip+4), MCOUNT_INSN_SIZE)) {
+ pr_err("Fetching op failed.\n");
+ return -EFAULT;
+ }
+
+ if (op != PPC_INST_LD_TOC)
+ {
+ if (probe_kernel_read(&op0, (void *)(ip-8), MCOUNT_INSN_SIZE)) {
+ pr_err("Fetching op0 failed.\n");
+ return -EFAULT;
+ }
+
+ if (probe_kernel_read(&op1, (void *)(ip-4), MCOUNT_INSN_SIZE)) {
+ pr_err("Fetching op1 failed.\n");
+ return -EFAULT;
+ }
+
+ /* mflr r0 ; [ std r0,LRSAVE(r1) ]? */
+ if ( (op0 != PPC_INST_MFLR ||
+ op1 != PPC_INST_STD_LR)
+ && op1 != PPC_INST_MFLR )
+ {
+ pr_err("Unexpected instructions around bl _mcount\n"
+ "when enabling dynamic ftrace!\t"
+ "(%08x,%08x,bl,%08x)\n", op0, op1, op);
+ return -EINVAL;
+ }
+
+ /* When using -mprofile-kernel there is no load to jump over */
+ pop = PPC_INST_NOP;
+ }
+
+ if (patch_instruction((unsigned int *)ip, pop)) {
+ pr_err("Patching NOP failed.\n");
return -EPERM;
+ }
return 0;
}
@@ -287,6 +331,14 @@ int ftrace_make_nop(struct module *mod,
#ifdef CONFIG_MODULES
#ifdef CONFIG_PPC64
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+ unsigned long addr)
+{
+ return ftrace_make_call(rec, addr);
+}
+#endif
+
/* Examine the existing instructions for __ftrace_make_call.
* They should effectively be a NOP, and follow formal constraints,
* depending on the ABI. Return false if they don't.
@@ -354,7 +406,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
return 0;
}
-#else
+#else /* !CONFIG_PPC64: */
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index d75a25f..5bd8399 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -136,12 +136,25 @@ static u32 ppc64_stub_insns[] = {
0x4e800420 /* bctr */
};
+#ifdef CC_USING_MPROFILE_KERNEL
+/* In case of _mcount calls or dynamic ftracing, Do not save the
+ * current callee's TOC (in R2) again into the original caller's stack
+ * frame during this trampoline hop. The stack frame already holds
+ * that of the original caller. _mcount and ftrace_caller will take
+ * care of this TOC value themselves.
+ */
+#define SQUASH_TOC_SAVE_INSN(trampoline_addr) \
+ (((struct ppc64_stub_entry *)(trampoline_addr))->jump[2] = PPC_INST_NOP)
+#else
+#define SQUASH_TOC_SAVE_INSN(trampoline_addr)
+#endif
+
#ifdef CONFIG_DYNAMIC_FTRACE
static u32 ppc64_stub_mask[] = {
0xffff0000,
0xffff0000,
- 0xffffffff,
+ 0x00000000,
0xffffffff,
#if !defined(_CALL_ELF) || _CALL_ELF != 2
0xffffffff,
@@ -168,6 +181,9 @@ bool is_module_trampoline(u32 *p)
if ((insna & mask) != (insnb & mask))
return false;
}
+ if (insns[2] != ppc64_stub_insns[2] &&
+ insns[2] != PPC_INST_NOP)
+ return false;
return true;
}
@@ -630,6 +646,9 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
return -ENOENT;
if (!restore_r2((u32 *)location + 1, me))
return -ENOEXEC;
+ /* Squash the TOC saver for profiler calls */
+ if (!strcmp("_mcount", strtab+sym->st_name))
+ SQUASH_TOC_SAVE_INSN(value);
} else
value += local_entry_offset(sym);
@@ -717,6 +736,10 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
me->arch.tramp = stub_for_addr(sechdrs,
(unsigned long)ftrace_caller,
me);
+ /* ftrace_caller will take care of the TOC;
+ * do not clobber original caller's value.
+ */
+ SQUASH_TOC_SAVE_INSN(me->arch.tramp);
#endif
return 0;
--
1.8.5.6
^ permalink raw reply related [flat|nested] 26+ messages in thread
* [PATCH v7 03/10] ppc use ftrace_modify_all_code default
2016-02-04 14:29 [PATCH v7 00/10] ftrace with regs + live patching for ppc64 LE (ABI v2) Torsten Duwe
2016-01-25 15:26 ` [PATCH v7 01/10] ppc64 (le): prepare for -mprofile-kernel Torsten Duwe
2016-01-25 15:27 ` [PATCH v7 02/10] ppc64le FTRACE_WITH_REGS implementation Torsten Duwe
@ 2016-01-25 15:29 ` Torsten Duwe
2016-01-25 15:29 ` [PATCH v7 04/10] ppc64 ftrace_with_regs configuration variables Torsten Duwe
` (6 subsequent siblings)
9 siblings, 0 replies; 26+ messages in thread
From: Torsten Duwe @ 2016-01-25 15:29 UTC (permalink / raw)
To: Michael Ellerman
Cc: Jiri Kosina, Miroslav Benes, Petr Mladek, Jessica Yu,
Steven Rostedt, linuxppc-dev, linux-kernel, live-patching
Convert ppc's arch_ftrace_update_code from its own function copy
to use the generic default functionality (without stop_machine --
our instructions are properly aligned and the replacements atomic ;)
With this we gain error checking and the much-needed function_trace_op
handling.
Signed-off-by: Torsten Duwe <duwe@suse.de>
---
arch/powerpc/kernel/ftrace.c | 16 ++++------------
1 file changed, 4 insertions(+), 12 deletions(-)
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index 1fad1b3..ef8b916 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -531,20 +531,12 @@ void ftrace_replace_code(int enable)
}
}
+/* Use the default ftrace_modify_all_code, but without
+ * stop_machine().
+ */
void arch_ftrace_update_code(int command)
{
- if (command & FTRACE_UPDATE_CALLS)
- ftrace_replace_code(1);
- else if (command & FTRACE_DISABLE_CALLS)
- ftrace_replace_code(0);
-
- if (command & FTRACE_UPDATE_TRACE_FUNC)
- ftrace_update_ftrace_func(ftrace_trace_function);
-
- if (command & FTRACE_START_FUNC_RET)
- ftrace_enable_ftrace_graph_caller();
- else if (command & FTRACE_STOP_FUNC_RET)
- ftrace_disable_ftrace_graph_caller();
+ ftrace_modify_all_code(command);
}
int __init ftrace_dyn_arch_init(void)
--
1.8.5.6
^ permalink raw reply related [flat|nested] 26+ messages in thread
* [PATCH v7 04/10] ppc64 ftrace_with_regs configuration variables
2016-02-04 14:29 [PATCH v7 00/10] ftrace with regs + live patching for ppc64 LE (ABI v2) Torsten Duwe
` (2 preceding siblings ...)
2016-01-25 15:29 ` [PATCH v7 03/10] ppc use ftrace_modify_all_code default Torsten Duwe
@ 2016-01-25 15:29 ` Torsten Duwe
2016-02-05 14:05 ` Petr Mladek
2016-01-25 15:30 ` [PATCH v7 05/10] ppc64 ftrace_with_regs: spare early boot and low level Torsten Duwe
` (5 subsequent siblings)
9 siblings, 1 reply; 26+ messages in thread
From: Torsten Duwe @ 2016-01-25 15:29 UTC (permalink / raw)
To: Michael Ellerman
Cc: Jiri Kosina, Miroslav Benes, Petr Mladek, Jessica Yu,
Steven Rostedt, linuxppc-dev, linux-kernel, live-patching
* Makefile:
- globally use -mprofile-kernel in case it's configured,
available and bug-free.
* arch/powerpc/gcc-mprofile-kernel-notrace.sh:
- make sure -mprofile-kernel works and has none of the
known bugs.
* arch/powerpc/kernel/ftrace.c:
- error out on compile with HAVE_DYNAMIC_FTRACE_WITH_REGS
and a buggy compiler.
* arch/powerpc/Kconfig / kernel/trace/Kconfig:
- declare that ppc64le HAVE_MPROFILE_KERNEL and
HAVE_DYNAMIC_FTRACE_WITH_REGS, and use it.
Signed-off-by: Torsten Duwe <duwe@suse.de>
---
arch/powerpc/Kconfig | 2 ++
arch/powerpc/Makefile | 17 +++++++++++++++
arch/powerpc/gcc-mprofile-kernel-notrace.sh | 33 +++++++++++++++++++++++++++++
arch/powerpc/kernel/ftrace.c | 5 +++++
kernel/trace/Kconfig | 5 +++++
5 files changed, 62 insertions(+)
create mode 100755 arch/powerpc/gcc-mprofile-kernel-notrace.sh
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index e4824fd..e5f288c 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -94,8 +94,10 @@ config PPC
select OF_RESERVED_MEM
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_DYNAMIC_FTRACE
+ select HAVE_DYNAMIC_FTRACE_WITH_REGS if PPC64 && CPU_LITTLE_ENDIAN
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
+ select HAVE_MPROFILE_KERNEL if PPC64 && CPU_LITTLE_ENDIAN
select SYSCTL_EXCEPTION_TRACE
select ARCH_WANT_OPTIONAL_GPIOLIB
select VIRT_TO_BUS if !PPC64
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 96efd82..08a3952 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -133,6 +133,23 @@ else
CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=powerpc64
endif
+ifeq ($(CONFIG_PPC64),y)
+ifdef CONFIG_HAVE_MPROFILE_KERNEL
+
+ifdef CONFIG_DYNAMIC_FTRACE
+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/arch/powerpc/gcc-mprofile-kernel-notrace.sh $(CC) -I$(srctree)/include -D__KERNEL__), y)
+CC_USING_MPROFILE_KERNEL := -mprofile-kernel
+endif
+endif
+
+ifdef CC_USING_MPROFILE_KERNEL
+CC_FLAGS_FTRACE := -pg $(CC_USING_MPROFILE_KERNEL)
+KBUILD_CPPFLAGS += -DCC_USING_MPROFILE_KERNEL
+endif
+
+endif
+endif
+
CFLAGS-$(CONFIG_CELL_CPU) += $(call cc-option,-mcpu=cell)
CFLAGS-$(CONFIG_POWER4_CPU) += $(call cc-option,-mcpu=power4)
CFLAGS-$(CONFIG_POWER5_CPU) += $(call cc-option,-mcpu=power5)
diff --git a/arch/powerpc/gcc-mprofile-kernel-notrace.sh b/arch/powerpc/gcc-mprofile-kernel-notrace.sh
new file mode 100755
index 0000000..68d6482
--- /dev/null
+++ b/arch/powerpc/gcc-mprofile-kernel-notrace.sh
@@ -0,0 +1,33 @@
+#!/bin/sh
+# Test whether the compile option -mprofile-kernel
+# generates profiling code ( = a call to mcount), and
+# whether a function without any global references sets
+# the TOC pointer properly at the beginning, and
+# whether the "notrace" function attribute successfully
+# suppresses the _mcount call.
+
+echo "int func() { return 0; }" | \
+ $* -S -x c -O2 -p -mprofile-kernel - -o - 2> /dev/null | \
+ grep -q "mcount"
+
+trace_result=$?
+
+echo "int func() { return 0; }" | \
+ $* -S -x c -O2 -p -mprofile-kernel - -o - 2> /dev/null | \
+ sed -n -e '/func:/,/bl _mcount/p' | grep -q TOC
+
+leaf_toc_result=$?
+
+/bin/echo -e "#include <linux/compiler.h>\nnotrace int func() { return 0; }" | \
+ $* -S -x c -O2 -p -mprofile-kernel - -o - 2> /dev/null | \
+ grep -q "mcount"
+
+notrace_result=$?
+
+if [ "$trace_result" -eq "0" -a \
+ "$leaf_toc_result" -eq "0" -a \
+ "$notrace_result" -eq "1" ]; then
+ echo y
+else
+ echo n
+fi
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index ef8b916..29b7014 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -28,6 +28,11 @@
#ifdef CONFIG_DYNAMIC_FTRACE
+#if defined(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && defined(CONFIG_PPC64) && \
+ !defined(CC_USING_MPROFILE_KERNEL)
+#error "DYNAMIC_FTRACE_WITH_REGS requires working -mprofile-kernel"
+#endif
+
static unsigned int
ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index e45db6b..a138f6d 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -52,6 +52,11 @@ config HAVE_FENTRY
help
Arch supports the gcc options -pg with -mfentry
+config HAVE_MPROFILE_KERNEL
+ bool
+ help
+ Arch supports the gcc options -pg with -mprofile-kernel
+
config HAVE_C_RECORDMCOUNT
bool
help
--
1.8.5.6
^ permalink raw reply related [flat|nested] 26+ messages in thread
* [PATCH v7 05/10] ppc64 ftrace_with_regs: spare early boot and low level
2016-02-04 14:29 [PATCH v7 00/10] ftrace with regs + live patching for ppc64 LE (ABI v2) Torsten Duwe
` (3 preceding siblings ...)
2016-01-25 15:29 ` [PATCH v7 04/10] ppc64 ftrace_with_regs configuration variables Torsten Duwe
@ 2016-01-25 15:30 ` Torsten Duwe
2016-01-25 15:31 ` [PATCH v7 06/10] ppc64 ftrace: disable profiling for some functions Torsten Duwe
` (4 subsequent siblings)
9 siblings, 0 replies; 26+ messages in thread
From: Torsten Duwe @ 2016-01-25 15:30 UTC (permalink / raw)
To: Michael Ellerman
Cc: Jiri Kosina, Miroslav Benes, Petr Mladek, Jessica Yu,
Steven Rostedt, linuxppc-dev, linux-kernel, live-patching
Using -mprofile-kernel on early boot code not only confuses the
checker but is also useless, as the infrastructure is not yet in
place. Proceed as with -pg (remove it from CFLAGS), and likewise
for time.o and ftrace itself.
* arch/powerpc/kernel/Makefile:
- remove -mprofile-kernel from low level and boot code objects'
CFLAGS for FUNCTION_TRACER configurations.
Signed-off-by: Torsten Duwe <duwe@suse.de>
---
arch/powerpc/kernel/Makefile | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 794f22a..44667fd 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -16,14 +16,14 @@ endif
ifdef CONFIG_FUNCTION_TRACER
# Do not trace early boot code
-CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog
-CFLAGS_REMOVE_prom_init.o = -pg -mno-sched-epilog
-CFLAGS_REMOVE_btext.o = -pg -mno-sched-epilog
-CFLAGS_REMOVE_prom.o = -pg -mno-sched-epilog
+CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog -mprofile-kernel
+CFLAGS_REMOVE_prom_init.o = -pg -mno-sched-epilog -mprofile-kernel
+CFLAGS_REMOVE_btext.o = -pg -mno-sched-epilog -mprofile-kernel
+CFLAGS_REMOVE_prom.o = -pg -mno-sched-epilog -mprofile-kernel
# do not trace tracer code
-CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog
+CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog -mprofile-kernel
# timers used by tracing
-CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog
+CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog -mprofile-kernel
endif
obj-y := cputable.o ptrace.o syscalls.o \
--
1.8.5.6
^ permalink raw reply related [flat|nested] 26+ messages in thread
* [PATCH v7 06/10] ppc64 ftrace: disable profiling for some functions
2016-02-04 14:29 [PATCH v7 00/10] ftrace with regs + live patching for ppc64 LE (ABI v2) Torsten Duwe
` (4 preceding siblings ...)
2016-01-25 15:30 ` [PATCH v7 05/10] ppc64 ftrace_with_regs: spare early boot and low level Torsten Duwe
@ 2016-01-25 15:31 ` Torsten Duwe
2016-02-10 1:50 ` Michael Ellerman
2016-01-25 15:31 ` [PATCH v7 07/10] ppc64 ftrace: disable profiling for some files Torsten Duwe
` (3 subsequent siblings)
9 siblings, 1 reply; 26+ messages in thread
From: Torsten Duwe @ 2016-01-25 15:31 UTC (permalink / raw)
To: Michael Ellerman
Cc: Jiri Kosina, Miroslav Benes, Petr Mladek, Jessica Yu,
Steven Rostedt, linuxppc-dev, linux-kernel, live-patching
At least POWER7/8 have MMUs that don't completely autoload;
a normal, recoverable memory fault might pass through these functions.
If a dynamic tracer function causes such a fault, any of these functions
being traced with -mprofile-kernel may cause an endless recursion.
Signed-off-by: Torsten Duwe <duwe@suse.de>
---
arch/powerpc/kernel/process.c | 2 +-
arch/powerpc/mm/fault.c | 2 +-
arch/powerpc/mm/hash_utils_64.c | 18 +++++++++---------
arch/powerpc/mm/hugetlbpage-hash64.c | 2 +-
arch/powerpc/mm/hugetlbpage.c | 4 ++--
arch/powerpc/mm/mem.c | 2 +-
arch/powerpc/mm/pgtable_64.c | 2 +-
arch/powerpc/mm/slb.c | 6 +++---
arch/powerpc/mm/slice.c | 8 ++++----
9 files changed, 23 insertions(+), 23 deletions(-)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index dccc87e..5e72e8b 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -822,7 +822,7 @@ static inline void __switch_to_tm(struct task_struct *prev)
* don't know which of the checkpointed state and the transactional
* state to use.
*/
-void restore_tm_state(struct pt_regs *regs)
+notrace void restore_tm_state(struct pt_regs *regs)
{
unsigned long msr_diff;
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index a67c6d7..125be37 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -205,7 +205,7 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault)
* The return value is 0 if the fault was handled, or the signal
* number if this is a kernel fault that can't be handled here.
*/
-int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
+notrace int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
unsigned long error_code)
{
enum ctx_state prev_state = exception_enter();
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index ba59d59..01d3dee 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -825,7 +825,7 @@ void early_init_mmu_secondary(void)
/*
* Called by asm hashtable.S for doing lazy icache flush
*/
-unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
+notrace unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
{
struct page *page;
@@ -846,7 +846,7 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
}
#ifdef CONFIG_PPC_MM_SLICES
-static unsigned int get_paca_psize(unsigned long addr)
+static notrace unsigned int get_paca_psize(unsigned long addr)
{
u64 lpsizes;
unsigned char *hpsizes;
@@ -875,7 +875,7 @@ unsigned int get_paca_psize(unsigned long addr)
* For now this makes the whole process use 4k pages.
*/
#ifdef CONFIG_PPC_64K_PAGES
-void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
+notrace void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
{
if (get_slice_psize(mm, addr) == MMU_PAGE_4K)
return;
@@ -897,7 +897,7 @@ void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
* Result is 0: full permissions, _PAGE_RW: read-only,
* _PAGE_USER or _PAGE_USER|_PAGE_RW: no access.
*/
-static int subpage_protection(struct mm_struct *mm, unsigned long ea)
+static notrace int subpage_protection(struct mm_struct *mm, unsigned long ea)
{
struct subpage_prot_table *spt = &mm->context.spt;
u32 spp = 0;
@@ -945,7 +945,7 @@ void hash_failure_debug(unsigned long ea, unsigned long access,
trap, vsid, ssize, psize, lpsize, pte);
}
-static void check_paca_psize(unsigned long ea, struct mm_struct *mm,
+static notrace void check_paca_psize(unsigned long ea, struct mm_struct *mm,
int psize, bool user_region)
{
if (user_region) {
@@ -967,7 +967,7 @@ static void check_paca_psize(unsigned long ea, struct mm_struct *mm,
* -1 - critical hash insertion error
* -2 - access not permitted by subpage protection mechanism
*/
-int hash_page_mm(struct mm_struct *mm, unsigned long ea,
+notrace int hash_page_mm(struct mm_struct *mm, unsigned long ea,
unsigned long access, unsigned long trap,
unsigned long flags)
{
@@ -1165,7 +1165,7 @@ bail:
}
EXPORT_SYMBOL_GPL(hash_page_mm);
-int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
+notrace int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
unsigned long dsisr)
{
unsigned long flags = 0;
@@ -1296,7 +1296,7 @@ out_exit:
/* WARNING: This is called from hash_low_64.S, if you change this prototype,
* do not forget to update the assembly call site !
*/
-void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
+notrace void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
unsigned long flags)
{
unsigned long hash, index, shift, hidx, slot;
@@ -1444,7 +1444,7 @@ void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
exception_exit(prev_state);
}
-long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
+notrace long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
unsigned long pa, unsigned long rflags,
unsigned long vflags, int psize, int ssize)
{
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index e2138c7..17fc139 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -18,7 +18,7 @@ extern long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
unsigned long pa, unsigned long rlags,
unsigned long vflags, int psize, int ssize);
-int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
+notrace int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
pte_t *ptep, unsigned long trap, unsigned long flags,
int ssize, unsigned int shift, unsigned int mmu_psize)
{
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 744e24b..70dda66 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -870,7 +870,7 @@ static int __init hugetlbpage_init(void)
#endif
arch_initcall(hugetlbpage_init);
-void flush_dcache_icache_hugepage(struct page *page)
+notrace void flush_dcache_icache_hugepage(struct page *page)
{
int i;
void *start;
@@ -903,7 +903,7 @@ void flush_dcache_icache_hugepage(struct page *page)
* when we have MSR[EE] = 0 but the paca->soft_enabled = 1
*/
-pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
+notrace pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
bool *is_thp, unsigned *shift)
{
pgd_t pgd, *pgdp;
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 22d94c3..f690e8a 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -406,7 +406,7 @@ void flush_dcache_page(struct page *page)
}
EXPORT_SYMBOL(flush_dcache_page);
-void flush_dcache_icache_page(struct page *page)
+notrace void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
if (PageCompound(page)) {
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 3124a20..bb9041b 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -442,7 +442,7 @@ static void page_table_free_rcu(void *table)
}
}
-void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
+notrace void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
unsigned long pgf = (unsigned long)table;
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 825b687..852bd54 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -96,7 +96,7 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize,
: "memory" );
}
-static void __slb_flush_and_rebolt(void)
+static notrace void __slb_flush_and_rebolt(void)
{
/* If you change this make sure you change SLB_NUM_BOLTED
* and PR KVM appropriately too. */
@@ -136,7 +136,7 @@ static void __slb_flush_and_rebolt(void)
: "memory");
}
-void slb_flush_and_rebolt(void)
+notrace void slb_flush_and_rebolt(void)
{
WARN_ON(!irqs_disabled());
@@ -151,7 +151,7 @@ void slb_flush_and_rebolt(void)
get_paca()->slb_cache_ptr = 0;
}
-void slb_vmalloc_update(void)
+notrace void slb_vmalloc_update(void)
{
unsigned long vflags;
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 42954f0..5fb0e5b 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -76,8 +76,8 @@ static void slice_print_mask(const char *label, struct slice_mask mask) {}
#endif
-static struct slice_mask slice_range_to_mask(unsigned long start,
- unsigned long len)
+static notrace struct slice_mask slice_range_to_mask(unsigned long start,
+ unsigned long len)
{
unsigned long end = start + len - 1;
struct slice_mask ret = { 0, 0 };
@@ -563,7 +563,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
current->mm->context.user_psize, 1);
}
-unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
+notrace unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
unsigned char *hpsizes;
int index, mask_index;
@@ -644,7 +644,7 @@ void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
spin_unlock_irqrestore(&slice_convert_lock, flags);
}
-void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
+notrace void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
unsigned long len, unsigned int psize)
{
struct slice_mask mask = slice_range_to_mask(start, len);
--
1.8.5.6
^ permalink raw reply related [flat|nested] 26+ messages in thread
* [PATCH v7 07/10] ppc64 ftrace: disable profiling for some files
2016-02-04 14:29 [PATCH v7 00/10] ftrace with regs + live patching for ppc64 LE (ABI v2) Torsten Duwe
` (5 preceding siblings ...)
2016-01-25 15:31 ` [PATCH v7 06/10] ppc64 ftrace: disable profiling for some functions Torsten Duwe
@ 2016-01-25 15:31 ` Torsten Duwe
2016-02-10 0:33 ` Michael Ellerman
2016-01-25 15:33 ` [PATCH v7 08/10] Implement kernel live patching for ppc64le (ABIv2) Torsten Duwe
` (2 subsequent siblings)
9 siblings, 1 reply; 26+ messages in thread
From: Torsten Duwe @ 2016-01-25 15:31 UTC (permalink / raw)
To: Michael Ellerman
Cc: Jiri Kosina, Miroslav Benes, Petr Mladek, Jessica Yu,
Steven Rostedt, linuxppc-dev, linux-kernel, live-patching
This patch complements the "notrace" attribute for selected functions.
It adds -mprofile-kernel to the cc flags to be stripped from the command
line for code-patching.o and feature-fixups.o, in addition to "-pg".
Signed-off-by: Torsten Duwe <duwe@suse.de>
---
arch/powerpc/lib/Makefile | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index a47e142..98e22b2 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -6,8 +6,8 @@ subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
-CFLAGS_REMOVE_code-patching.o = -pg
-CFLAGS_REMOVE_feature-fixups.o = -pg
+CFLAGS_REMOVE_code-patching.o = -pg -mprofile-kernel
+CFLAGS_REMOVE_feature-fixups.o = -pg -mprofile-kernel
obj-y += string.o alloc.o crtsavres.o ppc_ksyms.o code-patching.o \
feature-fixups.o
--
1.8.5.6
^ permalink raw reply related [flat|nested] 26+ messages in thread
* [PATCH v7 08/10] Implement kernel live patching for ppc64le (ABIv2)
2016-02-04 14:29 [PATCH v7 00/10] ftrace with regs + live patching for ppc64 LE (ABI v2) Torsten Duwe
` (6 preceding siblings ...)
2016-01-25 15:31 ` [PATCH v7 07/10] ppc64 ftrace: disable profiling for some files Torsten Duwe
@ 2016-01-25 15:33 ` Torsten Duwe
2016-01-25 15:33 ` [PATCH v7 09/10] Enable LIVEPATCH to be configured on ppc64le and add livepatch.o if it is selected Torsten Duwe
2016-01-28 15:32 ` [PATCH v7 10/10] livepatch: Detect offset for the ftrace location during build Petr Mladek
9 siblings, 0 replies; 26+ messages in thread
From: Torsten Duwe @ 2016-01-25 15:33 UTC (permalink / raw)
To: Michael Ellerman
Cc: Jiri Kosina, Miroslav Benes, Petr Mladek, Jessica Yu,
Steven Rostedt, linuxppc-dev, linux-kernel, live-patching
* create the appropriate files+functions
arch/powerpc/include/asm/livepatch.h
klp_check_compiler_support,
klp_arch_set_pc
arch/powerpc/kernel/livepatch.c with a stub for
klp_write_module_reloc
This is architecture-independent work in progress.
* introduce a fixup in arch/powerpc/kernel/entry_64.S
for local calls that are becoming global due to live patching.
And of course do the main KLP thing: return to a maybe different
address, possibly altered by the live patching ftrace op.
Signed-off-by: Torsten Duwe <duwe@suse.de>
---
arch/powerpc/include/asm/livepatch.h | 45 +++++++++++++++++++++++++++++++
arch/powerpc/kernel/entry_64.S | 51 +++++++++++++++++++++++++++++++++---
arch/powerpc/kernel/livepatch.c | 38 +++++++++++++++++++++++++++
3 files changed, 130 insertions(+), 4 deletions(-)
create mode 100644 arch/powerpc/include/asm/livepatch.h
create mode 100644 arch/powerpc/kernel/livepatch.c
diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h
new file mode 100644
index 0000000..44e8a2d
--- /dev/null
+++ b/arch/powerpc/include/asm/livepatch.h
@@ -0,0 +1,45 @@
+/*
+ * livepatch.h - powerpc-specific Kernel Live Patching Core
+ *
+ * Copyright (C) 2015 SUSE
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _ASM_POWERPC64_LIVEPATCH_H
+#define _ASM_POWERPC64_LIVEPATCH_H
+
+#include <linux/module.h>
+#include <linux/ftrace.h>
+
+#ifdef CONFIG_LIVEPATCH
+static inline int klp_check_compiler_support(void)
+{
+#if !defined(_CALL_ELF) || _CALL_ELF != 2 || !defined(CC_USING_MPROFILE_KERNEL)
+ return 1;
+#endif
+ return 0;
+}
+
+extern int klp_write_module_reloc(struct module *mod, unsigned long type,
+ unsigned long loc, unsigned long value);
+
+static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+ regs->nip = ip;
+}
+#else
+#error Live patching support is disabled; check CONFIG_LIVEPATCH
+#endif
+
+#endif /* _ASM_POWERPC64_LIVEPATCH_H */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index c063564..52c7a15 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -1202,6 +1202,9 @@ _GLOBAL(ftrace_caller)
mflr r3
std r3, _NIP(r1)
std r3, 16(r1)
+#ifdef CONFIG_LIVEPATCH
+ mr r14,r3 /* remember old NIP */
+#endif
subi r3, r3, MCOUNT_INSN_SIZE
mfmsr r4
std r4, _MSR(r1)
@@ -1218,7 +1221,10 @@ ftrace_call:
nop
ld r3, _NIP(r1)
- mtlr r3
+ mtctr r3 /* prepare to jump there */
+#ifdef CONFIG_LIVEPATCH
+ cmpd r14,r3 /* has NIP been altered? */
+#endif
REST_8GPRS(0,r1)
REST_8GPRS(8,r1)
@@ -1231,6 +1237,27 @@ ftrace_call:
mtlr r12
mr r2,r0 /* restore callee's TOC */
+#ifdef CONFIG_LIVEPATCH
+ beq+ 4f /* likely(old_NIP == new_NIP) */
+
+ /* For a local call, restore this TOC after calling the patch function.
+ * For a global call, it does not matter what we restore here,
+ * since the global caller does its own restore right afterwards,
+ * anyway. Just insert a KLP_return_helper frame in any case,
+ * so a patch function can always count on the changed stack offsets.
+ */
+ stdu r1,-32(r1) /* open new mini stack frame */
+ std r0,24(r1) /* save TOC now, unconditionally. */
+ bl 5f
+5: mflr r12
+ addi r12,r12,(KLP_return_helper+4-.)@l
+ std r12,LRSAVE(r1)
+ mtlr r12
+ mfctr r12 /* allow for TOC calculation in newfunc */
+ bctr
+4:
+#endif
+
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
stdu r1, -112(r1)
.globl ftrace_graph_call
@@ -1240,15 +1267,31 @@ _GLOBAL(ftrace_graph_stub)
addi r1, r1, 112
#endif
- mflr r0 /* move this LR to CTR */
- mtctr r0
-
ld r0,LRSAVE(r1) /* restore callee's lr at _mcount site */
mtlr r0
bctr /* jump after _mcount site */
#endif /* CC_USING_MPROFILE_KERNEL */
_GLOBAL(ftrace_stub)
blr
+
+#ifdef CONFIG_LIVEPATCH
+/* Helper function for local calls that are becoming global
+ due to live patching.
+ We can't simply patch the NOP after the original call,
+ because, depending on the consistency model, some kernel
+ threads may still have called the original, local function
+ *without* saving their TOC in the respective stack frame slot,
+ so the decision is made per-thread during function return by
+ maybe inserting a KLP_return_helper frame or not.
+*/
+KLP_return_helper:
+ ld r2,24(r1) /* restore TOC (saved by ftrace_caller) */
+ addi r1, r1, 32 /* destroy mini stack frame */
+ ld r0,LRSAVE(r1) /* get the real return address */
+ mtlr r0
+ blr
+#endif
+
#else
_GLOBAL_TOC(_mcount)
/* Taken from output of objdump from lib64/glibc */
diff --git a/arch/powerpc/kernel/livepatch.c b/arch/powerpc/kernel/livepatch.c
new file mode 100644
index 0000000..564eafa
--- /dev/null
+++ b/arch/powerpc/kernel/livepatch.c
@@ -0,0 +1,38 @@
+/*
+ * livepatch.c - powerpc-specific Kernel Live Patching Core
+ *
+ * Copyright (C) 2015 SUSE
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/module.h>
+#include <asm/livepatch.h>
+
+/**
+ * klp_write_module_reloc() - write a relocation in a module
+ * @mod: module in which the section to be modified is found
+ * @type: ELF relocation type (see asm/elf.h)
+ * @loc: address that the relocation should be written to
+ * @value: relocation value (sym address + addend)
+ *
+ * This function writes a relocation to the specified location for
+ * a particular module.
+ */
+int klp_write_module_reloc(struct module *mod, unsigned long type,
+ unsigned long loc, unsigned long value)
+{
+ /* This requires infrastructure changes; we need the loadinfos. */
+ pr_err("klp_write_module_reloc not yet supported\n");
+ return -ENOSYS;
+}
--
1.8.5.6
^ permalink raw reply related [flat|nested] 26+ messages in thread
* [PATCH v7 09/10] Enable LIVEPATCH to be configured on ppc64le and add livepatch.o if it is selected.
2016-02-04 14:29 [PATCH v7 00/10] ftrace with regs + live patching for ppc64 LE (ABI v2) Torsten Duwe
` (7 preceding siblings ...)
2016-01-25 15:33 ` [PATCH v7 08/10] Implement kernel live patching for ppc64le (ABIv2) Torsten Duwe
@ 2016-01-25 15:33 ` Torsten Duwe
2016-01-28 15:32 ` [PATCH v7 10/10] livepatch: Detect offset for the ftrace location during build Petr Mladek
9 siblings, 0 replies; 26+ messages in thread
From: Torsten Duwe @ 2016-01-25 15:33 UTC (permalink / raw)
To: Michael Ellerman
Cc: Jiri Kosina, Miroslav Benes, Petr Mladek, Jessica Yu,
Steven Rostedt, linuxppc-dev, linux-kernel, live-patching
Signed-off-by: Torsten Duwe <duwe@suse.de>
---
arch/powerpc/Kconfig | 3 +++
arch/powerpc/kernel/Makefile | 1 +
2 files changed, 4 insertions(+)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index e5f288c..8c7a327 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -160,6 +160,7 @@ config PPC
select ARCH_HAS_DEVMEM_IS_ALLOWED
select HAVE_ARCH_SECCOMP_FILTER
select ARCH_HAS_UBSAN_SANITIZE_ALL
+ select HAVE_LIVEPATCH if PPC64 && CPU_LITTLE_ENDIAN
config GENERIC_CSUM
def_bool CPU_LITTLE_ENDIAN
@@ -1093,3 +1094,5 @@ config PPC_LIB_RHEAP
bool
source "arch/powerpc/kvm/Kconfig"
+
+source "kernel/livepatch/Kconfig"
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 44667fd..405efce 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -119,6 +119,7 @@ obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
obj-$(CONFIG_TRACING) += trace_clock.o
+obj-$(CONFIG_LIVEPATCH) += livepatch.o
ifneq ($(CONFIG_PPC_INDIRECT_PIO),y)
obj-y += iomap.o
--
1.8.5.6
^ permalink raw reply related [flat|nested] 26+ messages in thread
* [PATCH v7 10/10] livepatch: Detect offset for the ftrace location during build
2016-02-04 14:29 [PATCH v7 00/10] ftrace with regs + live patching for ppc64 LE (ABI v2) Torsten Duwe
` (8 preceding siblings ...)
2016-01-25 15:33 ` [PATCH v7 09/10] Enable LIVEPATCH to be configured on ppc64le and add livepatch.o if it is selected Torsten Duwe
@ 2016-01-28 15:32 ` Petr Mladek
9 siblings, 0 replies; 26+ messages in thread
From: Petr Mladek @ 2016-01-28 15:32 UTC (permalink / raw)
To: Michael Ellerman
Cc: Jiri Kosina, Miroslav Benes, Petr Mladek, Jessica Yu,
Steven Rostedt, linuxppc-dev, linux-kernel, live-patching
Livepatch works on x86_64 and s390 only when the ftrace call
is at the very beginning of the function. But PPC is different.
We need to handle TOC and save LR there before calling the
global ftrace handler.
Now, the problem is that the extra operations have different
length on PPC depending on the used gcc version. It is
4 instructions (16 bytes) before gcc-6 and only 3 instructions
(12 bytes) with gcc-6.
This patch tries to detect the offset a generic way during
build. It assumes that the offset of the ftrace location
is the same for all functions. It modifies the existing
recordmcount tool that is able to read mcount locations
directly from the object files. It adds an option -p
to print the first found offset.
The recordmcount tool is then used in the kernel/livepatch
subdirectory to generate a header file. It defines
a constant that is used to compute the ftrace location
from the function address.
Finally, we have to enable the C implementation of the
recordmcount tool to be used on PPC and S390. It seems
to work fine there. It should be more reliable because
it reads the standardized elf structures. The old perl
implementation uses rather complex regular expressions
to parse objdump output and is therefore much more tricky.
Signed-off-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Torsten Duwe <duwe@suse.de>
---
arch/powerpc/Kconfig | 1 +
arch/s390/Kconfig | 1 +
kernel/livepatch/Makefile | 13 +++++++++++++
kernel/livepatch/core.c | 12 +++++++++---
kernel/livepatch/ftrace-test.c | 6 ++++++
scripts/recordmcount.c | 6 +++++-
scripts/recordmcount.h | 17 +++++++++++++++--
7 files changed, 50 insertions(+), 6 deletions(-)
create mode 100644 kernel/livepatch/ftrace-test.c
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 8c7a327..a546829 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -93,6 +93,7 @@ config PPC
select OF_EARLY_FLATTREE
select OF_RESERVED_MEM
select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_C_RECORDMCOUNT
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS if PPC64 && CPU_LITTLE_ENDIAN
select HAVE_FUNCTION_TRACER
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 3be9c83..c574bc4 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -121,6 +121,7 @@ config S390
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select HAVE_BPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES
+ select HAVE_C_RECORDMCOUNT
select HAVE_CMPXCHG_DOUBLE
select HAVE_CMPXCHG_LOCAL
select HAVE_DEBUG_KMEMLEAK
diff --git a/kernel/livepatch/Makefile b/kernel/livepatch/Makefile
index e8780c0..65a44b6 100644
--- a/kernel/livepatch/Makefile
+++ b/kernel/livepatch/Makefile
@@ -1,3 +1,16 @@
obj-$(CONFIG_LIVEPATCH) += livepatch.o
livepatch-objs := core.o
+
+always := $(hostprogs-y) ftrace-test.o
+
+# dependencies on generated files need to be listed explicitly
+$(obj)/core.o: $(obj)/livepatch-ftrace.h
+
+quiet_cmd_livepatch-rmcount = RMCOUNT $@
+ cmd_livepatch-rmcount = $(objtree)/scripts/recordmcount -p $< > $@
+
+$(obj)/livepatch-ftrace.h: $(obj)/ftrace-test.o $(objtree)/scripts/recordmcount
+ $(call if_changed,livepatch-rmcount)
+
+targets += livepatch-ftrace.h
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index bc2c85c..864d589 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -30,6 +30,8 @@
#include <linux/livepatch.h>
#include <asm/cacheflush.h>
+#include "livepatch-ftrace.h"
+
/**
* struct klp_ops - structure for tracking registered ftrace ops structs
*
@@ -312,8 +314,10 @@ static void klp_disable_func(struct klp_func *func)
return;
if (list_is_singular(&ops->func_stack)) {
+ unsigned long ftrace_loc = func->old_addr + KLP_FTRACE_LOCATION;
+
WARN_ON(unregister_ftrace_function(&ops->fops));
- WARN_ON(ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0));
+ WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));
list_del_rcu(&func->stack_node);
list_del(&ops->node);
@@ -338,6 +342,8 @@ static int klp_enable_func(struct klp_func *func)
ops = klp_find_ops(func->old_addr);
if (!ops) {
+ unsigned long ftrace_loc = func->old_addr + KLP_FTRACE_LOCATION;
+
ops = kzalloc(sizeof(*ops), GFP_KERNEL);
if (!ops)
return -ENOMEM;
@@ -352,7 +358,7 @@ static int klp_enable_func(struct klp_func *func)
INIT_LIST_HEAD(&ops->func_stack);
list_add_rcu(&func->stack_node, &ops->func_stack);
- ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 0, 0);
+ ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
if (ret) {
pr_err("failed to set ftrace filter for function '%s' (%d)\n",
func->old_name, ret);
@@ -363,7 +369,7 @@ static int klp_enable_func(struct klp_func *func)
if (ret) {
pr_err("failed to register ftrace handler for function '%s' (%d)\n",
func->old_name, ret);
- ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0);
+ ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
goto err;
}
diff --git a/kernel/livepatch/ftrace-test.c b/kernel/livepatch/ftrace-test.c
new file mode 100644
index 0000000..22f0c54
--- /dev/null
+++ b/kernel/livepatch/ftrace-test.c
@@ -0,0 +1,6 @@
+/* Sample code to figure out mcount location offset */
+
+int test(int a)
+{
+ return ++a;
+}
diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
index e167592..e351b2f 100644
--- a/scripts/recordmcount.c
+++ b/scripts/recordmcount.c
@@ -53,6 +53,7 @@ static struct stat sb; /* Remember .st_size, etc. */
static jmp_buf jmpenv; /* setjmp/longjmp per-file error escape */
static const char *altmcount; /* alternate mcount symbol name */
static int warn_on_notrace_sect; /* warn when section has mcount not being recorded */
+static int print_mcount_offset; /* print offset of the first mcount location */
static void *file_map; /* pointer of the mapped file */
static void *file_end; /* pointer to the end of the mapped file */
static int file_updated; /* flag to state file was changed */
@@ -539,11 +540,14 @@ main(int argc, char *argv[])
int c;
int i;
- while ((c = getopt(argc, argv, "w")) >= 0) {
+ while ((c = getopt(argc, argv, "wp")) >= 0) {
switch (c) {
case 'w':
warn_on_notrace_sect = 1;
break;
+ case 'p':
+ print_mcount_offset = 1;
+ break;
default:
fprintf(stderr, "usage: recordmcount [-w] file.o...\n");
return 0;
diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
index b9897e2..a677a5a 100644
--- a/scripts/recordmcount.h
+++ b/scripts/recordmcount.h
@@ -47,6 +47,7 @@
#undef fn_ELF_R_SYM
#undef fn_ELF_R_INFO
#undef uint_t
+#undef uint_t_format
#undef _w
#undef _align
#undef _size
@@ -81,6 +82,7 @@
# define fn_ELF_R_SYM fn_ELF64_R_SYM
# define fn_ELF_R_INFO fn_ELF64_R_INFO
# define uint_t uint64_t
+# define uint_t_format "%lu"
# define _w w8
# define _align 7u
# define _size 8
@@ -114,6 +116,7 @@
# define fn_ELF_R_SYM fn_ELF32_R_SYM
# define fn_ELF_R_INFO fn_ELF32_R_INFO
# define uint_t uint32_t
+# define uint_t_format "%u"
# define _w w
# define _align 3u
# define _size 4
@@ -338,7 +341,14 @@ static uint_t *sift_rel_mcount(uint_t *mlocp,
} else
*mlocp++ = addend;
+ if (print_mcount_offset) {
+ printf("#define KLP_FTRACE_LOCATION " uint_t_format "\n",
+ addend);
+ succeed_file();
+ }
+
mrelp = (Elf_Rel *)(rel_entsize + (void *)mrelp);
+
}
relp = (Elf_Rel const *)(rel_entsize + (void *)relp);
}
@@ -458,7 +468,8 @@ __has_rel_mcount(Elf_Shdr const *const relhdr, /* is SHT_REL or SHT_RELA */
Elf_Shdr const *const txthdr = &shdr0[w(relhdr->sh_info)];
char const *const txtname = &shstrtab[w(txthdr->sh_name)];
- if (strcmp("__mcount_loc", txtname) == 0) {
+ /* Allow to print the mcount offset for an already modified file. */
+ if (strcmp("__mcount_loc", txtname) == 0 && !print_mcount_offset) {
fprintf(stderr, "warning: __mcount_loc already exists: %s\n",
fname);
succeed_file();
@@ -546,7 +557,9 @@ do_func(Elf_Ehdr *const ehdr, char const *const fname, unsigned const reltype)
nop_mcount(relhdr, ehdr, txtname);
}
}
- if (mloc0 != mlocp) {
+
+ /* The file is not modified when the offset is just printed. */
+ if (mloc0 != mlocp && !print_mcount_offset) {
append_func(ehdr, shstr, mloc0, mlocp, mrel0, mrelp,
rel_entsize, symsec_sh_link);
}
--
1.8.5.6
^ permalink raw reply related [flat|nested] 26+ messages in thread
* [PATCH v7 00/10] ftrace with regs + live patching for ppc64 LE (ABI v2)
@ 2016-02-04 14:29 Torsten Duwe
2016-01-25 15:26 ` [PATCH v7 01/10] ppc64 (le): prepare for -mprofile-kernel Torsten Duwe
` (9 more replies)
0 siblings, 10 replies; 26+ messages in thread
From: Torsten Duwe @ 2016-02-04 14:29 UTC (permalink / raw)
To: Michael Ellerman
Cc: Jiri Kosina, Miroslav Benes, Petr Mladek, Jessica Yu,
Steven Rostedt, linuxppc-dev, linux-kernel, live-patching
Changes since v6:
* include Petr's patch, on popular demand ;)
* move #ifdefs out of functions for readability;
introduce static helper functions instead.
* No more literal binary instructions in hex,
at least not added by this patch set.
* add compile time checker to detect the presence
of known-good -mprofile-kernel support.
* limit Kconfig / compile to the configurations really supported:
+ (static) FTRACE with -pg
+ DYNAMIC_FTRACE with -pg with or without -mprofile-kernel
(depending on the compiler)
+ DYNAMIC_FTRACE_WITH_REGS only with -mprofile-kernel
(will error out if the compiler is broken)
Changes since v5:
* extra "std r0,LRSAVE(r1)" for gcc-6
This makes the code compiler-agnostic.
* Follow Petr Mladek's suggestion to avoid
redefinition of HAVE_LIVEPATCH
Changes since v4:
* change comment style in entry_64.S to C89
(nobody is using assembler syntax comments there).
* the bool function restore_r2 shouldn't return 2,
that's a little confusing.
* Test whether the compiler supports -mprofile-kernel
and only then define CC_USING_MPROFILE_KERNEL
* also make the return value of klp_check_compiler_support
depend on that.
Major changes since v3:
* the graph tracer works now.
It turned out the stack frame it tried to manipulate does not
exist at that point.
* changes only needed in order to support -mprofile-kernel are now
in a separate patch, prepended.
* Kconfig cleanup so this is only selectable on ppc64le.
Petr Mladek (1):
livepatch: Detect offset for the ftrace location during build
Torsten Duwe (9):
ppc64 (le): prepare for -mprofile-kernel
ppc64le FTRACE_WITH_REGS implementation
ppc use ftrace_modify_all_code default
ppc64 ftrace_with_regs configuration variables
ppc64 ftrace_with_regs: spare early boot and low level
ppc64 ftrace: disable profiling for some functions
ppc64 ftrace: disable profiling for some files
Implement kernel live patching for ppc64le (ABIv2)
Enable LIVEPATCH to be configured on ppc64le and add livepatch.o if it
is selected.
arch/powerpc/Kconfig | 6 +
arch/powerpc/Makefile | 17 +++
arch/powerpc/gcc-mprofile-kernel-notrace.sh | 33 ++++++
arch/powerpc/include/asm/code-patching.h | 24 ++++
arch/powerpc/include/asm/ftrace.h | 5 +
arch/powerpc/include/asm/livepatch.h | 45 ++++++++
arch/powerpc/kernel/Makefile | 13 ++-
arch/powerpc/kernel/entry_64.S | 169 +++++++++++++++++++++++++++-
arch/powerpc/kernel/ftrace.c | 129 ++++++++++++++++-----
arch/powerpc/kernel/livepatch.c | 38 +++++++
arch/powerpc/kernel/module_64.c | 56 ++++++++-
arch/powerpc/kernel/process.c | 2 +-
arch/powerpc/lib/Makefile | 4 +-
arch/powerpc/mm/fault.c | 2 +-
arch/powerpc/mm/hash_utils_64.c | 18 +--
arch/powerpc/mm/hugetlbpage-hash64.c | 2 +-
arch/powerpc/mm/hugetlbpage.c | 4 +-
arch/powerpc/mm/mem.c | 2 +-
arch/powerpc/mm/pgtable_64.c | 2 +-
arch/powerpc/mm/slb.c | 6 +-
arch/powerpc/mm/slice.c | 8 +-
arch/s390/Kconfig | 1 +
kernel/livepatch/Makefile | 13 +++
kernel/livepatch/core.c | 12 +-
kernel/livepatch/ftrace-test.c | 6 +
kernel/trace/Kconfig | 5 +
scripts/recordmcount.c | 6 +-
scripts/recordmcount.h | 17 ++-
28 files changed, 575 insertions(+), 70 deletions(-)
create mode 100755 arch/powerpc/gcc-mprofile-kernel-notrace.sh
create mode 100644 arch/powerpc/include/asm/livepatch.h
create mode 100644 arch/powerpc/kernel/livepatch.c
create mode 100644 kernel/livepatch/ftrace-test.c
--
1.8.5.6
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH v7 04/10] ppc64 ftrace_with_regs configuration variables
2016-01-25 15:29 ` [PATCH v7 04/10] ppc64 ftrace_with_regs configuration variables Torsten Duwe
@ 2016-02-05 14:05 ` Petr Mladek
2016-02-05 14:48 ` Steven Rostedt
0 siblings, 1 reply; 26+ messages in thread
From: Petr Mladek @ 2016-02-05 14:05 UTC (permalink / raw)
To: Torsten Duwe
Cc: Michael Ellerman, Jiri Kosina, Miroslav Benes, Jessica Yu,
Steven Rostedt, linuxppc-dev, linux-kernel, live-patching
On Mon 2016-01-25 16:29:54, Torsten Duwe wrote:
> diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
> index ef8b916..29b7014 100644
> --- a/arch/powerpc/kernel/ftrace.c
> +++ b/arch/powerpc/kernel/ftrace.c
> @@ -28,6 +28,11 @@
>
>
> #ifdef CONFIG_DYNAMIC_FTRACE
> +#if defined(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && defined(CONFIG_PPC64) && \
> + !defined(CC_USING_MPROFILE_KERNEL)
> +#error "DYNAMIC_FTRACE_WITH_REGS requires working -mprofile-kernel"
> +#endif
CONFIG_DYNAMIC_FTRACE_WITH_REGS is automatically enabled when
both CONFIG_DYNAMIC_FTRACE and HAVE_DYNAMIC_FTRACE_WITH_REGS
are enabled.
Therefore it is not possible to build kernel with broken gcc
and DYNAMIC_FTRACE.
IMHO, we need to allow to explicitely disable DYNAMIC_FTRACE_WITH_REGS
if the compiler is broken to get around this build error.
We either need to define DYNAMIC_FTRACE_WITH_REGS as a proper
bool with description, help text, ... Or we need a way to
explicitely disable HAVE_DYNAMIC_FTRACE_WITH_REGS. Or something
like this.
Best Regards,
Petr
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH v7 04/10] ppc64 ftrace_with_regs configuration variables
2016-02-05 14:05 ` Petr Mladek
@ 2016-02-05 14:48 ` Steven Rostedt
2016-02-05 16:18 ` Petr Mladek
0 siblings, 1 reply; 26+ messages in thread
From: Steven Rostedt @ 2016-02-05 14:48 UTC (permalink / raw)
To: Petr Mladek
Cc: Torsten Duwe, Michael Ellerman, Jiri Kosina, Miroslav Benes,
Jessica Yu, linuxppc-dev, linux-kernel, live-patching
On Fri, 5 Feb 2016 15:05:17 +0100
Petr Mladek <pmladek@suse.com> wrote:
> On Mon 2016-01-25 16:29:54, Torsten Duwe wrote:
> > diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
> > index ef8b916..29b7014 100644
> > --- a/arch/powerpc/kernel/ftrace.c
> > +++ b/arch/powerpc/kernel/ftrace.c
> > @@ -28,6 +28,11 @@
> >
> >
> > #ifdef CONFIG_DYNAMIC_FTRACE
> > +#if defined(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && defined(CONFIG_PPC64) && \
> > + !defined(CC_USING_MPROFILE_KERNEL)
> > +#error "DYNAMIC_FTRACE_WITH_REGS requires working -mprofile-kernel"
> > +#endif
>
> CONFIG_DYNAMIC_FTRACE_WITH_REGS is automatically enabled when
> both CONFIG_DYNAMIC_FTRACE and HAVE_DYNAMIC_FTRACE_WITH_REGS
> are enabled.
>
> Therefore it is not possible to build kernel with broken gcc
> and DYNAMIC_FTRACE.
>
> IMHO, we need to allow to explicitely disable DYNAMIC_FTRACE_WITH_REGS
> if the compiler is broken to get around this build error.
>
> We either need to define DYNAMIC_FTRACE_WITH_REGS as a proper
> bool with description, help text, ... Or we need a way to
> explicitely disable HAVE_DYNAMIC_FTRACE_WITH_REGS. Or something
> like this.
>
You mean something like this?
(not tested)
-- Steve
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index e45db6b0d878..19377bacebfc 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -470,6 +470,19 @@ config DYNAMIC_FTRACE_WITH_REGS
def_bool y
depends on DYNAMIC_FTRACE
depends on HAVE_DYNAMIC_FTRACE_WITH_REGS
+ depends on !DISABLE_DYNAMIC_FTRACE_WITH_REGS
+
+config DISABLE_DYNAMIC_FTRACE_WITH_REGS
+ bool "Force build to not have function tracer pass in registers"
+ depends on DYNAMIC_FTRACE
+ depends on HAVE_DYNAMIC_FTRACE_WITH_REGS
+ help
+ If the architecture supports passing in registers to the function
+ tracer, then that is automatically enabled. But there may be some
+ compilers out there that are broken and cause this to fail.
+ This option makes the build think that the architecture does not
+ support the register passing and allows the build to work even
+ with compilers that do not support the feature.g
config FUNCTION_PROFILER
bool "Kernel function profiler"
^ permalink raw reply related [flat|nested] 26+ messages in thread
* Re: [PATCH v7 04/10] ppc64 ftrace_with_regs configuration variables
2016-02-05 14:48 ` Steven Rostedt
@ 2016-02-05 16:18 ` Petr Mladek
2016-02-05 16:30 ` Steven Rostedt
2016-02-06 10:32 ` Torsten Duwe
0 siblings, 2 replies; 26+ messages in thread
From: Petr Mladek @ 2016-02-05 16:18 UTC (permalink / raw)
To: Steven Rostedt
Cc: Torsten Duwe, Michael Ellerman, Jiri Kosina, Miroslav Benes,
Jessica Yu, linuxppc-dev, linux-kernel, live-patching
On Fri 2016-02-05 09:48:03, Steven Rostedt wrote:
> On Fri, 5 Feb 2016 15:05:17 +0100
> Petr Mladek <pmladek@suse.com> wrote:
>
> > On Mon 2016-01-25 16:29:54, Torsten Duwe wrote:
> > > diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
> > > index ef8b916..29b7014 100644
> > > --- a/arch/powerpc/kernel/ftrace.c
> > > +++ b/arch/powerpc/kernel/ftrace.c
> > > @@ -28,6 +28,11 @@
> > >
> > >
> > > #ifdef CONFIG_DYNAMIC_FTRACE
> > > +#if defined(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && defined(CONFIG_PPC64) && \
> > > + !defined(CC_USING_MPROFILE_KERNEL)
> > > +#error "DYNAMIC_FTRACE_WITH_REGS requires working -mprofile-kernel"
> > > +#endif
> >
> > CONFIG_DYNAMIC_FTRACE_WITH_REGS is automatically enabled when
> > both CONFIG_DYNAMIC_FTRACE and HAVE_DYNAMIC_FTRACE_WITH_REGS
> > are enabled.
> >
> > Therefore it is not possible to build kernel with broken gcc
> > and DYNAMIC_FTRACE.
> >
> > IMHO, we need to allow to explicitly disable DYNAMIC_FTRACE_WITH_REGS
> > if the compiler is broken to get around this build error.
> >
> > We either need to define DYNAMIC_FTRACE_WITH_REGS as a proper
> > bool with description, help text, ... Or we need a way to
> > explicitly disable HAVE_DYNAMIC_FTRACE_WITH_REGS. Or something
> > like this.
> >
>
> You mean something like this?
>
> (not tested)
>
> -- Steve
>
> diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
> index e45db6b0d878..19377bacebfc 100644
> --- a/kernel/trace/Kconfig
> +++ b/kernel/trace/Kconfig
> @@ -470,6 +470,19 @@ config DYNAMIC_FTRACE_WITH_REGS
> def_bool y
> depends on DYNAMIC_FTRACE
> depends on HAVE_DYNAMIC_FTRACE_WITH_REGS
> + depends on !DISABLE_DYNAMIC_FTRACE_WITH_REGS
> +
> +config DISABLE_DYNAMIC_FTRACE_WITH_REGS
> + bool "Force build to not have function tracer pass in registers"
> + depends on DYNAMIC_FTRACE
> + depends on HAVE_DYNAMIC_FTRACE_WITH_REGS
> + help
> + If the architecture supports passing in registers to the function
> + tracer, then that is automatically enabled. But there may be some
> + compilers out there that are broken and cause this to fail.
> + This option makes the build think that the architecture does not
> + support the register passing and allows the build to work even
> + with compilers that do not support the feature.g
s/feature.g/feature/
> config FUNCTION_PROFILER
> bool "Kernel function profiler"
It works, but the extra option and reverse logic make things even
more complicated. When I think about it, the change below does a similar
job and looks more straightforward:
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index a138f6d..de6dab0 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -449,7 +449,7 @@ config PROBE_EVENTS
def_bool n
config DYNAMIC_FTRACE
- bool "enable/disable function tracing dynamically"
+ bool "Enable/Disable function tracing dynamically"
depends on FUNCTION_TRACER
depends on HAVE_DYNAMIC_FTRACE
default y
@@ -472,9 +472,17 @@ config DYNAMIC_FTRACE
otherwise has native performance as long as no tracing is active.
config DYNAMIC_FTRACE_WITH_REGS
- def_bool y
+ bool "Pass registers to function tracer"
depends on DYNAMIC_FTRACE
depends on HAVE_DYNAMIC_FTRACE_WITH_REGS
+ default y
+ help
+ This option enables passing the current state of processor
+ registers to the function tracer. It allows to do a more
+ detailed analyze and print more information.
+
+ Say Y here if you are unsure. The only exception is if
+ you want to pass a build error caused by a broken compiler.
config FUNCTION_PROFILER
bool "Kernel function profiler"
I made "enable/disable" uppercase because it looked weird in
the context of the other descriptions.
Best Regards,
Petr
^ permalink raw reply related [flat|nested] 26+ messages in thread
* Re: [PATCH v7 04/10] ppc64 ftrace_with_regs configuration variables
2016-02-05 16:18 ` Petr Mladek
@ 2016-02-05 16:30 ` Steven Rostedt
2016-02-06 10:32 ` Torsten Duwe
1 sibling, 0 replies; 26+ messages in thread
From: Steven Rostedt @ 2016-02-05 16:30 UTC (permalink / raw)
To: Petr Mladek
Cc: Torsten Duwe, Michael Ellerman, Jiri Kosina, Miroslav Benes,
Jessica Yu, linuxppc-dev, linux-kernel, live-patching
On Fri, 5 Feb 2016 17:18:34 +0100
Petr Mladek <pmladek@suse.com> wrote:
> diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
> index a138f6d..de6dab0 100644
> --- a/kernel/trace/Kconfig
> +++ b/kernel/trace/Kconfig
> @@ -449,7 +449,7 @@ config PROBE_EVENTS
> def_bool n
>
> config DYNAMIC_FTRACE
> - bool "enable/disable function tracing dynamically"
> + bool "Enable/Disable function tracing dynamically"
> depends on FUNCTION_TRACER
> depends on HAVE_DYNAMIC_FTRACE
> default y
> @@ -472,9 +472,17 @@ config DYNAMIC_FTRACE
> otherwise has native performance as long as no tracing is active.
>
> config DYNAMIC_FTRACE_WITH_REGS
> - def_bool y
> + bool "Pass registers to function tracer"
> depends on DYNAMIC_FTRACE
> depends on HAVE_DYNAMIC_FTRACE_WITH_REGS
> + default y
> + help
> + This option enables passing the current state of processor
> + registers to the function tracer. It allows to do a more
> + detailed analyze and print more information.
> +
> + Say Y here if you are unsure. The only exception is if
> + you want to pass a build error caused by a broken compiler.
>
> config FUNCTION_PROFILER
> bool "Kernel function profiler"
>
>
> I made "enable/disable" uppercase because it looked weird in
> the context of the other descriptions.
>
This works too, and I'm fine with it.
-- Steve
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH v7 04/10] ppc64 ftrace_with_regs configuration variables
2016-02-05 16:18 ` Petr Mladek
2016-02-05 16:30 ` Steven Rostedt
@ 2016-02-06 10:32 ` Torsten Duwe
2016-02-08 10:34 ` Petr Mladek
1 sibling, 1 reply; 26+ messages in thread
From: Torsten Duwe @ 2016-02-06 10:32 UTC (permalink / raw)
To: Petr Mladek
Cc: Steven Rostedt, Michael Ellerman, Jiri Kosina, Miroslav Benes,
Jessica Yu, linuxppc-dev, linux-kernel, live-patching
On Fri, Feb 05, 2016 at 05:18:34PM +0100, Petr Mladek wrote:
[...]
> more complicated. When I think about it, the change below does a similar
> job and looks more straightforward:
Had I only looked closer. That's exactly how I thought it would work
in the first place. I'd call that a fix. Full ACK from my side.
Torsten
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH v7 04/10] ppc64 ftrace_with_regs configuration variables
2016-02-06 10:32 ` Torsten Duwe
@ 2016-02-08 10:34 ` Petr Mladek
2016-02-08 12:12 ` Torsten Duwe
0 siblings, 1 reply; 26+ messages in thread
From: Petr Mladek @ 2016-02-08 10:34 UTC (permalink / raw)
To: Torsten Duwe
Cc: Steven Rostedt, Michael Ellerman, Jiri Kosina, Miroslav Benes,
Jessica Yu, linuxppc-dev, linux-kernel, live-patching
On Sat 2016-02-06 11:32:43, Torsten Duwe wrote:
> On Fri, Feb 05, 2016 at 05:18:34PM +0100, Petr Mladek wrote:
> [...]
> > more complicated. When I think about it, the change below does a similar
> > job and looks more straightforward:
>
> Had I only looked closer. That's exactly how I thought it would work
> in the first place. I'd call that a fix. Full ACK from my side.
Feel free to merge this into your patch. Or do you want to do
this in a separate one, please?
Best Regards,
Petr
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH v7 04/10] ppc64 ftrace_with_regs configuration variables
2016-02-08 10:34 ` Petr Mladek
@ 2016-02-08 12:12 ` Torsten Duwe
2016-02-08 15:23 ` Petr Mladek
0 siblings, 1 reply; 26+ messages in thread
From: Torsten Duwe @ 2016-02-08 12:12 UTC (permalink / raw)
To: Petr Mladek
Cc: Steven Rostedt, Michael Ellerman, Jiri Kosina, Miroslav Benes,
Jessica Yu, linuxppc-dev, linux-kernel, live-patching
On Mon, Feb 08, 2016 at 11:34:06AM +0100, Petr Mladek wrote:
> On Sat 2016-02-06 11:32:43, Torsten Duwe wrote:
> > On Fri, Feb 05, 2016 at 05:18:34PM +0100, Petr Mladek wrote:
> > [...]
> > > more complicated. When I think about it, the change below does a similar
> > > job and looks more straightforward:
> >
> > Had I only looked closer. That's exactly how I thought it would work
> > in the first place. I'd call that a fix. Full ACK from my side.
>
> Feel free to merge this into your patch. Or do you want to do
> this in a separate one, please?
My Kconfig/Makefile changes depend on it, but OTOH this change (Fix!)
is independent.
IMHO the right thing would be you resend your second mail from Feb-05,
with your sign-off, my ack, FWIW, and Steven checks it in ;-)
Torsten
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH v7 04/10] ppc64 ftrace_with_regs configuration variables
2016-02-08 12:12 ` Torsten Duwe
@ 2016-02-08 15:23 ` Petr Mladek
2016-02-08 15:49 ` Steven Rostedt
0 siblings, 1 reply; 26+ messages in thread
From: Petr Mladek @ 2016-02-08 15:23 UTC (permalink / raw)
To: Torsten Duwe
Cc: Steven Rostedt, Michael Ellerman, Jiri Kosina, Miroslav Benes,
Jessica Yu, linuxppc-dev, linux-kernel, live-patching
On Mon 2016-02-08 13:12:45, Torsten Duwe wrote:
> On Mon, Feb 08, 2016 at 11:34:06AM +0100, Petr Mladek wrote:
> > On Sat 2016-02-06 11:32:43, Torsten Duwe wrote:
> > > On Fri, Feb 05, 2016 at 05:18:34PM +0100, Petr Mladek wrote:
> > > [...]
> > > > more complicated. When I think about it, the change below does a similar
> > > > job and looks more straightforward:
> > >
> > > Had I only looked closer. That's exactly how I thought it would work
> > > in the first place. I'd call that a fix. Full ACK from my side.
> >
> > Feel free to merge this into your patch. Or do you want to do
> > this in a separate one, please?
>
> My Kconfig/Makefile changes depend on it, but OTOH this change (Fix!)
> is independent.
>
> IMHO the right thing would be you resend your second mail from Feb-05,
> with your sign-off, my ack, FWIW, and Steven checks it in ;-)
Please, find it below. I guess that it should be applied before
the check causing the build error. It will help to keep
the tree bisectable.
>From 2b0fcb678d7720d03f9c9f233b61ed9ed4d420b3 Mon Sep 17 00:00:00 2001
From: Petr Mladek <pmladek@suse.com>
Date: Mon, 8 Feb 2016 16:03:03 +0100
Subject: [PATCH] ftrace: Allow to explicitly disable the build of the dynamic
ftrace with regs
This patch allows to explicitly disable
CONFIG_DYNAMIC_FTRACE_WITH_REGS. We will need to do so on
PPC with a broken gcc. This situation will be detected at
buildtime and could not be handled by Kbuild automatically.
Also it fixes the prompt of DYNAMIC_FTRACE. The uppercase
better fits the style of the other menu entries.
This patch does not change the default value.
Signed-off-by: Petr Mladek <pmladek@suse.com>
Acked-by: Torsten Duwe <duwe@lst.de>
---
kernel/trace/Kconfig | 12 ++++++++++--
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index a138f6d866ae..de6dab0f74f2 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -449,7 +449,7 @@ config PROBE_EVENTS
def_bool n
config DYNAMIC_FTRACE
- bool "enable/disable function tracing dynamically"
+ bool "Enable/Disable function tracing dynamically"
depends on FUNCTION_TRACER
depends on HAVE_DYNAMIC_FTRACE
default y
@@ -472,9 +472,17 @@ config DYNAMIC_FTRACE
otherwise has native performance as long as no tracing is active.
config DYNAMIC_FTRACE_WITH_REGS
- def_bool y
+ bool "Pass registers to function tracer"
depends on DYNAMIC_FTRACE
depends on HAVE_DYNAMIC_FTRACE_WITH_REGS
+ default y
+ help
+ This option enables passing the current state of processor
+ registers to the function tracer. It allows to do a more
+ detailed analyze and print more information.
+
+ Say Y here if you are unsure. The only exception is if
+ you want to pass a build error caused by a broken compiler.
config FUNCTION_PROFILER
bool "Kernel function profiler"
--
1.8.5.6
^ permalink raw reply related [flat|nested] 26+ messages in thread
* Re: [PATCH v7 04/10] ppc64 ftrace_with_regs configuration variables
2016-02-08 15:23 ` Petr Mladek
@ 2016-02-08 15:49 ` Steven Rostedt
2016-02-08 16:32 ` Petr Mladek
2016-02-09 9:02 ` Torsten Duwe
0 siblings, 2 replies; 26+ messages in thread
From: Steven Rostedt @ 2016-02-08 15:49 UTC (permalink / raw)
To: Petr Mladek
Cc: Torsten Duwe, Michael Ellerman, Jiri Kosina, Miroslav Benes,
Jessica Yu, linuxppc-dev, linux-kernel, live-patching
On Mon, 8 Feb 2016 16:23:06 +0100
Petr Mladek <pmladek@suse.com> wrote:
> >From 2b0fcb678d7720d03f9c9f233b61ed9ed4d420b3 Mon Sep 17 00:00:00 2001
> From: Petr Mladek <pmladek@suse.com>
> Date: Mon, 8 Feb 2016 16:03:03 +0100
> Subject: [PATCH] ftrace: Allow to explicitly disable the build of the dynamic
> ftrace with regs
>
> This patch allows to explicitly disable
> CONFIG_DYNAMIC_FTRACE_WITH_REGS. We will need to do so on
> PPC with a broken gcc. This situation will be detected at
> buildtime and could not be handled by Kbuild automatically.
Wait. Can it be detected at build time? That is, does it cause a build
error? If so, then you can have Kbuild automatically detect this and
set the proper value. We do this with 'asm goto'. There's tricks in the
build system that can change the configs based on if a compiler is
broken or not.
>
> Also it fixes the prompt of DYNAMIC_FTRACE. The uppercase
> better fits the style of the other menu entries.
s/fixes/updates/
-- Steve
>
> This patch does not change the default value.
>
> Signed-off-by: Petr Mladek <pmladek@suse.com>
> Acked-by: Torsten Duwe <duwe@lst.de>
> ---
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH v7 04/10] ppc64 ftrace_with_regs configuration variables
2016-02-08 15:49 ` Steven Rostedt
@ 2016-02-08 16:32 ` Petr Mladek
2016-02-09 9:02 ` Torsten Duwe
1 sibling, 0 replies; 26+ messages in thread
From: Petr Mladek @ 2016-02-08 16:32 UTC (permalink / raw)
To: Steven Rostedt
Cc: Torsten Duwe, Michael Ellerman, Jiri Kosina, Miroslav Benes,
Jessica Yu, linuxppc-dev, linux-kernel, live-patching
On Mon 2016-02-08 10:49:28, Steven Rostedt wrote:
> On Mon, 8 Feb 2016 16:23:06 +0100
> Petr Mladek <pmladek@suse.com> wrote:
>
> > >From 2b0fcb678d7720d03f9c9f233b61ed9ed4d420b3 Mon Sep 17 00:00:00 2001
> > From: Petr Mladek <pmladek@suse.com>
> > Date: Mon, 8 Feb 2016 16:03:03 +0100
> > Subject: [PATCH] ftrace: Allow to explicitly disable the build of the dynamic
> > ftrace with regs
> >
> > This patch allows to explicitly disable
> > CONFIG_DYNAMIC_FTRACE_WITH_REGS. We will need to do so on
> > PPC with a broken gcc. This situation will be detected at
> > buildtime and could not be handled by Kbuild automatically.
>
> Wait. Can it be detected at build time? That is, does it cause a build
> error? If so, then you can have Kbuild automatically detect this and
> set the proper value. We do this with 'asm goto'. There's tricks in the
> build system that can change the configs based on if a compiler is
> broken or not.
Just to be sure.
Do you suggest to define CONFIG_DYNAMIC_FTRACE_WITH_REGS
via a check in Makefile and rename it to e.g.
CC_FTRACE_WITH_REGS.
Or should we define another variable, e.g.
CC_BROKEN_DYNAMC_FTRACE_WITH_REGS? And then replace all occurrences in
the code by something like:
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#if defined(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && \
+ !defined(CC_BROKEN_DYNAMC_FTRACE_WITH_REGS)
?
Best Regards,
Petr
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH v7 04/10] ppc64 ftrace_with_regs configuration variables
2016-02-08 15:49 ` Steven Rostedt
2016-02-08 16:32 ` Petr Mladek
@ 2016-02-09 9:02 ` Torsten Duwe
1 sibling, 0 replies; 26+ messages in thread
From: Torsten Duwe @ 2016-02-09 9:02 UTC (permalink / raw)
To: Steven Rostedt
Cc: Petr Mladek, Michael Ellerman, Jiri Kosina, Miroslav Benes,
Jessica Yu, linuxppc-dev, linux-kernel, live-patching
On Mon, Feb 08, 2016 at 10:49:28AM -0500, Steven Rostedt wrote:
> On Mon, 8 Feb 2016 16:23:06 +0100
> Petr Mladek <pmladek@suse.com> wrote:
>
> > >From 2b0fcb678d7720d03f9c9f233b61ed9ed4d420b3 Mon Sep 17 00:00:00 2001
> > From: Petr Mladek <pmladek@suse.com>
> > Date: Mon, 8 Feb 2016 16:03:03 +0100
> > Subject: [PATCH] ftrace: Allow to explicitly disable the build of the dynamic
> > ftrace with regs
> >
> > This patch allows to explicitly disable
> > CONFIG_DYNAMIC_FTRACE_WITH_REGS. We will need to do so on
> > PPC with a broken gcc. This situation will be detected at
> > buildtime and could not be handled by Kbuild automatically.
>
> Wait. Can it be detected at build time? That is, does it cause a build
Yes, I wrote a test to detect it at build time. It is similar to "asm goto"
and part of the v7 patch set.
> error? If so, then you can have Kbuild automatically detect this and
> set the proper value. We do this with 'asm goto'. There's tricks in the
> build system that can change the configs based on if a compiler is
> broken or not.
Please clarify. All I could find is Makefile magic that does it. AFAICS
this runs _after_ Kconfig.
But what I'd like to see is to offer the user the full choice, where possible,
e.g.
Kernel Tracing ...
0) none
1) static FTRACE
2) DYNAMIC_FTRACE
3) DYNAMIC_FTRACE_WITH_REGS
Can such a test be used to simply reduce these options?
With Petr's patch, it comes quite close to the above, and if you select "3"
and your compiler is broken, compilation will fail. For "2", it will just do
the right thing ( fall back to plain "-pg" ).
Without Petr's patch you have *no* choice between "2" and "3".
(That's what I'd call a bug :)
So, the question is, can such a test be used to provide _input_ to
"make config" ? I can see the "env=" mechanism, but it seems not to be used
very heavily. That would then be a prerequisite to all "make *config".
Even if it can provide this input, you can still not choose between 2 and 3
where both are available.
Torsten
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH v7 07/10] ppc64 ftrace: disable profiling for some files
2016-01-25 15:31 ` [PATCH v7 07/10] ppc64 ftrace: disable profiling for some files Torsten Duwe
@ 2016-02-10 0:33 ` Michael Ellerman
2016-02-10 17:50 ` Torsten Duwe
0 siblings, 1 reply; 26+ messages in thread
From: Michael Ellerman @ 2016-02-10 0:33 UTC (permalink / raw)
To: Torsten Duwe
Cc: Jiri Kosina, Miroslav Benes, Petr Mladek, Jessica Yu,
Steven Rostedt, linuxppc-dev, linux-kernel, live-patching
On Mon, 2016-01-25 at 16:31 +0100, Torsten Duwe wrote:
> This patch complements the "notrace" attribute for selected functions.
> It adds -mprofile-kernel to the cc flags to be stripped from the command
> line for code-patching.o and feature-fixups.o, in addition to "-pg"
This could probably be folded into patch 5, and the combined patch would be
"remove -mprofile-kernel in all the same places we remove -pg and for the same
reasons".
I can't think of anywhere we would want to disable -pg but not disable
-mprofile-kernel? Or vice versa.
cheers
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH v7 06/10] ppc64 ftrace: disable profiling for some functions
2016-01-25 15:31 ` [PATCH v7 06/10] ppc64 ftrace: disable profiling for some functions Torsten Duwe
@ 2016-02-10 1:50 ` Michael Ellerman
2016-02-10 18:01 ` Torsten Duwe
0 siblings, 1 reply; 26+ messages in thread
From: Michael Ellerman @ 2016-02-10 1:50 UTC (permalink / raw)
To: Torsten Duwe
Cc: Jiri Kosina, Miroslav Benes, Petr Mladek, Jessica Yu,
Steven Rostedt, linuxppc-dev, linux-kernel, live-patching
On Mon, 2016-01-25 at 16:31 +0100, Torsten Duwe wrote:
> At least POWER7/8 have MMUs that don't completely autoload;
> a normal, recoverable memory fault might pass through these functions.
> If a dynamic tracer function causes such a fault, any of these functions
> being traced with -mprofile-kernel may cause an endless recursion.
I'm not really happy with this one, still :)
At the moment I can trace these without any problems, with either ftrace or
kprobes, but obviously it was causing you some trouble. So I'd like to
understand why you were having issues when regular tracing doesn't.
If it's the case that tracing can work for these functions, but live patching
doesn't (for some reason), then maybe these should be blocked by the live
patching infrastructure rather than at the ftrace/kprobes level.
cheers
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH v7 07/10] ppc64 ftrace: disable profiling for some files
2016-02-10 0:33 ` Michael Ellerman
@ 2016-02-10 17:50 ` Torsten Duwe
0 siblings, 0 replies; 26+ messages in thread
From: Torsten Duwe @ 2016-02-10 17:50 UTC (permalink / raw)
To: Michael Ellerman
Cc: Jiri Kosina, Miroslav Benes, Petr Mladek, Jessica Yu,
Steven Rostedt, linuxppc-dev, linux-kernel, live-patching
On Wed, Feb 10, 2016 at 11:33:33AM +1100, Michael Ellerman wrote:
> On Mon, 2016-01-25 at 16:31 +0100, Torsten Duwe wrote:
>
> > This patch complements the "notrace" attribute for selected functions.
> > It adds -mprofile-kernel to the cc flags to be stripped from the command
> > line for code-patching.o and feature-fixups.o, in addition to "-pg"
>
> This could probably be folded into patch 5, and the combined patch would be
> "remove -mprofile-kernel in all the same places we remove -pg and for the same
> reasons".
That's right. It has shrunk a lot...
> I can't think of anywhere we would want to disable -pg but not disable
> -mprofile-kernel? Or vice versa.
On patch creation, I had handled them literally, individually. Now they're
blended into CC_FLAGS_FTRACE, which greatly simplified things.
Done.
Torsten
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH v7 06/10] ppc64 ftrace: disable profiling for some functions
2016-02-10 1:50 ` Michael Ellerman
@ 2016-02-10 18:01 ` Torsten Duwe
0 siblings, 0 replies; 26+ messages in thread
From: Torsten Duwe @ 2016-02-10 18:01 UTC (permalink / raw)
To: Michael Ellerman
Cc: Jiri Kosina, Miroslav Benes, Petr Mladek, Jessica Yu,
Steven Rostedt, linuxppc-dev, linux-kernel, live-patching
On Wed, Feb 10, 2016 at 12:50:38PM +1100, Michael Ellerman wrote:
> On Mon, 2016-01-25 at 16:31 +0100, Torsten Duwe wrote:
>
> > At least POWER7/8 have MMUs that don't completely autoload;
> > a normal, recoverable memory fault might pass through these functions.
> > If a dynamic tracer function causes such a fault, any of these functions
> > being traced with -mprofile-kernel may cause an endless recursion.
>
> I'm not really happy with this one, still :)
I understand :)
> At the moment I can trace these without any problems, with either ftrace or
> kprobes, but obviously it was causing you some trouble. So I'd like to
> understand why you were having issues when regular tracing doesn't.
It was causing huge trouble during development. Make the smallest mistake
and the machine appears to freeze, unless you happen to have a gdb
beneath the VM, which tells you that it's "only" locking rock-hard into
an endless recursion. Hint: printk() is part of the recurse, usually :-/
> If it's the case that tracing can work for these functions, but live patching
> doesn't (for some reason), then maybe these should be blocked by the live
> patching infrastructure rather than at the ftrace/kprobes level.
Now with a matured patch set, the machine survives up to the point where the
NOPs are placed and all is fine. (until someone places probes there :)
But I still have a bad feeling about this. On other architectures, this
functionality is done in hardware; the functions cannot be traced either,
and they don't break (usually ;) I would rather not instrument them.
But if you don't like this patch, and it strictly isn't necessary --
here you go: v8.
Torsten
^ permalink raw reply [flat|nested] 26+ messages in thread
end of thread, other threads:[~2016-02-10 18:01 UTC | newest]
Thread overview: 26+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-02-04 14:29 [PATCH v7 00/10] ftrace with regs + live patching for ppc64 LE (ABI v2) Torsten Duwe
2016-01-25 15:26 ` [PATCH v7 01/10] ppc64 (le): prepare for -mprofile-kernel Torsten Duwe
2016-01-25 15:27 ` [PATCH v7 02/10] ppc64le FTRACE_WITH_REGS implementation Torsten Duwe
2016-01-25 15:29 ` [PATCH v7 03/10] ppc use ftrace_modify_all_code default Torsten Duwe
2016-01-25 15:29 ` [PATCH v7 04/10] ppc64 ftrace_with_regs configuration variables Torsten Duwe
2016-02-05 14:05 ` Petr Mladek
2016-02-05 14:48 ` Steven Rostedt
2016-02-05 16:18 ` Petr Mladek
2016-02-05 16:30 ` Steven Rostedt
2016-02-06 10:32 ` Torsten Duwe
2016-02-08 10:34 ` Petr Mladek
2016-02-08 12:12 ` Torsten Duwe
2016-02-08 15:23 ` Petr Mladek
2016-02-08 15:49 ` Steven Rostedt
2016-02-08 16:32 ` Petr Mladek
2016-02-09 9:02 ` Torsten Duwe
2016-01-25 15:30 ` [PATCH v7 05/10] ppc64 ftrace_with_regs: spare early boot and low level Torsten Duwe
2016-01-25 15:31 ` [PATCH v7 06/10] ppc64 ftrace: disable profiling for some functions Torsten Duwe
2016-02-10 1:50 ` Michael Ellerman
2016-02-10 18:01 ` Torsten Duwe
2016-01-25 15:31 ` [PATCH v7 07/10] ppc64 ftrace: disable profiling for some files Torsten Duwe
2016-02-10 0:33 ` Michael Ellerman
2016-02-10 17:50 ` Torsten Duwe
2016-01-25 15:33 ` [PATCH v7 08/10] Implement kernel live patching for ppc64le (ABIv2) Torsten Duwe
2016-01-25 15:33 ` [PATCH v7 09/10] Enable LIVEPATCH to be configured on ppc64le and add livepatch.o if it is selected Torsten Duwe
2016-01-28 15:32 ` [PATCH v7 10/10] livepatch: Detect offset for the ftrace location during build Petr Mladek
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.