All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v3 0/5] powerpc: add support for KPROBES_ON_FTRACE
@ 2017-04-12 11:09 Naveen N. Rao
  2017-04-12 11:09 ` [PATCH v3 1/5] powerpc: ftrace: minor cleanup Naveen N. Rao
                   ` (4 more replies)
  0 siblings, 5 replies; 6+ messages in thread
From: Naveen N. Rao @ 2017-04-12 11:09 UTC (permalink / raw)
  To: Michael Ellerman
  Cc: Ananth N Mavinakayanahalli, Masami Hiramatsu, linuxppc-dev, linux-kernel

v2:
https://www.mail-archive.com/linuxppc-dev@lists.ozlabs.org/msg114659.html

For v3, this has only been rebased on top of powerpc/next and carries a
minor change to patch 4/5. No other changes.

Also, though patch 3/5 is generic, it needs to be carried in this
series as we crash on powerpc without that patch.


- Naveen


Masami Hiramatsu (1):
  kprobes: Skip preparing optprobe if the probe is ftrace-based

Naveen N. Rao (4):
  powerpc: ftrace: minor cleanup
  powerpc: ftrace: restore LR from pt_regs
  powerpc: kprobes: add support for KPROBES_ON_FTRACE
  powerpc: kprobes: prefer ftrace when probing function entry

 .../debug/kprobes-on-ftrace/arch-support.txt       |   2 +-
 arch/powerpc/Kconfig                               |   1 +
 arch/powerpc/include/asm/kprobes.h                 |  10 ++
 arch/powerpc/kernel/Makefile                       |   3 +
 arch/powerpc/kernel/entry_64.S                     |  19 ++--
 arch/powerpc/kernel/kprobes-ftrace.c               | 104 +++++++++++++++++++++
 arch/powerpc/kernel/kprobes.c                      |  25 ++++-
 kernel/kprobes.c                                   |  11 ++-
 8 files changed, 159 insertions(+), 16 deletions(-)
 create mode 100644 arch/powerpc/kernel/kprobes-ftrace.c

-- 
2.12.1

^ permalink raw reply	[flat|nested] 6+ messages in thread

* [PATCH v3 1/5] powerpc: ftrace: minor cleanup
  2017-04-12 11:09 [PATCH v3 0/5] powerpc: add support for KPROBES_ON_FTRACE Naveen N. Rao
@ 2017-04-12 11:09 ` Naveen N. Rao
  2017-04-12 11:09 ` [PATCH v3 2/5] powerpc: ftrace: restore LR from pt_regs Naveen N. Rao
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 6+ messages in thread
From: Naveen N. Rao @ 2017-04-12 11:09 UTC (permalink / raw)
  To: Michael Ellerman
  Cc: Ananth N Mavinakayanahalli, Masami Hiramatsu, linuxppc-dev, linux-kernel

Move the stack setup and teardown code to the ftrace_graph_caller().
This way, we don't incur the cost of setting it up unless function graph
is enabled for this function.

Also, remove the extraneous LR restore code after the function graph
stub. LR has previously been restored and neither livepatch_handler()
nor ftrace_graph_caller() return back here.

Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
---
 arch/powerpc/kernel/entry_64.S | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 6432d4bf08c8..8fd8718722a1 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -1313,16 +1313,12 @@ ftrace_call:
 #endif
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	stdu	r1, -112(r1)
 .globl ftrace_graph_call
 ftrace_graph_call:
 	b	ftrace_graph_stub
 _GLOBAL(ftrace_graph_stub)
-	addi	r1, r1, 112
 #endif
 
-	ld	r0,LRSAVE(r1)	/* restore callee's lr at _mcount site */
-	mtlr	r0
 	bctr			/* jump after _mcount site */
 #endif /* CC_USING_MPROFILE_KERNEL */
 
@@ -1446,6 +1442,7 @@ _GLOBAL(ftrace_stub)
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 #ifndef CC_USING_MPROFILE_KERNEL
 _GLOBAL(ftrace_graph_caller)
+	stdu	r1, -112(r1)
 	/* load r4 with local address */
 	ld	r4, 128(r1)
 	subi	r4, r4, MCOUNT_INSN_SIZE
@@ -1471,6 +1468,7 @@ _GLOBAL(ftrace_graph_caller)
 
 #else /* CC_USING_MPROFILE_KERNEL */
 _GLOBAL(ftrace_graph_caller)
+	stdu	r1, -112(r1)
 	/* with -mprofile-kernel, parameter regs are still alive at _mcount */
 	std	r10, 104(r1)
 	std	r9, 96(r1)
-- 
2.12.1

^ permalink raw reply related	[flat|nested] 6+ messages in thread

* [PATCH v3 2/5] powerpc: ftrace: restore LR from pt_regs
  2017-04-12 11:09 [PATCH v3 0/5] powerpc: add support for KPROBES_ON_FTRACE Naveen N. Rao
  2017-04-12 11:09 ` [PATCH v3 1/5] powerpc: ftrace: minor cleanup Naveen N. Rao
@ 2017-04-12 11:09 ` Naveen N. Rao
  2017-04-12 11:09 ` [PATCH v3 3/5] kprobes: Skip preparing optprobe if the probe is ftrace-based Naveen N. Rao
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 6+ messages in thread
From: Naveen N. Rao @ 2017-04-12 11:09 UTC (permalink / raw)
  To: Michael Ellerman
  Cc: Ananth N Mavinakayanahalli, Masami Hiramatsu, linuxppc-dev, linux-kernel

Pass the real LR to the ftrace handler. This is needed for
KPROBES_ON_FTRACE for the pre handlers.

Also, with KPROBES_ON_FTRACE, the link register may be updated by the
pre handlers or by a registered kretprobe. Honor the updated LR by restoring
it from pt_regs, rather than from the stack save area.

Live patch and function graph continue to work fine with this change.

Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
---
 arch/powerpc/kernel/entry_64.S | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 8fd8718722a1..744b2f91444a 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -1248,9 +1248,10 @@ _GLOBAL(ftrace_caller)
 
 	/* Get the _mcount() call site out of LR */
 	mflr	r7
-	/* Save it as pt_regs->nip & pt_regs->link */
+	/* Save it as pt_regs->nip */
 	std     r7, _NIP(r1)
-	std     r7, _LINK(r1)
+	/* Save the read LR in pt_regs->link */
+	std     r0, _LINK(r1)
 
 	/* Save callee's TOC in the ABI compliant location */
 	std	r2, 24(r1)
@@ -1297,16 +1298,16 @@ ftrace_call:
 	REST_8GPRS(16,r1)
 	REST_8GPRS(24,r1)
 
+	/* Restore possibly modified LR */
+	ld	r0, _LINK(r1)
+	mtlr	r0
+
 	/* Restore callee's TOC */
 	ld	r2, 24(r1)
 
 	/* Pop our stack frame */
 	addi r1, r1, SWITCH_FRAME_SIZE
 
-	/* Restore original LR for return to B */
-	ld	r0, LRSAVE(r1)
-	mtlr	r0
-
 #ifdef CONFIG_LIVEPATCH
         /* Based on the cmpd above, if the NIP was altered handle livepatch */
 	bne-	livepatch_handler
-- 
2.12.1

^ permalink raw reply related	[flat|nested] 6+ messages in thread

* [PATCH v3 3/5] kprobes: Skip preparing optprobe if the probe is ftrace-based
  2017-04-12 11:09 [PATCH v3 0/5] powerpc: add support for KPROBES_ON_FTRACE Naveen N. Rao
  2017-04-12 11:09 ` [PATCH v3 1/5] powerpc: ftrace: minor cleanup Naveen N. Rao
  2017-04-12 11:09 ` [PATCH v3 2/5] powerpc: ftrace: restore LR from pt_regs Naveen N. Rao
@ 2017-04-12 11:09 ` Naveen N. Rao
  2017-04-12 11:09 ` [PATCH v3 4/5] powerpc: kprobes: add support for KPROBES_ON_FTRACE Naveen N. Rao
  2017-04-12 11:09 ` [PATCH v3 5/5] powerpc: kprobes: prefer ftrace when probing function entry Naveen N. Rao
  4 siblings, 0 replies; 6+ messages in thread
From: Naveen N. Rao @ 2017-04-12 11:09 UTC (permalink / raw)
  To: Michael Ellerman
  Cc: Ananth N Mavinakayanahalli, Masami Hiramatsu, linuxppc-dev, linux-kernel

From: Masami Hiramatsu <mhiramat@kernel.org>

Skip preparing optprobe if the probe is ftrace-based, since anyway, it
must not be optimized (or already optimized by ftrace).

Tested-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
---
Though this patch is generic, it is needed for KPROBES_ON_FTRACE to work
on powerpc.

- Naveen


 kernel/kprobes.c | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 6a128f3a7ed1..406889889ce5 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -743,13 +743,20 @@ static void kill_optimized_kprobe(struct kprobe *p)
 	arch_remove_optimized_kprobe(op);
 }
 
+static inline
+void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
+{
+	if (!kprobe_ftrace(p))
+		arch_prepare_optimized_kprobe(op, p);
+}
+
 /* Try to prepare optimized instructions */
 static void prepare_optimized_kprobe(struct kprobe *p)
 {
 	struct optimized_kprobe *op;
 
 	op = container_of(p, struct optimized_kprobe, kp);
-	arch_prepare_optimized_kprobe(op, p);
+	__prepare_optimized_kprobe(op, p);
 }
 
 /* Allocate new optimized_kprobe and try to prepare optimized instructions */
@@ -763,7 +770,7 @@ static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
 
 	INIT_LIST_HEAD(&op->list);
 	op->kp.addr = p->addr;
-	arch_prepare_optimized_kprobe(op, p);
+	__prepare_optimized_kprobe(op, p);
 
 	return &op->kp;
 }
-- 
2.12.1

^ permalink raw reply related	[flat|nested] 6+ messages in thread

* [PATCH v3 4/5] powerpc: kprobes: add support for KPROBES_ON_FTRACE
  2017-04-12 11:09 [PATCH v3 0/5] powerpc: add support for KPROBES_ON_FTRACE Naveen N. Rao
                   ` (2 preceding siblings ...)
  2017-04-12 11:09 ` [PATCH v3 3/5] kprobes: Skip preparing optprobe if the probe is ftrace-based Naveen N. Rao
@ 2017-04-12 11:09 ` Naveen N. Rao
  2017-04-12 11:09 ` [PATCH v3 5/5] powerpc: kprobes: prefer ftrace when probing function entry Naveen N. Rao
  4 siblings, 0 replies; 6+ messages in thread
From: Naveen N. Rao @ 2017-04-12 11:09 UTC (permalink / raw)
  To: Michael Ellerman
  Cc: Ananth N Mavinakayanahalli, Masami Hiramatsu, linuxppc-dev, linux-kernel

Allow kprobes to be placed on ftrace _mcount() call sites. This
optimization avoids the use of a trap, by riding on ftrace
infrastructure.

This depends on HAVE_DYNAMIC_FTRACE_WITH_REGS which depends on
MPROFILE_KERNEL, which is only currently enabled on powerpc64le with
newer toolchains.

Based on the x86 code by Masami.

Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
---
 .../debug/kprobes-on-ftrace/arch-support.txt       |   2 +-
 arch/powerpc/Kconfig                               |   1 +
 arch/powerpc/include/asm/kprobes.h                 |  10 ++
 arch/powerpc/kernel/Makefile                       |   3 +
 arch/powerpc/kernel/kprobes-ftrace.c               | 104 +++++++++++++++++++++
 arch/powerpc/kernel/kprobes.c                      |   8 +-
 6 files changed, 126 insertions(+), 2 deletions(-)
 create mode 100644 arch/powerpc/kernel/kprobes-ftrace.c

diff --git a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
index 40f44d041fb4..930430c6aef6 100644
--- a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
+++ b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
@@ -27,7 +27,7 @@
     |       nios2: | TODO |
     |    openrisc: | TODO |
     |      parisc: | TODO |
-    |     powerpc: | TODO |
+    |     powerpc: |  ok  |
     |        s390: | TODO |
     |       score: | TODO |
     |          sh: | TODO |
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 9ff731f50a29..a55a776a1a43 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -142,6 +142,7 @@ config PPC
 	select HAVE_IRQ_EXIT_ON_IRQ_STACK
 	select HAVE_KERNEL_GZIP
 	select HAVE_KPROBES
+	select HAVE_KPROBES_ON_FTRACE
 	select HAVE_KRETPROBES
 	select HAVE_LIVEPATCH			if HAVE_DYNAMIC_FTRACE_WITH_REGS
 	select HAVE_MEMBLOCK
diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h
index a843884aafaf..a83821f33ea3 100644
--- a/arch/powerpc/include/asm/kprobes.h
+++ b/arch/powerpc/include/asm/kprobes.h
@@ -103,6 +103,16 @@ extern int kprobe_exceptions_notify(struct notifier_block *self,
 extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
 extern int kprobe_handler(struct pt_regs *regs);
 extern int kprobe_post_handler(struct pt_regs *regs);
+#ifdef CONFIG_KPROBES_ON_FTRACE
+extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+			   struct kprobe_ctlblk *kcb);
+#else
+static inline int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+				  struct kprobe_ctlblk *kcb)
+{
+	return 0;
+}
+#endif
 #else
 static inline int kprobe_handler(struct pt_regs *regs) { return 0; }
 static inline int kprobe_post_handler(struct pt_regs *regs) { return 0; }
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 811f441a125f..3e461637b64d 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -97,6 +97,7 @@ obj-$(CONFIG_BOOTX_TEXT)	+= btext.o
 obj-$(CONFIG_SMP)		+= smp.o
 obj-$(CONFIG_KPROBES)		+= kprobes.o
 obj-$(CONFIG_OPTPROBES)		+= optprobes.o optprobes_head.o
+obj-$(CONFIG_KPROBES_ON_FTRACE)	+= kprobes-ftrace.o
 obj-$(CONFIG_UPROBES)		+= uprobes.o
 obj-$(CONFIG_PPC_UDBG_16550)	+= legacy_serial.o udbg_16550.o
 obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
@@ -150,6 +151,8 @@ GCOV_PROFILE_machine_kexec_32.o := n
 UBSAN_SANITIZE_machine_kexec_32.o := n
 GCOV_PROFILE_kprobes.o := n
 UBSAN_SANITIZE_kprobes.o := n
+GCOV_PROFILE_kprobes-ftrace.o := n
+UBSAN_SANITIZE_kprobes-ftrace.o := n
 UBSAN_SANITIZE_vdso.o := n
 
 extra-$(CONFIG_PPC_FPU)		+= fpu.o
diff --git a/arch/powerpc/kernel/kprobes-ftrace.c b/arch/powerpc/kernel/kprobes-ftrace.c
new file mode 100644
index 000000000000..6c089d9757c9
--- /dev/null
+++ b/arch/powerpc/kernel/kprobes-ftrace.c
@@ -0,0 +1,104 @@
+/*
+ * Dynamic Ftrace based Kprobes Optimization
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) Hitachi Ltd., 2012
+ * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+ *		  IBM Corporation
+ */
+#include <linux/kprobes.h>
+#include <linux/ptrace.h>
+#include <linux/hardirq.h>
+#include <linux/preempt.h>
+#include <linux/ftrace.h>
+
+static nokprobe_inline
+int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+		      struct kprobe_ctlblk *kcb, unsigned long orig_nip)
+{
+	/*
+	 * Emulate singlestep (and also recover regs->nip)
+	 * as if there is a nop
+	 */
+	regs->nip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
+	if (unlikely(p->post_handler)) {
+		kcb->kprobe_status = KPROBE_HIT_SSDONE;
+		p->post_handler(p, regs, 0);
+	}
+	__this_cpu_write(current_kprobe, NULL);
+	if (orig_nip)
+		regs->nip = orig_nip;
+	return 1;
+}
+
+int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+		    struct kprobe_ctlblk *kcb)
+{
+	if (kprobe_ftrace(p))
+		return __skip_singlestep(p, regs, kcb, 0);
+	else
+		return 0;
+}
+NOKPROBE_SYMBOL(skip_singlestep);
+
+/* Ftrace callback handler for kprobes */
+void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
+			   struct ftrace_ops *ops, struct pt_regs *regs)
+{
+	struct kprobe *p;
+	struct kprobe_ctlblk *kcb;
+	unsigned long flags;
+
+	/* Disable irq for emulating a breakpoint and avoiding preempt */
+	local_irq_save(flags);
+	hard_irq_disable();
+
+	p = get_kprobe((kprobe_opcode_t *)nip);
+	if (unlikely(!p) || kprobe_disabled(p))
+		goto end;
+
+	kcb = get_kprobe_ctlblk();
+	if (kprobe_running()) {
+		kprobes_inc_nmissed_count(p);
+	} else {
+		unsigned long orig_nip = regs->nip;
+
+		/*
+		 * On powerpc, NIP is *before* this instruction for the
+		 * pre handler
+		 */
+		regs->nip -= MCOUNT_INSN_SIZE;
+
+		__this_cpu_write(current_kprobe, p);
+		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+		if (!p->pre_handler || !p->pre_handler(p, regs))
+			__skip_singlestep(p, regs, kcb, orig_nip);
+		/*
+		 * If pre_handler returns !0, it sets regs->nip and
+		 * resets current kprobe.
+		 */
+	}
+end:
+	local_irq_restore(flags);
+}
+NOKPROBE_SYMBOL(kprobe_ftrace_handler);
+
+int arch_prepare_kprobe_ftrace(struct kprobe *p)
+{
+	p->ainsn.insn = NULL;
+	p->ainsn.boostable = -1;
+	return 0;
+}
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 005bd4a75902..b78b274e1d6e 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -192,7 +192,11 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
 bool arch_function_offset_within_entry(unsigned long offset)
 {
 #ifdef PPC64_ELF_ABI_v2
+#ifdef CONFIG_KPROBES_ON_FTRACE
+	return offset <= 16;
+#else
 	return offset <= 8;
+#endif
 #else
 	return !offset;
 #endif
@@ -300,7 +304,9 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
 			}
 			p = __this_cpu_read(current_kprobe);
 			if (p->break_handler && p->break_handler(p, regs)) {
-				goto ss_probe;
+				if (!skip_singlestep(p, regs, kcb))
+					goto ss_probe;
+				ret = 1;
 			}
 		}
 		goto no_kprobe;
-- 
2.12.1

^ permalink raw reply related	[flat|nested] 6+ messages in thread

* [PATCH v3 5/5] powerpc: kprobes: prefer ftrace when probing function entry
  2017-04-12 11:09 [PATCH v3 0/5] powerpc: add support for KPROBES_ON_FTRACE Naveen N. Rao
                   ` (3 preceding siblings ...)
  2017-04-12 11:09 ` [PATCH v3 4/5] powerpc: kprobes: add support for KPROBES_ON_FTRACE Naveen N. Rao
@ 2017-04-12 11:09 ` Naveen N. Rao
  4 siblings, 0 replies; 6+ messages in thread
From: Naveen N. Rao @ 2017-04-12 11:09 UTC (permalink / raw)
  To: Michael Ellerman
  Cc: Ananth N Mavinakayanahalli, Masami Hiramatsu, linuxppc-dev, linux-kernel

KPROBES_ON_FTRACE avoids much of the overhead with regular kprobes as it
eliminates the need for a trap, as well as the need to emulate or
single-step instructions.

Though OPTPROBES provides us with similar performance, we have limited
optprobes trampoline slots. As such, when asked to probe at a function
entry, default to using the ftrace infrastructure.

With:
	# cd /sys/kernel/debug/tracing
	# echo 'p _do_fork' > kprobe_events

before patch:
	# cat ../kprobes/list
	c0000000000daf08  k  _do_fork+0x8    [DISABLED]
	c000000000044fc0  k  kretprobe_trampoline+0x0    [OPTIMIZED]

and after patch:
	# cat ../kprobes/list
	c0000000000d074c  k  _do_fork+0xc    [DISABLED][FTRACE]
	c0000000000412b0  k  kretprobe_trampoline+0x0    [OPTIMIZED]

Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
---
 arch/powerpc/kernel/kprobes.c | 17 +++++++++++++++--
 1 file changed, 15 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index b78b274e1d6e..23d19678a56f 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -49,8 +49,21 @@ kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
 #ifdef PPC64_ELF_ABI_v2
 	/* PPC64 ABIv2 needs local entry point */
 	addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
-	if (addr && !offset)
-		addr = (kprobe_opcode_t *)ppc_function_entry(addr);
+	if (addr && !offset) {
+#ifdef CONFIG_KPROBES_ON_FTRACE
+		unsigned long faddr;
+		/*
+		 * Per livepatch.h, ftrace location is always within the first
+		 * 16 bytes of a function on powerpc with -mprofile-kernel.
+		 */
+		faddr = ftrace_location_range((unsigned long)addr,
+					      (unsigned long)addr + 16);
+		if (faddr)
+			addr = (kprobe_opcode_t *)faddr;
+		else
+#endif
+			addr = (kprobe_opcode_t *)ppc_function_entry(addr);
+	}
 #elif defined(PPC64_ELF_ABI_v1)
 	/*
 	 * 64bit powerpc ABIv1 uses function descriptors:
-- 
2.12.1

^ permalink raw reply related	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2017-04-12 11:11 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-04-12 11:09 [PATCH v3 0/5] powerpc: add support for KPROBES_ON_FTRACE Naveen N. Rao
2017-04-12 11:09 ` [PATCH v3 1/5] powerpc: ftrace: minor cleanup Naveen N. Rao
2017-04-12 11:09 ` [PATCH v3 2/5] powerpc: ftrace: restore LR from pt_regs Naveen N. Rao
2017-04-12 11:09 ` [PATCH v3 3/5] kprobes: Skip preparing optprobe if the probe is ftrace-based Naveen N. Rao
2017-04-12 11:09 ` [PATCH v3 4/5] powerpc: kprobes: add support for KPROBES_ON_FTRACE Naveen N. Rao
2017-04-12 11:09 ` [PATCH v3 5/5] powerpc: kprobes: prefer ftrace when probing function entry Naveen N. Rao

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.