* [PATCH 1/3] powerpc: kprobes: add support for KPROBES_ON_FTRACE
From: Naveen N. Rao @ 2017-02-14 18:58 UTC
  To: Ananth N Mavinakayanahalli, Masami Hiramatsu, Michael Ellerman
  Cc: linux-kernel, linuxppc-dev

Allow kprobes to be placed on ftrace _mcount() call sites. This
optimization avoids the use of a trap by riding on the ftrace
infrastructure.

This depends on HAVE_DYNAMIC_FTRACE_WITH_REGS, which in turn depends on
MPROFILE_KERNEL, currently only enabled on powerpc64le with newer
toolchains.
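
For example, nothing changes for kprobes users. A minimal sketch of a
module probing a function entry (illustrative only, not part of this
patch; the probed symbol is arbitrary):

	#include <linux/kprobes.h>
	#include <linux/module.h>

	static int pre_handler(struct kprobe *p, struct pt_regs *regs)
	{
		/* With KPROBES_ON_FTRACE, we arrive here via the ftrace
		 * trampoline rather than via a trap */
		pr_info("probe hit at %pS\n", (void *)regs->nip);
		return 0;
	}

	static struct kprobe kp = {
		.symbol_name	= "_do_fork",
		.pre_handler	= pre_handler,
	};

	static int __init kp_init(void)
	{
		return register_kprobe(&kp);
	}

	static void __exit kp_exit(void)
	{
		unregister_kprobe(&kp);
	}

	module_init(kp_init);
	module_exit(kp_exit);
	MODULE_LICENSE("GPL");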

Based on the x86 code by Masami.

Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
---
 arch/powerpc/Kconfig                 |   1 +
 arch/powerpc/include/asm/kprobes.h   |  10 ++++
 arch/powerpc/kernel/Makefile         |   3 ++
 arch/powerpc/kernel/kprobes-ftrace.c | 100 +++++++++++++++++++++++++++++++++++
 arch/powerpc/kernel/kprobes.c        |   4 +-
 arch/powerpc/kernel/optprobes.c      |   3 ++
 6 files changed, 120 insertions(+), 1 deletion(-)
 create mode 100644 arch/powerpc/kernel/kprobes-ftrace.c

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 260dd6a371e0..78419919556d 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -101,6 +101,7 @@ config PPC
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU)
 	select HAVE_KPROBES
 	select HAVE_OPTPROBES if PPC64
+	select HAVE_KPROBES_ON_FTRACE
 	select HAVE_ARCH_KGDB
 	select HAVE_KRETPROBES
 	select HAVE_ARCH_TRACEHOOK
diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h
index e7ada061aa12..3305a12286fa 100644
--- a/arch/powerpc/include/asm/kprobes.h
+++ b/arch/powerpc/include/asm/kprobes.h
@@ -153,6 +153,16 @@ extern int kprobe_exceptions_notify(struct notifier_block *self,
 extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
 extern int kprobe_handler(struct pt_regs *regs);
 extern int kprobe_post_handler(struct pt_regs *regs);
+#ifdef CONFIG_KPROBES_ON_FTRACE
+extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+			   struct kprobe_ctlblk *kcb);
+#else
+static inline int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+				  struct kprobe_ctlblk *kcb)
+{
+	return 0;
+}
+#endif
 #else
 static inline int kprobe_handler(struct pt_regs *regs) { return 0; }
 static inline int kprobe_post_handler(struct pt_regs *regs) { return 0; }
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index a048b37b9b27..88b21427ccc7 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -101,6 +101,7 @@ obj-$(CONFIG_BOOTX_TEXT)	+= btext.o
 obj-$(CONFIG_SMP)		+= smp.o
 obj-$(CONFIG_KPROBES)		+= kprobes.o
 obj-$(CONFIG_OPTPROBES)		+= optprobes.o optprobes_head.o
+obj-$(CONFIG_KPROBES_ON_FTRACE)	+= kprobes-ftrace.o
 obj-$(CONFIG_UPROBES)		+= uprobes.o
 obj-$(CONFIG_PPC_UDBG_16550)	+= legacy_serial.o udbg_16550.o
 obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
@@ -154,6 +155,8 @@ GCOV_PROFILE_machine_kexec_32.o := n
 UBSAN_SANITIZE_machine_kexec_32.o := n
 GCOV_PROFILE_kprobes.o := n
 UBSAN_SANITIZE_kprobes.o := n
+GCOV_PROFILE_kprobes-ftrace.o := n
+UBSAN_SANITIZE_kprobes-ftrace.o := n
 UBSAN_SANITIZE_vdso.o := n
 
 extra-$(CONFIG_PPC_FPU)		+= fpu.o
diff --git a/arch/powerpc/kernel/kprobes-ftrace.c b/arch/powerpc/kernel/kprobes-ftrace.c
new file mode 100644
index 000000000000..0377b3013723
--- /dev/null
+++ b/arch/powerpc/kernel/kprobes-ftrace.c
@@ -0,0 +1,100 @@
+/*
+ * Dynamic Ftrace based Kprobes Optimization
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) Hitachi Ltd., 2012
+ * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+ *		  IBM Corporation
+ */
+#include <linux/kprobes.h>
+#include <linux/ptrace.h>
+#include <linux/hardirq.h>
+#include <linux/preempt.h>
+#include <linux/ftrace.h>
+
+static nokprobe_inline
+int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+		      struct kprobe_ctlblk *kcb, unsigned long orig_nip)
+{
+	/*
+	 * Emulate singlestep (and also recover regs->nip)
+	 * as if there is a nop
+	 */
+	regs->nip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
+	if (unlikely(p->post_handler)) {
+		kcb->kprobe_status = KPROBE_HIT_SSDONE;
+		p->post_handler(p, regs, 0);
+	}
+	__this_cpu_write(current_kprobe, NULL);
+	if (orig_nip)
+		regs->nip = orig_nip;
+	return 1;
+}
+
+int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+		    struct kprobe_ctlblk *kcb)
+{
+	if (kprobe_ftrace(p))
+		return __skip_singlestep(p, regs, kcb, 0);
+	else
+		return 0;
+}
+NOKPROBE_SYMBOL(skip_singlestep);
+
+/* Ftrace callback handler for kprobes */
+void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
+			   struct ftrace_ops *ops, struct pt_regs *regs)
+{
+	struct kprobe *p;
+	struct kprobe_ctlblk *kcb;
+	unsigned long flags;
+
+	/* Disable irq for emulating a breakpoint and avoiding preempt */
+	local_irq_save(flags);
+	hard_irq_disable();
+
+	p = get_kprobe((kprobe_opcode_t *)nip);
+	if (unlikely(!p) || kprobe_disabled(p))
+		goto end;
+
+	kcb = get_kprobe_ctlblk();
+	if (kprobe_running()) {
+		kprobes_inc_nmissed_count(p);
+	} else {
+		unsigned long orig_nip = regs->nip;
+		/* Kprobe handler expects regs->nip = nip + 1 as breakpoint hit */
+		regs->nip = nip + sizeof(kprobe_opcode_t);
+
+		__this_cpu_write(current_kprobe, p);
+		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+		if (!p->pre_handler || !p->pre_handler(p, regs))
+			__skip_singlestep(p, regs, kcb, orig_nip);
+		/*
+		 * If pre_handler returns !0, it sets regs->nip and
+		 * resets current kprobe.
+		 */
+	}
+end:
+	local_irq_restore(flags);
+}
+NOKPROBE_SYMBOL(kprobe_ftrace_handler);
+
+int arch_prepare_kprobe_ftrace(struct kprobe *p)
+{
+	p->ainsn.insn = NULL;
+	p->ainsn.boostable = -1;
+	return 0;
+}
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index c213637b9d25..dab5e54f949c 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -233,7 +233,9 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
 			}
 			p = __this_cpu_read(current_kprobe);
 			if (p->break_handler && p->break_handler(p, regs)) {
-				goto ss_probe;
+				if (!skip_singlestep(p, regs, kcb))
+					goto ss_probe;
+				ret = 1;
 			}
 		}
 		goto no_kprobe;
diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c
index e51a045f3d3b..a8f414a0b141 100644
--- a/arch/powerpc/kernel/optprobes.c
+++ b/arch/powerpc/kernel/optprobes.c
@@ -70,6 +70,9 @@ static unsigned long can_optimize(struct kprobe *p)
 	struct instruction_op op;
 	unsigned long nip = 0;
 
+	if (unlikely(kprobe_ftrace(p)))
+		return 0;
+
 	/*
 	 * kprobe placed for kretprobe during boot time
 	 * has a 'nop' instruction, which can be emulated.
-- 
2.11.0

* [PATCH 2/3] powerpc: introduce a new helper to obtain function entry points
From: Naveen N. Rao @ 2017-02-14 18:58 UTC
  To: Ananth N Mavinakayanahalli, Masami Hiramatsu, Michael Ellerman
  Cc: linux-kernel, linuxppc-dev

kprobe_lookup_name() is specific to the kprobe subsystem and, as of a
subsequent patch, may not always return the function entry point. For
looking up function entry points, introduce a separate helper and use
it in optprobes.c.
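
As an illustration (an assumed example, not from this patch): on ABIv1,
function text lives at the dot symbol, so the helper prepends the dot
before the lookup and only falls back to the plain name if that fails:

	/* ABIv1: resolves ".memcpy" (the entry point); a plain lookup of
	 * "memcpy" may instead find the function descriptor in .opd */
	unsigned long entry = ppc_kallsyms_lookup_name("memcpy");

	/* ABIv2: resolves "memcpy", then ppc_function_entry() skips the
	 * global entry code to return the local entry point */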

Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/code-patching.h | 37 ++++++++++++++++++++++++++++++++
 arch/powerpc/kernel/optprobes.c          |  4 ++--
 2 files changed, 39 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index 8ab937771068..3e994f404434 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -12,6 +12,8 @@
 
 #include <asm/types.h>
 #include <asm/ppc-opcode.h>
+#include <linux/string.h>
+#include <linux/kallsyms.h>
 
 /* Flags for create_branch:
  * "b"   == create_branch(addr, target, 0);
@@ -99,6 +101,41 @@ static inline unsigned long ppc_global_function_entry(void *func)
 #endif
 }
 
+/*
+ * Wrapper around kallsyms_lookup() to return function entry address:
+ * - For ABIv1, we lookup the dot variant.
+ * - For ABIv2, we return the local entry point.
+ */
+static inline unsigned long ppc_kallsyms_lookup_name(const char *name)
+{
+	unsigned long addr;
+#ifdef PPC64_ELF_ABI_v1
+	/* check for dot variant */
+	char dot_name[1 + KSYM_NAME_LEN];
+	bool dot_appended = false;
+	if (name[0] != '.') {
+		dot_name[0] = '.';
+		dot_name[1] = '\0';
+		strncat(dot_name, name, KSYM_NAME_LEN - 2);
+		dot_appended = true;
+	} else {
+		dot_name[0] = '\0';
+		strncat(dot_name, name, KSYM_NAME_LEN - 1);
+	}
+	addr = kallsyms_lookup_name(dot_name);
+	if (!addr && dot_appended)
+		/* Let's try the original non-dot symbol lookup	*/
+		addr = kallsyms_lookup_name(name);
+#elif defined(PPC64_ELF_ABI_v2)
+	addr = kallsyms_lookup_name(name);
+	if (addr)
+		addr = ppc_function_entry((void *)addr);
+#else
+	addr = kallsyms_lookup_name(name);
+#endif
+	return addr;
+}
+
 #ifdef CONFIG_PPC64
 /*
  * Some instruction encodings commonly used in dynamic ftracing
diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c
index a8f414a0b141..a2f6f28e5205 100644
--- a/arch/powerpc/kernel/optprobes.c
+++ b/arch/powerpc/kernel/optprobes.c
@@ -246,8 +246,8 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
 	/*
 	 * 2. branch to optimized_callback() and emulate_step()
 	 */
-	kprobe_lookup_name("optimized_callback", op_callback_addr, 0);
-	kprobe_lookup_name("emulate_step", emulate_step_addr, 0);
+	op_callback_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("optimized_callback");
+	emulate_step_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("emulate_step");
 	if (!op_callback_addr || !emulate_step_addr) {
 		WARN(1, "kprobe_lookup_name() failed\n");
 		goto error;
-- 
2.11.0

* [PATCH 3/3] powerpc: kprobes: prefer ftrace when probing function entry
From: Naveen N. Rao @ 2017-02-14 18:58 UTC
  To: Ananth N Mavinakayanahalli, Masami Hiramatsu, Michael Ellerman
  Cc: linux-kernel, linuxppc-dev

KPROBES_ON_FTRACE avoids much of the overhead of regular kprobes, as it
eliminates the need for a trap as well as the need to emulate or
single-step instructions.

Though OPTPROBES provides similar performance, optprobe trampoline
slots are a limited resource. As such, when asked to probe at a
function entry, default to using the ftrace infrastructure.

With:
	# cd /sys/kernel/debug/tracing
	# echo 'p _do_fork' > kprobe_events

before patch:
	# cat ../kprobes/list
	c0000000000daf08  k  _do_fork+0x8    [DISABLED]
	c000000000044fc0  k  kretprobe_trampoline+0x0    [OPTIMIZED]

and after patch:
	# cat ../kprobes/list
	c0000000000d074c  k  _do_fork+0xc    [DISABLED][FTRACE]
	c0000000000412b0  k  kretprobe_trampoline+0x0    [OPTIMIZED]
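
(Note the probed address above: it moves from _do_fork+0x8, the ABIv2
local entry point, to _do_fork+0xc, the ftrace location. With
-mprofile-kernel, the mcount call site falls within the first 16 bytes
of the function.)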

Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/kprobes.h | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h
index 3305a12286fa..09e74edee625 100644
--- a/arch/powerpc/include/asm/kprobes.h
+++ b/arch/powerpc/include/asm/kprobes.h
@@ -60,12 +60,32 @@ extern kprobe_opcode_t optprobe_template_end[];
 
 #ifdef PPC64_ELF_ABI_v2
 /* PPC64 ABIv2 needs local entry point */
+#ifdef CONFIG_KPROBES_ON_FTRACE
+/*
+ * Per livepatch.h, ftrace location is always within the first 16 bytes
+ * of a function on powerpc with -mprofile-kernel.
+ */
+#define kprobe_lookup_name(name, addr, offset)				\
+{									\
+	addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);		\
+	if (addr && !(offset)) {					\
+		unsigned long faddr;					\
+		faddr = ftrace_location_range((unsigned long)addr,	\
+					      (unsigned long)addr + 16);\
+		if (faddr)						\
+			addr = (kprobe_opcode_t *)faddr;		\
+		else							\
+			addr = (kprobe_opcode_t *)ppc_function_entry(addr);	\
+	}								\
+}
+#else
 #define kprobe_lookup_name(name, addr, offset)				\
 {									\
 	addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);		\
 	if (addr && !(offset))						\
 		addr = (kprobe_opcode_t *)ppc_function_entry(addr);	\
 }
+#endif
 #elif defined(PPC64_ELF_ABI_v1)
 /*
  * 64bit powerpc ABIv1 uses function descriptors:
-- 
2.11.0

* Re: [PATCH 1/3] powerpc: kprobes: add support for KPROBES_ON_FTRACE
From: Ananth N Mavinakayanahalli @ 2017-02-15  4:28 UTC
  To: Naveen N. Rao
  Cc: Masami Hiramatsu, Michael Ellerman, linux-kernel, linuxppc-dev

On Wed, Feb 15, 2017 at 12:28:34AM +0530, Naveen N. Rao wrote:
> Allow kprobes to be placed on ftrace _mcount() call sites. This
> optimization avoids the use of a trap by riding on the ftrace
> infrastructure.
> 
> This depends on HAVE_DYNAMIC_FTRACE_WITH_REGS, which in turn depends on
> MPROFILE_KERNEL, currently only enabled on powerpc64le with newer
> toolchains.
> 
> Based on the x86 code by Masami.
> 
> Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
> ---
>  arch/powerpc/Kconfig                 |   1 +
>  arch/powerpc/include/asm/kprobes.h   |  10 ++++
>  arch/powerpc/kernel/Makefile         |   3 ++
>  arch/powerpc/kernel/kprobes-ftrace.c | 100 +++++++++++++++++++++++++++++++++++
>  arch/powerpc/kernel/kprobes.c        |   4 +-
>  arch/powerpc/kernel/optprobes.c      |   3 ++
>  6 files changed, 120 insertions(+), 1 deletion(-)
>  create mode 100644 arch/powerpc/kernel/kprobes-ftrace.c

You'll also need to update
Documentation/features/debug/kprobes-on-ftrace/arch-support.txt

> +/* Ftrace callback handler for kprobes */
> +void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
> +			   struct ftrace_ops *ops, struct pt_regs *regs)
> +{
> +	struct kprobe *p;
> +	struct kprobe_ctlblk *kcb;
> +	unsigned long flags;
> +
> +	/* Disable irq for emulating a breakpoint and avoiding preempt */
> +	local_irq_save(flags);
> +	hard_irq_disable();
> +
> +	p = get_kprobe((kprobe_opcode_t *)nip);
> +	if (unlikely(!p) || kprobe_disabled(p))
> +		goto end;
> +
> +	kcb = get_kprobe_ctlblk();
> +	if (kprobe_running()) {
> +		kprobes_inc_nmissed_count(p);
> +	} else {
> +		unsigned long orig_nip = regs->nip;
> +		/* Kprobe handler expects regs->nip = nip + 1 as breakpoint hit */

Can you clarify this? On powerpc, regs->nip at the time of a
breakpoint hit points to the probed instruction, not the one after.

Ananth

* Re: [PATCH 1/3] powerpc: kprobes: add support for KPROBES_ON_FTRACE
From: Masami Hiramatsu @ 2017-02-15  7:11 UTC
  To: Naveen N. Rao
  Cc: Ananth N Mavinakayanahalli, Michael Ellerman, linux-kernel, linuxppc-dev

Hi Naveen,

On Wed, 15 Feb 2017 00:28:34 +0530
"Naveen N. Rao" <naveen.n.rao@linux.vnet.ibm.com> wrote:

> diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c
> index e51a045f3d3b..a8f414a0b141 100644
> --- a/arch/powerpc/kernel/optprobes.c
> +++ b/arch/powerpc/kernel/optprobes.c
> @@ -70,6 +70,9 @@ static unsigned long can_optimize(struct kprobe *p)
>  	struct instruction_op op;
>  	unsigned long nip = 0;
>  
> +	if (unlikely(kprobe_ftrace(p)))
> +		return 0;
> +

Hmm, this should not be checked in arch-dependent code, since it would
duplicate code in each arch. Please try the patch below.

Thanks,

commit 897bb304974c1b0c19a4274fea220b175b07f353
Author: Masami Hiramatsu <mhiramat@kernel.org>
Date:   Wed Feb 15 15:48:14 2017 +0900

    kprobes: Skip preparing optprobe if the probe is ftrace-based
    
    Skip preparing an optprobe if the probe is ftrace-based, since it
    must not be optimized anyway (or rather, it is already optimized by
    ftrace).
    
    Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>

diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index ebb4dad..546a8b1 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -746,13 +746,20 @@ static void kill_optimized_kprobe(struct kprobe *p)
 	arch_remove_optimized_kprobe(op);
 }
 
+static inline
+void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
+{
+	if (!kprobe_ftrace(p))
+		arch_prepare_optimized_kprobe(op, p);
+}
+
 /* Try to prepare optimized instructions */
 static void prepare_optimized_kprobe(struct kprobe *p)
 {
 	struct optimized_kprobe *op;
 
 	op = container_of(p, struct optimized_kprobe, kp);
-	arch_prepare_optimized_kprobe(op, p);
+	__prepare_optimized_kprobe(op, p);
 }
 
 /* Allocate new optimized_kprobe and try to prepare optimized instructions */
@@ -766,7 +773,7 @@ static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
 
 	INIT_LIST_HEAD(&op->list);
 	op->kp.addr = p->addr;
-	arch_prepare_optimized_kprobe(op, p);
+	__prepare_optimized_kprobe(op, p);
 
 	return &op->kp;
 }



-- 
Masami Hiramatsu <mhiramat@kernel.org>

* Re: [PATCH 1/3] powerpc: kprobes: add support for KPROBES_ON_FTRACE
From: Naveen N. Rao @ 2017-02-16 19:48 UTC
  To: Ananth N Mavinakayanahalli; +Cc: linuxppc-dev, Masami Hiramatsu, linux-kernel

On 2017/02/15 09:58AM, Ananth N Mavinakayanahalli wrote:
> On Wed, Feb 15, 2017 at 12:28:34AM +0530, Naveen N. Rao wrote:
> > Allow kprobes to be placed on ftrace _mcount() call sites. This
> > optimization avoids the use of a trap by riding on the ftrace
> > infrastructure.
> > 
> > This depends on HAVE_DYNAMIC_FTRACE_WITH_REGS, which in turn depends on
> > MPROFILE_KERNEL, currently only enabled on powerpc64le with newer
> > toolchains.
> > 
> > Based on the x86 code by Masami.
> > 
> > Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
> > ---
> >  arch/powerpc/Kconfig                 |   1 +
> >  arch/powerpc/include/asm/kprobes.h   |  10 ++++
> >  arch/powerpc/kernel/Makefile         |   3 ++
> >  arch/powerpc/kernel/kprobes-ftrace.c | 100 +++++++++++++++++++++++++++++++++++
> >  arch/powerpc/kernel/kprobes.c        |   4 +-
> >  arch/powerpc/kernel/optprobes.c      |   3 ++
> >  6 files changed, 120 insertions(+), 1 deletion(-)
> >  create mode 100644 arch/powerpc/kernel/kprobes-ftrace.c
> 
> You'll also need to update
> Documentation/features/debug/kprobes-on-ftrace/arch-support.txt

Sure.

> 
> > +/* Ftrace callback handler for kprobes */
> > +void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
> > +			   struct ftrace_ops *ops, struct pt_regs *regs)
> > +{
> > +	struct kprobe *p;
> > +	struct kprobe_ctlblk *kcb;
> > +	unsigned long flags;
> > +
> > +	/* Disable irq for emulating a breakpoint and avoiding preempt */
> > +	local_irq_save(flags);
> > +	hard_irq_disable();
> > +
> > +	p = get_kprobe((kprobe_opcode_t *)nip);
> > +	if (unlikely(!p) || kprobe_disabled(p))
> > +		goto end;
> > +
> > +	kcb = get_kprobe_ctlblk();
> > +	if (kprobe_running()) {
> > +		kprobes_inc_nmissed_count(p);
> > +	} else {
> > +		unsigned long orig_nip = regs->nip;
> > +		/* Kprobe handler expects regs->nip = nip + 1 as breakpoint hit */
> 
> Can you clarify this? On powerpc, regs->nip at the time of a
> breakpoint hit points to the probed instruction, not the one after.

Ah -- great catch! As it turns out, we actually need to set this back by 
an instruction due to the way ftrace works. I'll make the change.
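
Roughly, I'm thinking of something along these lines (an untested
sketch of the handler change):

	} else {
		unsigned long orig_nip = regs->nip;

		/*
		 * The nip saved by the ftrace trampoline points past the
		 * probed instruction, so rewind it by one instruction
		 * for the pre handler instead of advancing it.
		 */
		regs->nip -= MCOUNT_INSN_SIZE;

		__this_cpu_write(current_kprobe, p);
		...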


Thanks,
Naveen

* Re: [PATCH 1/3] powerpc: kprobes: add support for KPROBES_ON_FTRACE
From: Naveen N. Rao @ 2017-02-16 19:49 UTC
  To: Masami Hiramatsu
  Cc: Ananth N Mavinakayanahalli, Michael Ellerman, linux-kernel, linuxppc-dev

On 2017/02/15 04:11PM, Masami Hiramatsu wrote:
> Hi Naveen,
> 
> On Wed, 15 Feb 2017 00:28:34 +0530
> "Naveen N. Rao" <naveen.n.rao@linux.vnet.ibm.com> wrote:
> 
> > diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c
> > index e51a045f3d3b..a8f414a0b141 100644
> > --- a/arch/powerpc/kernel/optprobes.c
> > +++ b/arch/powerpc/kernel/optprobes.c
> > @@ -70,6 +70,9 @@ static unsigned long can_optimize(struct kprobe *p)
> >  	struct instruction_op op;
> >  	unsigned long nip = 0;
> >  
> > +	if (unlikely(kprobe_ftrace(p)))
> > +		return 0;
> > +
> 
> Hmm, this should not be checked in arch-dependent code, since it would
> duplicate code in each arch. Please try the patch below.

Thanks, Masami!

> 
> Thanks,
> 
> commit 897bb304974c1b0c19a4274fea220b175b07f353
> Author: Masami Hiramatsu <mhiramat@kernel.org>
> Date:   Wed Feb 15 15:48:14 2017 +0900
> 
>     kprobes: Skip preparing optprobe if the probe is ftrace-based
>     
>     Skip preparing an optprobe if the probe is ftrace-based, since it
>     must not be optimized anyway (or rather, it is already optimized
>     by ftrace).
>     
>     Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>

This works for me.
Tested-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>


Regards,
Naveen


> 
> diff --git a/kernel/kprobes.c b/kernel/kprobes.c
> index ebb4dad..546a8b1 100644
> --- a/kernel/kprobes.c
> +++ b/kernel/kprobes.c
> @@ -746,13 +746,20 @@ static void kill_optimized_kprobe(struct kprobe *p)
>  	arch_remove_optimized_kprobe(op);
>  }
> 
> +static inline
> +void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
> +{
> +	if (!kprobe_ftrace(p))
> +		arch_prepare_optimized_kprobe(op, p);
> +}
> +
>  /* Try to prepare optimized instructions */
>  static void prepare_optimized_kprobe(struct kprobe *p)
>  {
>  	struct optimized_kprobe *op;
> 
>  	op = container_of(p, struct optimized_kprobe, kp);
> -	arch_prepare_optimized_kprobe(op, p);
> +	__prepare_optimized_kprobe(op, p);
>  }
> 
>  /* Allocate new optimized_kprobe and try to prepare optimized instructions */
> @@ -766,7 +773,7 @@ static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
> 
>  	INIT_LIST_HEAD(&op->list);
>  	op->kp.addr = p->addr;
> -	arch_prepare_optimized_kprobe(op, p);
> +	__prepare_optimized_kprobe(op, p);
> 
>  	return &op->kp;
>  }
> 
> 
> 
> -- 
> Masami Hiramatsu <mhiramat@kernel.org>
> 
