* [PATCH v2 0/2] powerpc: Enhance error handling with patch_instruction()
From: Naveen N. Rao @ 2020-04-28 14:03 UTC
  To: linuxppc-dev; +Cc: Steven Rostedt

Changes in v2:
- Change the macro to use 'goto' instead of 'return' (see the sketch
  below)
- Rename the macro to indicate the use of 'goto'
- Convert more patch_instruction() uses in optprobes to use the new
  macro.
- Drop the 1st patch, which added error checking in
  do_patch_instruction(), since that is covered in a separate patchset.
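
In sketch form, the difference between the v1 and v2 macro (the v1
macro's name and exact shape here are assumptions based on the
changelog above, and the error logging is omitted for brevity; the
full v2 macro is in patch 1):

  /* v1 (assumed): return from the calling function directly */
  #define PATCH_INSN_OR_RETURN(addr, instr)                      \
  do {                                                           \
          if (patch_instruction((unsigned int *)(addr), instr))  \
                  return -EPERM;                                 \
  } while (0)

  /*
   * v2: branch to a caller-supplied label, so each call site
   * chooses its own error path and return value
   */
  #define PATCH_INSN_OR_GOTO(addr, instr, label)                 \
  do {                                                           \
          if (patch_instruction((unsigned int *)(addr), instr))  \
                  goto label;                                    \
  } while (0)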

v1:
http://lkml.kernel.org/r/cover.1587654213.git.naveen.n.rao@linux.vnet.ibm.com


- Naveen

Naveen N. Rao (2):
  powerpc/ftrace: Simplify error checking when patching instructions
  powerpc/kprobes: Check return value of patch_instruction()

 arch/powerpc/kernel/kprobes.c      |  13 +++-
 arch/powerpc/kernel/optprobes.c    | 109 +++++++++++++++++++++--------
 arch/powerpc/kernel/trace/ftrace.c |  96 +++++++++++++++----------
 3 files changed, 149 insertions(+), 69 deletions(-)


base-commit: 8299da600ad05b8aa0f15ec0f5f03bd40e37d6f0
-- 
2.25.1


* [PATCH v2 1/2] powerpc/ftrace: Simplify error checking when patching instructions
From: Naveen N. Rao @ 2020-04-28 14:03 UTC
  To: linuxppc-dev; +Cc: Steven Rostedt

Introduce a macro, PATCH_INSN_OR_GOTO(), to simplify instruction
patching and to make the error messages more uniform and useful:
- print an error message that includes the original return value
- print the function name and line number, so that the offending
  location is clear
- goto a label which always returns -EPERM, which ftrace_bug() expects
  for proper error handling
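
The resulting call-site pattern, condensed from the
ftrace_modify_code() hunk in the diff below:

  /* before: open-coded check at each call site */
  if (patch_instruction((unsigned int *)ip, new))
          return -EPERM;

  /*
   * after: the macro logs function, line, address and return code,
   * then branches to a common error label
   */
  PATCH_INSN_OR_GOTO(ip, new, patch_err);

  return 0;

  patch_err:
          return -EPERM;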

Also eliminate the use of patch_branch(), since most call sites already
invoke create_branch() for error checking before patching. Instead,
pass the instruction returned by create_branch() to
PATCH_INSN_OR_GOTO().
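
For instance, in setup_mcount_compiler_tramp() (condensed from the
diff below):

  /*
   * before: create_branch() only range-checks, and patch_branch()
   * then regenerates the same branch instruction
   */
  if (!create_branch((void *)tramp, ptr, 0))
          return -1;
  if (patch_branch((unsigned int *)tramp, ptr, 0))
          return -1;

  /* after: patch in the instruction create_branch() already built */
  op = create_branch((void *)tramp, ptr, 0);
  if (!op)
          return -1;
  PATCH_INSN_OR_GOTO(tramp, op, patch_err);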

Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
---
 arch/powerpc/kernel/trace/ftrace.c | 96 +++++++++++++++++++-----------
 1 file changed, 60 insertions(+), 36 deletions(-)

diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
index 7ea0ca044b65..63edbd48af42 100644
--- a/arch/powerpc/kernel/trace/ftrace.c
+++ b/arch/powerpc/kernel/trace/ftrace.c
@@ -31,6 +31,17 @@
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
+#define PATCH_INSN_OR_GOTO(addr, instr, label)				     \
+do {									     \
+	int rc = patch_instruction((unsigned int *)(addr), instr);	     \
+	if (rc) {							     \
+		pr_err("%s:%d Error patching instruction at 0x%pK (%pS): %d\n", \
+				__func__, __LINE__,			     \
+				(void *)(addr), (void *)(addr), rc);	     \
+		goto label;						     \
+	}								     \
+} while (0)
+
 /*
  * We generally only have a single long_branch tramp and at most 2 or 3 plt
  * tramps generated. But, we don't use the plt tramps currently. We also allot
@@ -78,10 +89,12 @@ ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
 	}
 
 	/* replace the text with the new text */
-	if (patch_instruction((unsigned int *)ip, new))
-		return -EPERM;
+	PATCH_INSN_OR_GOTO(ip, new, patch_err);
 
 	return 0;
+
+patch_err:
+	return -EPERM;
 }
 
 /*
@@ -204,12 +217,12 @@ __ftrace_make_nop(struct module *mod,
 	}
 #endif /* CONFIG_MPROFILE_KERNEL */
 
-	if (patch_instruction((unsigned int *)ip, pop)) {
-		pr_err("Patching NOP failed.\n");
-		return -EPERM;
-	}
+	PATCH_INSN_OR_GOTO(ip, pop, patch_err);
 
 	return 0;
+
+patch_err:
+	return -EPERM;
 }
 
 #else /* !PPC64 */
@@ -276,10 +289,12 @@ __ftrace_make_nop(struct module *mod,
 
 	op = PPC_INST_NOP;
 
-	if (patch_instruction((unsigned int *)ip, op))
-		return -EPERM;
+	PATCH_INSN_OR_GOTO(ip, op, patch_err);
 
 	return 0;
+
+patch_err:
+	return -EPERM;
 }
 #endif /* PPC64 */
 #endif /* CONFIG_MODULES */
@@ -322,7 +337,7 @@ static int add_ftrace_tramp(unsigned long tramp)
  */
 static int setup_mcount_compiler_tramp(unsigned long tramp)
 {
-	int i, op;
+	unsigned int i, op;
 	unsigned long ptr;
 	static unsigned long ftrace_plt_tramps[NUM_FTRACE_TRAMPS];
 
@@ -366,16 +381,14 @@ static int setup_mcount_compiler_tramp(unsigned long tramp)
 #else
 	ptr = ppc_global_function_entry((void *)ftrace_caller);
 #endif
-	if (!create_branch((void *)tramp, ptr, 0)) {
+	op = create_branch((void *)tramp, ptr, 0);
+	if (!op) {
 		pr_debug("%ps is not reachable from existing mcount tramp\n",
 				(void *)ptr);
 		return -1;
 	}
 
-	if (patch_branch((unsigned int *)tramp, ptr, 0)) {
-		pr_debug("REL24 out of range!\n");
-		return -1;
-	}
+	PATCH_INSN_OR_GOTO(tramp, op, patch_err);
 
 	if (add_ftrace_tramp(tramp)) {
 		pr_debug("No tramp locations left\n");
@@ -383,6 +396,9 @@ static int setup_mcount_compiler_tramp(unsigned long tramp)
 	}
 
 	return 0;
+
+patch_err:
+	return -EPERM;
 }
 
 static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr)
@@ -416,12 +432,12 @@ static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr)
 		}
 	}
 
-	if (patch_instruction((unsigned int *)ip, PPC_INST_NOP)) {
-		pr_err("Patching NOP failed.\n");
-		return -EPERM;
-	}
+	PATCH_INSN_OR_GOTO(ip, PPC_INST_NOP, patch_err);
 
 	return 0;
+
+patch_err:
+	return -EPERM;
 }
 
 int ftrace_make_nop(struct module *mod,
@@ -557,17 +573,18 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 	}
 
 	/* Ensure branch is within 24 bits */
-	if (!create_branch(ip, tramp, BRANCH_SET_LINK)) {
+	op[0] = create_branch(ip, tramp, BRANCH_SET_LINK);
+	if (!op[0]) {
 		pr_err("Branch out of range\n");
 		return -EINVAL;
 	}
 
-	if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
-		pr_err("REL24 out of range!\n");
-		return -EINVAL;
-	}
+	PATCH_INSN_OR_GOTO(ip, op[0], patch_err);
 
 	return 0;
+
+patch_err:
+	return -EPERM;
 }
 
 #else  /* !CONFIG_PPC64: */
@@ -603,10 +620,12 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 
 	pr_devel("write to %lx\n", rec->ip);
 
-	if (patch_instruction((unsigned int *)ip, op))
-		return -EPERM;
+	PATCH_INSN_OR_GOTO(ip, op, patch_err);
 
 	return 0;
+
+patch_err:
+	return -EPERM;
 }
 #endif /* CONFIG_PPC64 */
 #endif /* CONFIG_MODULES */
@@ -650,12 +669,18 @@ static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr)
 		return -EINVAL;
 	}
 
-	if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
-		pr_err("Error patching branch to ftrace tramp!\n");
+	op = create_branch(ip, tramp, BRANCH_SET_LINK);
+	if (!op) {
+		pr_err("Branch out of range\n");
 		return -EINVAL;
 	}
 
+	PATCH_INSN_OR_GOTO(ip, op, patch_err);
+
 	return 0;
+
+patch_err:
+	return -EPERM;
 }
 
 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
@@ -748,10 +773,8 @@ __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
 	/* The new target may be within range */
 	if (test_24bit_addr(ip, addr)) {
 		/* within range */
-		if (patch_branch((unsigned int *)ip, addr, BRANCH_SET_LINK)) {
-			pr_err("REL24 out of range!\n");
-			return -EINVAL;
-		}
+		op = create_branch((unsigned int *)ip, addr, BRANCH_SET_LINK);
+		PATCH_INSN_OR_GOTO(ip, op, patch_err);
 
 		return 0;
 	}
@@ -776,17 +799,18 @@ __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
 	}
 
 	/* Ensure branch is within 24 bits */
-	if (!create_branch((unsigned int *)ip, tramp, BRANCH_SET_LINK)) {
+	op = create_branch((unsigned int *)ip, tramp, BRANCH_SET_LINK);
+	if (!op) {
 		pr_err("Branch out of range\n");
 		return -EINVAL;
 	}
 
-	if (patch_branch((unsigned int *)ip, tramp, BRANCH_SET_LINK)) {
-		pr_err("REL24 out of range!\n");
-		return -EINVAL;
-	}
+	PATCH_INSN_OR_GOTO(ip, op, patch_err);
 
 	return 0;
+
+patch_err:
+	return -EPERM;
 }
 #endif
 
-- 
2.25.1


* [PATCH v2 2/2] powerpc/kprobes: Check return value of patch_instruction()
From: Naveen N. Rao @ 2020-04-28 14:03 UTC
  To: linuxppc-dev; +Cc: Steven Rostedt

patch_instruction() can fail in some scenarios. Add appropriate error
checking so that such failures are caught and logged, and a suitable
error code is returned.
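
For example, arch_arm_kprobe() now warns and logs any failure
(condensed from the diff below):

  void arch_arm_kprobe(struct kprobe *p)
  {
          int rc = patch_instruction(p->addr, BREAKPOINT_INSTRUCTION);

          if (WARN_ON(rc))
                  pr_err("Failed to patch trap at 0x%pK: %d\n",
                                  (void *)p->addr, rc);
  }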

Fixes: d07df82c43be8 ("powerpc/kprobes: Move kprobes over to patch_instruction()")
Fixes: f3eca95638931 ("powerpc/kprobes/optprobes: Use patch_instruction()")
Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
---
 arch/powerpc/kernel/kprobes.c   |  13 +++-
 arch/powerpc/kernel/optprobes.c | 109 +++++++++++++++++++++++---------
 2 files changed, 89 insertions(+), 33 deletions(-)

diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 81efb605113e..9eb1cc05ddd5 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -19,6 +19,7 @@
 #include <linux/extable.h>
 #include <linux/kdebug.h>
 #include <linux/slab.h>
+#include <linux/bug.h>
 #include <asm/code-patching.h>
 #include <asm/cacheflush.h>
 #include <asm/sstep.h>
@@ -138,13 +139,21 @@ NOKPROBE_SYMBOL(arch_prepare_kprobe);
 
 void arch_arm_kprobe(struct kprobe *p)
 {
-	patch_instruction(p->addr, BREAKPOINT_INSTRUCTION);
+	int rc = patch_instruction(p->addr, BREAKPOINT_INSTRUCTION);
+
+	if (WARN_ON(rc))
+		pr_err("Failed to patch trap at 0x%pK: %d\n",
+				(void *)p->addr, rc);
 }
 NOKPROBE_SYMBOL(arch_arm_kprobe);
 
 void arch_disarm_kprobe(struct kprobe *p)
 {
-	patch_instruction(p->addr, p->opcode);
+	int rc = patch_instruction(p->addr, p->opcode);
+
+	if (WARN_ON(rc))
+		pr_err("Failed to remove trap at 0x%pK: %d\n",
+				(void *)p->addr, rc);
 }
 NOKPROBE_SYMBOL(arch_disarm_kprobe);
 
diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c
index 024f7aad1952..20897600db2e 100644
--- a/arch/powerpc/kernel/optprobes.c
+++ b/arch/powerpc/kernel/optprobes.c
@@ -139,52 +139,80 @@ void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
 	}
 }
 
+#define PATCH_INSN_OR_GOTO(addr, instr, label)				     \
+do {									     \
+	int rc = patch_instruction((unsigned int *)(addr), instr);	     \
+	if (rc) {							     \
+		pr_err("%s:%d Error patching instruction at 0x%pK (%pS): %d\n", \
+				__func__, __LINE__,			     \
+				(void *)(addr), (void *)(addr), rc);	     \
+		goto label;						     \
+	}								     \
+} while (0)
+
 /*
  * emulate_step() requires insn to be emulated as
  * second parameter. Load register 'r4' with the
  * instruction.
  */
-void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr)
+static int patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr)
 {
 	/* addis r4,0,(insn)@h */
-	patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(4) |
-			  ((val >> 16) & 0xffff));
+	PATCH_INSN_OR_GOTO(addr, PPC_INST_ADDIS | ___PPC_RT(4) |
+			  ((val >> 16) & 0xffff),
+			  patch_err);
 	addr++;
 
 	/* ori r4,r4,(insn)@l */
-	patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(4) |
-			  ___PPC_RS(4) | (val & 0xffff));
+	PATCH_INSN_OR_GOTO(addr, PPC_INST_ORI | ___PPC_RA(4) |
+			  ___PPC_RS(4) | (val & 0xffff),
+			  patch_err);
+
+	return 0;
+
+patch_err:
+	return -EFAULT;
 }
 
 /*
  * Generate instructions to load provided immediate 64-bit value
  * to register 'r3' and patch these instructions at 'addr'.
  */
-void patch_imm64_load_insns(unsigned long val, kprobe_opcode_t *addr)
+static int patch_imm64_load_insns(unsigned long val, kprobe_opcode_t *addr)
 {
 	/* lis r3,(op)@highest */
-	patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(3) |
-			  ((val >> 48) & 0xffff));
+	PATCH_INSN_OR_GOTO(addr, PPC_INST_ADDIS | ___PPC_RT(3) |
+			  ((val >> 48) & 0xffff),
+			  patch_err);
 	addr++;
 
 	/* ori r3,r3,(op)@higher */
-	patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) |
-			  ___PPC_RS(3) | ((val >> 32) & 0xffff));
+	PATCH_INSN_OR_GOTO(addr, PPC_INST_ORI | ___PPC_RA(3) |
+			  ___PPC_RS(3) | ((val >> 32) & 0xffff),
+			  patch_err);
 	addr++;
 
 	/* rldicr r3,r3,32,31 */
-	patch_instruction(addr, PPC_INST_RLDICR | ___PPC_RA(3) |
-			  ___PPC_RS(3) | __PPC_SH64(32) | __PPC_ME64(31));
+	PATCH_INSN_OR_GOTO(addr, PPC_INST_RLDICR | ___PPC_RA(3) |
+			  ___PPC_RS(3) | __PPC_SH64(32) | __PPC_ME64(31),
+			  patch_err);
 	addr++;
 
 	/* oris r3,r3,(op)@h */
-	patch_instruction(addr, PPC_INST_ORIS | ___PPC_RA(3) |
-			  ___PPC_RS(3) | ((val >> 16) & 0xffff));
+	PATCH_INSN_OR_GOTO(addr, PPC_INST_ORIS | ___PPC_RA(3) |
+			  ___PPC_RS(3) | ((val >> 16) & 0xffff),
+			  patch_err);
 	addr++;
 
 	/* ori r3,r3,(op)@l */
-	patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) |
-			  ___PPC_RS(3) | (val & 0xffff));
+	PATCH_INSN_OR_GOTO(addr, PPC_INST_ORI | ___PPC_RA(3) |
+			  ___PPC_RS(3) | (val & 0xffff),
+			  patch_err);
+
+	return 0;
+
+patch_err:
+	return -EFAULT;
 }
 
 int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
@@ -216,30 +244,33 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
 	 * be within 32MB on either side of the current instruction.
 	 */
 	b_offset = (unsigned long)buff - (unsigned long)p->addr;
-	if (!is_offset_in_branch_range(b_offset))
+	if (!is_offset_in_branch_range(b_offset)) {
+		rc = -ERANGE;
 		goto error;
+	}
 
 	/* Check if the return address is also within 32MB range */
 	b_offset = (unsigned long)(buff + TMPL_RET_IDX) -
 			(unsigned long)nip;
-	if (!is_offset_in_branch_range(b_offset))
+	if (!is_offset_in_branch_range(b_offset)) {
+		rc = -ERANGE;
 		goto error;
+	}
 
 	/* Setup template */
 	/* We can optimize this via patch_instruction_window later */
 	size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int);
 	pr_devel("Copying template to %p, size %lu\n", buff, size);
-	for (i = 0; i < size; i++) {
-		rc = patch_instruction(buff + i, *(optprobe_template_entry + i));
-		if (rc < 0)
-			goto error;
-	}
+	for (i = 0; i < size; i++)
+		PATCH_INSN_OR_GOTO(buff + i, *(optprobe_template_entry + i),
+				patch_err);
 
 	/*
 	 * Fixup the template with instructions to:
 	 * 1. load the address of the actual probepoint
 	 */
-	patch_imm64_load_insns((unsigned long)op, buff + TMPL_OP_IDX);
+	if (patch_imm64_load_insns((unsigned long)op, buff + TMPL_OP_IDX))
+		goto patch_err;
 
 	/*
 	 * 2. branch to optimized_callback() and emulate_step()
@@ -248,6 +279,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
 	emulate_step_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("emulate_step");
 	if (!op_callback_addr || !emulate_step_addr) {
 		WARN(1, "Unable to lookup optimized_callback()/emulate_step()\n");
+		rc = -ERANGE;
 		goto error;
 	}
 
@@ -259,21 +291,29 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
 				(unsigned long)emulate_step_addr,
 				BRANCH_SET_LINK);
 
-	if (!branch_op_callback || !branch_emulate_step)
+	if (!branch_op_callback || !branch_emulate_step) {
+		rc = -ERANGE;
 		goto error;
+	}
+
+	PATCH_INSN_OR_GOTO(buff + TMPL_CALL_HDLR_IDX, branch_op_callback,
+			patch_err);
 
-	patch_instruction(buff + TMPL_CALL_HDLR_IDX, branch_op_callback);
-	patch_instruction(buff + TMPL_EMULATE_IDX, branch_emulate_step);
+	PATCH_INSN_OR_GOTO(buff + TMPL_EMULATE_IDX, branch_emulate_step,
+			patch_err);
 
 	/*
 	 * 3. load instruction to be emulated into relevant register, and
 	 */
-	patch_imm32_load_insns(*p->ainsn.insn, buff + TMPL_INSN_IDX);
+	if (patch_imm32_load_insns(*p->ainsn.insn, buff + TMPL_INSN_IDX))
+		goto patch_err;
 
 	/*
 	 * 4. branch back from trampoline
 	 */
-	patch_branch(buff + TMPL_RET_IDX, (unsigned long)nip, 0);
+	PATCH_INSN_OR_GOTO(buff + TMPL_RET_IDX,
+		create_branch(buff + TMPL_RET_IDX, (unsigned long)nip, 0),
+		patch_err);
 
 	flush_icache_range((unsigned long)buff,
 			   (unsigned long)(&buff[TMPL_END_IDX]));
@@ -282,9 +322,11 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
 
 	return 0;
 
+patch_err:
+	rc = -EFAULT;
 error:
 	free_ppc_optinsn_slot(buff, 0);
-	return -ERANGE;
+	return rc;
 
 }
 
@@ -307,6 +349,7 @@ void arch_optimize_kprobes(struct list_head *oplist)
 {
 	struct optimized_kprobe *op;
 	struct optimized_kprobe *tmp;
+	int rc;
 
 	list_for_each_entry_safe(op, tmp, oplist, list) {
 		/*
@@ -315,9 +358,13 @@ void arch_optimize_kprobes(struct list_head *oplist)
 		 */
 		memcpy(op->optinsn.copied_insn, op->kp.addr,
 					       RELATIVEJUMP_SIZE);
-		patch_instruction(op->kp.addr,
+		rc = patch_instruction(op->kp.addr,
 			create_branch((unsigned int *)op->kp.addr,
 				      (unsigned long)op->optinsn.insn, 0));
+		if (rc)
+			pr_err("%s:%d: Error patching instruction at 0x%pK: %d\n",
+					__func__, __LINE__,
+					(void *)(op->kp.addr), rc);
 		list_del_init(&op->list);
 	}
 }
-- 
2.25.1

