* [PATCH 1/3] MIPS: ftrace: Fix N32 save registers
@ 2021-01-31  8:14 Jinyang He
  2021-01-31  8:14 ` [PATCH 2/3] MIPS: ftrace: Combine ftrace_modify_code* into one function Jinyang He
                   ` (2 more replies)
  0 siblings, 3 replies; 8+ messages in thread
From: Jinyang He @ 2021-01-31  8:14 UTC (permalink / raw)
  To: Thomas Bogendoerfer, Steven Rostedt, Ingo Molnar
  Cc: Wu Zhangjin, linux-mips, linux-kernel, Huacai Chen, Jiaxun Yang,
	Tiezhu Yang

CONFIG_64BIT is confusing here: the N32 ABI also passes parameters in a0-a7.
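
For illustration only (this sketch is not part of the patch and NR_ARG_REGS
is a made-up name): _MIPS_SIM identifies which ABI the code is built for,
while CONFIG_64BIT only says that the kernel itself is 64-bit.

#include <asm/sgidefs.h>	/* _MIPS_SIM_ABI32 / _MIPS_SIM_NABI32 / _MIPS_SIM_ABI64 */

#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
#define NR_ARG_REGS	8	/* N64 and N32: a0-a7 ($4-$11) carry arguments */
#else
#define NR_ARG_REGS	4	/* O32: only a0-a3 ($4-$7) carry arguments */
#endif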

Signed-off-by: Jinyang He <hejinyang@loongson.cn>
---
 arch/mips/kernel/mcount.S | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
index cff52b2..808257a 100644
--- a/arch/mips/kernel/mcount.S
+++ b/arch/mips/kernel/mcount.S
@@ -27,7 +27,7 @@
 	PTR_S	a1, PT_R5(sp)
 	PTR_S	a2, PT_R6(sp)
 	PTR_S	a3, PT_R7(sp)
-#ifdef CONFIG_64BIT
+#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
 	PTR_S	a4, PT_R8(sp)
 	PTR_S	a5, PT_R9(sp)
 	PTR_S	a6, PT_R10(sp)
@@ -42,7 +42,7 @@
 	PTR_L	a1, PT_R5(sp)
 	PTR_L	a2, PT_R6(sp)
 	PTR_L	a3, PT_R7(sp)
-#ifdef CONFIG_64BIT
+#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
 	PTR_L	a4, PT_R8(sp)
 	PTR_L	a5, PT_R9(sp)
 	PTR_L	a6, PT_R10(sp)
-- 
2.1.0



* [PATCH 2/3] MIPS: ftrace: Combine ftrace_modify_code* into one function
  2021-01-31  8:14 [PATCH 1/3] MIPS: ftrace: Fix N32 save registers Jinyang He
@ 2021-01-31  8:14 ` Jinyang He
  2021-01-31  8:14 ` [PATCH 3/3] MIPS: ftrace: Add DYNAMIC_FTRACE_WITH_REGS support Jinyang He
       [not found] ` <b1a5eae4-2032-4ace-aa48-a21893e47528@www.fastmail.com>
  2 siblings, 0 replies; 8+ messages in thread
From: Jinyang He @ 2021-01-31  8:14 UTC (permalink / raw)
  To: Thomas Bogendoerfer, Steven Rostedt, Ingo Molnar
  Cc: Wu Zhangjin, linux-mips, linux-kernel, Huacai Chen, Jiaxun Yang,
	Tiezhu Yang

ftrace_modify_code_2r() behaves like ftrace_modify_code_2(); only the
order in which the two instructions are modified differs. Add a new
struct ftrace_insn and fold both functions into ftrace_modify_code().
The combined function behaves like the original ftrace_modify_code()
when ftrace_insn.code[1] is DONT_SET.
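
As a usage sketch of this convention (example_usage() is a made-up helper;
the other names are the ones introduced by the patch below), assuming the
definitions from arch/mips/kernel/ftrace.c after this change:

static int example_usage(unsigned long ip)
{
	struct ftrace_insn one, two;
	int ret;

	/* Single-word update: behaves like the old ftrace_modify_code(),
	 * word 1 is skipped because it is DONT_SET. */
	one.code[0] = INSN_NOP;
	one.code[1] = DONT_SET;
	ret = ftrace_modify_code(ip, one);
	if (ret)
		return ret;

	/* Two-word update: the combined helper writes word 1 at ip + 4
	 * first, then word 0 at ip, as ftrace_modify_code_2r() used to. */
	two.code[0] = INSN_B_1F;
	two.code[1] = INSN_NOP;
	return ftrace_modify_code(ip, two);
}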

Signed-off-by: Jinyang He <hejinyang@loongson.cn>
---
 arch/mips/include/asm/ftrace.h |   3 +
 arch/mips/kernel/ftrace.c      | 157 +++++++++++++++++------------------------
 2 files changed, 68 insertions(+), 92 deletions(-)

diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h
index b463f2a..636c640 100644
--- a/arch/mips/include/asm/ftrace.h
+++ b/arch/mips/include/asm/ftrace.h
@@ -84,6 +84,9 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
 struct dyn_arch_ftrace {
 };
 
+struct ftrace_insn {
+	unsigned int code[2];
+};
 #endif /*  CONFIG_DYNAMIC_FTRACE */
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_FUNCTION_TRACER */
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index f57e68f..fd6d1da 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -44,89 +44,70 @@ void arch_ftrace_update_code(int command)
 #define INSN_NOP 0x00000000	/* nop */
 #define INSN_JAL(addr)	\
 	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
+#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
+
+#define DONT_SET 0xffffffff
+
+static struct ftrace_insn jal_ftrace_caller __read_mostly;
+static struct ftrace_insn la_mcount __read_mostly;
+static struct ftrace_insn nop_kernel __read_mostly;
+static struct ftrace_insn nop_module __read_mostly;
 
-static unsigned int insn_jal_ftrace_caller __read_mostly;
-static unsigned int insn_la_mcount[2] __read_mostly;
-static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static struct ftrace_insn j_ftrace_graph_caller __read_mostly;
+#endif
 
 static inline void ftrace_dyn_arch_init_insns(void)
 {
 	u32 *buf;
-	unsigned int v1;
+	unsigned int v1 = 3;
 
 	/* la v1, _mcount */
-	v1 = 3;
-	buf = (u32 *)&insn_la_mcount[0];
+	buf = (u32 *)&la_mcount;
 	UASM_i_LA(&buf, v1, MCOUNT_ADDR);
+#ifdef CONFIG_64BIT
+	la_mcount.code[1] = DONT_SET;
+#endif
 
 	/* jal (ftrace_caller + 8), jump over the first two instruction */
-	buf = (u32 *)&insn_jal_ftrace_caller;
+	buf = (u32 *)&jal_ftrace_caller;
 	uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);
+	jal_ftrace_caller.code[1] = DONT_SET;
+
+	nop_kernel.code[0] = INSN_NOP;
+	nop_module.code[0] = INSN_B_1F;
+
+#ifdef CONFIG_64BIT
+	nop_kernel.code[1] = DONT_SET;
+	nop_module.code[1] = DONT_SET;
+#else
+	nop_kernel.code[1] = INSN_NOP;
+	nop_module.code[1] = INSN_NOP;
+#endif
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	/* j ftrace_graph_caller */
-	buf = (u32 *)&insn_j_ftrace_graph_caller;
+	buf = (u32 *)&j_ftrace_graph_caller;
 	uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
+	j_ftrace_graph_caller.code[1] = DONT_SET;
 #endif
 }
 
-static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
-{
-	int faulted;
-	mm_segment_t old_fs;
-
-	/* *(unsigned int *)ip = new_code; */
-	safe_store_code(new_code, ip, faulted);
-
-	if (unlikely(faulted))
-		return -EFAULT;
-
-	old_fs = get_fs();
-	set_fs(KERNEL_DS);
-	flush_icache_range(ip, ip + 8);
-	set_fs(old_fs);
-
-	return 0;
-}
-
-#ifndef CONFIG_64BIT
-static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
-				unsigned int new_code2)
-{
-	int faulted;
-	mm_segment_t old_fs;
-
-	safe_store_code(new_code1, ip, faulted);
-	if (unlikely(faulted))
-		return -EFAULT;
-
-	ip += 4;
-	safe_store_code(new_code2, ip, faulted);
-	if (unlikely(faulted))
-		return -EFAULT;
-
-	ip -= 4;
-	old_fs = get_fs();
-	set_fs(KERNEL_DS);
-	flush_icache_range(ip, ip + 8);
-	set_fs(old_fs);
-
-	return 0;
-}
-
-static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1,
-				 unsigned int new_code2)
+static int ftrace_modify_code(unsigned long ip, struct ftrace_insn insns)
 {
 	int faulted;
 	mm_segment_t old_fs;
 
-	ip += 4;
-	safe_store_code(new_code2, ip, faulted);
-	if (unlikely(faulted))
-		return -EFAULT;
+	if (insns.code[1] != DONT_SET) {
+		ip += 4;
+		safe_store_code(insns.code[1], ip, faulted);
+		if (unlikely(faulted))
+			return -EFAULT;
+		ip -= 4;
+	}
 
-	ip -= 4;
-	safe_store_code(new_code1, ip, faulted);
+	/* *(unsigned int *)ip = insns.code[0]; */
+	safe_store_code(insns.code[0], ip, faulted);
 	if (unlikely(faulted))
 		return -EFAULT;
 
@@ -137,7 +118,6 @@ static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1,
 
 	return 0;
 }
-#endif
 
 /*
  * The details about the calling site of mcount on MIPS
@@ -169,66 +149,55 @@ static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1,
  *				    1: offset = 4 instructions
  */
 
-#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
-
 int ftrace_make_nop(struct module *mod,
 		    struct dyn_ftrace *rec, unsigned long addr)
 {
-	unsigned int new;
+	struct ftrace_insn insns;
 	unsigned long ip = rec->ip;
 
 	/*
 	 * If ip is in kernel space, no long call, otherwise, long call is
 	 * needed.
 	 */
-	new = core_kernel_text(ip) ? INSN_NOP : INSN_B_1F;
-#ifdef CONFIG_64BIT
-	return ftrace_modify_code(ip, new);
-#else
-	/*
-	 * On 32 bit MIPS platforms, gcc adds a stack adjust
-	 * instruction in the delay slot after the branch to
-	 * mcount and expects mcount to restore the sp on return.
-	 * This is based on a legacy API and does nothing but
-	 * waste instructions so it's being removed at runtime.
-	 */
-	return ftrace_modify_code_2(ip, new, INSN_NOP);
-#endif
+	insns = core_kernel_text(ip) ? nop_kernel : nop_module;
+
+	return ftrace_modify_code(ip, insns);
 }
 
 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 {
-	unsigned int new;
+	struct ftrace_insn insns;
 	unsigned long ip = rec->ip;
 
-	new = core_kernel_text(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];
+	insns = core_kernel_text(ip) ? jal_ftrace_caller : la_mcount;
 
-#ifdef CONFIG_64BIT
-	return ftrace_modify_code(ip, new);
-#else
-	return ftrace_modify_code_2r(ip, new, core_kernel_text(ip) ?
-						INSN_NOP : insn_la_mcount[1]);
-#endif
+	return ftrace_modify_code(ip, insns);
 }
 
 #define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))
 
 int ftrace_update_ftrace_func(ftrace_func_t func)
 {
-	unsigned int new;
+	struct ftrace_insn insns;
 
-	new = INSN_JAL((unsigned long)func);
+	insns.code[0] = INSN_JAL((unsigned long)func);
+	insns.code[1] = DONT_SET;
 
-	return ftrace_modify_code(FTRACE_CALL_IP, new);
+	return ftrace_modify_code(FTRACE_CALL_IP, insns);
 }
 
 int __init ftrace_dyn_arch_init(void)
 {
+	struct ftrace_insn insns;
+
+	insns.code[0] = INSN_NOP;
+	insns.code[1] = DONT_SET;
+
 	/* Encode the instructions when booting */
 	ftrace_dyn_arch_init_insns();
 
 	/* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
-	ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);
+	ftrace_modify_code(MCOUNT_ADDR, insns);
 
 	return 0;
 }
@@ -243,13 +212,17 @@ extern void ftrace_graph_call(void);
 
 int ftrace_enable_ftrace_graph_caller(void)
 {
-	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
-			insn_j_ftrace_graph_caller);
+	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, j_ftrace_graph_caller);
 }
 
 int ftrace_disable_ftrace_graph_caller(void)
 {
-	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
+	struct ftrace_insn insns;
+
+	insns.code[0] = INSN_NOP;
+	insns.code[1] = DONT_SET;
+
+	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, insns);
 }
 
 #endif	/* CONFIG_DYNAMIC_FTRACE */
-- 
2.1.0



* [PATCH 3/3] MIPS: ftrace: Add DYNAMIC_FTRACE_WITH_REGS support
  2021-01-31  8:14 [PATCH 1/3] MIPS: ftrace: Fix N32 save registers Jinyang He
  2021-01-31  8:14 ` [PATCH 2/3] MIPS: ftrace: Combine ftrace_modify_code* into one function Jinyang He
@ 2021-01-31  8:14 ` Jinyang He
  2021-02-01 14:56   ` Steven Rostedt
       [not found] ` <b1a5eae4-2032-4ace-aa48-a21893e47528@www.fastmail.com>
  2 siblings, 1 reply; 8+ messages in thread
From: Jinyang He @ 2021-01-31  8:14 UTC (permalink / raw)
  To: Thomas Bogendoerfer, Steven Rostedt, Ingo Molnar
  Cc: Wu Zhangjin, linux-mips, linux-kernel, Huacai Chen, Jiaxun Yang,
	Tiezhu Yang

In the past, we have always used the address of _mcount as the address of
ftrace_caller. Sharing that address saves one ftrace_modify_code operation
when ftrace handles modules on 64-bit platforms. In order to provide
DYNAMIC_FTRACE_WITH_REGS, we have to take _mcount out of ftrace_caller and
add a separate definition of _mcount, which means two instructions must be
modified. Also add the definition of ftrace_regs_caller, which stores and
restores more registers. Some functions in ftrace.c also need to be aware
of ftrace_regs_caller; modify these functions and add the related
ftrace_regs_caller code.
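
As a hedged sketch of what this enables for ftrace users (my_ops and
my_callback are made-up names; the ftrace_ops API and FTRACE_OPS_FL_SAVE_REGS
come from include/linux/ftrace.h, assuming the callback prototype that takes
struct ftrace_regs *):

#include <linux/ftrace.h>

static void my_callback(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	/* With FTRACE_OPS_FL_SAVE_REGS, ftrace_regs_caller has saved a
	 * full register snapshot of the traced function's entry state. */
}

static struct ftrace_ops my_ops = {
	.func	= my_callback,
	.flags	= FTRACE_OPS_FL_SAVE_REGS,	/* request the _regs trampoline */
};

/* e.g. from init code: register_ftrace_function(&my_ops); */

Users such as kprobes-on-ftrace rely on this flag to get a full pt_regs.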

Signed-off-by: Jinyang He <hejinyang@loongson.cn>
---
 arch/mips/Kconfig              |   1 +
 arch/mips/include/asm/ftrace.h |   5 ++
 arch/mips/kernel/ftrace.c      | 159 ++++++++++++++++++++++++++++-------------
 arch/mips/kernel/mcount.S      | 137 +++++++++++++++++++++++++++++------
 4 files changed, 229 insertions(+), 73 deletions(-)

diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 62475fc..00d36dd 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -58,6 +58,7 @@ config MIPS
 	select HAVE_DEBUG_STACKOVERFLOW
 	select HAVE_DMA_CONTIGUOUS
 	select HAVE_DYNAMIC_FTRACE
+	select HAVE_DYNAMIC_FTRACE_WITH_REGS
 	select HAVE_EBPF_JIT if 64BIT && !CPU_MICROMIPS && TARGET_ISA_REV >= 2
 	select HAVE_EXIT_THREAD
 	select HAVE_FAST_GUP
diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h
index 636c640..8afd1bc 100644
--- a/arch/mips/include/asm/ftrace.h
+++ b/arch/mips/include/asm/ftrace.h
@@ -76,6 +76,11 @@ do {						\
 
 
 #ifdef CONFIG_DYNAMIC_FTRACE
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+#endif
+
 static inline unsigned long ftrace_call_adjust(unsigned long addr)
 {
 	return addr;
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index fd6d1da..890429a 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -49,40 +49,89 @@ void arch_ftrace_update_code(int command)
 #define DONT_SET 0xffffffff
 
 static struct ftrace_insn jal_ftrace_caller __read_mostly;
-static struct ftrace_insn la_mcount __read_mostly;
+static struct ftrace_insn la_ftrace_caller __read_mostly;
 static struct ftrace_insn nop_kernel __read_mostly;
 static struct ftrace_insn nop_module __read_mostly;
 
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+static struct ftrace_insn jal_ftrace_regs_caller __read_mostly;
+static struct ftrace_insn la_ftrace_regs_caller __read_mostly;
+#endif
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static struct ftrace_insn j_ftrace_graph_caller __read_mostly;
 #endif
 
+/*
+ * The details about the calling site of mcount on MIPS
+ *
+ * 1. For kernel:
+ *
+ * move at, ra
+ * jal _mcount		--> nop
+ *  sub sp, sp, 8	--> nop  (CONFIG_32BIT)
+ *
+ * 2. For modules:
+ *
+ * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
+ *
+ * lui v1, hi_16bit_of_mcount	     --> b 1f (0x10000005)
+ * addiu v1, v1, low_16bit_of_mcount --> nop  (CONFIG_32BIT)
+ * move at, ra
+ * move $12, ra_address
+ * jalr v1
+ *  sub sp, sp, 8
+ *				    1: offset = 5 instructions
+ * 2.2 For the Other situations
+ *
+ * lui v1, hi_16bit_of_mcount	     --> b 1f (0x10000004)
+ * addiu v1, v1, low_16bit_of_mcount --> nop  (CONFIG_32BIT)
+ * move at, ra
+ * jalr v1
+ *  nop | move $12, ra_address | sub sp, sp, 8
+ *				    1: offset = 4 instructions
+ */
+
 static inline void ftrace_dyn_arch_init_insns(void)
 {
 	u32 *buf;
 	unsigned int v1 = 3;
 
-	/* la v1, _mcount */
-	buf = (u32 *)&la_mcount;
-	UASM_i_LA(&buf, v1, MCOUNT_ADDR);
-#ifdef CONFIG_64BIT
-	la_mcount.code[1] = DONT_SET;
-#endif
+	/* la v1, ftrace_caller */
+	buf = (u32 *)&la_ftrace_caller;
+	UASM_i_LA(&buf, v1, FTRACE_ADDR);
 
-	/* jal (ftrace_caller + 8), jump over the first two instruction */
 	buf = (u32 *)&jal_ftrace_caller;
-	uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);
+#ifdef CONFIG_32BIT
+	/* jal (ftrace_caller + 4), jump over the sp restore instruction */
+	uasm_i_jal(&buf, (FTRACE_ADDR + 4) & JUMP_RANGE_MASK);
+#else
+	uasm_i_jal(&buf, FTRACE_ADDR & JUMP_RANGE_MASK);
+#endif
 	jal_ftrace_caller.code[1] = DONT_SET;
 
 	nop_kernel.code[0] = INSN_NOP;
-	nop_module.code[0] = INSN_B_1F;
-
 #ifdef CONFIG_64BIT
 	nop_kernel.code[1] = DONT_SET;
-	nop_module.code[1] = DONT_SET;
 #else
 	nop_kernel.code[1] = INSN_NOP;
+#endif
+	nop_module.code[0] = INSN_B_1F;
 	nop_module.code[1] = INSN_NOP;
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+	/* la v1, ftrace_regs_caller */
+	buf = (u32 *)&la_ftrace_regs_caller;
+	UASM_i_LA(&buf, v1, FTRACE_REGS_ADDR);
+
+	/* jal ftrace_regs_caller */
+	buf = (u32 *)&jal_ftrace_regs_caller;
+#ifdef CONFIG_32BIT
+	uasm_i_jal(&buf, (FTRACE_REGS_ADDR + 4) & JUMP_RANGE_MASK);
+#else
+	uasm_i_jal(&buf, FTRACE_REGS_ADDR & JUMP_RANGE_MASK);
+#endif
+	jal_ftrace_regs_caller.code[1] = DONT_SET;
 #endif
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -119,36 +168,6 @@ static int ftrace_modify_code(unsigned long ip, struct ftrace_insn insns)
 	return 0;
 }
 
-/*
- * The details about the calling site of mcount on MIPS
- *
- * 1. For kernel:
- *
- * move at, ra
- * jal _mcount		--> nop
- *  sub sp, sp, 8	--> nop  (CONFIG_32BIT)
- *
- * 2. For modules:
- *
- * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
- *
- * lui v1, hi_16bit_of_mcount	     --> b 1f (0x10000005)
- * addiu v1, v1, low_16bit_of_mcount --> nop  (CONFIG_32BIT)
- * move at, ra
- * move $12, ra_address
- * jalr v1
- *  sub sp, sp, 8
- *				    1: offset = 5 instructions
- * 2.2 For the Other situations
- *
- * lui v1, hi_16bit_of_mcount	     --> b 1f (0x10000004)
- * addiu v1, v1, low_16bit_of_mcount --> nop  (CONFIG_32BIT)
- * move at, ra
- * jalr v1
- *  nop | move $12, ra_address | sub sp, sp, 8
- *				    1: offset = 4 instructions
- */
-
 int ftrace_make_nop(struct module *mod,
 		    struct dyn_ftrace *rec, unsigned long addr)
 {
@@ -164,41 +183,79 @@ int ftrace_make_nop(struct module *mod,
 	return ftrace_modify_code(ip, insns);
 }
 
+static int __ftrace_make_call(unsigned long ip, unsigned long addr)
+{
+	u32 *buf;
+	struct ftrace_insn insns;
+	unsigned int v1 = 3;
+
+	if (core_kernel_text(ip)) {
+		/* PC-region */
+		if ((addr & ~JUMP_RANGE_MASK) != (ip & ~JUMP_RANGE_MASK))
+			return -EINVAL;
+
+		insns.code[0] = INSN_JAL(addr);
+		insns.code[1] = DONT_SET;
+	} else {
+		buf = (u32 *)&insns;
+		UASM_i_LA(&buf, v1, addr);
+	}
+
+	return ftrace_modify_code(ip, insns);
+}
+
 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 {
 	struct ftrace_insn insns;
 	unsigned long ip = rec->ip;
 
-	insns = core_kernel_text(ip) ? jal_ftrace_caller : la_mcount;
+	if (addr == FTRACE_ADDR)
+		insns = core_kernel_text(ip) ? jal_ftrace_caller : la_ftrace_caller;
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+	else if (addr == FTRACE_REGS_ADDR)
+		insns = core_kernel_text(ip) ? jal_ftrace_regs_caller : la_ftrace_regs_caller;
+#endif
+	else
+		return __ftrace_make_call(ip, addr);
 
 	return ftrace_modify_code(ip, insns);
 }
 
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+			unsigned long addr)
+{
+	unsigned long ip = rec->ip;
+
+	return __ftrace_make_call(ip, addr);
+}
+#endif
+
 #define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))
 
 int ftrace_update_ftrace_func(ftrace_func_t func)
 {
+	int faulted;
 	struct ftrace_insn insns;
 
 	insns.code[0] = INSN_JAL((unsigned long)func);
 	insns.code[1] = DONT_SET;
 
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#define FTRACE_REGS_CALL_IP ((unsigned long)(&ftrace_regs_call))
+	faulted = ftrace_modify_code(FTRACE_REGS_CALL_IP, insns);
+	if (unlikely(faulted))
+		return -EFAULT;
+#endif
+
 	return ftrace_modify_code(FTRACE_CALL_IP, insns);
 }
 
 int __init ftrace_dyn_arch_init(void)
 {
-	struct ftrace_insn insns;
-
-	insns.code[0] = INSN_NOP;
-	insns.code[1] = DONT_SET;
-
 	/* Encode the instructions when booting */
 	ftrace_dyn_arch_init_insns();
 
-	/* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
-	ftrace_modify_code(MCOUNT_ADDR, insns);
-
 	return 0;
 }
 #endif	/* CONFIG_DYNAMIC_FTRACE */
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
index 808257a..2c9c061 100644
--- a/arch/mips/kernel/mcount.S
+++ b/arch/mips/kernel/mcount.S
@@ -51,11 +51,83 @@
 	PTR_ADDIU	sp, PT_SIZE
 	.endm
 
+	.macro MCOUNT_SAVE_MORE_REGS
+	PTR_SUBU	sp, PT_SIZE
+	PTR_S	ra, PT_R31(sp)
+	PTR_S	AT, PT_R1(sp)
+	PTR_S	a0, PT_R4(sp)
+	PTR_S	a1, PT_R5(sp)
+	PTR_S	a2, PT_R6(sp)
+	PTR_S	a3, PT_R7(sp)
+#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
+	PTR_S	a4, PT_R8(sp)
+	PTR_S	a5, PT_R9(sp)
+	PTR_S	a6, PT_R10(sp)
+	PTR_S	a7, PT_R11(sp)
+#endif
+	PTR_S	s0, PT_R16(sp)
+	PTR_S	s1, PT_R17(sp)
+	PTR_S	s2, PT_R18(sp)
+	PTR_S	s3, PT_R19(sp)
+	PTR_S	s4, PT_R20(sp)
+	PTR_S	s5, PT_R21(sp)
+	PTR_S	s6, PT_R22(sp)
+	PTR_S	s7, PT_R23(sp)
+	PTR_S	gp, PT_R28(sp)
+	PTR_S	sp, PT_R29(sp)
+	PTR_S	s8, PT_R30(sp)
+	.endm
+
+	.macro MCOUNT_RESTORE_MORE_REGS
+	PTR_L	ra, PT_R31(sp)
+	PTR_L	AT, PT_R1(sp)
+	PTR_L	v0, PT_R2(sp)
+	PTR_L	v1, PT_R3(sp)
+	PTR_L	a0, PT_R4(sp)
+	PTR_L	a1, PT_R5(sp)
+	PTR_L	a2, PT_R6(sp)
+	PTR_L	a3, PT_R7(sp)
+#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
+	PTR_L	a4, PT_R8(sp)
+	PTR_L	a5, PT_R9(sp)
+	PTR_L	a6, PT_R10(sp)
+	PTR_L	a7, PT_R11(sp)
+#endif
+	PTR_L	s0, PT_R16(sp)
+	PTR_L	s1, PT_R17(sp)
+	PTR_L	s2, PT_R18(sp)
+	PTR_L	s3, PT_R19(sp)
+	PTR_L	s4, PT_R20(sp)
+	PTR_L	s5, PT_R21(sp)
+	PTR_L	s6, PT_R22(sp)
+	PTR_L	s7, PT_R23(sp)
+	PTR_L	gp, PT_R28(sp)
+	PTR_L	sp, PT_R29(sp)
+	PTR_L	s8, PT_R30(sp)
+	PTR_ADDIU	sp, PT_SIZE
+	.endm
+
 	.macro RETURN_BACK
 	jr ra
 	 move ra, AT
 	.endm
 
+	.macro is_in_module addr res temp1 temp2
+	PTR_LA   \res, _stext
+	sltu     \temp1, \addr, \res	/* temp1 = (addr < _stext) */
+	PTR_LA   \res, _etext
+	sltu     \temp2, \res, \addr	/* temp2 = (addr > _etext) */
+	or       \res, \temp1, \temp2
+	.endm
+
+	.macro adjust_module_callsite addr
+#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
+	PTR_SUBU \addr, \addr, 16	/* arg1: adjust to module's recorded callsite */
+#else
+	PTR_SUBU \addr, \addr, 12
+#endif
+	.endm
+
 /*
  * The -mmcount-ra-address option of gcc 4.5 uses register $12 to pass
  * the location of the parent's return address.
@@ -64,55 +136,76 @@
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
-NESTED(ftrace_caller, PT_SIZE, ra)
-	.globl _mcount
-_mcount:
+NESTED(_mcount, PT_SIZE, ra)
 EXPORT_SYMBOL(_mcount)
-	b	ftrace_stub
+#ifdef CONFIG_32BIT
+	addiu sp, sp, 8
+#endif
+	.globl ftrace_stub
+ftrace_stub:
+	RETURN_BACK
+	END(_mcount)
+
+NESTED(ftrace_caller, PT_SIZE, ra)
 #ifdef CONFIG_32BIT
 	 addiu sp,sp,8
-#else
-	 nop
 #endif
 
-	/* When tracing is activated, it calls ftrace_caller+8 (aka here) */
 	MCOUNT_SAVE_REGS
 #ifdef KBUILD_MCOUNT_RA_ADDRESS
 	PTR_S	MCOUNT_RA_ADDRESS_REG, PT_R12(sp)
 #endif
 
 	PTR_SUBU a0, ra, 8	/* arg1: self address */
-	PTR_LA   t1, _stext
-	sltu     t2, a0, t1	/* t2 = (a0 < _stext) */
-	PTR_LA   t1, _etext
-	sltu     t3, t1, a0	/* t3 = (a0 > _etext) */
-	or       t1, t2, t3
+	is_in_module a0, t1, t8, t9
 	beqz     t1, ftrace_call
-	 nop
-#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
-	PTR_SUBU a0, a0, 16	/* arg1: adjust to module's recorded callsite */
-#else
-	PTR_SUBU a0, a0, 12
-#endif
+	nop
+	adjust_module_callsite a0
 
 	.globl ftrace_call
 ftrace_call:
 	nop	/* a placeholder for the call to a real tracing function */
-	 move	a1, AT		/* arg2: parent's return address */
+	move	a1, AT		/* arg2: parent's return address */
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	.globl ftrace_graph_call
 ftrace_graph_call:
 	nop
-	 nop
+	nop
 #endif
 
 	MCOUNT_RESTORE_REGS
-	.globl ftrace_stub
-ftrace_stub:
 	RETURN_BACK
 	END(ftrace_caller)
 
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+NESTED(ftrace_regs_caller, PT_SIZE, ra)
+#ifdef CONFIG_32BIT
+	addiu sp, sp, 8
+#endif
+	MCOUNT_SAVE_MORE_REGS
+#ifdef KBUILD_MCOUNT_RA_ADDRESS
+	PTR_S	MCOUNT_RA_ADDRESS_REG, PT_R12(sp)
+#endif
+
+	move     a2, zero		/* arg3: NULL */
+	move     a3, sp         /* arg4: fregs address */
+	PTR_SUBU a0, ra, 8		/* arg1: self address */
+	is_in_module a0, t1, t8, t9
+	beqz     t1, ftrace_regs_call
+	nop
+	adjust_module_callsite a0
+
+	.globl ftrace_regs_call
+ftrace_regs_call:
+	nop
+	move	a1, AT			/* arg2: parent's return address */
+
+	MCOUNT_RESTORE_REGS
+	RETURN_BACK
+	END(ftrace_regs_caller)
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
+
 #else	/* ! CONFIG_DYNAMIC_FTRACE */
 
 NESTED(_mcount, PT_SIZE, ra)
-- 
2.1.0



* Re: [PATCH 1/3] MIPS: ftrace: Fix N32 save registers
       [not found] ` <b1a5eae4-2032-4ace-aa48-a21893e47528@www.fastmail.com>
@ 2021-02-01  1:12   ` Jinyang He
  2021-02-01  4:03     ` Jiaxun Yang
  0 siblings, 1 reply; 8+ messages in thread
From: Jinyang He @ 2021-02-01  1:12 UTC (permalink / raw)
  To: Jiaxun Yang, Thomas Bogendoerfer, Steven Rostedt, Ingo Molnar
  Cc: Wu Zhangjin, linux-mips, linux-kernel, Huacai Chen, Tiezhu Yang

On 01/31/2021 06:38 PM, Jiaxun Yang wrote:

>
> On Sun, Jan 31, 2021, at 4:14 PM, Jinyang He wrote:
>> CONFIG_64BIT is confusing here: the N32 ABI also passes parameters in a0-a7.
> Do we have an N32 kernel build?
> CONFIG_64BIT assumes N64 as the kernel ABI.
>
>
> -Jiaxun
Hi, Jiaxun,

Thank you for your reply; now I understand. Before that, I saw the macro
in arch/mips/include/asm/regdef.h and thought it needed to be modified
here as well, but that no longer seems to make sense.
Please ignore this patch.

Thanks,
Jinyang

>> Signed-off-by: Jinyang He <hejinyang@loongson.cn>
>> ---
>>   arch/mips/kernel/mcount.S | 4 ++--
>>   1 file changed, 2 insertions(+), 2 deletions(-)
>>
>> diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
>> index cff52b2..808257a 100644
>> --- a/arch/mips/kernel/mcount.S
>> +++ b/arch/mips/kernel/mcount.S
>> @@ -27,7 +27,7 @@
>>   	PTR_S	a1, PT_R5(sp)
>>   	PTR_S	a2, PT_R6(sp)
>>   	PTR_S	a3, PT_R7(sp)
>> -#ifdef CONFIG_64BIT
>> +#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
>>   	PTR_S	a4, PT_R8(sp)
>>   	PTR_S	a5, PT_R9(sp)
>>   	PTR_S	a6, PT_R10(sp)
>> @@ -42,7 +42,7 @@
>>   	PTR_L	a1, PT_R5(sp)
>>   	PTR_L	a2, PT_R6(sp)
>>   	PTR_L	a3, PT_R7(sp)
>> -#ifdef CONFIG_64BIT
>> +#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
>>   	PTR_L	a4, PT_R8(sp)
>>   	PTR_L	a5, PT_R9(sp)
>>   	PTR_L	a6, PT_R10(sp)
>> -- 
>> 2.1.0
>>
>>



* Re: [PATCH 1/3] MIPS: ftrace: Fix N32 save registers
  2021-02-01  1:12   ` [PATCH 1/3] MIPS: ftrace: Fix N32 save registers Jinyang He
@ 2021-02-01  4:03     ` Jiaxun Yang
  2021-02-13 15:17       ` Maciej W. Rozycki
  0 siblings, 1 reply; 8+ messages in thread
From: Jiaxun Yang @ 2021-02-01  4:03 UTC (permalink / raw)
  To: Jinyang He, Thomas Bogendoerfer, Steven Rostedt, Ingo Molnar
  Cc: Wu Zhangjin, linux-mips, linux-kernel, Huacai Chen, Tiezhu Yang



On Mon, Feb 1, 2021, at 9:12 AM, Jinyang He wrote:
> On 01/31/2021 06:38 PM, Jiaxun Yang wrote:
> 
> >
> > On Sun, Jan 31, 2021, at 4:14 PM, Jinyang He wrote:
> >> CONFIG_64BIT is confusing here: the N32 ABI also passes parameters in a0-a7.
> > Do we have an N32 kernel build?
> > CONFIG_64BIT assumes N64 as the kernel ABI.
> >
> >
> > -Jiaxun
> Hi, Jiaxun,
> 
> Thank you for your reply; now I understand. Before that, I saw the macro
> in arch/mips/include/asm/regdef.h and thought it needed to be modified
> here as well, but that no longer seems to make sense.
> Please ignore this patch.

I guess that's for uapi consideration.

Thanks.


- Jiaxun



> 
> Thanks,
> Jinyang
> 
> >> Signed-off-by: Jinyang He <hejinyang@loongson.cn>
> >> ---
> >>   arch/mips/kernel/mcount.S | 4 ++--
> >>   1 file changed, 2 insertions(+), 2 deletions(-)
> >>
> >> diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
> >> index cff52b2..808257a 100644
> >> --- a/arch/mips/kernel/mcount.S
> >> +++ b/arch/mips/kernel/mcount.S
> >> @@ -27,7 +27,7 @@
> >>   	PTR_S	a1, PT_R5(sp)
> >>   	PTR_S	a2, PT_R6(sp)
> >>   	PTR_S	a3, PT_R7(sp)
> >> -#ifdef CONFIG_64BIT
> >> +#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
> >>   	PTR_S	a4, PT_R8(sp)
> >>   	PTR_S	a5, PT_R9(sp)
> >>   	PTR_S	a6, PT_R10(sp)
> >> @@ -42,7 +42,7 @@
> >>   	PTR_L	a1, PT_R5(sp)
> >>   	PTR_L	a2, PT_R6(sp)
> >>   	PTR_L	a3, PT_R7(sp)
> >> -#ifdef CONFIG_64BIT
> >> +#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
> >>   	PTR_L	a4, PT_R8(sp)
> >>   	PTR_L	a5, PT_R9(sp)
> >>   	PTR_L	a6, PT_R10(sp)
> >> -- 
> >> 2.1.0
> >>
> >>
> 
>

-- 
- Jiaxun


* Re: [PATCH 3/3] MIPS: ftrace: Add DYNAMIC_FTRACE_WITH_REGS support
  2021-01-31  8:14 ` [PATCH 3/3] MIPS: ftrace: Add DYNAMIC_FTRACE_WITH_REGS support Jinyang He
@ 2021-02-01 14:56   ` Steven Rostedt
  2021-02-02 12:21     ` Jinyang He
  0 siblings, 1 reply; 8+ messages in thread
From: Steven Rostedt @ 2021-02-01 14:56 UTC (permalink / raw)
  To: Jinyang He
  Cc: Thomas Bogendoerfer, Ingo Molnar, Wu Zhangjin, linux-mips,
	linux-kernel, Huacai Chen, Jiaxun Yang, Tiezhu Yang

On Sun, 31 Jan 2021 16:14:38 +0800
Jinyang He <hejinyang@loongson.cn> wrote:

> In the past, we have always used the address of _mcount as the address of
> ftrace_caller. Sharing that address saves one ftrace_modify_code operation
> when ftrace handles modules on 64-bit platforms. In order to provide
> DYNAMIC_FTRACE_WITH_REGS, we have to take _mcount out of ftrace_caller and
> add a separate definition of _mcount, which means two instructions must be
> modified. Also add the definition of ftrace_regs_caller, which stores and
> restores more registers. Some functions in ftrace.c also need to be aware
> of ftrace_regs_caller; modify these functions and add the related
> ftrace_regs_caller code.

Note, while you are making these changes, you may want to look at the new
ftrace feature HAVE_DYNAMIC_FTRACE_WITH_ARGS.

I noticed that with x86 (and probably all other archs), you need to save
the arguments before calling the ftrace callbacks in the ftrace trampoline.
If done properly, this means that the callbacks should be able to access
the function arguments. What happens then is that the arguments are
structured such that, if the callback was registered with "WITH_REGS" set,
it's the full pt_regs structure. Otherwise, it's a partial structure called
"ftrace_regs".


See arch/x86/include/asm/ftrace.h for ftrace_regs.

Then the ftrace_regs is passed to the callback instead of pt_regs (for all
callbacks!).

If a callback has the REGS flag set (ftrace_regs_caller), then to get the
pt_regs, it needs to call:

	struct pt_regs *regs = arch_ftrace_get_regs(ftrace_regs);

Where arch_ftrace_get_regs() returns the full pt_regs if the callback was
called from a ftrace_regs_caller trampoline; otherwise it must return NULL.

The reason to return NULL is that we don't want callbacks using pt_regs,
thinking it's fully populated when it is not.

But if HAVE_DYNAMIC_FTRACE_WITH_ARGS is set, then all ftrace callbacks
(regardless of the REGS flag being set) have access to the arguments from the
ftrace_regs.

-- Steve
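
To make the interface described in the message above concrete, here is a
minimal sketch of such a callback, assuming the ftrace_regs API from
include/linux/ftrace.h (my_tracer is a made-up name):

#include <linux/ftrace.h>

static void my_tracer(unsigned long ip, unsigned long parent_ip,
		      struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct pt_regs *regs = arch_ftrace_get_regs(fregs);

	if (regs) {
		/* Called through the _regs trampoline (REGS flag set):
		 * the full pt_regs snapshot is valid. */
	} else {
		/* Only the partial ftrace_regs is populated; with
		 * HAVE_DYNAMIC_FTRACE_WITH_ARGS the function arguments
		 * are still reachable through it. */
	}
}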


* Re: [PATCH 3/3] MIPS: ftrace: Add DYNAMIC_FTRACE_WITH_REGS support
  2021-02-01 14:56   ` Steven Rostedt
@ 2021-02-02 12:21     ` Jinyang He
  0 siblings, 0 replies; 8+ messages in thread
From: Jinyang He @ 2021-02-02 12:21 UTC (permalink / raw)
  To: Steven Rostedt
  Cc: Thomas Bogendoerfer, Ingo Molnar, Wu Zhangjin, linux-mips,
	linux-kernel, Huacai Chen, Jiaxun Yang, Tiezhu Yang

On 02/01/2021 10:56 PM, Steven Rostedt wrote:

> On Sun, 31 Jan 2021 16:14:38 +0800
> Jinyang He <hejinyang@loongson.cn> wrote:
>
>> In the past, we have always used the address of _mcount as the address of
>> ftrace_caller. Sharing that address saves one ftrace_modify_code operation
>> when ftrace handles modules on 64-bit platforms. In order to provide
>> DYNAMIC_FTRACE_WITH_REGS, we have to take _mcount out of ftrace_caller and
>> add a separate definition of _mcount, which means two instructions must be
>> modified. Also add the definition of ftrace_regs_caller, which stores and
>> restores more registers. Some functions in ftrace.c also need to be aware
>> of ftrace_regs_caller; modify these functions and add the related
>> ftrace_regs_caller code.
> Note, while you are making these changes, you may want to look at the new
> ftrace feature HAVE_DYNAMIC_FTRACE_WITH_ARGS.
>
> I noticed that with x86 (and probably all other archs), you need to save
> the arguments before calling the ftrace callbacks in the ftrace trampoline.
> If done properly, this means that the callbacks should be able to access
> the function arguments. What happens then is that the arguments are
> structured such that, if the callback was registered with "WITH_REGS" set,
> it's the full pt_regs structure. Otherwise, it's a partial structure called
> "ftrace_regs".
>
>
> See arch/x86/include/asm/ftrace.h for ftrace_regs.
>
> Then the ftrace_regs is passed to the callback instead of pt_regs (for all
> callbacks!).
>
> If a callback has the REGS flag set (ftrace_regs_caller), then to get the
> pt_regs, it needs to call:
>
> 	struct pt_regs *regs = arch_ftrace_get_regs(ftrace_regs);
>
> Where arch_ftrace_get_regs() returns the full pt_regs if the callback was
> called from a ftrace_regs_caller trampoline; otherwise it must return NULL.
>
> The reason to return NULL is that we don't want callbacks using pt_regs,
> thinking it's fully populated when it is not.
>
> But if HAVE_DYNAMIC_FTRACE_WITH_ARGS is set, then all ftrace callbacks
> (regardless of the REGS flag being set) have access to the arguments from the
> ftrace_regs.
>
> -- Steve
Hi, Steve,

Thank you for your comment. It really helps a lot. It's time to learn more!


Thanks, :-)
Jinyang



* Re: [PATCH 1/3] MIPS: ftrace: Fix N32 save registers
  2021-02-01  4:03     ` Jiaxun Yang
@ 2021-02-13 15:17       ` Maciej W. Rozycki
  0 siblings, 0 replies; 8+ messages in thread
From: Maciej W. Rozycki @ 2021-02-13 15:17 UTC (permalink / raw)
  To: Jiaxun Yang
  Cc: Jinyang He, Thomas Bogendoerfer, Steven Rostedt, Ingo Molnar,
	Wu Zhangjin, linux-mips, linux-kernel, Huacai Chen, Tiezhu Yang

On Mon, 1 Feb 2021, Jiaxun Yang wrote:

> > Thank you for your reply; now I understand. Before that, I saw the macro
> > in arch/mips/include/asm/regdef.h and thought it needed to be modified
> > here as well, but that no longer seems to make sense.
> > Please ignore this patch.
> 
> I guess that's for uapi consideration.

 Nope, it's not under arch/mips/include/uapi/, but this is a common MIPS 
header used across many projects (see the copyright notices at the top 
going back to 1985), so there has been no sense in making Linux-specific
changes to it just for the sake of it, given that it works as it stands.
Don't fix what ain't broke.  Don't make changes just because you can.

  Maciej
