* [RFC PATCH 0/5] livepatch: add support on arm64
From: Li Bin @ 2015-05-28  5:51 UTC (permalink / raw)
  To: rostedt, mingo, jpoimboe, sjenning, jkosina, vojtech,
	catalin.marinas, will.deacon, masami.hiramatsu.pt
  Cc: live-patching, linux-arm-kernel, linux-kernel, lizefan,
	felix.yang, guohanjun, xiexiuqi, huawei.libin

This patchset proposes a method for implementing the gcc -mfentry
feature (profile before prologue) on arm64, and proposes a livepatch
implementation for arm64 based on this feature.
The gcc implementation of this feature will be posted to the gcc
community soon.

With this -mfentry feature, the entry of each function looks like:

foo:
    mov x9, x30 
    bl __fentry__
    mov x30, x9
    [prologue]
    ... 

x9 is a callee-corruptible (caller-saved) register, and the __fentry__
function is responsible for preserving all registers, so x9 can be used
to protect x30. The two added instructions are plain register moves and
have a relatively small impact on performance.
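
When tracing is disabled, dynamic ftrace rewrites the "bl __fentry__"
site to a NOP, so only the two register moves remain on the hot path.
As a rough sketch of that patching (the function below is illustrative,
not part of this series; the real logic lives in
arch/arm64/kernel/ftrace.c), it reuses the existing arm64 instruction
helpers:

	/* Sketch only: toggle the fentry call site at 'pc';
	 * 'tramp' is an ftrace trampoline such as ftrace_caller.
	 */
	static int fentry_site_set(unsigned long pc, bool enable,
				   unsigned long tramp)
	{
		u32 insn;

		if (enable)
			insn = aarch64_insn_gen_branch_imm(pc, tramp,
						AARCH64_INSN_BRANCH_LINK);
		else
			insn = aarch64_insn_gen_nop();

		return aarch64_insn_patch_text_nosync((void *)pc, insn);
	}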

This patchset has been tested on an arm64 platform.

Li Bin (4):
  livepatch: ftrace: arm64: Add support for DYNAMIC_FTRACE_WITH_REGS
  livepatch: ftrace: add ftrace_function_stub_ip function
  livepatch: ftrace: arm64: Add support for -mfentry on arm64
  livepatch: arm64: add support for livepatch on arm64

Xie XiuQi (1):
  livepatch: arm64: support relocation in a module

 arch/arm64/Kconfig                 |    5 +
 arch/arm64/include/asm/ftrace.h    |    9 +
 arch/arm64/include/asm/livepatch.h |   45 +++++
 arch/arm64/kernel/Makefile         |    1 +
 arch/arm64/kernel/arm64ksyms.c     |    4 +
 arch/arm64/kernel/entry-ftrace.S   |  154 +++++++++++++++-
 arch/arm64/kernel/ftrace.c         |   28 +++-
 arch/arm64/kernel/livepatch.c      |   41 ++++
 arch/arm64/kernel/module.c         |  355 ++++++++++++++++++------------------
 include/linux/ftrace.h             |    1 +
 kernel/livepatch/core.c            |   17 ++-
 kernel/trace/ftrace.c              |   32 ++++
 scripts/recordmcount.pl            |    2 +-
 13 files changed, 508 insertions(+), 186 deletions(-)
 create mode 100644 arch/arm64/include/asm/livepatch.h
 create mode 100644 arch/arm64/kernel/livepatch.c



* [RFC PATCH 1/5] livepatch: ftrace: arm64: Add support for DYNAMIC_FTRACE_WITH_REGS
From: Li Bin @ 2015-05-28  5:51 UTC (permalink / raw)
  To: rostedt, mingo, jpoimboe, sjenning, jkosina, vojtech,
	catalin.marinas, will.deacon, masami.hiramatsu.pt
  Cc: live-patching, linux-arm-kernel, linux-kernel, lizefan,
	felix.yang, guohanjun, xiexiuqi, huawei.libin

If an ftrace_ops is registered with the flag FTRACE_OPS_FL_SAVE_REGS,
an arch that supports DYNAMIC_FTRACE_WITH_REGS will pass a full set of
pt_regs to the ftrace_ops callback function. The callback may read or
modify the pt_regs, and the (possibly modified) pt_regs are restored on
return, which enables control-flow redirection such as kernel live
patching.

This patch adds DYNAMIC_FTRACE_WITH_REGS feature support for arm64
architecture.
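
As a hedged illustration of the callback contract (my_handler and
my_new_func are hypothetical names, not part of this patch), an
ftrace_ops registered with that flag could redirect execution like this:

	#include <linux/ftrace.h>

	extern void my_new_func(void);	/* hypothetical replacement */

	static void notrace my_handler(unsigned long ip, unsigned long parent_ip,
				       struct ftrace_ops *op, struct pt_regs *regs)
	{
		/*
		 * regs is a complete pt_regs only because this ops sets
		 * FTRACE_OPS_FL_SAVE_REGS; rewriting regs->pc makes the
		 * trampoline resume in my_new_func instead of returning
		 * to the traced function.
		 */
		regs->pc = (unsigned long)my_new_func;
	}

	static struct ftrace_ops my_ops = {
		.func	= my_handler,
		.flags	= FTRACE_OPS_FL_SAVE_REGS,
	};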

Signed-off-by: Li Bin <huawei.libin@huawei.com>
---
 arch/arm64/Kconfig               |    1 +
 arch/arm64/include/asm/ftrace.h  |    4 ++
 arch/arm64/kernel/entry-ftrace.S |   95 ++++++++++++++++++++++++++++++++++++++
 arch/arm64/kernel/ftrace.c       |   28 ++++++++++-
 4 files changed, 126 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 7796af4..ea435c9 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -57,6 +57,7 @@ config ARM64
 	select HAVE_DMA_ATTRS
 	select HAVE_DMA_CONTIGUOUS
 	select HAVE_DYNAMIC_FTRACE
+	select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FUNCTION_TRACER
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index c5534fa..a7722b9 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -16,6 +16,10 @@
 #define MCOUNT_ADDR		((unsigned long)_mcount)
 #define MCOUNT_INSN_SIZE	AARCH64_INSN_SIZE
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+#endif
+
 #ifndef __ASSEMBLY__
 #include <linux/compat.h>
 
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index 08cafc5..fde793b 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -12,6 +12,8 @@
 #include <linux/linkage.h>
 #include <asm/ftrace.h>
 #include <asm/insn.h>
+#include <asm/asm-offsets.h>
+#include <asm/assembler.h>
 
 /*
  * Gcc with -pg will put the following code in the beginning of each function:
@@ -50,11 +52,37 @@
 	mov	x29, sp
 	.endm
 
+	/* save parameter registers & corruptible registers */
+	.macro save_mcount_regs
+	sub	sp, sp, #S_FRAME_SIZE
+	stp	x0, x1, [sp]
+	stp	x2, x3, [sp, #16]
+	stp	x4, x5, [sp, #32]
+	stp	x6, x7, [sp, #48]
+	stp	x8, x9, [sp, #64]
+	stp	x10, x11, [sp, #80]
+	stp	x12, x13, [sp, #96]
+	stp	x14, x15, [sp, #112]
+	.endm
+
 	.macro mcount_exit
 	ldp	x29, x30, [sp], #16
 	ret
 	.endm
 
+	/* restore parameter registers & corruptible registers */
+	.macro restore_mcount_regs
+	ldp	x0, x1, [sp]
+	ldp	x2, x3, [sp, #16]
+	ldp	x4, x5, [sp, #32]
+	ldp	x6, x7, [sp, #48]
+	ldp	x8, x9, [sp, #64]
+	ldp	x10, x11, [sp, #80]
+	ldp	x12, x13, [sp, #96]
+	ldp	x14, x15, [sp, #112]
+	add	sp, sp, #S_FRAME_SIZE
+	.endm
+
 	.macro mcount_adjust_addr rd, rn
 	sub	\rd, \rn, #AARCH64_INSN_SIZE
 	.endm
@@ -97,6 +125,7 @@
  */
 ENTRY(_mcount)
 	mcount_enter
+	save_mcount_regs
 
 	adrp	x0, ftrace_trace_function
 	ldr	x2, [x0, #:lo12:ftrace_trace_function]
@@ -110,8 +139,10 @@ ENTRY(_mcount)
 
 #ifndef CONFIG_FUNCTION_GRAPH_TRACER
 skip_ftrace_call:			//   return;
+	restore_mcount_regs
 	mcount_exit			// }
 #else
+	restore_mcount_regs
 	mcount_exit			//   return;
 					// }
 skip_ftrace_call:
@@ -127,6 +158,7 @@ skip_ftrace_call:
 	cmp	x0, x2
 	b.ne	ftrace_graph_caller	//     ftrace_graph_caller();
 
+	restore_mcount_regs
 	mcount_exit
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 ENDPROC(_mcount)
@@ -153,15 +185,20 @@ ENDPROC(_mcount)
  */
 ENTRY(ftrace_caller)
 	mcount_enter
+	save_mcount_regs
 
+	adrp	x0, function_trace_op
+	ldr	x2, [x0, #:lo12:function_trace_op]
 	mcount_get_pc0	x0		//     function's pc
 	mcount_get_lr	x1		//     function's lr
+	mov	x3, #0
 
 	.global ftrace_call
 ftrace_call:				// tracer(pc, lr);
 	nop				// This will be replaced with "bl xxx"
 					// where xxx can be any kind of tracer.
 
+ftrace_return:
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	.global ftrace_graph_call
 ftrace_graph_call:			// ftrace_graph_caller();
@@ -169,8 +206,65 @@ ftrace_graph_call:			// ftrace_graph_caller();
 					// "b ftrace_graph_caller"
 #endif
 
+	restore_mcount_regs
 	mcount_exit
 ENDPROC(ftrace_caller)
+
+ENTRY(ftrace_regs_caller)
+	mcount_enter
+	save_mcount_regs
+
+	/* Save the rest of pt_regs */
+	stp	x16, x17, [sp, #128]
+	stp	x18, x19, [sp, #144]
+	stp	x20, x21, [sp, #160]
+	stp	x22, x23, [sp, #176]
+	stp	x24, x25, [sp, #192]
+	stp	x26, x27, [sp, #208]
+	stp	x28, x29, [sp, #224]
+	str	x30, [sp, #S_LR]
+
+	/* Save the sp value from before the profile call */
+	add	x9, sp, #S_FRAME_SIZE + 16
+	str	x9, [sp, #S_SP]
+
+	/* Save lr as pt_regs->pc; the handler may change it */
+	str x30, [sp, #S_PC]
+
+	/* Save flags */
+	mrs	x9, spsr_el1
+	str	x9, [sp, #S_PSTATE]
+
+	adrp	x0, function_trace_op
+	ldr	x2, [x0, #:lo12:function_trace_op]
+	mcount_get_pc0	x0		//     function's pc
+	mcount_get_lr	x1		//     function's lr
+	mov	x3, sp
+
+	.global ftrace_regs_call
+ftrace_regs_call:			// tracer(pc, lr);
+	nop				// This will be replaced with "bl xxx"
+					// where xxx can be any kind of tracer.
+	/* Handlers can change the PC */
+	ldr	x9, [sp, #S_PC]
+	str	x9, [x29, #8]
+
+	/* Restore the rest of pt_regs */
+	ldp	x16, x17, [sp, #128]
+	ldp	x18, x19, [sp, #144]
+	ldp	x20, x21, [sp, #160]
+	ldp	x22, x23, [sp, #176]
+	ldp	x24, x25, [sp, #192]
+	ldp	x26, x27, [sp, #208]
+	ldr	x28, [sp, #224]
+	/* x29 & x30 should be restored by mcount_exit*/
+
+	/* Restore flags */
+	ldr	x9, [sp, #S_PSTATE]
+	msr	spsr_el1, x9
+
+	b	ftrace_return
+ENDPROC(ftrace_regs_caller)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 ENTRY(ftrace_stub)
@@ -193,6 +287,7 @@ ENTRY(ftrace_graph_caller)
 	mcount_get_parent_fp	  x2	//     parent's fp
 	bl	prepare_ftrace_return	// prepare_ftrace_return(&lr, pc, fp)
 
+	restore_mcount_regs
 	mcount_exit
 ENDPROC(ftrace_graph_caller)
 
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index c851be7..07175bd 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -56,12 +56,24 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 {
 	unsigned long pc;
 	u32 new;
+	int ret;
 
 	pc = (unsigned long)&ftrace_call;
 	new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func,
-					  AARCH64_INSN_BRANCH_LINK);
+			AARCH64_INSN_BRANCH_LINK);
 
-	return ftrace_modify_code(pc, 0, new, false);
+	ret = ftrace_modify_code(pc, 0, new, false);
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+	if (!ret) {
+		pc = (unsigned long)&ftrace_regs_call;
+		new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func,
+				AARCH64_INSN_BRANCH_LINK);
+
+		ret = ftrace_modify_code(pc, 0, new, false);
+	}
+#endif
+	return ret;
 }
 
 /*
@@ -78,6 +90,18 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 	return ftrace_modify_code(pc, old, new, true);
 }
 
+int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+		unsigned long addr)
+{
+	unsigned long pc = rec->ip;
+	u32 old, new;
+
+	old = aarch64_insn_gen_branch_imm(pc, old_addr, AARCH64_INSN_BRANCH_LINK);
+	new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
+
+	return ftrace_modify_code(pc, old, new, true);
+}
+
 /*
  * Turn off the call to ftrace_caller() in instrumented function
  */
-- 
1.7.1



* [RFC PATCH 2/5] livepatch: ftrace: add ftrace_function_stub_ip function
From: Li Bin @ 2015-05-28  5:51 UTC (permalink / raw)
  To: rostedt, mingo, jpoimboe, sjenning, jkosina, vojtech,
	catalin.marinas, will.deacon, masami.hiramatsu.pt
  Cc: live-patching, linux-arm-kernel, linux-kernel, lizefan,
	felix.yang, guohanjun, xiexiuqi, huawei.libin

ftrace_function_stub_ip() converts a function address into the address
of the ftrace stub call instruction inside that function.

This is needed on platforms where the compiler does not support the
"profile before prologue" feature, so the profiling call instruction
is not at the very beginning of the function.

EXAMPLES:
...
static struct ftrace_ops ops = { .func = my_handler };	/* hypothetical */
...
stub_ip = ftrace_function_stub_ip(func_addr);
if (stub_ip) {
	ftrace_set_filter_ip(&ops, stub_ip, 0, 0);
	register_ftrace_function(&ops);
}
...

Signed-off-by: Li Bin <huawei.libin@huawei.com>
---
 include/linux/ftrace.h |    1 +
 kernel/trace/ftrace.c  |   32 ++++++++++++++++++++++++++++++++
 2 files changed, 33 insertions(+), 0 deletions(-)

diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 1da6029..38a2811 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -360,6 +360,7 @@ struct dyn_ftrace {
 int ftrace_force_update(void);
 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
 			 int remove, int reset);
+unsigned long ftrace_function_stub_ip(unsigned long addr);
 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
 		       int len, int reset);
 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 02bece4..4d8692c 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -4161,6 +4161,38 @@ int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
 }
 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
 
+/**
+ * ftrace_function_stub_ip - get the profiling stub call location from a
+ * function address. It is useful on platforms that do not place the
+ * function profiling call at the start of the function.
+ * @addr: the function address to look up
+ *
+ * Returns the corresponding profiling stub call location if found,
+ * otherwise zero.
+ */
+unsigned long ftrace_function_stub_ip(unsigned long addr)
+{
+	struct ftrace_page *pg;
+	struct dyn_ftrace *rec;
+	unsigned long ret = 0;
+
+	mutex_lock(&ftrace_lock);
+
+	do_for_each_ftrace_rec(pg, rec) {
+		unsigned long offset;
+
+		if (kallsyms_lookup_size_offset(rec->ip, NULL, &offset)
+				&& addr + offset == rec->ip) {
+			ret = rec->ip;
+			goto out_unlock;
+		}
+	} while_for_each_ftrace_rec()
+out_unlock:
+	mutex_unlock(&ftrace_lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ftrace_function_stub_ip);
+
 static int
 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
 		 int reset, int enable)
-- 
1.7.1



* [RFC PATCH 3/5] livepatch: ftrace: arm64: Add support for -mfentry on arm64
From: Li Bin @ 2015-05-28  5:51 UTC (permalink / raw)
  To: rostedt, mingo, jpoimboe, sjenning, jkosina, vojtech,
	catalin.marinas, will.deacon, masami.hiramatsu.pt
  Cc: live-patching, linux-arm-kernel, linux-kernel, lizefan,
	felix.yang, guohanjun, xiexiuqi, huawei.libin

This patch depends on the compiler's -mfentry feature for arm64
proposed along with this patchset. If the kernel is compiled with this
feature, the entry of each function looks like:
   foo:
       mov x9, x30
       bl __fentry__
       mov x30, x9
When -mfentry is used, the call is to '__fentry__' and not '_mcount',
and it happens before the function's stack frame is set up. So __fentry__
is responsible for preserving the parameter and corruptible registers.

Signed-off-by: Li Bin <huawei.libin@huawei.com>
---
 arch/arm64/Kconfig               |    1 +
 arch/arm64/include/asm/ftrace.h  |    5 +++
 arch/arm64/kernel/arm64ksyms.c   |    4 ++
 arch/arm64/kernel/entry-ftrace.S |   59 +++++++++++++++++++++++++++++++++++--
 scripts/recordmcount.pl          |    2 +-
 5 files changed, 66 insertions(+), 5 deletions(-)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index ea435c9..7bb2468 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -60,6 +60,7 @@ config ARM64
 	select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
 	select HAVE_FTRACE_MCOUNT_RECORD
+	select HAVE_FENTRY
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_GENERIC_DMA_COHERENT
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index a7722b9..08eab52 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -13,7 +13,11 @@
 
 #include <asm/insn.h>
 
+#ifdef CC_USING_FENTRY
+#define MCOUNT_ADDR		((unsigned long)__fentry__)
+#else
 #define MCOUNT_ADDR		((unsigned long)_mcount)
+#endif
 #define MCOUNT_INSN_SIZE	AARCH64_INSN_SIZE
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -24,6 +28,7 @@
 #include <linux/compat.h>
 
 extern void _mcount(unsigned long);
+extern void __fentry__(unsigned long);
 extern void *return_address(unsigned int);
 
 struct dyn_arch_ftrace {
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index a85843d..f0455d3 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -63,5 +63,9 @@ EXPORT_SYMBOL(change_bit);
 EXPORT_SYMBOL(test_and_change_bit);
 
 #ifdef CONFIG_FUNCTION_TRACER
+#ifdef CC_USING_FENTRY
+EXPORT_SYMBOL(__fentry__);
+#else
 EXPORT_SYMBOL(_mcount);
 #endif
+#endif
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index fde793b..18cfe5b 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -93,27 +93,57 @@
 	ldr	\reg, [\reg]
 	.endm
 
+	/* for instrumented function's parent */
+	.macro fentry_get_parent_fp reg
+	ldr	\reg, [x29]
+	.endm
+
 	/* for instrumented function */
 	.macro mcount_get_pc0 reg
 	mcount_adjust_addr	\reg, x30
 	.endm
 
+	/* for instrumented function */
+	.macro fentry_get_pc0 reg
+	mcount_adjust_addr	\reg, x30
+	.endm
+
 	.macro mcount_get_pc reg
 	ldr	\reg, [x29, #8]
 	mcount_adjust_addr	\reg, \reg
 	.endm
 
+	.macro fentry_get_pc reg
+	ldr	\reg, [x29, #8]
+	mcount_adjust_addr	\reg, \reg
+	.endm
+
 	.macro mcount_get_lr reg
 	ldr	\reg, [x29]
 	ldr	\reg, [\reg, #8]
 	mcount_adjust_addr	\reg, \reg
 	.endm
 
+	.macro fentry_get_lr reg, base
+	ldr	\reg, [\base, #72]	// S_X9
+	mcount_adjust_addr	\reg, \reg
+	.endm
+
 	.macro mcount_get_lr_addr reg
 	ldr	\reg, [x29]
 	add	\reg, \reg, #8
 	.endm
 
+	.macro fentry_get_lr_addr reg, base
+	add	\reg, \base, #72	// S_X9
+	.endm
+
+#ifdef	CC_USING_FENTRY
+#define	function_hook	__fentry__
+#else
+#define	function_hook	_mcount
+#endif
+
 #ifndef CONFIG_DYNAMIC_FTRACE
 /*
  * void _mcount(unsigned long return_address)
@@ -123,7 +153,7 @@
  *     - tracer function to probe instrumented function's entry,
  *     - ftrace_graph_caller to set up an exit hook
  */
-ENTRY(_mcount)
+ENTRY(function_hook)
 	mcount_enter
 	save_mcount_regs
 
@@ -133,8 +163,13 @@ ENTRY(_mcount)
 	cmp	x0, x2			// if (ftrace_trace_function
 	b.eq	skip_ftrace_call	//     != ftrace_stub) {
 
+#ifdef CC_USING_FENTRY
+	fentry_get_pc	x0		//       function's pc
+	fentry_get_lr	x1, sp		//       function's lr (= parent's pc)
+#else
 	mcount_get_pc	x0		//       function's pc
 	mcount_get_lr	x1		//       function's lr (= parent's pc)
+#endif
 	blr	x2			//   (*ftrace_trace_function)(pc, lr);
 
 #ifndef CONFIG_FUNCTION_GRAPH_TRACER
@@ -161,7 +196,7 @@ skip_ftrace_call:
 	restore_mcount_regs
 	mcount_exit
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-ENDPROC(_mcount)
+ENDPROC(function_hook)
 
 #else /* CONFIG_DYNAMIC_FTRACE */
 /*
@@ -170,9 +205,9 @@ ENDPROC(_mcount)
  * and later on, NOP to branch to ftrace_caller() when enabled or branch to
  * NOP when disabled per-function base.
  */
-ENTRY(_mcount)
+ENTRY(function_hook)
 	ret
-ENDPROC(_mcount)
+ENDPROC(function_hook)
 
 /*
  * void ftrace_caller(unsigned long return_address)
@@ -189,8 +224,13 @@ ENTRY(ftrace_caller)
 
 	adrp	x0, function_trace_op
 	ldr	x2, [x0, #:lo12:function_trace_op]
+#ifdef CC_USING_FENTRY
+	fentry_get_pc0	x0		//     function's pc
+	fentry_get_lr	x1, sp		//     function's lr
+#else
 	mcount_get_pc0	x0		//     function's pc
 	mcount_get_lr	x1		//     function's lr
+#endif
 	mov	x3, #0
 
 	.global ftrace_call
@@ -237,8 +277,13 @@ ENTRY(ftrace_regs_caller)
 
 	adrp	x0, function_trace_op
 	ldr	x2, [x0, #:lo12:function_trace_op]
+#ifdef CC_USING_FENTRY
+	fentry_get_pc0	x0		//     function's pc
+	fentry_get_lr	x1, sp		//     function's lr
+#else
 	mcount_get_pc0	x0		//     function's pc
 	mcount_get_lr	x1		//     function's lr
+#endif
 	mov	x3, sp
 
 	.global ftrace_regs_call
@@ -282,9 +327,15 @@ ENDPROC(ftrace_stub)
  * and run return_to_handler() later on its exit.
  */
 ENTRY(ftrace_graph_caller)
+#ifdef CC_USING_FENTRY
+	fentry_get_lr_addr	x0, sp	//     pointer to function's saved lr
+	fentry_get_pc		x1	//     function's pc
+	fentry_get_parent_fp	x2	//     parent's fp
+#else
 	mcount_get_lr_addr	  x0	//     pointer to function's saved lr
 	mcount_get_pc		  x1	//     function's pc
 	mcount_get_parent_fp	  x2	//     parent's fp
+#endif
 	bl	prepare_ftrace_return	// prepare_ftrace_return(&lr, pc, fp)
 
 	restore_mcount_regs
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index 826470d..5020d96 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -279,7 +279,7 @@ if ($arch eq "x86_64") {
 } elsif ($arch eq "arm64") {
     $alignment = 3;
     $section_type = '%progbits';
-    $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_AARCH64_CALL26\\s+_mcount\$";
+    $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_AARCH64_CALL26\\s+(_mcount|__fentry__)\$";
     $type = ".quad";
 } elsif ($arch eq "ia64") {
     $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$";
-- 
1.7.1



* [RFC PATCH 4/5] livepatch: arm64: add support for livepatch on arm64
From: Li Bin @ 2015-05-28  5:51 UTC (permalink / raw)
  To: rostedt, mingo, jpoimboe, sjenning, jkosina, vojtech,
	catalin.marinas, will.deacon, masami.hiramatsu.pt
  Cc: live-patching, linux-arm-kernel, linux-kernel, lizefan,
	felix.yang, guohanjun, xiexiuqi, huawei.libin

This patch adds support for livepatch on arm64, based on the gcc
-mfentry feature and the ftrace DYNAMIC_FTRACE_WITH_REGS feature.
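
The generic livepatch API is unchanged by this patch; a patch module for
arm64 is written exactly like the existing sample. A minimal sketch,
modeled on samples/livepatch/livepatch-sample.c (the patched function is
the sample's, nothing here is arm64-specific; module boilerplate omitted):

	static struct klp_func funcs[] = {
		{
			.old_name = "cmdline_proc_show",
			.new_func = livepatch_cmdline_proc_show,
		}, { }
	};

	static struct klp_object objs[] = {
		{
			/* name being NULL means vmlinux */
			.funcs = funcs,
		}, { }
	};

	static struct klp_patch patch = {
		.mod = THIS_MODULE,
		.objs = objs,
	};

	static int livepatch_init(void)
	{
		int ret;

		ret = klp_register_patch(&patch);
		if (ret)
			return ret;

		ret = klp_enable_patch(&patch);
		if (ret) {
			WARN_ON(klp_unregister_patch(&patch));
			return ret;
		}
		return 0;
	}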

Signed-off-by: Li Bin <huawei.libin@huawei.com>
---
 arch/arm64/Kconfig                 |    3 ++
 arch/arm64/include/asm/livepatch.h |   45 ++++++++++++++++++++++++++++++++++++
 arch/arm64/kernel/Makefile         |    1 +
 arch/arm64/kernel/livepatch.c      |   38 ++++++++++++++++++++++++++++++
 kernel/livepatch/core.c            |   17 +++++++++----
 5 files changed, 99 insertions(+), 5 deletions(-)
 create mode 100644 arch/arm64/include/asm/livepatch.h
 create mode 100644 arch/arm64/kernel/livepatch.c

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 7bb2468..8a3845c 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -85,6 +85,7 @@ config ARM64
 	select SPARSE_IRQ
 	select SYSCTL_EXCEPTION_TRACE
 	select HAVE_CONTEXT_TRACKING
+	select HAVE_LIVEPATCH
 	help
 	  ARM 64-bit (AArch64) Linux support.
 
@@ -159,6 +160,8 @@ source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
 
+source "kernel/livepatch/Kconfig"
+
 menu "Platform selection"
 
 config ARCH_EXYNOS
diff --git a/arch/arm64/include/asm/livepatch.h b/arch/arm64/include/asm/livepatch.h
new file mode 100644
index 0000000..1890413
--- /dev/null
+++ b/arch/arm64/include/asm/livepatch.h
@@ -0,0 +1,45 @@
+/*
+ * livepatch.h - arm64-specific Kernel Live Patching Core
+ *
+ * Copyright (C) 2014 Li Bin <huawei.libin@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ASM_ARM64_LIVEPATCH_H
+#define _ASM_ARM64_LIVEPATCH_H
+
+#include <linux/module.h>
+#include <linux/ftrace.h>
+
+#ifdef CONFIG_LIVEPATCH
+static inline int klp_check_compiler_support(void)
+{
+#ifndef CC_USING_FENTRY
+	return 1;
+#endif
+	return 0;
+}
+extern int klp_write_module_reloc(struct module *mod, unsigned long type,
+				  unsigned long loc, unsigned long value);
+
+static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+	regs->pc = pc;
+}
+#else
+#error Live patching support is disabled; check CONFIG_LIVEPATCH
+#endif
+
+#endif /* _ASM_ARM64_LIVEPATCH_H */
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 426d076..30d307b 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -23,6 +23,7 @@ arm64-obj-$(CONFIG_COMPAT)		+= sys32.o kuser32.o signal32.o 	\
 					   sys_compat.o entry32.o		\
 					   ../../arm/kernel/opcodes.o
 arm64-obj-$(CONFIG_FUNCTION_TRACER)	+= ftrace.o entry-ftrace.o
+arm64-obj-$(CONFIG_LIVEPATCH) += livepatch.o
 arm64-obj-$(CONFIG_MODULES)		+= arm64ksyms.o module.o
 arm64-obj-$(CONFIG_SMP)			+= smp.o smp_spin_table.o topology.o
 arm64-obj-$(CONFIG_PERF_EVENTS)		+= perf_regs.o
diff --git a/arch/arm64/kernel/livepatch.c b/arch/arm64/kernel/livepatch.c
new file mode 100644
index 0000000..2a55532
--- /dev/null
+++ b/arch/arm64/kernel/livepatch.c
@@ -0,0 +1,38 @@
+/*
+ * livepatch.c - arm64-specific Kernel Live Patching Core
+ *
+ * Copyright (C) 2014 Li Bin <huawei.libin@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <asm/livepatch.h>
+
+/**
+ * klp_write_module_reloc() - write a relocation in a module
+ * @mod:	module in which the section to be modified is found
+ * @type:	ELF relocation type (see asm/elf.h)
+ * @loc:	address that the relocation should be written to
+ * @value:	relocation value (sym address + addend)
+ *
+ * This function writes a relocation to the specified location for
+ * a particular module.
+ */
+int klp_write_module_reloc(struct module *mod, unsigned long type,
+			   unsigned long loc, unsigned long value)
+{
+	pr_err("klp_write_module_reloc is not supported yet\n");
+	return -ENOSYS;
+}
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 284e269..945065f 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -321,6 +321,7 @@ static void notrace klp_ftrace_handler(unsigned long ip,
 {
 	struct klp_ops *ops;
 	struct klp_func *func;
+	unsigned long new_ip;
 
 	ops = container_of(fops, struct klp_ops, fops);
 
@@ -330,7 +331,8 @@ static void notrace klp_ftrace_handler(unsigned long ip,
 	if (WARN_ON_ONCE(!func))
 		goto unlock;
 
-	klp_arch_set_pc(regs, (unsigned long)func->new_func);
+	new_ip = ftrace_function_stub_ip((unsigned long)func->new_func);
+	klp_arch_set_pc(regs, new_ip);
 unlock:
 	rcu_read_unlock();
 }
@@ -338,6 +340,8 @@ unlock:
 static void klp_disable_func(struct klp_func *func)
 {
 	struct klp_ops *ops;
+	int ret;
+	unsigned long ip;
 
 	WARN_ON(func->state != KLP_ENABLED);
 	WARN_ON(!func->old_addr);
@@ -347,8 +351,9 @@ static void klp_disable_func(struct klp_func *func)
 		return;
 
 	if (list_is_singular(&ops->func_stack)) {
-		WARN_ON(unregister_ftrace_function(&ops->fops));
-		WARN_ON(ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0));
+		ip = ftrace_function_stub_ip(func->old_addr);
+		WARN_ON(unregister_ftrace_function(&ops->fops));
+		WARN_ON(ftrace_set_filter_ip(&ops->fops, ip, 1, 0));
 
 		list_del_rcu(&func->stack_node);
 		list_del(&ops->node);
@@ -364,6 +369,7 @@ static int klp_enable_func(struct klp_func *func)
 {
 	struct klp_ops *ops;
 	int ret;
+	unsigned long ip;
 
 	if (WARN_ON(!func->old_addr))
 		return -EINVAL;
@@ -387,7 +393,8 @@ static int klp_enable_func(struct klp_func *func)
 		INIT_LIST_HEAD(&ops->func_stack);
 		list_add_rcu(&func->stack_node, &ops->func_stack);
 
-		ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 0, 0);
+		ip = ftrace_function_stub_ip(func->old_addr);
+		ret = ftrace_set_filter_ip(&ops->fops, ip, 0, 0);
 		if (ret) {
 			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
 			       func->old_name, ret);
@@ -398,7 +405,7 @@ static int klp_enable_func(struct klp_func *func)
 		if (ret) {
 			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
 			       func->old_name, ret);
-			ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0);
+			ftrace_set_filter_ip(&ops->fops, ip, 1, 0);
 			goto err;
 		}
 
-- 
1.7.1



* [RFC PATCH 5/5] livepatch: arm64: support relocation in a module
From: Li Bin @ 2015-05-28  5:51 UTC (permalink / raw)
  To: rostedt, mingo, jpoimboe, sjenning, jkosina, vojtech,
	catalin.marinas, will.deacon, masami.hiramatsu.pt
  Cc: live-patching, linux-arm-kernel, linux-kernel, lizefan,
	felix.yang, guohanjun, xiexiuqi, huawei.libin

From: Xie XiuQi <xiexiuqi@huawei.com>

This patch implements klp_write_module_reloc() for arm64.
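
For context, the livepatch core calls this hook once per dynamic
relocation of a patch object. Roughly (a sketch of the loop in
klp_write_object_relocations() in kernel/livepatch/core.c):

	for (reloc = obj->relocs; reloc->name; reloc++) {
		/* reloc->loc points into the patch module; reloc->val
		 * is the resolved address of the referenced symbol.
		 */
		ret = klp_write_module_reloc(pmod, reloc->type, reloc->loc,
					     reloc->val + reloc->addend);
		if (ret)
			return ret;
	}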

Signed-off-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Li Bin <huawei.libin@huawei.com>
---
 arch/arm64/kernel/livepatch.c |    7 +-
 arch/arm64/kernel/module.c    |  355 +++++++++++++++++++++--------------------
 2 files changed, 186 insertions(+), 176 deletions(-)

diff --git a/arch/arm64/kernel/livepatch.c b/arch/arm64/kernel/livepatch.c
index 2a55532..ad674f0 100644
--- a/arch/arm64/kernel/livepatch.c
+++ b/arch/arm64/kernel/livepatch.c
@@ -18,8 +18,11 @@
  */
 
 #include <linux/module.h>
+#include <linux/uaccess.h>
 #include <asm/livepatch.h>
 
+extern int static_relocate(struct module *mod, unsigned long type,
+			   void *loc, unsigned long value);
 /**
  * klp_write_module_reloc() - write a relocation in a module
  * @mod:	module in which the section to be modified is found
@@ -33,6 +36,6 @@
 int klp_write_module_reloc(struct module *mod, unsigned long type,
 			   unsigned long loc, unsigned long value)
 {
-	pr_err("klp_write_module_reloc is not supported yet\n");
-	return -ENOSYS;
+	/* Perform the static relocation. */
+	return static_relocate(mod, type, (void *)loc, value);
 }
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 67bf410..7781241 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -193,6 +193,182 @@ static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val,
 	return 0;
 }
 
+int static_relocate(struct module *me, unsigned long type, void *loc,
+		    unsigned long val)
+{
+	int ovf = 0;
+	bool overflow_check = true;
+	/* Perform the static relocation. */
+	switch (type) {
+	/* Null relocations. */
+	case R_ARM_NONE:
+	case R_AARCH64_NONE:
+		ovf = 0;
+		break;
+
+		/* Data relocations. */
+	case R_AARCH64_ABS64:
+		overflow_check = false;
+		ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
+		break;
+	case R_AARCH64_ABS32:
+		ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
+		break;
+	case R_AARCH64_ABS16:
+		ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
+		break;
+	case R_AARCH64_PREL64:
+		overflow_check = false;
+		ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
+		break;
+	case R_AARCH64_PREL32:
+		ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
+		break;
+	case R_AARCH64_PREL16:
+		ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
+		break;
+
+		/* MOVW instruction relocations. */
+	case R_AARCH64_MOVW_UABS_G0_NC:
+		overflow_check = false;
+	case R_AARCH64_MOVW_UABS_G0:
+		ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
+				      AARCH64_INSN_IMM_16);
+		break;
+	case R_AARCH64_MOVW_UABS_G1_NC:
+		overflow_check = false;
+	case R_AARCH64_MOVW_UABS_G1:
+		ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
+				      AARCH64_INSN_IMM_16);
+		break;
+	case R_AARCH64_MOVW_UABS_G2_NC:
+		overflow_check = false;
+	case R_AARCH64_MOVW_UABS_G2:
+		ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
+				      AARCH64_INSN_IMM_16);
+		break;
+	case R_AARCH64_MOVW_UABS_G3:
+		/* We're using the top bits so we can't overflow. */
+		overflow_check = false;
+		ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
+				      AARCH64_INSN_IMM_16);
+		break;
+	case R_AARCH64_MOVW_SABS_G0:
+		ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
+				      AARCH64_INSN_IMM_MOVNZ);
+		break;
+	case R_AARCH64_MOVW_SABS_G1:
+		ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
+				      AARCH64_INSN_IMM_MOVNZ);
+		break;
+	case R_AARCH64_MOVW_SABS_G2:
+		ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
+				      AARCH64_INSN_IMM_MOVNZ);
+		break;
+	case R_AARCH64_MOVW_PREL_G0_NC:
+		overflow_check = false;
+		ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
+				      AARCH64_INSN_IMM_MOVK);
+		break;
+	case R_AARCH64_MOVW_PREL_G0:
+		ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
+				      AARCH64_INSN_IMM_MOVNZ);
+		break;
+	case R_AARCH64_MOVW_PREL_G1_NC:
+		overflow_check = false;
+		ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
+				      AARCH64_INSN_IMM_MOVK);
+		break;
+	case R_AARCH64_MOVW_PREL_G1:
+		ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
+				      AARCH64_INSN_IMM_MOVNZ);
+		break;
+	case R_AARCH64_MOVW_PREL_G2_NC:
+		overflow_check = false;
+		ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
+				      AARCH64_INSN_IMM_MOVK);
+		break;
+	case R_AARCH64_MOVW_PREL_G2:
+		ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
+				      AARCH64_INSN_IMM_MOVNZ);
+		break;
+	case R_AARCH64_MOVW_PREL_G3:
+		/* We're using the top bits so we can't overflow. */
+		overflow_check = false;
+		ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
+				      AARCH64_INSN_IMM_MOVNZ);
+		break;
+
+		/* Immediate instruction relocations. */
+	case R_AARCH64_LD_PREL_LO19:
+		ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
+				     AARCH64_INSN_IMM_19);
+		break;
+	case R_AARCH64_ADR_PREL_LO21:
+		ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
+				     AARCH64_INSN_IMM_ADR);
+		break;
+	case R_AARCH64_ADR_PREL_PG_HI21_NC:
+		overflow_check = false;
+	case R_AARCH64_ADR_PREL_PG_HI21:
+		ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
+				     AARCH64_INSN_IMM_ADR);
+		break;
+	case R_AARCH64_ADD_ABS_LO12_NC:
+	case R_AARCH64_LDST8_ABS_LO12_NC:
+		overflow_check = false;
+		ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
+				     AARCH64_INSN_IMM_12);
+		break;
+	case R_AARCH64_LDST16_ABS_LO12_NC:
+		overflow_check = false;
+		ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
+				     AARCH64_INSN_IMM_12);
+		break;
+	case R_AARCH64_LDST32_ABS_LO12_NC:
+		overflow_check = false;
+		ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
+				     AARCH64_INSN_IMM_12);
+		break;
+	case R_AARCH64_LDST64_ABS_LO12_NC:
+		overflow_check = false;
+		ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
+				     AARCH64_INSN_IMM_12);
+		break;
+	case R_AARCH64_LDST128_ABS_LO12_NC:
+		overflow_check = false;
+		ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
+				     AARCH64_INSN_IMM_12);
+		break;
+	case R_AARCH64_TSTBR14:
+		ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
+				     AARCH64_INSN_IMM_14);
+		break;
+	case R_AARCH64_CONDBR19:
+		ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
+				     AARCH64_INSN_IMM_19);
+		break;
+	case R_AARCH64_JUMP26:
+	case R_AARCH64_CALL26:
+		ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
+				     AARCH64_INSN_IMM_26);
+		break;
+
+	default:
+		pr_err("module %s: unsupported RELA relocation: %lu\n",
+			me->name, type);
+		return -ENOEXEC;
+	}
+
+	if (overflow_check && ovf == -ERANGE) {
+		pr_err("module %s: overflow in relocation type %lu val %lx\n",
+			me->name, type, val);
+		return -ENOEXEC;
+	}
+
+	return 0;
+}
+
 int apply_relocate_add(Elf64_Shdr *sechdrs,
 		       const char *strtab,
 		       unsigned int symindex,
@@ -200,12 +376,11 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 		       struct module *me)
 {
 	unsigned int i;
-	int ovf;
-	bool overflow_check;
 	Elf64_Sym *sym;
 	void *loc;
 	u64 val;
 	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
+	int type, ret;
 
 	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
 		/* loc corresponds to P in the AArch64 ELF document. */
@@ -219,182 +394,14 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 		/* val corresponds to (S + A) in the AArch64 ELF document. */
 		val = sym->st_value + rel[i].r_addend;
 
+		type = ELF64_R_TYPE(rel[i].r_info);
 		/* Check for overflow by default. */
-		overflow_check = true;
-
-		/* Perform the static relocation. */
-		switch (ELF64_R_TYPE(rel[i].r_info)) {
-		/* Null relocations. */
-		case R_ARM_NONE:
-		case R_AARCH64_NONE:
-			ovf = 0;
-			break;
-
-		/* Data relocations. */
-		case R_AARCH64_ABS64:
-			overflow_check = false;
-			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
-			break;
-		case R_AARCH64_ABS32:
-			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
-			break;
-		case R_AARCH64_ABS16:
-			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
-			break;
-		case R_AARCH64_PREL64:
-			overflow_check = false;
-			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
-			break;
-		case R_AARCH64_PREL32:
-			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
-			break;
-		case R_AARCH64_PREL16:
-			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
-			break;
-
-		/* MOVW instruction relocations. */
-		case R_AARCH64_MOVW_UABS_G0_NC:
-			overflow_check = false;
-		case R_AARCH64_MOVW_UABS_G0:
-			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
-					      AARCH64_INSN_IMM_16);
-			break;
-		case R_AARCH64_MOVW_UABS_G1_NC:
-			overflow_check = false;
-		case R_AARCH64_MOVW_UABS_G1:
-			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
-					      AARCH64_INSN_IMM_16);
-			break;
-		case R_AARCH64_MOVW_UABS_G2_NC:
-			overflow_check = false;
-		case R_AARCH64_MOVW_UABS_G2:
-			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
-					      AARCH64_INSN_IMM_16);
-			break;
-		case R_AARCH64_MOVW_UABS_G3:
-			/* We're using the top bits so we can't overflow. */
-			overflow_check = false;
-			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
-					      AARCH64_INSN_IMM_16);
-			break;
-		case R_AARCH64_MOVW_SABS_G0:
-			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
-					      AARCH64_INSN_IMM_MOVNZ);
-			break;
-		case R_AARCH64_MOVW_SABS_G1:
-			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
-					      AARCH64_INSN_IMM_MOVNZ);
-			break;
-		case R_AARCH64_MOVW_SABS_G2:
-			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
-					      AARCH64_INSN_IMM_MOVNZ);
-			break;
-		case R_AARCH64_MOVW_PREL_G0_NC:
-			overflow_check = false;
-			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
-					      AARCH64_INSN_IMM_MOVK);
-			break;
-		case R_AARCH64_MOVW_PREL_G0:
-			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
-					      AARCH64_INSN_IMM_MOVNZ);
-			break;
-		case R_AARCH64_MOVW_PREL_G1_NC:
-			overflow_check = false;
-			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
-					      AARCH64_INSN_IMM_MOVK);
-			break;
-		case R_AARCH64_MOVW_PREL_G1:
-			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
-					      AARCH64_INSN_IMM_MOVNZ);
-			break;
-		case R_AARCH64_MOVW_PREL_G2_NC:
-			overflow_check = false;
-			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
-					      AARCH64_INSN_IMM_MOVK);
-			break;
-		case R_AARCH64_MOVW_PREL_G2:
-			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
-					      AARCH64_INSN_IMM_MOVNZ);
-			break;
-		case R_AARCH64_MOVW_PREL_G3:
-			/* We're using the top bits so we can't overflow. */
-			overflow_check = false;
-			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
-					      AARCH64_INSN_IMM_MOVNZ);
-			break;
-
-		/* Immediate instruction relocations. */
-		case R_AARCH64_LD_PREL_LO19:
-			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
-					     AARCH64_INSN_IMM_19);
-			break;
-		case R_AARCH64_ADR_PREL_LO21:
-			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
-					     AARCH64_INSN_IMM_ADR);
-			break;
-		case R_AARCH64_ADR_PREL_PG_HI21_NC:
-			overflow_check = false;
-		case R_AARCH64_ADR_PREL_PG_HI21:
-			ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
-					     AARCH64_INSN_IMM_ADR);
-			break;
-		case R_AARCH64_ADD_ABS_LO12_NC:
-		case R_AARCH64_LDST8_ABS_LO12_NC:
-			overflow_check = false;
-			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
-					     AARCH64_INSN_IMM_12);
-			break;
-		case R_AARCH64_LDST16_ABS_LO12_NC:
-			overflow_check = false;
-			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
-					     AARCH64_INSN_IMM_12);
-			break;
-		case R_AARCH64_LDST32_ABS_LO12_NC:
-			overflow_check = false;
-			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
-					     AARCH64_INSN_IMM_12);
-			break;
-		case R_AARCH64_LDST64_ABS_LO12_NC:
-			overflow_check = false;
-			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
-					     AARCH64_INSN_IMM_12);
-			break;
-		case R_AARCH64_LDST128_ABS_LO12_NC:
-			overflow_check = false;
-			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
-					     AARCH64_INSN_IMM_12);
-			break;
-		case R_AARCH64_TSTBR14:
-			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
-					     AARCH64_INSN_IMM_14);
-			break;
-		case R_AARCH64_CONDBR19:
-			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
-					     AARCH64_INSN_IMM_19);
-			break;
-		case R_AARCH64_JUMP26:
-		case R_AARCH64_CALL26:
-			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
-					     AARCH64_INSN_IMM_26);
-			break;
-
-		default:
-			pr_err("module %s: unsupported RELA relocation: %llu\n",
-			       me->name, ELF64_R_TYPE(rel[i].r_info));
-			return -ENOEXEC;
-		}
-
-		if (overflow_check && ovf == -ERANGE)
-			goto overflow;
-
+		ret = static_relocate(me, type, loc, val);
+		if (ret)
+			return ret;
 	}
 
 	return 0;
-
-overflow:
-	pr_err("module %s: overflow in relocation type %d val %Lx\n",
-	       me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
-	return -ENOEXEC;
 }
 
 int module_finalize(const Elf_Ehdr *hdr,
-- 
1.7.1



* Re: [RFC PATCH 1/5] livepatch: ftrace: arm64: Add support for DYNAMIC_FTRACE_WITH_REGS
From: Paul Bolle @ 2015-05-29  7:14 UTC (permalink / raw)
  To: Li Bin
  Cc: rostedt, mingo, jpoimboe, sjenning, jkosina, vojtech,
	catalin.marinas, will.deacon, masami.hiramatsu.pt, live-patching,
	linux-arm-kernel, linux-kernel, lizefan, felix.yang, guohanjun,
	xiexiuqi

On Thu, 2015-05-28 at 13:51 +0800, Li Bin wrote:
> --- a/arch/arm64/Kconfig
> +++ b/arch/arm64/Kconfig

>  	select HAVE_DYNAMIC_FTRACE
> +	select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE

What's the point of "if HAVE_DYNAMIC_FTRACE" here? That test should
always evaluate to true, because HAVE_DYNAMIC_FTRACE is also selected.


Paul Bolle





* Re: [RFC PATCH 1/5] livepatch: ftrace: arm64: Add support for DYNAMIC_FTRACE_WITH_REGS
From: Li Bin @ 2015-05-29  8:01 UTC (permalink / raw)
  To: Paul Bolle
  Cc: rostedt, mingo, jpoimboe, sjenning, jkosina, vojtech,
	catalin.marinas, will.deacon, masami.hiramatsu.pt, live-patching,
	linux-arm-kernel, linux-kernel, lizefan, felix.yang, guohanjun,
	xiexiuqi

On 2015/5/29 15:14, Paul Bolle wrote:
> On Thu, 2015-05-28 at 13:51 +0800, Li Bin wrote:
>> --- a/arch/arm64/Kconfig
>> +++ b/arch/arm64/Kconfig
> 
>>  	select HAVE_DYNAMIC_FTRACE
>> +	select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
> 
> What's the point of "if HAVE_DYNAMIC_FTRACE" here? That test should
> always evaluate to true, because HAVE_DYNAMIC_FTRACE is also selected.
> 

Ah, I'll modify it.
Thanks!
	Li Bin

> 
> Paul Bolle

* Re: [RFC PATCH 0/5] livepatch: add support on arm64
From: Jiri Kosina @ 2015-05-29 11:52 UTC (permalink / raw)
  To: Li Bin
  Cc: rostedt, mingo, jpoimboe, sjenning, vojtech, catalin.marinas,
	will.deacon, masami.hiramatsu.pt, live-patching,
	linux-arm-kernel, linux-kernel, lizefan, felix.yang, guohanjun,
	xiexiuqi

On Thu, 28 May 2015, Li Bin wrote:

> This patchset proposes a method for implementing the gcc -mfentry
> feature (profile before prologue) on arm64, and proposes a livepatch
> implementation for arm64 based on this feature.
> The gcc implementation of this feature will be posted to the gcc
> community soon.
> 
> With this -mfentry feature, the entry of each function looks like:
> 
> foo:
>     mov x9, x30 
>     bl __fentry__
>     mov x30, x9
>     [prologue]
>     ... 

Thanks a lot for working on arm64 support. I am putting this patchset on
hold until the gcc support is merged. Once that happens, and once the arm64
parts are Acked by the arch maintainers, we can proceed with this patch.

-- 
Jiri Kosina
SUSE Labs


* Re: [RFC PATCH 0/5] livepatch: add support on arm64
From: Masami Hiramatsu @ 2015-05-30  0:01 UTC (permalink / raw)
  To: Li Bin, rostedt, mingo, jpoimboe, sjenning, jkosina, vojtech,
	catalin.marinas, will.deacon
  Cc: live-patching, linux-arm-kernel, linux-kernel, lizefan,
	felix.yang, guohanjun, xiexiuqi, takahiro.akashi, David Long

On 2015/05/28 14:51, Li Bin wrote:
> This patchset proposes a method for implementing the gcc -mfentry
> feature (profile before prologue) on arm64, and proposes a livepatch
> implementation for arm64 based on this feature.
> The gcc implementation of this feature will be posted to the gcc
> community soon.
> 
> With this -mfentry feature, the entry of each function looks like:
> 
> foo:
>     mov x9, x30 
>     bl __fentry__
>     mov x30, x9
>     [prologue]
>     ... 
> 
> x9 is a callee-corruptible (caller-saved) register, and the __fentry__
> function is responsible for preserving all registers, so x9 can be used
> to protect x30. The two added instructions are plain register moves and
> have a relatively small impact on performance.

Hm, this implementation looks good to me :)
This also enables KPROBES_ON_FTRACE too.

Thanks,

> 
> This patchset has been tested on an arm64 platform.
> 
> Li Bin (4):
>   livepatch: ftrace: arm64: Add support for DYNAMIC_FTRACE_WITH_REGS
>   livepatch: ftrace: add ftrace_function_stub_ip function
>   livepatch: ftrace: arm64: Add support for -mfentry on arm64
>   livepatch: arm64: add support for livepatch on arm64
> 
> Xie XiuQi (1):
>   livepatch: arm64: support relocation in a module
> 
>  arch/arm64/Kconfig                 |    5 +
>  arch/arm64/include/asm/ftrace.h    |    9 +
>  arch/arm64/include/asm/livepatch.h |   45 +++++
>  arch/arm64/kernel/Makefile         |    1 +
>  arch/arm64/kernel/arm64ksyms.c     |    4 +
>  arch/arm64/kernel/entry-ftrace.S   |  154 +++++++++++++++-
>  arch/arm64/kernel/ftrace.c         |   28 +++-
>  arch/arm64/kernel/livepatch.c      |   41 ++++
>  arch/arm64/kernel/module.c         |  355 ++++++++++++++++++------------------
>  include/linux/ftrace.h             |    1 +
>  kernel/livepatch/core.c            |   17 ++-
>  kernel/trace/ftrace.c              |   32 ++++
>  scripts/recordmcount.pl            |    2 +-
>  13 files changed, 508 insertions(+), 186 deletions(-)
>  create mode 100644 arch/arm64/include/asm/livepatch.h
>  create mode 100644 arch/arm64/kernel/livepatch.c
> 


-- 
Masami HIRAMATSU
Linux Technology Research Center, System Productivity Research Dept.
Center for Technology Innovation - Systems Engineering
Hitachi, Ltd., Research & Development Group
E-mail: masami.hiramatsu.pt@hitachi.com


* Re: [RFC PATCH 0/5] livepatch: add support on arm64
From: AKASHI Takahiro @ 2015-06-02  2:15 UTC (permalink / raw)
  To: Masami Hiramatsu, Li Bin, rostedt, mingo, jpoimboe, sjenning,
	jkosina, vojtech, catalin.marinas, will.deacon
  Cc: live-patching, linux-arm-kernel, linux-kernel, lizefan,
	felix.yang, guohanjun, xiexiuqi, David Long

On 05/30/2015 09:01 AM, Masami Hiramatsu wrote:
> On 2015/05/28 14:51, Li Bin wrote:
>> This patchset proposes a method for implementing the gcc -mfentry
>> feature (profile before prologue) on arm64, and proposes a livepatch
>> implementation for arm64 based on this feature.
>> The gcc implementation of this feature will be posted to the gcc
>> community soon.
>>
>> With this -mfentry feature, the entry of each function looks like:
>>
>> foo:
>>      mov x9, x30
>>      bl __fentry__
>>      mov x30, x9
>>      [prologue]
>>      ...
>>
>> x9 is a callee-corruptible (caller-saved) register, and the __fentry__
>> function is responsible for preserving all registers, so x9 can be used
>> to protect x30. The two added instructions are plain register moves and
>> have a relatively small impact on performance.
> 
> Hm, this implementation looks good to me :)
> This also enables KPROBES_ON_FTRACE too.

Even if x9 is a callee-saved register, there is no way to restore its original
value when setting up the pt_regs in ftrace_regs_caller.
It's not the right thing for KPROBES_ON_FTRACE, is it?

Saving the link register on the stack is not a big deal since the overhead
of ftrace is much bigger.

-Takahiro AKASHI


> Thanks,
> 
>>
>> This patchset has been tested on an arm64 platform.
>>
>> Li Bin (4):
>>    livepatch: ftrace: arm64: Add support for DYNAMIC_FTRACE_WITH_REGS
>>    livepatch: ftrace: add ftrace_function_stub_ip function
>>    livepatch: ftrace: arm64: Add support for -mfentry on arm64
>>    livepatch: arm64: add support for livepatch on arm64
>>
>> Xie XiuQi (1):
>>    livepatch: arm64: support relocation in a module
>>
>>   arch/arm64/Kconfig                 |    5 +
>>   arch/arm64/include/asm/ftrace.h    |    9 +
>>   arch/arm64/include/asm/livepatch.h |   45 +++++
>>   arch/arm64/kernel/Makefile         |    1 +
>>   arch/arm64/kernel/arm64ksyms.c     |    4 +
>>   arch/arm64/kernel/entry-ftrace.S   |  154 +++++++++++++++-
>>   arch/arm64/kernel/ftrace.c         |   28 +++-
>>   arch/arm64/kernel/livepatch.c      |   41 ++++
>>   arch/arm64/kernel/module.c         |  355 ++++++++++++++++++------------------
>>   include/linux/ftrace.h             |    1 +
>>   kernel/livepatch/core.c            |   17 ++-
>>   kernel/trace/ftrace.c              |   32 ++++
>>   scripts/recordmcount.pl            |    2 +-
>>   13 files changed, 508 insertions(+), 186 deletions(-)
>>   create mode 100644 arch/arm64/include/asm/livepatch.h
>>   create mode 100644 arch/arm64/kernel/livepatch.c
>>
> 
> 


* Re: [RFC PATCH 0/5] livepatch: add support on arm64
From: Li Bin @ 2015-06-02 11:00 UTC (permalink / raw)
  To: AKASHI Takahiro, Masami Hiramatsu, rostedt, mingo, jpoimboe,
	sjenning, jkosina, vojtech, catalin.marinas, will.deacon
  Cc: live-patching, linux-arm-kernel, linux-kernel, lizefan,
	felix.yang, guohanjun, xiexiuqi, David Long

On 2015/6/2 10:15, AKASHI Takahiro wrote:
> On 05/30/2015 09:01 AM, Masami Hiramatsu wrote:
>> On 2015/05/28 14:51, Li Bin wrote:
>>> This patchset proposes a method for implementing the gcc -mfentry
>>> feature (profile before prologue) on arm64, and proposes a livepatch
>>> implementation for arm64 based on this feature.
>>> The gcc implementation of this feature will be posted to the gcc
>>> community soon.
>>>
>>> With this -mfentry feature, the entry of each function looks like:
>>>
>>> foo:
>>>      mov x9, x30
>>>      bl __fentry__
>>>      mov x30, x9
>>>      [prologue]
>>>      ...
>>>
>>> x9 is a callee-corruptible (caller-saved) register, and the __fentry__
>>> function is responsible for preserving all registers, so x9 can be used
>>> to protect x30. The two added instructions are plain register moves and
>>> have a relatively small impact on performance.
>>
>> Hm, this implementation looks good to me :)
>> This also enables KPROBES_ON_FTRACE too.
> 
> Even if x9 is a callee-saved register, there is no way to restore its original
> value when setting up the pt_regs in ftrace_regs_caller.

Hi, Takahiro AKASHI

Firstly, x9 is not a callee-saved but a caller-saved register (also called a
corruptible register).
Secondly, I think x9 is already protected properly; please see the patches:
[PATCH 1/5] livepatch: ftrace: arm64: Add support for DYNAMIC_FTRACE_WITH_REGS
[PATCH 3/5] livepatch: ftrace: arm64: Add support for -mfentry on arm64

> It's not the right thing for KPROBES_ON_FTRACE, is it?
> 
> Saving the link register on the stack is not a big deal since the overhead
> of ftrace is much bigger.

Performance overhead is only one aspect of the problem; more importantly,
it would break the arm64 ABI rules.

Thanks,
	Li Bin
> 
> -Takahiro AKASHI
> 
> 
>> Thanks,
>>
>>>
>>> This patchset has been tested on an arm64 platform.
>>>
>>> Li Bin (4):
>>>    livepatch: ftrace: arm64: Add support for DYNAMIC_FTRACE_WITH_REGS
>>>    livepatch: ftrace: add ftrace_function_stub_ip function
>>>    livepatch: ftrace: arm64: Add support for -mfentry on arm64
>>>    livepatch: arm64: add support for livepatch on arm64
>>>
>>> Xie XiuQi (1):
>>>    livepatch: arm64: support relocation in a module
>>>
>>>   arch/arm64/Kconfig                 |    5 +
>>>   arch/arm64/include/asm/ftrace.h    |    9 +
>>>   arch/arm64/include/asm/livepatch.h |   45 +++++
>>>   arch/arm64/kernel/Makefile         |    1 +
>>>   arch/arm64/kernel/arm64ksyms.c     |    4 +
>>>   arch/arm64/kernel/entry-ftrace.S   |  154 +++++++++++++++-
>>>   arch/arm64/kernel/ftrace.c         |   28 +++-
>>>   arch/arm64/kernel/livepatch.c      |   41 ++++
>>>   arch/arm64/kernel/module.c         |  355 ++++++++++++++++++------------------
>>>   include/linux/ftrace.h             |    1 +
>>>   kernel/livepatch/core.c            |   17 ++-
>>>   kernel/trace/ftrace.c              |   32 ++++
>>>   scripts/recordmcount.pl            |    2 +-
>>>   13 files changed, 508 insertions(+), 186 deletions(-)
>>>   create mode 100644 arch/arm64/include/asm/livepatch.h
>>>   create mode 100644 arch/arm64/kernel/livepatch.c
>>>




* Re: [RFC PATCH 0/5] livepatch: add support on arm64
From: Masami Hiramatsu @ 2015-06-02 21:04 UTC (permalink / raw)
  To: Li Bin, AKASHI Takahiro, rostedt, mingo, jpoimboe, sjenning,
	jkosina, vojtech, catalin.marinas, will.deacon
  Cc: live-patching, linux-arm-kernel, linux-kernel, lizefan,
	felix.yang, guohanjun, xiexiuqi, David Long

On 2015/06/02 20:00, Li Bin wrote:
> On 2015/6/2 10:15, AKASHI Takahiro wrote:
>> On 05/30/2015 09:01 AM, Masami Hiramatsu wrote:
>>> On 2015/05/28 14:51, Li Bin wrote:
>>>> This patchset proposes a method for implementing the gcc -mfentry
>>>> feature (profile before prologue) on arm64, and proposes a livepatch
>>>> implementation for arm64 based on this feature.
>>>> The gcc implementation of this feature will be posted to the gcc
>>>> community soon.
>>>>
>>>> With this -mfentry feature, the entry of each function looks like:
>>>>
>>>> foo:
>>>>      mov x9, x30
>>>>      bl __fentry__
>>>>      mov x30, x9
>>>>      [prologue]
>>>>      ...
>>>>
>>>> x9 is a callee-corruptible (caller-saved) register, and the __fentry__
>>>> function is responsible for preserving all registers, so x9 can be used
>>>> to protect x30. The two added instructions are plain register moves and
>>>> have a relatively small impact on performance.
>>>
>>> Hm, this implementation looks good to me :)
>>> This also enables KPROBES_ON_FTRACE too.
>>
>> Even if x9 is a callee-saved register, there is no way to restore its original
>> value when setting up the pt_regs in ftrace_regs_caller.

Good point :)

> 
> Hi, Takahiro AKASHI
> 
> Firstly, x9 is not a callee-saved but a caller-saved register (also called a
> corruptible register).
> Secondly, I think x9 is already protected properly; please see the patches:
> [PATCH 1/5] livepatch: ftrace: arm64: Add support for DYNAMIC_FTRACE_WITH_REGS
> [PATCH 3/5] livepatch: ftrace: arm64: Add support for -mfentry on arm64

I guess his concern is that the x9 value at the function entrance is lost. For
example, regs->x9 in the handler of ftrace_regs_call is always the same as the
flags (if I understand correctly).
If that is right, it should be documented in the commit log and in
Documentation/trace/ftrace.txt. However, it is practically no problem,
since:
- x9 is a caller-saved register, so functions MUST NOT depend on its value
  (this means ftrace handlers also should not expect any meaningful value
  in regs->x9).
- Even if a function is wrongly coded and accesses x9, it is always the same
  as the caller address (link register), so it is easy to debug :)

So, finally, I think it's OK to use x9 for this purpose.

Thank you,

> 
>> It's not the right thing for KPROBES_ON_FTRACE, is it?
>>
>> Saving Link register in stack is not a big deal since the overhead of ftrace
>> is much bigger.
> 
> Performance overhead is only one aspect of the problem; more importantly,
> it would break the arm64 ABI rules.
> 
> Thanks,
> 	Li Bin
>>
>> -Takahiro AKASHI
>>
>>
>>> Thanks,
>>>
>>>>
>>>> This patchset has been tested on an arm64 platform.
>>>>
>>>> Li Bin (4):
>>>>    livepatch: ftrace: arm64: Add support for DYNAMIC_FTRACE_WITH_REGS
>>>>    livepatch: ftrace: add ftrace_function_stub_ip function
>>>>    livepatch: ftrace: arm64: Add support for -mfentry on arm64
>>>>    livepatch: arm64: add support for livepatch on arm64
>>>>
>>>> Xie XiuQi (1):
>>>>    livepatch: arm64: support relocation in a module
>>>>
>>>>   arch/arm64/Kconfig                 |    5 +
>>>>   arch/arm64/include/asm/ftrace.h    |    9 +
>>>>   arch/arm64/include/asm/livepatch.h |   45 +++++
>>>>   arch/arm64/kernel/Makefile         |    1 +
>>>>   arch/arm64/kernel/arm64ksyms.c     |    4 +
>>>>   arch/arm64/kernel/entry-ftrace.S   |  154 +++++++++++++++-
>>>>   arch/arm64/kernel/ftrace.c         |   28 +++-
>>>>   arch/arm64/kernel/livepatch.c      |   41 ++++
>>>>   arch/arm64/kernel/module.c         |  355 ++++++++++++++++++------------------
>>>>   include/linux/ftrace.h             |    1 +
>>>>   kernel/livepatch/core.c            |   17 ++-
>>>>   kernel/trace/ftrace.c              |   32 ++++
>>>>   scripts/recordmcount.pl            |    2 +-
>>>>   13 files changed, 508 insertions(+), 186 deletions(-)
>>>>   create mode 100644 arch/arm64/include/asm/livepatch.h
>>>>   create mode 100644 arch/arm64/kernel/livepatch.c
>>>>


-- 
Masami HIRAMATSU
Linux Technology Research Center, System Productivity Research Dept.
Center for Technology Innovation - Systems Engineering
Hitachi, Ltd., Research & Development Group
E-mail: masami.hiramatsu.pt@hitachi.com

