From: Li Bin <huawei.libin@huawei.com>
To: <rostedt@goodmis.org>, <mingo@kernel.org>, <jpoimboe@redhat.com>,
	<sjenning@redhat.com>, <jkosina@suse.cz>, <vojtech@suse.cz>,
	<catalin.marinas@arm.com>, <will.deacon@arm.com>,
	<masami.hiramatsu.pt@hitachi.com>
Cc: <live-patching@vger.kernel.org>,
	<linux-arm-kernel@lists.infradead.org>,
	<linux-kernel@vger.kernel.org>, <lizefan@huawei.com>,
	<felix.yang@huawei.com>, <guohanjun@huawei.com>,
	<xiexiuqi@huawei.com>, <huawei.libin@huawei.com>
Subject: [RFC PATCH 5/5] livepatch: arm64: support relocation in a module
Date: Thu, 28 May 2015 13:51:05 +0800
Message-ID: <1432792265-24076-6-git-send-email-huawei.libin@huawei.com>
In-Reply-To: <1432792265-24076-1-git-send-email-huawei.libin@huawei.com>

From: Xie XiuQi <xiexiuqi@huawei.com>

This patch implements klp_write_module_reloc() on the arm64 platform by
factoring the existing static relocation handling out of
apply_relocate_add() into a new static_relocate() helper in
arch/arm64/kernel/module.c and calling it from the livepatch hook.
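
For reference, the caller side is not part of this patch: the livepatch
core is expected to walk the relocation entries recorded in a patch module
and hand each one to this arch hook. Below is a minimal sketch of such a
loop; the helper name example_write_relocs() and the klp_reloc field names
used here are illustrative assumptions, not code from this series.

	/*
	 * Illustrative only: a caller that resolves each recorded relocation
	 * through the arch hook added by this patch. Field names (type, loc,
	 * val) are assumed here; see the livepatch core for the real loop.
	 */
	static int example_write_relocs(struct module *pmod,
					struct klp_reloc *relocs)
	{
		struct klp_reloc *reloc;
		int ret;

		for (reloc = relocs; reloc->name; reloc++) {
			/* loc: address to patch; val: resolved symbol value */
			ret = klp_write_module_reloc(pmod, reloc->type,
						     reloc->loc, reloc->val);
			if (ret)
				return ret;
		}

		return 0;
	}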

Signed-off-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Li Bin <huawei.libin@huawei.com>
---
 arch/arm64/kernel/livepatch.c |    7 +-
 arch/arm64/kernel/module.c    |  355 +++++++++++++++++++++--------------------
 2 files changed, 186 insertions(+), 176 deletions(-)

diff --git a/arch/arm64/kernel/livepatch.c b/arch/arm64/kernel/livepatch.c
index 2a55532..ad674f0 100644
--- a/arch/arm64/kernel/livepatch.c
+++ b/arch/arm64/kernel/livepatch.c
@@ -18,8 +18,11 @@
  */
 
 #include <linux/module.h>
+#include <linux/uaccess.h>
 #include <asm/livepatch.h>
 
+extern int static_relocate(struct module *mod, unsigned long type,
+			   void * loc, unsigned long value);
 /**
  * klp_write_module_reloc() - write a relocation in a module
  * @mod:	module in which the section to be modified is found
@@ -33,6 +36,6 @@
 int klp_write_module_reloc(struct module *mod, unsigned long type,
 			   unsigned long loc, unsigned long value)
 {
-	pr_err("lpc_write_module_reloc has not supported now\n");
-	return -ENOSYS;
+	/* Perform the static relocation. */
+	return static_relocate(mod, type, (void *)loc, value);
 }
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 67bf410..7781241 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -193,6 +193,182 @@ static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val,
 	return 0;
 }
 
+int static_relocate(struct module *me, unsigned long type, void *loc,
+		    unsigned long val)
+{
+	int ovf = 0;
+	bool overflow_check = true;
+	/* Perform the static relocation. */
+	switch (type) {
+	/* Null relocations. */
+	case R_ARM_NONE:
+	case R_AARCH64_NONE:
+		ovf = 0;
+		break;
+
+		/* Data relocations. */
+	case R_AARCH64_ABS64:
+		overflow_check = false;
+		ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
+		break;
+	case R_AARCH64_ABS32:
+		ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
+		break;
+	case R_AARCH64_ABS16:
+		ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
+		break;
+	case R_AARCH64_PREL64:
+		overflow_check = false;
+		ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
+		break;
+	case R_AARCH64_PREL32:
+		ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
+		break;
+	case R_AARCH64_PREL16:
+		ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
+		break;
+
+		/* MOVW instruction relocations. */
+	case R_AARCH64_MOVW_UABS_G0_NC:
+		overflow_check = false;
+	case R_AARCH64_MOVW_UABS_G0:
+		ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
+				      AARCH64_INSN_IMM_16);
+		break;
+	case R_AARCH64_MOVW_UABS_G1_NC:
+		overflow_check = false;
+	case R_AARCH64_MOVW_UABS_G1:
+		ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
+				      AARCH64_INSN_IMM_16);
+		break;
+	case R_AARCH64_MOVW_UABS_G2_NC:
+		overflow_check = false;
+	case R_AARCH64_MOVW_UABS_G2:
+		ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
+				      AARCH64_INSN_IMM_16);
+		break;
+	case R_AARCH64_MOVW_UABS_G3:
+		/* We're using the top bits so we can't overflow. */
+		overflow_check = false;
+		ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
+				      AARCH64_INSN_IMM_16);
+		break;
+	case R_AARCH64_MOVW_SABS_G0:
+		ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
+				      AARCH64_INSN_IMM_MOVNZ);
+		break;
+	case R_AARCH64_MOVW_SABS_G1:
+		ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
+				      AARCH64_INSN_IMM_MOVNZ);
+		break;
+	case R_AARCH64_MOVW_SABS_G2:
+		ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
+				      AARCH64_INSN_IMM_MOVNZ);
+		break;
+	case R_AARCH64_MOVW_PREL_G0_NC:
+		overflow_check = false;
+		ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
+				      AARCH64_INSN_IMM_MOVK);
+		break;
+	case R_AARCH64_MOVW_PREL_G0:
+		ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
+				      AARCH64_INSN_IMM_MOVNZ);
+		break;
+	case R_AARCH64_MOVW_PREL_G1_NC:
+		overflow_check = false;
+		ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
+				      AARCH64_INSN_IMM_MOVK);
+		break;
+	case R_AARCH64_MOVW_PREL_G1:
+		ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
+				      AARCH64_INSN_IMM_MOVNZ);
+		break;
+	case R_AARCH64_MOVW_PREL_G2_NC:
+		overflow_check = false;
+		ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
+				      AARCH64_INSN_IMM_MOVK);
+		break;
+	case R_AARCH64_MOVW_PREL_G2:
+		ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
+				      AARCH64_INSN_IMM_MOVNZ);
+		break;
+	case R_AARCH64_MOVW_PREL_G3:
+		/* We're using the top bits so we can't overflow. */
+		overflow_check = false;
+		ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
+				      AARCH64_INSN_IMM_MOVNZ);
+		break;
+
+		/* Immediate instruction relocations. */
+	case R_AARCH64_LD_PREL_LO19:
+		ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
+				     AARCH64_INSN_IMM_19);
+		break;
+	case R_AARCH64_ADR_PREL_LO21:
+		ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
+				     AARCH64_INSN_IMM_ADR);
+		break;
+	case R_AARCH64_ADR_PREL_PG_HI21_NC:
+		overflow_check = false;
+	case R_AARCH64_ADR_PREL_PG_HI21:
+		ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
+				     AARCH64_INSN_IMM_ADR);
+		break;
+	case R_AARCH64_ADD_ABS_LO12_NC:
+	case R_AARCH64_LDST8_ABS_LO12_NC:
+		overflow_check = false;
+		ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
+				     AARCH64_INSN_IMM_12);
+		break;
+	case R_AARCH64_LDST16_ABS_LO12_NC:
+		overflow_check = false;
+		ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
+				     AARCH64_INSN_IMM_12);
+		break;
+	case R_AARCH64_LDST32_ABS_LO12_NC:
+		overflow_check = false;
+		ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
+				     AARCH64_INSN_IMM_12);
+		break;
+	case R_AARCH64_LDST64_ABS_LO12_NC:
+		overflow_check = false;
+		ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
+				     AARCH64_INSN_IMM_12);
+		break;
+	case R_AARCH64_LDST128_ABS_LO12_NC:
+		overflow_check = false;
+		ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
+				     AARCH64_INSN_IMM_12);
+		break;
+	case R_AARCH64_TSTBR14:
+		ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
+				     AARCH64_INSN_IMM_14);
+		break;
+	case R_AARCH64_CONDBR19:
+		ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
+				     AARCH64_INSN_IMM_19);
+		break;
+	case R_AARCH64_JUMP26:
+	case R_AARCH64_CALL26:
+		ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
+				     AARCH64_INSN_IMM_26);
+		break;
+
+	default:
+		pr_err("module %s: unsupported RELA relocation: %lu\n",
+			me->name, type);
+		return -ENOEXEC;
+	}
+
+	if (overflow_check && ovf == -ERANGE) {
+		pr_err("module %s: overflow in relocation type %lu val %lx\n",
+			me->name, type, val);
+		return -ENOEXEC;
+	}
+
+	return 0;
+}
+
 int apply_relocate_add(Elf64_Shdr *sechdrs,
 		       const char *strtab,
 		       unsigned int symindex,
@@ -200,12 +376,11 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 		       struct module *me)
 {
 	unsigned int i;
-	int ovf;
-	bool overflow_check;
 	Elf64_Sym *sym;
 	void *loc;
 	u64 val;
 	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
+	int type, ret;
 
 	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
 		/* loc corresponds to P in the AArch64 ELF document. */
@@ -219,182 +394,14 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 		/* val corresponds to (S + A) in the AArch64 ELF document. */
 		val = sym->st_value + rel[i].r_addend;
 
+		type = ELF64_R_TYPE(rel[i].r_info);
 		/* Check for overflow by default. */
-		overflow_check = true;
-
-		/* Perform the static relocation. */
-		switch (ELF64_R_TYPE(rel[i].r_info)) {
-		/* Null relocations. */
-		case R_ARM_NONE:
-		case R_AARCH64_NONE:
-			ovf = 0;
-			break;
-
-		/* Data relocations. */
-		case R_AARCH64_ABS64:
-			overflow_check = false;
-			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
-			break;
-		case R_AARCH64_ABS32:
-			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
-			break;
-		case R_AARCH64_ABS16:
-			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
-			break;
-		case R_AARCH64_PREL64:
-			overflow_check = false;
-			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
-			break;
-		case R_AARCH64_PREL32:
-			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
-			break;
-		case R_AARCH64_PREL16:
-			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
-			break;
-
-		/* MOVW instruction relocations. */
-		case R_AARCH64_MOVW_UABS_G0_NC:
-			overflow_check = false;
-		case R_AARCH64_MOVW_UABS_G0:
-			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
-					      AARCH64_INSN_IMM_16);
-			break;
-		case R_AARCH64_MOVW_UABS_G1_NC:
-			overflow_check = false;
-		case R_AARCH64_MOVW_UABS_G1:
-			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
-					      AARCH64_INSN_IMM_16);
-			break;
-		case R_AARCH64_MOVW_UABS_G2_NC:
-			overflow_check = false;
-		case R_AARCH64_MOVW_UABS_G2:
-			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
-					      AARCH64_INSN_IMM_16);
-			break;
-		case R_AARCH64_MOVW_UABS_G3:
-			/* We're using the top bits so we can't overflow. */
-			overflow_check = false;
-			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
-					      AARCH64_INSN_IMM_16);
-			break;
-		case R_AARCH64_MOVW_SABS_G0:
-			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
-					      AARCH64_INSN_IMM_MOVNZ);
-			break;
-		case R_AARCH64_MOVW_SABS_G1:
-			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
-					      AARCH64_INSN_IMM_MOVNZ);
-			break;
-		case R_AARCH64_MOVW_SABS_G2:
-			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
-					      AARCH64_INSN_IMM_MOVNZ);
-			break;
-		case R_AARCH64_MOVW_PREL_G0_NC:
-			overflow_check = false;
-			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
-					      AARCH64_INSN_IMM_MOVK);
-			break;
-		case R_AARCH64_MOVW_PREL_G0:
-			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
-					      AARCH64_INSN_IMM_MOVNZ);
-			break;
-		case R_AARCH64_MOVW_PREL_G1_NC:
-			overflow_check = false;
-			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
-					      AARCH64_INSN_IMM_MOVK);
-			break;
-		case R_AARCH64_MOVW_PREL_G1:
-			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
-					      AARCH64_INSN_IMM_MOVNZ);
-			break;
-		case R_AARCH64_MOVW_PREL_G2_NC:
-			overflow_check = false;
-			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
-					      AARCH64_INSN_IMM_MOVK);
-			break;
-		case R_AARCH64_MOVW_PREL_G2:
-			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
-					      AARCH64_INSN_IMM_MOVNZ);
-			break;
-		case R_AARCH64_MOVW_PREL_G3:
-			/* We're using the top bits so we can't overflow. */
-			overflow_check = false;
-			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
-					      AARCH64_INSN_IMM_MOVNZ);
-			break;
-
-		/* Immediate instruction relocations. */
-		case R_AARCH64_LD_PREL_LO19:
-			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
-					     AARCH64_INSN_IMM_19);
-			break;
-		case R_AARCH64_ADR_PREL_LO21:
-			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
-					     AARCH64_INSN_IMM_ADR);
-			break;
-		case R_AARCH64_ADR_PREL_PG_HI21_NC:
-			overflow_check = false;
-		case R_AARCH64_ADR_PREL_PG_HI21:
-			ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
-					     AARCH64_INSN_IMM_ADR);
-			break;
-		case R_AARCH64_ADD_ABS_LO12_NC:
-		case R_AARCH64_LDST8_ABS_LO12_NC:
-			overflow_check = false;
-			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
-					     AARCH64_INSN_IMM_12);
-			break;
-		case R_AARCH64_LDST16_ABS_LO12_NC:
-			overflow_check = false;
-			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
-					     AARCH64_INSN_IMM_12);
-			break;
-		case R_AARCH64_LDST32_ABS_LO12_NC:
-			overflow_check = false;
-			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
-					     AARCH64_INSN_IMM_12);
-			break;
-		case R_AARCH64_LDST64_ABS_LO12_NC:
-			overflow_check = false;
-			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
-					     AARCH64_INSN_IMM_12);
-			break;
-		case R_AARCH64_LDST128_ABS_LO12_NC:
-			overflow_check = false;
-			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
-					     AARCH64_INSN_IMM_12);
-			break;
-		case R_AARCH64_TSTBR14:
-			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
-					     AARCH64_INSN_IMM_14);
-			break;
-		case R_AARCH64_CONDBR19:
-			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
-					     AARCH64_INSN_IMM_19);
-			break;
-		case R_AARCH64_JUMP26:
-		case R_AARCH64_CALL26:
-			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
-					     AARCH64_INSN_IMM_26);
-			break;
-
-		default:
-			pr_err("module %s: unsupported RELA relocation: %llu\n",
-			       me->name, ELF64_R_TYPE(rel[i].r_info));
-			return -ENOEXEC;
-		}
-
-		if (overflow_check && ovf == -ERANGE)
-			goto overflow;
-
+		ret = static_relocate(me, type, loc, val);
+		if (ret)
+			return ret;
 	}
 
 	return 0;
-
-overflow:
-	pr_err("module %s: overflow in relocation type %d val %Lx\n",
-	       me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
-	return -ENOEXEC;
 }
 
 int module_finalize(const Elf_Ehdr *hdr,
-- 
1.7.1



Thread overview: 13+ messages
2015-05-28  5:51 [RFC PATCH 0/5] livepatch: add support on arm64 Li Bin
2015-05-28  5:51 ` [RFC PATCH 1/5] livepatch: ftrace: arm64: Add support for DYNAMIC_FTRACE_WITH_REGS Li Bin
2015-05-29  7:14   ` Paul Bolle
2015-05-29  8:01     ` Li Bin
2015-05-28  5:51 ` [RFC PATCH 2/5] livepatch: ftrace: add ftrace_function_stub_ip function Li Bin
2015-05-28  5:51 ` [RFC PATCH 3/5] livepatch: ftrace: arm64: Add support for -mfentry on arm64 Li Bin
2015-05-28  5:51 ` [RFC PATCH 4/5] livepatch: arm64: add support for livepatch " Li Bin
2015-05-28  5:51 ` Li Bin [this message]
2015-05-29 11:52 ` [RFC PATCH 0/5] livepatch: add support " Jiri Kosina
2015-05-30  0:01 ` Masami Hiramatsu
2015-06-02  2:15   ` AKASHI Takahiro
2015-06-02 11:00     ` Li Bin
2015-06-02 21:04       ` Masami Hiramatsu
