linux-arch.vger.kernel.org archive mirror
* [PATCH 1/2] LoongArch: Add alternative runtime patching mechanism
@ 2022-10-24  7:04 Huacai Chen
  2022-10-24  7:04 ` [PATCH 2/2] LoongArch: Use alternative to optimize libraries Huacai Chen
  2022-10-24 12:51 ` [PATCH 1/2] LoongArch: Add alternative runtime patching mechanism Jinyang He
  0 siblings, 2 replies; 4+ messages in thread
From: Huacai Chen @ 2022-10-24  7:04 UTC (permalink / raw)
  To: Arnd Bergmann, Huacai Chen
  Cc: loongarch, linux-arch, Xuefeng Li, Guo Ren, Xuerui Wang,
	Jiaxun Yang, linux-kernel, Huacai Chen, Jun Yi

Introduce the "alternative" mechanism from ARM64 and x86 for LoongArch
to apply runtime patching. The main purpose of this patch is to provide
a framework. In the future we can use this mechanism (i.e., the ALTERNATIVE
and ALTERNATIVE_2 macros) to optimize hotspot functions according to CPU
features.
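
For example, with a hypothetical CPU_FEATURE_FOO capability bit, a hot
path could be patched at boot time like this (an illustrative sketch
only, not part of this patch):

	/* Runs "dbar 0" by default; patched to "nop" if the CPU has FOO. */
	alternative("dbar 0", "nop", CPU_FEATURE_FOO);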

Signed-off-by: Jun Yi <yijun@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
---
 arch/loongarch/include/asm/alternative-asm.h |  82 ++++++
 arch/loongarch/include/asm/alternative.h     | 176 ++++++++++++
 arch/loongarch/include/asm/bugs.h            |  15 ++
 arch/loongarch/include/asm/inst.h            |  10 +
 arch/loongarch/kernel/Makefile               |   2 +-
 arch/loongarch/kernel/alternative.c          | 266 +++++++++++++++++++
 arch/loongarch/kernel/module.c               |  16 ++
 arch/loongarch/kernel/setup.c                |   7 +
 arch/loongarch/kernel/vmlinux.lds.S          |  12 +
 9 files changed, 585 insertions(+), 1 deletion(-)
 create mode 100644 arch/loongarch/include/asm/alternative-asm.h
 create mode 100644 arch/loongarch/include/asm/alternative.h
 create mode 100644 arch/loongarch/include/asm/bugs.h
 create mode 100644 arch/loongarch/kernel/alternative.c

diff --git a/arch/loongarch/include/asm/alternative-asm.h b/arch/loongarch/include/asm/alternative-asm.h
new file mode 100644
index 000000000000..f0f32ace29b1
--- /dev/null
+++ b/arch/loongarch/include/asm/alternative-asm.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ALTERNATIVE_ASM_H
+#define _ASM_ALTERNATIVE_ASM_H
+
+#ifdef __ASSEMBLY__
+
+#include <asm/asm.h>
+
+/*
+ * Issue one struct alt_instr descriptor entry (need to put it into
+ * the section .altinstructions, see below). This entry contains
+ * enough information for the alternatives patching code to patch an
+ * instruction. See apply_alternatives().
+ */
+.macro altinstruction_entry orig alt feature orig_len alt_len
+	.long \orig - .
+	.long \alt - .
+	.2byte \feature
+	.byte \orig_len
+	.byte \alt_len
+.endm
+
+/*
+ * Define an alternative between two instructions. If @feature is
+ * present, early code in apply_alternatives() replaces @oldinstr with
+ * @newinstr. ".fill" directive takes care of proper instruction padding
+ * in case @newinstr is longer than @oldinstr.
+ */
+.macro ALTERNATIVE oldinstr, newinstr, feature
+140 :
+	\oldinstr
+141 :
+	.fill - (((144f-143f)-(141b-140b)) > 0) * ((144f-143f)-(141b-140b)) / 4, 4, 0x03400000
+142 :
+
+	.pushsection .altinstructions, "a"
+	altinstruction_entry 140b, 143f, \feature, 142b-140b, 144f-143f
+	.popsection
+
+	.subsection 1
+143 :
+	\newinstr
+144 :
+	.previous
+.endm
+
+#define old_len			(141b-140b)
+#define new_len1		(144f-143f)
+#define new_len2		(145f-144f)
+
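+/* Branchless max(a, b): in gas, (a) < (b) evaluates to -1 when true. */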
+#define alt_max_short(a, b)	((a) ^ (((a) ^ (b)) & -(-((a) < (b)))))
+
+/*
+ * Same as ALTERNATIVE macro above but for two alternatives. If CPU
+ * has @feature1, it replaces @oldinstr with @newinstr1. If CPU has
+ * @feature2, it replaces @oldinstr with @newinstr2.
+ */
+.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2
+140 :
+	\oldinstr
+141 :
+	.fill - ((alt_max_short(new_len1, new_len2) - (old_len)) > 0) * \
+		(alt_max_short(new_len1, new_len2) - (old_len)) / 4, 4, 0x03400000
+142 :
+
+	.pushsection .altinstructions, "a"
+	altinstruction_entry 140b, 143f, \feature1, 142b-140b, 144f-143f
+	altinstruction_entry 140b, 144f, \feature2, 142b-140b, 145f-144f
+	.popsection
+
+	.subsection 1
+143 :
+	\newinstr1
+144 :
+	\newinstr2
+145 :
+	.previous
+.endm
+
+#endif  /*  __ASSEMBLY__  */
+
+#endif /* _ASM_ALTERNATIVE_ASM_H */
diff --git a/arch/loongarch/include/asm/alternative.h b/arch/loongarch/include/asm/alternative.h
new file mode 100644
index 000000000000..b4fe66c7067e
--- /dev/null
+++ b/arch/loongarch/include/asm/alternative.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ALTERNATIVE_H
+#define _ASM_ALTERNATIVE_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/stringify.h>
+#include <asm/asm.h>
+
+struct alt_instr {
+	s32 instr_offset;	/* offset to original instruction */
+	s32 replace_offset;	/* offset to replacement instruction */
+	u16 feature;		/* feature bit set for replacement */
+	u8  instrlen;		/* length of original instruction */
+	u8  replacementlen;	/* length of new instruction */
+} __packed;
+
+/*
+ * Debug flag that can be tested to see whether alternative
+ * instructions were patched in already:
+ */
+extern int alternatives_patched;
+extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
+
+extern void alternative_instructions(void);
+extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
+
+#define b_replacement(num)	"664"#num
+#define e_replacement(num)	"665"#num
+
+#define alt_end_marker		"663"
+#define alt_slen		"662b-661b"
+#define alt_total_slen		alt_end_marker"b-661b"
+#define alt_rlen(num)		e_replacement(num)"f-"b_replacement(num)"f"
+
+#define __OLDINSTR(oldinstr, num)					\
+	"661:\n\t" oldinstr "\n662:\n"					\
+	".fill -(((" alt_rlen(num) ")-(" alt_slen ")) > 0) * "		\
+		"((" alt_rlen(num) ")-(" alt_slen ")) / 4, 4, 0x03400000\n"
+
+#define OLDINSTR(oldinstr, num)						\
+	__OLDINSTR(oldinstr, num)					\
+	alt_end_marker ":\n"
+
+#define alt_max_short(a, b)	"((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") < (" b ")))))"
+
+/*
+ * Pad the second replacement alternative with additional NOPs if it is
+ * additionally longer than the first replacement alternative.
+ */
+#define OLDINSTR_2(oldinstr, num1, num2) \
+	"661:\n\t" oldinstr "\n662:\n"								\
+	".fill -((" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")) > 0) * "	\
+		"(" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")) / 4, "	\
+		"4, 0x03400000\n"	\
+	alt_end_marker ":\n"
+
+#define ALTINSTR_ENTRY(feature, num)					      \
+	" .long 661b - .\n"				/* label           */ \
+	" .long " b_replacement(num)"f - .\n"		/* new instruction */ \
+	" .2byte " __stringify(feature) "\n"		/* feature bit     */ \
+	" .byte " alt_total_slen "\n"			/* source len      */ \
+	" .byte " alt_rlen(num) "\n"			/* replacement len */
+
+#define ALTINSTR_REPLACEMENT(newinstr, feature, num)	/* replacement */     \
+	b_replacement(num)":\n\t" newinstr "\n" e_replacement(num) ":\n\t"
+
+/* alternative assembly primitive: */
+#define ALTERNATIVE(oldinstr, newinstr, feature)			\
+	OLDINSTR(oldinstr, 1)						\
+	".pushsection .altinstructions,\"a\"\n"				\
+	ALTINSTR_ENTRY(feature, 1)					\
+	".popsection\n"							\
+	".subsection 1\n" \
+	ALTINSTR_REPLACEMENT(newinstr, feature, 1)			\
+	".previous\n"
+
+#define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\
+	OLDINSTR_2(oldinstr, 1, 2)					\
+	".pushsection .altinstructions,\"a\"\n"				\
+	ALTINSTR_ENTRY(feature1, 1)					\
+	ALTINSTR_ENTRY(feature2, 2)					\
+	".popsection\n"							\
+	".subsection 1\n" \
+	ALTINSTR_REPLACEMENT(newinstr1, feature1, 1)			\
+	ALTINSTR_REPLACEMENT(newinstr2, feature2, 2)			\
+	".previous\n"
+
+/*
+ * Alternative instructions for different CPU types or capabilities.
+ *
+ * This allows optimized instructions to be used even in generic
+ * binary kernels.
+ *
+ * The length of oldinstr must be greater than or equal to the length of
+ * newinstr; it can be padded with NOPs as needed.
+ *
+ * For non-barrier-like inlines please define new variants
+ * without the volatile and memory clobber.
+ */
+#define alternative(oldinstr, newinstr, feature)			\
+	(asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) : : : "memory"))
+
+#define alternative_2(oldinstr, newinstr1, feature1, newinstr2, feature2) \
+	(asm volatile(ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2) ::: "memory"))
+
+/*
+ * Alternative inline assembly with input.
+ *
+ * Peculiarities:
+ * No memory clobber here.
+ * Argument numbers start with 1.
+ * It is best to use constraints that are fixed size (like (%1) ... "r").
+ * If you use variable sized constraints like "m" or "g" in the
+ * replacement, make sure to pad to the worst case length.
+ * An unused argument 0 is left in to keep API compatibility.
+ */
+#define alternative_input(oldinstr, newinstr, feature, input...)	\
+	(asm volatile (ALTERNATIVE(oldinstr, newinstr, feature)		\
+		: : "i" (0), ## input))
+
+/*
+ * This is similar to alternative_input. But it has two features and
+ * respective instructions.
+ *
+ * If CPU has feature2, newinstr2 is used.
+ * Otherwise, if CPU has feature1, newinstr1 is used.
+ * Otherwise, oldinstr is used.
+ */
+#define alternative_input_2(oldinstr, newinstr1, feature1, newinstr2,	     \
+			   feature2, input...)				     \
+	(asm volatile(ALTERNATIVE_2(oldinstr, newinstr1, feature1,	     \
+		newinstr2, feature2)					     \
+		: : "i" (0), ## input))
+
+/* Like alternative_input, but with a single output argument */
+#define alternative_io(oldinstr, newinstr, feature, output, input...)	\
+	(asm volatile (ALTERNATIVE(oldinstr, newinstr, feature)		\
+		: output : "i" (0), ## input))
+
+/* Like alternative_io, but for replacing a direct call with another one. */
+#define alternative_call(oldfunc, newfunc, feature, output, input...)	\
+	(asm volatile (ALTERNATIVE("call %P[old]", "call %P[new]", feature) \
+		: output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input))
+
+/*
+ * Like alternative_call, but there are two features and respective functions.
+ * If CPU has feature2, function2 is used.
+ * Otherwise, if CPU has feature1, function1 is used.
+ * Otherwise, old function is used.
+ */
+#define alternative_call_2(oldfunc, newfunc1, feature1, newfunc2, feature2,   \
+			   output, input...)				      \
+	(asm volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", feature1,\
+		"call %P[new2]", feature2)				      \
+		: output, ASM_CALL_CONSTRAINT				      \
+		: [old] "i" (oldfunc), [new1] "i" (newfunc1),		      \
+		  [new2] "i" (newfunc2), ## input))
+
+/*
+ * use this macro(s) if you need more than one output parameter
+ * in alternative_io
+ */
+#define ASM_OUTPUT2(a...) a
+
+/*
+ * use this macro if you need clobbers but no inputs in
+ * alternative_{input,io,call}()
+ */
+#define ASM_NO_INPUT_CLOBBER(clbr...) "i" (0) : clbr
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_ALTERNATIVE_H */
diff --git a/arch/loongarch/include/asm/bugs.h b/arch/loongarch/include/asm/bugs.h
new file mode 100644
index 000000000000..651fffe1f743
--- /dev/null
+++ b/arch/loongarch/include/asm/bugs.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This is included by init/main.c to check for architecture-dependent bugs.
+ *
+ * Copyright (C) 2020-2021 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_BUGS_H
+#define _ASM_BUGS_H
+
+#include <asm/cpu.h>
+#include <asm/cpu-info.h>
+
+extern void check_bugs(void);
+
+#endif /* _ASM_BUGS_H */
diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h
index 889d6c9fc2b6..bd4c116aa73d 100644
--- a/arch/loongarch/include/asm/inst.h
+++ b/arch/loongarch/include/asm/inst.h
@@ -8,6 +8,7 @@
 #include <linux/types.h>
 #include <asm/asm.h>
 
+#define INSN_NOP		0x03400000
 #define INSN_BREAK		0x002a0000
 
 #define ADDR_IMMMASK_LU52ID	0xFFF0000000000000
@@ -28,6 +29,7 @@ enum reg0i26_op {
 enum reg1i20_op {
 	lu12iw_op	= 0x0a,
 	lu32id_op	= 0x0b,
+	pcaddi_op	= 0x0c,
 	pcaddu12i_op	= 0x0e,
 	pcaddu18i_op	= 0x0f,
 };
@@ -35,6 +37,8 @@ enum reg1i20_op {
 enum reg1i21_op {
 	beqz_op		= 0x10,
 	bnez_op		= 0x11,
+	bceqz_op	= 0x12,
+	bcnez_op	= 0x12,
 };
 
 enum reg2_op {
@@ -315,6 +319,12 @@ static inline bool is_imm_negative(unsigned long val, unsigned int bit)
 	return val & (1UL << (bit - 1));
 }
 
+static inline bool is_pc_ins(union loongarch_instruction *ip)
+{
+	return ip->reg1i20_format.opcode >= pcaddi_op &&
+			ip->reg1i20_format.opcode <= pcaddu18i_op;
+}
+
 static inline bool is_branch_ins(union loongarch_instruction *ip)
 {
 	return ip->reg1i21_format.opcode >= beqz_op &&
diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
index 2ad2555b53ea..86744531b100 100644
--- a/arch/loongarch/kernel/Makefile
+++ b/arch/loongarch/kernel/Makefile
@@ -8,7 +8,7 @@ extra-y		:= vmlinux.lds
 obj-y		+= head.o cpu-probe.o cacheinfo.o env.o setup.o entry.o genex.o \
 		   traps.o irq.o idle.o process.o dma.o mem.o io.o reset.o switch.o \
 		   elf.o syscall.o signal.o time.o topology.o inst.o ptrace.o vdso.o \
-		   unaligned.o
+		   alternative.o unaligned.o
 
 obj-$(CONFIG_ACPI)		+= acpi.o
 obj-$(CONFIG_EFI) 		+= efi.o
diff --git a/arch/loongarch/kernel/alternative.c b/arch/loongarch/kernel/alternative.c
new file mode 100644
index 000000000000..43434150b853
--- /dev/null
+++ b/arch/loongarch/kernel/alternative.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <asm/alternative.h>
+#include <asm/cacheflush.h>
+#include <asm/inst.h>
+#include <asm/sections.h>
+
+int __read_mostly alternatives_patched;
+
+EXPORT_SYMBOL_GPL(alternatives_patched);
+
+#define MAX_PATCH_SIZE (((u8)(-1)) / LOONGARCH_INSN_SIZE)
+
+static int __initdata_or_module debug_alternative;
+
+static int __init debug_alt(char *str)
+{
+	debug_alternative = 1;
+	return 1;
+}
+__setup("debug-alternative", debug_alt);
+
+#define DPRINTK(fmt, args...)						\
+do {									\
+	if (debug_alternative)						\
+		printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args);	\
+} while (0)
+
+#define DUMP_WORDS(buf, count, fmt, args...)				\
+do {									\
+	if (unlikely(debug_alternative)) {				\
+		int _j;							\
+		union loongarch_instruction *_buf = buf;		\
+									\
+		if (!(count))						\
+			break;						\
+									\
+		printk(KERN_DEBUG fmt, ##args);				\
+		for (_j = 0; _j < count - 1; _j++)			\
+			printk(KERN_CONT "<%08x> ", _buf[_j].word);	\
+		printk(KERN_CONT "<%08x>\n", _buf[_j].word);		\
+	}								\
+} while (0)
+
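+/* Sign-extend the (SIDX + 1)-bit value X; bit SIDX is the sign bit. */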
+#define __SIGNEX(X, SIDX) ((X) >= (1 << SIDX) ? ~((1 << SIDX) - 1) | (X) : (X))
+#define SIGNEX16(X) __SIGNEX(((unsigned long)(X)), 15)
+#define SIGNEX20(X) __SIGNEX(((unsigned long)(X)), 19)
+#define SIGNEX21(X) __SIGNEX(((unsigned long)(X)), 20)
+#define SIGNEX26(X) __SIGNEX(((unsigned long)(X)), 25)
+
+static inline unsigned long bs_dest_16(unsigned long now, unsigned int si)
+{
+	return now + (SIGNEX16(si) << 2);
+}
+
+static inline unsigned long bs_dest_21(unsigned long now, unsigned int h, unsigned int l)
+{
+	return now + (SIGNEX21(h << 16 | l) << 2);
+}
+
+static inline unsigned long bs_dest_26(unsigned long now, unsigned int h, unsigned int l)
+{
+	return now + (SIGNEX26(h << 16 | l) << 2);
+}
+
+/* Use this to add nops to a buffer, then text_poke the whole buffer. */
+static void __init_or_module add_nops(union loongarch_instruction *insn, int count)
+{
+	while (count--) {
+		insn->word = INSN_NOP;
+		insn++;
+	}
+}
+
+/* Is the jump target inside the copied replacement instruction sequence? */
+static inline bool in_alt_jump(unsigned long jump, void *start, void *end)
+{
+	return jump >= (unsigned long)start && jump < (unsigned long)end;
+}
+
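+/*
+ * Replacement instructions run from a different address than the one
+ * they were assembled at, so recompute the offset of a PC-relative
+ * branch copied from @src for its new home at @dest and store the
+ * fixed-up instruction in @buf. Branches whose target lies inside the
+ * replacement sequence itself need no fixup.
+ */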
+static void __init_or_module recompute_jump(union loongarch_instruction *buf,
+		union loongarch_instruction *dest, union loongarch_instruction *src,
+		void *start, void *end)
+{
+	unsigned int si, si_l, si_h;
+	unsigned long cur_pc, jump_addr, pc;
+	long offset;
+
+	cur_pc = (unsigned long)src;
+	pc = (unsigned long)dest;
+
+	si_l = src->reg0i26_format.immediate_l;
+	si_h = src->reg0i26_format.immediate_h;
+	switch (src->reg0i26_format.opcode) {
+	case b_op:
+	case bl_op:
+		jump_addr = bs_dest_26(cur_pc, si_h, si_l);
+		if (in_alt_jump(jump_addr, start, end))
+			return;
+		offset = jump_addr - pc;
+		BUG_ON(offset < -SZ_128M || offset >= SZ_128M);
+		offset >>= 2;
+		buf->reg0i26_format.immediate_h = offset >> 16;
+		buf->reg0i26_format.immediate_l = offset;
+		return;
+	}
+
+	si_l = src->reg1i21_format.immediate_l;
+	si_h = src->reg1i21_format.immediate_h;
+	switch (src->reg1i21_format.opcode) {
+	case beqz_op:
+	case bnez_op:
+	case bceqz_op:
+		jump_addr = bs_dest_21(cur_pc, si_h, si_l);
+		if (in_alt_jump(jump_addr, start, end))
+			return;
+		offset = jump_addr - pc;
+		BUG_ON(offset < -SZ_4M || offset >= SZ_4M);
+		offset >>= 2;
+		buf->reg1i21_format.immediate_h = offset >> 16;
+		buf->reg1i21_format.immediate_l = offset;
+		return;
+	}
+
+	si = src->reg2i16_format.immediate;
+	switch (src->reg2i16_format.opcode) {
+	case beq_op:
+	case bne_op:
+	case blt_op:
+	case bge_op:
+	case bltu_op:
+	case bgeu_op:
+		jump_addr = bs_dest_16(cur_pc, si);
+		if (in_alt_jump(jump_addr, start, end))
+			return;
+		offset = jump_addr - pc;
+		BUG_ON(offset < -SZ_128K || offset >= SZ_128K);
+		offset >>= 2;
+                buf->reg2i16_format.immediate = offset;
+		return;
+	}
+}
+
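+/*
+ * Copy @nr replacement instructions from @src into @buf, fixing up
+ * relative branches for their eventual home at @dest. PC-relative
+ * data instructions (pcaddi etc.) cannot be relocated and are rejected.
+ */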
+static int __init_or_module copy_alt_insns(union loongarch_instruction *buf,
+	union loongarch_instruction *dest, union loongarch_instruction *src, int nr)
+{
+	int i;
+
+	for (i = 0; i < nr; i++) {
+		buf[i].word = src[i].word;
+
+		if (is_branch_ins(&src[i]) &&
+		    src[i].reg2i16_format.opcode != jirl_op) {
+			recompute_jump(&buf[i], &dest[i], &src[i], src, src + nr);
+		} else if (is_pc_ins(&src[i])) {
+			pr_err("PC-relative instructions are not supported at present!\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * text_poke_early - Update instructions on a live kernel at boot time
+ *
+ * When you use this code to patch more than one byte of an instruction
+ * you need to make sure that other CPUs cannot execute this code in parallel.
+ * Also no thread must be currently preempted in the middle of these
+ * instructions. And on the local CPU you need to be protected against NMI or MCE
+ * handlers seeing an inconsistent instruction while you patch.
+ */
+static void *__init_or_module text_poke_early(union loongarch_instruction *insn,
+			      union loongarch_instruction *buf, unsigned int nr)
+{
+	int i;
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	for (i = 0; i < nr; i++)
+		insn[i].word = buf[i].word;
+
+	local_irq_restore(flags);
+
+	wbflush();
+	flush_icache_range((unsigned long)insn, (unsigned long)(insn + nr));
+
+	return insn;
+}
+
+/*
+ * Replace instructions with better alternatives for this CPU type. This runs
+ * before SMP is initialized to avoid SMP problems with self modifying code.
+ * This implies that asymmetric systems where APs have fewer capabilities than
+ * the boot processor are not handled. Tough. Make sure you disable such
+ * features by hand.
+ */
+void __init_or_module apply_alternatives(struct alt_instr *start, struct alt_instr *end)
+{
+	struct alt_instr *a;
+	unsigned int nr_instr, nr_repl, nr_insnbuf;
+	union loongarch_instruction *instr, *replacement;
+	union loongarch_instruction insnbuf[MAX_PATCH_SIZE];
+
+	DPRINTK("alt table %px, -> %px", start, end);
+	/*
+	 * The scan order should be from start to end. Alternative code
+	 * scanned later can overwrite alternative code scanned earlier.
+	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
+	 * patch code.
+	 *
+	 * So be careful if you want to change the scan order to any other
+	 * order.
+	 */
+	for (a = start; a < end; a++) {
+		nr_insnbuf = 0;
+
+		instr = (void *)&a->instr_offset + a->instr_offset;
+		replacement = (void *)&a->replace_offset + a->replace_offset;
+
+		BUG_ON(a->instrlen > sizeof(insnbuf));
+		BUG_ON(a->instrlen & 0x3);
+		BUG_ON(a->replacementlen & 0x3);
+
+		nr_instr = a->instrlen / LOONGARCH_INSN_SIZE;
+		nr_repl = a->replacementlen / LOONGARCH_INSN_SIZE;
+
+		if (!cpu_has(a->feature)) {
+			DPRINTK("feat not exist: %d, old: (%px len: %d), repl: (%px, len: %d)",
+				a->feature, instr, a->instrlen,
+				replacement, a->replacementlen);
+
+			continue;
+		}
+
+		DPRINTK("feat: %d, old: (%px len: %d), repl: (%px, len: %d)",
+			a->feature, instr, a->instrlen,
+			replacement, a->replacementlen);
+
+		DUMP_WORDS(instr, nr_instr, "%px: old_insn: ", instr);
+		DUMP_WORDS(replacement, nr_repl, "%px: rpl_insn: ", replacement);
+
+		copy_alt_insns(insnbuf, instr, replacement, nr_repl);
+		nr_insnbuf = nr_repl;
+
+		if (nr_instr > nr_repl) {
+			add_nops(insnbuf + nr_repl, nr_instr - nr_repl);
+			nr_insnbuf += nr_instr - nr_repl;
+		}
+		DUMP_WORDS(insnbuf, nr_insnbuf, "%px: final_insn: ", instr);
+
+		text_poke_early(instr, insnbuf, nr_insnbuf);
+	}
+}
+
+void __init alternative_instructions(void)
+{
+	apply_alternatives(__alt_instructions, __alt_instructions_end);
+
+	alternatives_patched = 1;
+}
diff --git a/arch/loongarch/kernel/module.c b/arch/loongarch/kernel/module.c
index 097595b2fc14..669e750917a3 100644
--- a/arch/loongarch/kernel/module.c
+++ b/arch/loongarch/kernel/module.c
@@ -17,6 +17,7 @@
 #include <linux/fs.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
+#include <asm/alternative.h>
 
 static int rela_stack_push(s64 stack_value, s64 *rela_stack, size_t *rela_stack_top)
 {
@@ -456,3 +457,18 @@ void *module_alloc(unsigned long size)
 	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
 			GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE, __builtin_return_address(0));
 }
+
+int module_finalize(const Elf_Ehdr *hdr,
+		    const Elf_Shdr *sechdrs,
+		    struct module *mod)
+{
+	const Elf_Shdr *s, *se;
+	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
+		if (!strcmp(".altinstructions", secstrs + s->sh_name))
+			apply_alternatives((void *)s->sh_addr, (void *)s->sh_addr + s->sh_size);
+	}
+
+	return 0;
+}
diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
index 1eb63fa9bc81..96b6cb5db004 100644
--- a/arch/loongarch/kernel/setup.c
+++ b/arch/loongarch/kernel/setup.c
@@ -31,7 +31,9 @@
 #include <linux/swiotlb.h>
 
 #include <asm/addrspace.h>
+#include <asm/alternative.h>
 #include <asm/bootinfo.h>
+#include <asm/bugs.h>
 #include <asm/cache.h>
 #include <asm/cpu.h>
 #include <asm/dma.h>
@@ -80,6 +82,11 @@ const char *get_system_type(void)
 	return "generic-loongson-machine";
 }
 
+void __init check_bugs(void)
+{
+	alternative_instructions();
+}
+
 static const char *dmi_string_parse(const struct dmi_header *dm, u8 s)
 {
 	const u8 *bp = ((u8 *) dm) + dm->length;
diff --git a/arch/loongarch/kernel/vmlinux.lds.S b/arch/loongarch/kernel/vmlinux.lds.S
index efecda0c2361..733b16e8d55d 100644
--- a/arch/loongarch/kernel/vmlinux.lds.S
+++ b/arch/loongarch/kernel/vmlinux.lds.S
@@ -54,6 +54,18 @@ SECTIONS
 	. = ALIGN(PECOFF_SEGMENT_ALIGN);
 	_etext = .;
 
+	/*
+	 * struct alt_inst entries. From the header (alternative.h):
+	 * "Alternative instructions for different CPU types or capabilities"
+	 * Think locking instructions on spinlocks.
+	 */
+	. = ALIGN(4);
+	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
+		__alt_instructions = .;
+		*(.altinstructions)
+		__alt_instructions_end = .;
+	}
+
 	.got : ALIGN(16) { *(.got) }
 	.plt : ALIGN(16) { *(.plt) }
 	.got.plt : ALIGN(16) { *(.got.plt) }
-- 
2.31.1



* [PATCH 2/2] LoongArch: Use alternative to optimize libraries
  2022-10-24  7:04 [PATCH 1/2] LoongArch: Add alternative runtime patching mechanism Huacai Chen
@ 2022-10-24  7:04 ` Huacai Chen
  2022-10-24 12:51 ` [PATCH 1/2] LoongArch: Add alternative runtime patching mechanism Jinyang He
  1 sibling, 0 replies; 4+ messages in thread
From: Huacai Chen @ 2022-10-24  7:04 UTC (permalink / raw)
  To: Arnd Bergmann, Huacai Chen
  Cc: loongarch, linux-arch, Xuefeng Li, Guo Ren, Xuerui Wang,
	Jiaxun Yang, linux-kernel, Huacai Chen, Jun Yi

Use the alternative mechanism to optimize the common library routines
according to whether the CPU has the UAL (hardware unaligned access)
feature, including memset(), memcpy(), memmove(), copy_user() and
clear_user().
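
Each exported entry point becomes a one-instruction trampoline that is
patched at boot time; for example, memcpy() dispatches as follows:

	SYM_FUNC_START(memcpy)
		ALTERNATIVE	"b __memcpy_generic", \
				"b __memcpy_fast", CPU_FEATURE_UAL
	SYM_FUNC_END(memcpy)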

Signed-off-by: Jun Yi <yijun@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
---
 arch/loongarch/include/asm/string.h |   5 ++
 arch/loongarch/lib/Makefile         |   3 +-
 arch/loongarch/lib/clear_user.S     |  70 ++++++++++++++--
 arch/loongarch/lib/copy_user.S      |  91 +++++++++++++++++++--
 arch/loongarch/lib/memcpy.S         |  95 ++++++++++++++++++++++
 arch/loongarch/lib/memmove.S        | 121 ++++++++++++++++++++++++++++
 arch/loongarch/lib/memset.S         |  91 +++++++++++++++++++++
 7 files changed, 465 insertions(+), 11 deletions(-)
 create mode 100644 arch/loongarch/lib/memcpy.S
 create mode 100644 arch/loongarch/lib/memmove.S
 create mode 100644 arch/loongarch/lib/memset.S

diff --git a/arch/loongarch/include/asm/string.h b/arch/loongarch/include/asm/string.h
index b07e60ded957..7b29cc9c70aa 100644
--- a/arch/loongarch/include/asm/string.h
+++ b/arch/loongarch/include/asm/string.h
@@ -5,8 +5,13 @@
 #ifndef _ASM_STRING_H
 #define _ASM_STRING_H
 
+#define __HAVE_ARCH_MEMSET
 extern void *memset(void *__s, int __c, size_t __count);
+
+#define __HAVE_ARCH_MEMCPY
 extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
+
+#define __HAVE_ARCH_MEMMOVE
 extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
 
 #endif /* _ASM_STRING_H */
diff --git a/arch/loongarch/lib/Makefile b/arch/loongarch/lib/Makefile
index 867895530340..40bde632900f 100644
--- a/arch/loongarch/lib/Makefile
+++ b/arch/loongarch/lib/Makefile
@@ -3,4 +3,5 @@
 # Makefile for LoongArch-specific library files.
 #
 
-lib-y	+= delay.o clear_user.o copy_user.o dump_tlb.o unaligned.o
+lib-y	+= delay.o memset.o memcpy.o memmove.o \
+	   clear_user.o copy_user.o dump_tlb.o unaligned.o
diff --git a/arch/loongarch/lib/clear_user.S b/arch/loongarch/lib/clear_user.S
index 167823b21def..9462fbb211d3 100644
--- a/arch/loongarch/lib/clear_user.S
+++ b/arch/loongarch/lib/clear_user.S
@@ -3,25 +3,37 @@
  * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
  */
 
+#include <asm/alternative-asm.h>
 #include <asm/asm.h>
 #include <asm/asmmacro.h>
 #include <asm/asm-extable.h>
+#include <asm/cpu.h>
 #include <asm/export.h>
 #include <asm/regdef.h>
 
-.irp to, 0
+.irp to, 0, 1, 2, 3, 4, 5, 6, 7
 .L_fixup_handle_\to\():
 	addi.d	a0, a1, (\to) * (-8)
 	jr	ra
 .endr
 
+SYM_FUNC_START(__clear_user)
+	/*
+	 * Some CPUs support hardware unaligned access
+	 */
+	ALTERNATIVE	"b __clear_user_generic",	\
+			"b __clear_user_fast", CPU_FEATURE_UAL
+SYM_FUNC_END(__clear_user)
+
+EXPORT_SYMBOL(__clear_user)
+
 /*
- * unsigned long __clear_user(void *addr, size_t size)
+ * unsigned long __clear_user_generic(void *addr, size_t size)
  *
  * a0: addr
  * a1: size
  */
-SYM_FUNC_START(__clear_user)
+SYM_FUNC_START(__clear_user_generic)
 	beqz	a1, 2f
 
 1:	st.b	zero, a0, 0
@@ -33,6 +45,54 @@ SYM_FUNC_START(__clear_user)
 	jr	ra
 
 	_asm_extable 1, .L_fixup_handle_0
-SYM_FUNC_END(__clear_user)
+SYM_FUNC_END(__clear_user_generic)
 
-EXPORT_SYMBOL(__clear_user)
+/*
+ * unsigned long __clear_user_fast(void *addr, unsigned long size)
+ *
+ * a0: addr
+ * a1: size
+ */
+SYM_FUNC_START(__clear_user_fast)
+	beqz	a1, 10f
+
+	ori	a2, zero, 64
+	blt	a1, a2, 9f
+
+	/* set 64 bytes at a time */
+1:	st.d	zero, a0, 0
+2:	st.d	zero, a0, 8
+3:	st.d	zero, a0, 16
+4:	st.d	zero, a0, 24
+5:	st.d	zero, a0, 32
+6:	st.d	zero, a0, 40
+7:	st.d	zero, a0, 48
+8:	st.d	zero, a0, 56
+
+	addi.d	a0, a0, 64
+	addi.d	a1, a1, -64
+	bge	a1, a2, 1b
+
+	beqz	a1, 10f
+
+	/* set the remaining bytes */
+9:	st.b	zero, a0, 0
+	addi.d	a0, a0, 1
+	addi.d	a1, a1, -1
+	bgt	a1, zero, 9b
+
+	/* return */
+10:	move	a0, a1
+	jr	ra
+
+	/* fixup and ex_table */
+	_asm_extable 1b, .L_fixup_handle_0
+	_asm_extable 2b, .L_fixup_handle_1
+	_asm_extable 3b, .L_fixup_handle_2
+	_asm_extable 4b, .L_fixup_handle_3
+	_asm_extable 5b, .L_fixup_handle_4
+	_asm_extable 6b, .L_fixup_handle_5
+	_asm_extable 7b, .L_fixup_handle_6
+	_asm_extable 8b, .L_fixup_handle_7
+	_asm_extable 9b, .L_fixup_handle_0
+SYM_FUNC_END(__clear_user_fast)
diff --git a/arch/loongarch/lib/copy_user.S b/arch/loongarch/lib/copy_user.S
index 5d7bfa8d53d2..bcc01d453767 100644
--- a/arch/loongarch/lib/copy_user.S
+++ b/arch/loongarch/lib/copy_user.S
@@ -3,26 +3,38 @@
  * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
  */
 
+#include <asm/alternative-asm.h>
 #include <asm/asm.h>
 #include <asm/asmmacro.h>
 #include <asm/asm-extable.h>
+#include <asm/cpu.h>
 #include <asm/export.h>
 #include <asm/regdef.h>
 
-.irp to, 0
+.irp to, 0, 1, 2, 3, 4, 5, 6, 7
 .L_fixup_handle_\to\():
 	addi.d	a0, a2, (\to) * (-8)
 	jr	ra
 .endr
 
+SYM_FUNC_START(__copy_user)
+	/*
+	 * Some CPUs support hardware unaligned access
+	 */
+	ALTERNATIVE	"b __copy_user_generic",	\
+			"b __copy_user_fast", CPU_FEATURE_UAL
+SYM_FUNC_END(__copy_user)
+
+EXPORT_SYMBOL(__copy_user)
+
 /*
- * unsigned long __copy_user(void *to, const void *from, size_t n)
+ * unsigned long __copy_user_generic(void *to, const void *from, size_t n)
  *
  * a0: to
  * a1: from
  * a2: n
  */
-SYM_FUNC_START(__copy_user)
+SYM_FUNC_START(__copy_user_generic)
 	beqz	a2, 3f
 
 1:	ld.b	t0, a1, 0
@@ -37,6 +49,75 @@ SYM_FUNC_START(__copy_user)
 
 	_asm_extable 1, .L_fixup_handle_0
 	_asm_extable 2, .L_fixup_handle_0
-SYM_FUNC_END(__copy_user)
+SYM_FUNC_END(__copy_user_generic)
 
-EXPORT_SYMBOL(__copy_user)
+/*
+ * unsigned long __copy_user_fast(void *to, const void *from, unsigned long n)
+ *
+ * a0: to
+ * a1: from
+ * a2: n
+ */
+SYM_FUNC_START(__copy_user_fast)
+	beqz	a2, 19f
+
+	ori	a3, zero, 64
+	blt	a2, a3, 17f
+
+	/* copy 64 bytes at a time */
+1:	ld.d	t0, a1, 0
+2:	ld.d	t1, a1, 8
+3:	ld.d	t2, a1, 16
+4:	ld.d	t3, a1, 24
+5:	ld.d	t4, a1, 32
+6:	ld.d	t5, a1, 40
+7:	ld.d	t6, a1, 48
+8:	ld.d	t7, a1, 56
+9:	st.d	t0, a0, 0
+10:	st.d	t1, a0, 8
+11:	st.d	t2, a0, 16
+12:	st.d	t3, a0, 24
+13:	st.d	t4, a0, 32
+14:	st.d	t5, a0, 40
+15:	st.d	t6, a0, 48
+16:	st.d	t7, a0, 56
+
+	addi.d	a0, a0, 64
+	addi.d	a1, a1, 64
+	addi.d	a2, a2, -64
+	bge	a2, a3, 1b
+
+	beqz	a2, 19f
+
+	/* copy the remaining bytes */
+17:	ld.b	t0, a1, 0
+18:	st.b	t0, a0, 0
+	addi.d	a0, a0, 1
+	addi.d	a1, a1, 1
+	addi.d	a2, a2, -1
+	bgt	a2, zero, 17b
+
+	/* return */
+19:	move	a0, a2
+	jr	ra
+
+	/* fixup and ex_table */
+	_asm_extable 1b, .L_fixup_handle_0
+	_asm_extable 2b, .L_fixup_handle_1
+	_asm_extable 3b, .L_fixup_handle_2
+	_asm_extable 4b, .L_fixup_handle_3
+	_asm_extable 5b, .L_fixup_handle_4
+	_asm_extable 6b, .L_fixup_handle_5
+	_asm_extable 7b, .L_fixup_handle_6
+	_asm_extable 8b, .L_fixup_handle_7
+	_asm_extable 9b, .L_fixup_handle_0
+	_asm_extable 10b, .L_fixup_handle_1
+	_asm_extable 11b, .L_fixup_handle_2
+	_asm_extable 12b, .L_fixup_handle_3
+	_asm_extable 13b, .L_fixup_handle_4
+	_asm_extable 14b, .L_fixup_handle_5
+	_asm_extable 15b, .L_fixup_handle_6
+	_asm_extable 16b, .L_fixup_handle_7
+	_asm_extable 17b, .L_fixup_handle_0
+	_asm_extable 18b, .L_fixup_handle_0
+SYM_FUNC_END(__copy_user_fast)
diff --git a/arch/loongarch/lib/memcpy.S b/arch/loongarch/lib/memcpy.S
new file mode 100644
index 000000000000..7c07d595ee89
--- /dev/null
+++ b/arch/loongarch/lib/memcpy.S
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <asm/alternative-asm.h>
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/cpu.h>
+#include <asm/export.h>
+#include <asm/regdef.h>
+
+SYM_FUNC_START(memcpy)
+	/*
+	 * Some CPUs support hardware unaligned access
+	 */
+	ALTERNATIVE	"b __memcpy_generic", \
+			"b __memcpy_fast", CPU_FEATURE_UAL
+SYM_FUNC_END(memcpy)
+
+EXPORT_SYMBOL(memcpy)
+
+/*
+ * void *__memcpy_generic(void *dst, const void *src, size_t n)
+ *
+ * a0: dst
+ * a1: src
+ * a2: n
+ */
+SYM_FUNC_START(__memcpy_generic)
+	move	a3, a0
+	beqz	a2, 2f
+
+1:	ld.b	t0, a1, 0
+	st.b	t0, a0, 0
+	addi.d	a0, a0, 1
+	addi.d	a1, a1, 1
+	addi.d	a2, a2, -1
+	bgt	a2, zero, 1b
+
+2:	move	a0, a3
+	jr	ra
+SYM_FUNC_END(__memcpy_generic)
+
+/*
+ * void *__memcpy_fast(void *dst, const void *src, size_t n)
+ *
+ * a0: dst
+ * a1: src
+ * a2: n
+ */
+SYM_FUNC_START(__memcpy_fast)
+	move	a3, a0
+	beqz	a2, 3f
+
+	ori	a4, zero, 64
+	blt	a2, a4, 2f
+
+	/* copy 64 bytes at a time */
+1:	ld.d	t0, a1, 0
+	ld.d	t1, a1, 8
+	ld.d	t2, a1, 16
+	ld.d	t3, a1, 24
+	ld.d	t4, a1, 32
+	ld.d	t5, a1, 40
+	ld.d	t6, a1, 48
+	ld.d	t7, a1, 56
+	st.d	t0, a0, 0
+	st.d	t1, a0, 8
+	st.d	t2, a0, 16
+	st.d	t3, a0, 24
+	st.d	t4, a0, 32
+	st.d	t5, a0, 40
+	st.d	t6, a0, 48
+	st.d	t7, a0, 56
+
+	addi.d	a0, a0, 64
+	addi.d	a1, a1, 64
+	addi.d	a2, a2, -64
+	bge	a2, a4, 1b
+
+	beqz	a2, 3f
+
+	/* copy the remaining bytes */
+2:	ld.b	t0, a1, 0
+	st.b	t0, a0, 0
+	addi.d	a0, a0, 1
+	addi.d	a1, a1, 1
+	addi.d	a2, a2, -1
+	bgt	a2, zero, 2b
+
+	/* return */
+3:	move	a0, a3
+	jr	ra
+SYM_FUNC_END(__memcpy_fast)
diff --git a/arch/loongarch/lib/memmove.S b/arch/loongarch/lib/memmove.S
new file mode 100644
index 000000000000..6ffdb46da78f
--- /dev/null
+++ b/arch/loongarch/lib/memmove.S
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <asm/alternative-asm.h>
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/cpu.h>
+#include <asm/export.h>
+#include <asm/regdef.h>
+
+SYM_FUNC_START(memmove)
+	blt	a0, a1, 1f	/* dst < src, memcpy */
+	blt	a1, a0, 3f	/* src < dst, rmemcpy */
+	jr	ra		/* dst == src, return */
+
+	/* if (src - dst) < 64, copy 1 byte at a time */
+1:	ori	a3, zero, 64
+	sub.d	t0, a1, a0
+	blt	t0, a3, 2f
+	b	memcpy
+2:	b	__memcpy_generic
+
+	/* if (dst - src) < 64, copy 1 byte at a time */
+3:	ori	a3, zero, 64
+	sub.d	t0, a0, a1
+	blt	t0, a3, 4f
+	b	rmemcpy
+4:	b	__rmemcpy_generic
+SYM_FUNC_END(memmove)
+
+EXPORT_SYMBOL(memmove)
+
+SYM_FUNC_START(rmemcpy)
+	/*
+	 * Some CPUs support hardware unaligned access
+	 */
+	ALTERNATIVE	"b __rmemcpy_generic", \
+			"b __rmemcpy_fast", CPU_FEATURE_UAL
+SYM_FUNC_END(rmemcpy)
+
+/*
+ * void *__rmemcpy_generic(void *dst, const void *src, size_t n)
+ *
+ * a0: dst
+ * a1: src
+ * a2: n
+ */
+SYM_FUNC_START(__rmemcpy_generic)
+	move	a3, a0
+	beqz	a2, 2f
+
+	add.d	a0, a0, a2
+	add.d	a1, a1, a2
+
+1:	ld.b	t0, a1, -1
+	st.b	t0, a0, -1
+	addi.d	a0, a0, -1
+	addi.d	a1, a1, -1
+	addi.d	a2, a2, -1
+	bgt	a2, zero, 1b
+
+2:	move	a0, a3
+	jr	ra
+SYM_FUNC_END(__rmemcpy_generic)
+
+/*
+ * void *__rmemcpy_fast(void *dst, const void *src, size_t n)
+ *
+ * a0: dst
+ * a1: src
+ * a2: n
+ */
+SYM_FUNC_START(__rmemcpy_fast)
+	move	a3, a0
+	beqz	a2, 3f
+
+	add.d	a0, a0, a2
+	add.d	a1, a1, a2
+
+	ori	a4, zero, 64
+	blt	a2, a4, 2f
+
+	/* copy 64 bytes at a time */
+1:	ld.d	t0, a1, -8
+	ld.d	t1, a1, -16
+	ld.d	t2, a1, -24
+	ld.d	t3, a1, -32
+	ld.d	t4, a1, -40
+	ld.d	t5, a1, -48
+	ld.d	t6, a1, -56
+	ld.d	t7, a1, -64
+	st.d	t0, a0, -8
+	st.d	t1, a0, -16
+	st.d	t2, a0, -24
+	st.d	t3, a0, -32
+	st.d	t4, a0, -40
+	st.d	t5, a0, -48
+	st.d	t6, a0, -56
+	st.d	t7, a0, -64
+
+	addi.d	a0, a0, -64
+	addi.d	a1, a1, -64
+	addi.d	a2, a2, -64
+	bge	a2, a4, 1b
+
+	beqz	a2, 3f
+
+	/* copy the remaining bytes */
+2:	ld.b	t0, a1, -1
+	st.b	t0, a0, -1
+	addi.d	a0, a0, -1
+	addi.d	a1, a1, -1
+	addi.d	a2, a2, -1
+	bgt	a2, zero, 2b
+
+	/* return */
+3:	move	a0, a3
+	jr	ra
+SYM_FUNC_END(__rmemcpy_fast)
diff --git a/arch/loongarch/lib/memset.S b/arch/loongarch/lib/memset.S
new file mode 100644
index 000000000000..e7cb4ea3747d
--- /dev/null
+++ b/arch/loongarch/lib/memset.S
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <asm/alternative-asm.h>
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/cpu.h>
+#include <asm/export.h>
+#include <asm/regdef.h>
+
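+/* Broadcast the low byte of the given register to all 8 bytes. */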
+.macro fill_to_64 r0
+	bstrins.d \r0, \r0, 15, 8
+	bstrins.d \r0, \r0, 31, 16
+	bstrins.d \r0, \r0, 63, 32
+.endm
+
+SYM_FUNC_START(memset)
+	/*
+	 * Some CPUs support hardware unaligned access
+	 */
+	ALTERNATIVE	"b __memset_generic", \
+			"b __memset_fast", CPU_FEATURE_UAL
+SYM_FUNC_END(memset)
+
+EXPORT_SYMBOL(memset)
+
+/*
+ * void *__memset_generic(void *s, int c, size_t n)
+ *
+ * a0: s
+ * a1: c
+ * a2: n
+ */
+SYM_FUNC_START(__memset_generic)
+	move	a3, a0
+	beqz	a2, 2f
+
+1:	st.b	a1, a0, 0
+	addi.d	a0, a0, 1
+	addi.d	a2, a2, -1
+	bgt	a2, zero, 1b
+
+2:	move	a0, a3
+	jr	ra
+SYM_FUNC_END(__memset_generic)
+
+/*
+ * void *__memset_fast(void *s, int c, size_t n)
+ *
+ * a0: s
+ * a1: c
+ * a2: n
+ */
+SYM_FUNC_START(__memset_fast)
+	move	a3, a0
+	beqz	a2, 3f
+
+	ori	a4, zero, 64
+	blt	a2, a4, 2f
+
+	/* fill a1 to 64 bits */
+	fill_to_64 a1
+
+	/* set 64 bytes at a time */
+1:	st.d	a1, a0, 0
+	st.d	a1, a0, 8
+	st.d	a1, a0, 16
+	st.d	a1, a0, 24
+	st.d	a1, a0, 32
+	st.d	a1, a0, 40
+	st.d	a1, a0, 48
+	st.d	a1, a0, 56
+
+	addi.d	a0, a0, 64
+	addi.d	a2, a2, -64
+	bge	a2, a4, 1b
+
+	beqz	a2, 3f
+
+	/* set the remaining bytes */
+2:	st.b	a1, a0, 0
+	addi.d	a0, a0, 1
+	addi.d	a2, a2, -1
+	bgt	a2, zero, 2b
+
+	/* return */
+3:	move	a0, a3
+	jr	ra
+SYM_FUNC_END(__memset_fast)
-- 
2.31.1



* Re: [PATCH 1/2] LoongArch: Add alternative runtime patching mechanism
  2022-10-24  7:04 [PATCH 1/2] LoongArch: Add alternative runtime patching mechanism Huacai Chen
  2022-10-24  7:04 ` [PATCH 2/2] LoongArch: Use alternative to optimize libraries Huacai Chen
@ 2022-10-24 12:51 ` Jinyang He
  2022-10-24 14:57   ` Huacai Chen
  1 sibling, 1 reply; 4+ messages in thread
From: Jinyang He @ 2022-10-24 12:51 UTC (permalink / raw)
  To: Huacai Chen, Arnd Bergmann, Huacai Chen
  Cc: loongarch, linux-arch, Xuefeng Li, Guo Ren, Xuerui Wang,
	Jiaxun Yang, linux-kernel, Jun Yi

Hi, Huacai,


On 2022/10/24 3:04 PM, Huacai Chen wrote:
> Introduce the "alternative" mechanism from ARM64 and x86 for LoongArch
> to apply runtime patching. The main purpose of this patch is to provide
> a framework. In the future we can use this mechanism (i.e., the ALTERNATIVE
> and ALTERNATIVE_2 macros) to optimize hotspot functions according to CPU
> features.
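>
> For example, with a hypothetical CPU_FEATURE_FOO capability bit, a hot
> path could be patched at boot time like this (an illustrative sketch
> only, not part of this patch):
>
> 	/* Runs "dbar 0" by default; patched to "nop" if the CPU has FOO. */
> 	alternative("dbar 0", "nop", CPU_FEATURE_FOO);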
>
> Signed-off-by: Jun Yi <yijun@loongson.cn>
> Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
> ---
>   arch/loongarch/include/asm/alternative-asm.h |  82 ++++++
>   arch/loongarch/include/asm/alternative.h     | 176 ++++++++++++
>   arch/loongarch/include/asm/bugs.h            |  15 ++
>   arch/loongarch/include/asm/inst.h            |  10 +
>   arch/loongarch/kernel/Makefile               |   2 +-
>   arch/loongarch/kernel/alternative.c          | 266 +++++++++++++++++++
>   arch/loongarch/kernel/module.c               |  16 ++
>   arch/loongarch/kernel/setup.c                |   7 +
>   arch/loongarch/kernel/vmlinux.lds.S          |  12 +
>   9 files changed, 585 insertions(+), 1 deletion(-)
>   create mode 100644 arch/loongarch/include/asm/alternative-asm.h
>   create mode 100644 arch/loongarch/include/asm/alternative.h
>   create mode 100644 arch/loongarch/include/asm/bugs.h
>   create mode 100644 arch/loongarch/kernel/alternative.c
>
> diff --git a/arch/loongarch/include/asm/alternative-asm.h b/arch/loongarch/include/asm/alternative-asm.h
> new file mode 100644
> index 000000000000..f0f32ace29b1
> --- /dev/null
> +++ b/arch/loongarch/include/asm/alternative-asm.h
> @@ -0,0 +1,82 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +#ifndef _ASM_ALTERNATIVE_ASM_H
> +#define _ASM_ALTERNATIVE_ASM_H
> +
> +#ifdef __ASSEMBLY__
> +
> +#include <asm/asm.h>
> +
> +/*
> + * Issue one struct alt_instr descriptor entry (need to put it into
> + * the section .altinstructions, see below). This entry contains
> + * enough information for the alternatives patching code to patch an
> + * instruction. See apply_alternatives().
> + */
> +.macro altinstruction_entry orig alt feature orig_len alt_len
> +	.long \orig - .
> +	.long \alt - .
> +	.2byte \feature
> +	.byte \orig_len
> +	.byte \alt_len
I tried '.byte 256' and gas warned but still finished compiling:
  " warning: value 0x0000000000000100 truncated to 0x0000000000000000 "
How about adding an '.if ... .error ... .endif' check here to validate the length?
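
Something like this (untested) could catch it at build time:

.macro alt_len_check start, end
	.if \end - \start > 255
		.error "alternative replacement longer than 255 bytes"
	.endif
.endm
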
> +.endm
> +
> +/*
> + * Define an alternative between two instructions. If @feature is
> + * present, early code in apply_alternatives() replaces @oldinstr with
> + * @newinstr. ".fill" directive takes care of proper instruction padding
> + * in case @newinstr is longer than @oldinstr.
> + */
> +.macro ALTERNATIVE oldinstr, newinstr, feature
> +140 :
> +	\oldinstr
> +141 :
> +	.fill - (((144f-143f)-(141b-140b)) > 0) * ((144f-143f)-(141b-140b)) / 4, 4, 0x03400000
> +142 :
> +
> +	.pushsection .altinstructions, "a"
> +	altinstruction_entry 140b, 143f, \feature, 142b-140b, 144f-143f
> +	.popsection
> +
> +	.subsection 1
> +143 :
> +	\newinstr
> +144 :
> +	.previous
> +.endm
> +
> +#define old_len			(141b-140b)
> +#define new_len1		(144f-143f)
> +#define new_len2		(145f-144f)
> +
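> +/* Branchless max(a, b): in gas, (a) < (b) evaluates to -1 when true. */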
> +#define alt_max_short(a, b)	((a) ^ (((a) ^ (b)) & -(-((a) < (b)))))
> +
> +/*
> + * Same as ALTERNATIVE macro above but for two alternatives. If CPU
> + * has @feature1, it replaces @oldinstr with @newinstr1. If CPU has
> + * @feature2, it replaces @oldinstr with @newinstr2.
> + */
> +.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2
> +140 :
> +	\oldinstr
> +141 :
> +	.fill - ((alt_max_short(new_len1, new_len2) - (old_len)) > 0) * \
> +		(alt_max_short(new_len1, new_len2) - (old_len)) / 4, 4, 0x03400000
> +142 :
> +
> +	.pushsection .altinstructions, "a"
> +	altinstruction_entry 140b, 143f, \feature1, 142b-140b, 144f-143f
> +	altinstruction_entry 140b, 144f, \feature2, 142b-140b, 145f-144f
> +	.popsection
> +
> +	.subsection 1
> +143 :
> +	\newinstr1
> +144 :
> +	\newinstr2
> +145 :
> +	.previous
> +.endm
> +
> +#endif  /*  __ASSEMBLY__  */
> +
> +#endif /* _ASM_ALTERNATIVE_ASM_H */
> diff --git a/arch/loongarch/include/asm/alternative.h b/arch/loongarch/include/asm/alternative.h
> new file mode 100644
> index 000000000000..b4fe66c7067e
> --- /dev/null
> +++ b/arch/loongarch/include/asm/alternative.h
> @@ -0,0 +1,176 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +#ifndef _ASM_ALTERNATIVE_H
> +#define _ASM_ALTERNATIVE_H
> +
> +#ifndef __ASSEMBLY__
> +
> +#include <linux/types.h>
> +#include <linux/stddef.h>
> +#include <linux/stringify.h>
> +#include <asm/asm.h>
> +
> +struct alt_instr {
> +	s32 instr_offset;	/* offset to original instruction */
> +	s32 replace_offset;	/* offset to replacement instruction */
> +	u16 feature;		/* feature bit set for replacement */
> +	u8  instrlen;		/* length of original instruction */
> +	u8  replacementlen;	/* length of new instruction */
> +} __packed;
> +
> +/*
> + * Debug flag that can be tested to see whether alternative
> + * instructions were patched in already:
> + */
> +extern int alternatives_patched;
> +extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
> +
> +extern void alternative_instructions(void);
> +extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
> +
> +#define b_replacement(num)	"664"#num
> +#define e_replacement(num)	"665"#num
> +
> +#define alt_end_marker		"663"
> +#define alt_slen		"662b-661b"
> +#define alt_total_slen		alt_end_marker"b-661b"
> +#define alt_rlen(num)		e_replacement(num)"f-"b_replacement(num)"f"
> +
> +#define __OLDINSTR(oldinstr, num)					\
> +	"661:\n\t" oldinstr "\n662:\n"					\
> +	".fill -(((" alt_rlen(num) ")-(" alt_slen ")) > 0) * "		\
> +		"((" alt_rlen(num) ")-(" alt_slen ")) / 4, 4, 0x03400000\n"
> +
> +#define OLDINSTR(oldinstr, num)						\
> +	__OLDINSTR(oldinstr, num)					\
> +	alt_end_marker ":\n"
> +
> +#define alt_max_short(a, b)	"((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") < (" b ")))))"
> +
> +/*
> + * Pad the second replacement alternative with additional NOPs if it is
> + * additionally longer than the first replacement alternative.
> + */
> +#define OLDINSTR_2(oldinstr, num1, num2) \
> +	"661:\n\t" oldinstr "\n662:\n"								\
> +	".fill -((" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")) > 0) * "	\
> +		"(" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")) / 4, "	\
> +		"4, 0x03400000\n"	\
> +	alt_end_marker ":\n"
> +
> +#define ALTINSTR_ENTRY(feature, num)					      \
> +	" .long 661b - .\n"				/* label           */ \
> +	" .long " b_replacement(num)"f - .\n"		/* new instruction */ \
> +	" .2byte " __stringify(feature) "\n"		/* feature bit     */ \
> +	" .byte " alt_total_slen "\n"			/* source len      */ \
> +	" .byte " alt_rlen(num) "\n"			/* replacement len */
> +
> +#define ALTINSTR_REPLACEMENT(newinstr, feature, num)	/* replacement */     \
> +	b_replacement(num)":\n\t" newinstr "\n" e_replacement(num) ":\n\t"
> +
> +/* alternative assembly primitive: */
> +#define ALTERNATIVE(oldinstr, newinstr, feature)			\
> +	OLDINSTR(oldinstr, 1)						\
> +	".pushsection .altinstructions,\"a\"\n"				\
> +	ALTINSTR_ENTRY(feature, 1)					\
> +	".popsection\n"							\
> +	".subsection 1\n" \
> +	ALTINSTR_REPLACEMENT(newinstr, feature, 1)			\
> +	".previous\n"
> +
> +#define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\
> +	OLDINSTR_2(oldinstr, 1, 2)					\
> +	".pushsection .altinstructions,\"a\"\n"				\
> +	ALTINSTR_ENTRY(feature1, 1)					\
> +	ALTINSTR_ENTRY(feature2, 2)					\
> +	".popsection\n"							\
> +	".subsection 1\n" \
> +	ALTINSTR_REPLACEMENT(newinstr1, feature1, 1)			\
> +	ALTINSTR_REPLACEMENT(newinstr2, feature2, 2)			\
> +	".previous\n"
> +
> +/*
> + * Alternative instructions for different CPU types or capabilities.
> + *
> + * This allows optimized instructions to be used even in generic
> + * binary kernels.
> + *
> + * The length of oldinstr must be greater than or equal to the length of
> + * newinstr; it can be padded with NOPs as needed.
> + *
> + * For non-barrier-like inlines please define new variants
> + * without the volatile and memory clobber.
> + */
> +#define alternative(oldinstr, newinstr, feature)			\
> +	(asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) : : : "memory"))
> +
> +#define alternative_2(oldinstr, newinstr1, feature1, newinstr2, feature2) \
> +	(asm volatile(ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2) ::: "memory"))
> +
> +/*
> + * Alternative inline assembly with input.
> + *
> + * Peculiarities:
> + * No memory clobber here.
> + * Argument numbers start with 1.
> + * It is best to use constraints that are fixed size (like (%1) ... "r").
> + * If you use variable sized constraints like "m" or "g" in the
> + * replacement, make sure to pad to the worst case length.
> + * An unused argument 0 is left in to keep API compatibility.
> + */
> +#define alternative_input(oldinstr, newinstr, feature, input...)	\
> +	(asm volatile (ALTERNATIVE(oldinstr, newinstr, feature)		\
> +		: : "i" (0), ## input))
> +
> +/*
> + * This is similar to alternative_input. But it has two features and
> + * respective instructions.
> + *
> + * If CPU has feature2, newinstr2 is used.
> + * Otherwise, if CPU has feature1, newinstr1 is used.
> + * Otherwise, oldinstr is used.
> + */
> +#define alternative_input_2(oldinstr, newinstr1, feature1, newinstr2,	     \
> +			   feature2, input...)				     \
> +	(asm volatile(ALTERNATIVE_2(oldinstr, newinstr1, feature1,	     \
> +		newinstr2, feature2)					     \
> +		: : "i" (0), ## input))
> +
> +/* Like alternative_input, but with a single output argument */
> +#define alternative_io(oldinstr, newinstr, feature, output, input...)	\
> +	(asm volatile (ALTERNATIVE(oldinstr, newinstr, feature)		\
> +		: output : "i" (0), ## input))
> +
> +/* Like alternative_io, but for replacing a direct call with another one. */
> +#define alternative_call(oldfunc, newfunc, feature, output, input...)	\
> +	(asm volatile (ALTERNATIVE("call %P[old]", "call %P[new]", feature) \

Not 'call' but 'bl'? And IMHO it is better to split this into several
patches to make code review easier. For example, in this patch the
'ALTERNATIVE_2' and C inline assembly helpers are not used by [patch 2/2];
those features can be added after [patch 2/2] or in the future.
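
Maybe something like the following (untested; I am not sure the operand
modifier is right for LoongArch):

#define alternative_call(oldfunc, newfunc, feature, output, input...)	\
	(asm volatile (ALTERNATIVE("bl %P[old]", "bl %P[new]", feature)	\
		: output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input))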


> +		: output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input))
> +
> +/*
> + * Like alternative_call, but there are two features and respective functions.
> + * If CPU has feature2, function2 is used.
> + * Otherwise, if CPU has feature1, function1 is used.
> + * Otherwise, old function is used.
> + */
> +#define alternative_call_2(oldfunc, newfunc1, feature1, newfunc2, feature2,   \
> +			   output, input...)				      \
> +	(asm volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", feature1,\
> +		"call %P[new2]", feature2)				      \
> +		: output, ASM_CALL_CONSTRAINT				      \
> +		: [old] "i" (oldfunc), [new1] "i" (newfunc1),		      \
> +		  [new2] "i" (newfunc2), ## input))
> +
> +/*
> + * use this macro(s) if you need more than one output parameter
> + * in alternative_io
> + */
> +#define ASM_OUTPUT2(a...) a
> +
> +/*
> + * use this macro if you need clobbers but no inputs in
> + * alternative_{input,io,call}()
> + */
> +#define ASM_NO_INPUT_CLOBBER(clbr...) "i" (0) : clbr
> +
> +#endif /* __ASSEMBLY__ */
> +
> +#endif /* _ASM_ALTERNATIVE_H */
> diff --git a/arch/loongarch/include/asm/bugs.h b/arch/loongarch/include/asm/bugs.h
> new file mode 100644
> index 000000000000..651fffe1f743
> --- /dev/null
> +++ b/arch/loongarch/include/asm/bugs.h
> @@ -0,0 +1,15 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +/*
> + * This is included by init/main.c to check for architecture-dependent bugs.
> + *
> + * Copyright (C) 2020-2021 Loongson Technology Corporation Limited
> + */
> +#ifndef _ASM_BUGS_H
> +#define _ASM_BUGS_H
> +
> +#include <asm/cpu.h>
> +#include <asm/cpu-info.h>
> +
> +extern void check_bugs(void);
> +
> +#endif /* _ASM_BUGS_H */
> diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h
> index 889d6c9fc2b6..bd4c116aa73d 100644
> --- a/arch/loongarch/include/asm/inst.h
> +++ b/arch/loongarch/include/asm/inst.h
> @@ -8,6 +8,7 @@
>   #include <linux/types.h>
>   #include <asm/asm.h>
>   
> +#define INSN_NOP		0x03400000
>   #define INSN_BREAK		0x002a0000
>   
>   #define ADDR_IMMMASK_LU52ID	0xFFF0000000000000
> @@ -28,6 +29,7 @@ enum reg0i26_op {
>   enum reg1i20_op {
>   	lu12iw_op	= 0x0a,
>   	lu32id_op	= 0x0b,
> +	pcaddi_op	= 0x0c,
>   	pcaddu12i_op	= 0x0e,
>   	pcaddu18i_op	= 0x0f,
>   };
> @@ -35,6 +37,8 @@ enum reg1i20_op {
>   enum reg1i21_op {
>   	beqz_op		= 0x10,
>   	bnez_op		= 0x11,
> +	bceqz_op	= 0x12,
> +	bcnez_op	= 0x12,
>   };
>   
>   enum reg2_op {
> @@ -315,6 +319,12 @@ static inline bool is_imm_negative(unsigned long val, unsigned int bit)
>   	return val & (1UL << (bit - 1));
>   }
>   
> +static inline bool is_pc_ins(union loongarch_instruction *ip)
> +{
> +	return ip->reg1i20_format.opcode >= pcaddi_op &&
> +			ip->reg1i20_format.opcode <= pcaddu18i_op;
> +}
> +
>   static inline bool is_branch_ins(union loongarch_instruction *ip)
>   {
>   	return ip->reg1i21_format.opcode >= beqz_op &&
> diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
> index 2ad2555b53ea..86744531b100 100644
> --- a/arch/loongarch/kernel/Makefile
> +++ b/arch/loongarch/kernel/Makefile
> @@ -8,7 +8,7 @@ extra-y		:= vmlinux.lds
>   obj-y		+= head.o cpu-probe.o cacheinfo.o env.o setup.o entry.o genex.o \
>   		   traps.o irq.o idle.o process.o dma.o mem.o io.o reset.o switch.o \
>   		   elf.o syscall.o signal.o time.o topology.o inst.o ptrace.o vdso.o \
> -		   unaligned.o
> +		   alternative.o unaligned.o
>   
>   obj-$(CONFIG_ACPI)		+= acpi.o
>   obj-$(CONFIG_EFI) 		+= efi.o
> diff --git a/arch/loongarch/kernel/alternative.c b/arch/loongarch/kernel/alternative.c
> new file mode 100644
> index 000000000000..43434150b853
> --- /dev/null
> +++ b/arch/loongarch/kernel/alternative.c
> @@ -0,0 +1,263 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +#include <linux/mm.h>
> +#include <linux/module.h>
> +#include <asm/alternative.h>
> +#include <asm/cacheflush.h>
> +#include <asm/inst.h>
> +#include <asm/sections.h>
> +
> +int __read_mostly alternatives_patched;
> +
> +EXPORT_SYMBOL_GPL(alternatives_patched);
> +
> +#define MAX_PATCH_SIZE (((u8)(-1)) / LOONGARCH_INSN_SIZE)
> +
> +static int __initdata_or_module debug_alternative;
> +
> +static int __init debug_alt(char *str)
> +{
> +	debug_alternative = 1;
> +	return 1;
> +}
> +__setup("debug-alternative", debug_alt);
> +
> +#define DPRINTK(fmt, args...)						\
> +do {									\
> +	if (debug_alternative)						\
> +		printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args);	\
> +} while (0)
> +
> +#define DUMP_WORDS(buf, count, fmt, args...)				\
> +do {									\
> +	if (unlikely(debug_alternative)) {				\
> +		int _j;							\
> +		union loongarch_instruction *_buf = buf;		\
> +									\
> +		if (!(count))						\
> +			break;						\
> +									\
> +		printk(KERN_DEBUG fmt, ##args);				\
> +		for (_j = 0; _j < count - 1; _j++)			\
> +			printk(KERN_CONT "<%08x> ", _buf[_j].word);	\
> +		printk(KERN_CONT "<%08x>\n", _buf[_j].word);		\
> +	}								\
> +} while (0)
> +
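> +/* Sign-extend the (SIDX + 1)-bit value X; bit SIDX is the sign bit. */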
> +#define __SIGNEX(X, SIDX) ((X) >= (1 << SIDX) ? ~((1 << SIDX) - 1) | (X) : (X))
> +#define SIGNEX16(X) __SIGNEX(((unsigned long)(X)), 15)
> +#define SIGNEX20(X) __SIGNEX(((unsigned long)(X)), 19)
> +#define SIGNEX21(X) __SIGNEX(((unsigned long)(X)), 20)
> +#define SIGNEX26(X) __SIGNEX(((unsigned long)(X)), 25)
> +
> +static inline unsigned long bs_dest_16(unsigned long now, unsigned int si)
> +{
> +	return now + (SIGNEX16(si) << 2);
> +}
> +
> +static inline unsigned long bs_dest_21(unsigned long now, unsigned int h, unsigned int l)
> +{
> +	return now + (SIGNEX21(h << 16 | l) << 2);
> +}
> +
> +static inline unsigned long bs_dest_26(unsigned long now, unsigned int h, unsigned int l)
> +{
> +	return now + (SIGNEX26(h << 16 | l) << 2);
> +}
> +
> +/* Use this to add nops to a buffer, then text_poke the whole buffer. */
> +static void __init_or_module add_nops(union loongarch_instruction *insn, int count)
> +{
> +	while (count--) {
> +		insn->word = INSN_NOP;
> +		insn++;
> +	}
> +}
> +
> +/* Is the jump target inside the copied replacement instruction sequence? */
> +static inline bool in_alt_jump(unsigned long jump, void *start, void *end)
> +{
> +	return jump >= (unsigned long)start && jump < (unsigned long)end;
> +}
> +
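> +/*
> + * Replacement instructions run from a different address than the one
> + * they were assembled at, so recompute the offset of a PC-relative
> + * branch copied from @src for its new home at @dest and store the
> + * fixed-up instruction in @buf. Branches whose target lies inside the
> + * replacement sequence itself need no fixup.
> + */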
> +static void __init_or_module recompute_jump(union loongarch_instruction *buf,
> +		union loongarch_instruction *dest, union loongarch_instruction *src,
> +		void *start, void *end)
> +{
> +	unsigned int si, si_l, si_h;
> +	unsigned long cur_pc, jump_addr, pc;
> +	long offset;
> +
> +	cur_pc = (unsigned long)src;
> +	pc = (unsigned long)dest;
> +
> +	si_l = src->reg0i26_format.immediate_l;
> +	si_h = src->reg0i26_format.immediate_h;
> +	switch (src->reg0i26_format.opcode) {
> +	case b_op:
> +	case bl_op:
> +		jump_addr = bs_dest_26(cur_pc, si_h, si_l);
> +		if (in_alt_jump(jump_addr, start, end))
> +			return;
> +		offset = jump_addr - pc;
> +		BUG_ON(offset < -SZ_128M || offset >= SZ_128M);
> +		offset >>= 2;
> +		buf->reg0i26_format.immediate_h = offset >> 16;
> +		buf->reg0i26_format.immediate_l = offset;
> +		return;
> +	}
> +
> +	si_l = src->reg1i21_format.immediate_l;
> +	si_h = src->reg1i21_format.immediate_h;
> +	switch (src->reg1i21_format.opcode) {
> +	case beqz_op:
> +	case bnez_op:
> +	case bceqz_op:
> +		jump_addr = bs_dest_21(cur_pc, si_h, si_l);
> +		if (in_alt_jump(jump_addr, start, end))
> +			return;
> +		offset = jump_addr - pc;
> +		BUG_ON(offset < -SZ_4M || offset >= SZ_4M);
> +		offset >>= 2;
> +		buf->reg1i21_format.immediate_h = offset >> 16;
> +		buf->reg1i21_format.immediate_l = offset;
> +		return;
> +	}
> +
> +	si = src->reg2i16_format.immediate;
> +	switch (src->reg2i16_format.opcode) {
> +	case beq_op:
> +	case bne_op:
> +	case blt_op:
> +	case bge_op:
> +	case bltu_op:
> +	case bgeu_op:
> +		jump_addr = bs_dest_16(cur_pc, si);
> +		if (in_alt_jump(jump_addr, start, end))
> +			return;
> +		offset = jump_addr - pc;
> +		BUG_ON(offset < -SZ_128K || offset >= SZ_128K);
> +		offset >>= 2;
> +                buf->reg2i16_format.immediate = offset;

code indent should use tabs where possible


> +		return;
> +	}
> +}
> +
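> +/*
> + * Copy @nr replacement instructions from @src into @buf, fixing up
> + * relative branches for their eventual home at @dest. PC-relative
> + * data instructions (pcaddi etc.) cannot be relocated and are rejected.
> + */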
> +static int __init_or_module copy_alt_insns(union loongarch_instruction *buf,
> +	union loongarch_instruction *dest, union loongarch_instruction *src, int nr)
> +{
> +	int i;
> +
> +	for (i = 0; i < nr; i++) {
> +		buf[i].word = src[i].word;
> +
> +		if (is_branch_ins(&src[i]) &&
> +		    src[i].reg2i16_format.opcode != jirl_op) {
> +			recompute_jump(&buf[i], &dest[i], &src[i], src, src + nr);
> +		} else if (is_pc_ins(&src[i])) {
> +			pr_err("PC-relative instructions are not supported at present!\n");
> +			return -EINVAL;
> +		}
> +	}
> +
> +	return 0;
> +}
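
Note that jirl is deliberately excluded above: it jumps to
rj + (sign-extended imm16 << 2), i.e. relative to a register rather than
to the PC, so copying the instruction elsewhere does not change its
target. PC-relative loads (pcaddi and friends) cannot be relocated this
simply, hence the -EINVAL path.
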
> +
> +/*
> + * text_poke_early - Update instructions on a live kernel at boot time
> + *
> + * When you use this code to patch more than one byte of an instruction
> + * you need to make sure that other CPUs cannot execute this code in parallel.
> + * Also no thread must be currently preempted in the middle of these
> + * instructions. And on the local CPU you need to be protected against NMI or MCE
> + * handlers seeing an inconsistent instruction while you patch.
> + */
> +static void *__init_or_module text_poke_early(union loongarch_instruction *insn,
> +			      union loongarch_instruction *buf, unsigned int nr)
> +{
> +	int i;
> +	unsigned long flags;
> +
> +	local_irq_save(flags);
> +
> +	for (i = 0; i < nr; i++)
> +		insn[i].word = buf[i].word;
> +
> +	local_irq_restore(flags);
> +
> +	wbflush();
> +	flush_icache_range((unsigned long)insn, (unsigned long)(insn + nr));

Shouldn't the length here be nr * LOONGARCH_INSN_SIZE?


Thanks,

Jinyang


> +
> +	return insn;
> +}
> +
> +/*
> + * Replace instructions with better alternatives for this CPU type. This runs
> + * before SMP is initialized to avoid SMP problems with self modifying code.
> + * This implies that asymmetric systems where APs have fewer capabilities than
> + * the boot processor are not handled. Tough. Make sure you disable such
> + * features by hand.
> + */
> +void __init_or_module apply_alternatives(struct alt_instr *start, struct alt_instr *end)
> +{
> +	struct alt_instr *a;
> +	unsigned int nr_instr, nr_repl, nr_insnbuf;
> +	union loongarch_instruction *instr, *replacement;
> +	union loongarch_instruction insnbuf[MAX_PATCH_SIZE];
> +
> +	DPRINTK("alt table %px, -> %px", start, end);
> +	/*
> +	 * The scan order should be from start to end. A later scanned
> +	 * alternative code can overwrite previously scanned alternative code.
> +	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
> +	 * patch code.
> +	 *
> +	 * So be careful if you want to change the scan order to any other
> +	 * order.
> +	 */
> +	for (a = start; a < end; a++) {
> +		nr_insnbuf = 0;
> +
> +		instr = (void *)&a->instr_offset + a->instr_offset;
> +		replacement = (void *)&a->replace_offset + a->replace_offset;
> +
> +		BUG_ON(a->instrlen > sizeof(insnbuf));
> +		BUG_ON(a->instrlen & 0x3);
> +		BUG_ON(a->replacementlen & 0x3);
> +
> +		nr_instr = a->instrlen / LOONGARCH_INSN_SIZE;
> +		nr_repl = a->replacementlen / LOONGARCH_INSN_SIZE;
> +
> +		if (!cpu_has(a->feature)) {
> +			DPRINTK("feat not exist: %d, old: (%px len: %d), repl: (%px, len: %d)",
> +				a->feature, instr, a->instrlen,
> +				replacement, a->replacementlen);
> +
> +			continue;
> +		}
> +
> +		DPRINTK("feat: %d, old: (%px len: %d), repl: (%px, len: %d)",
> +			a->feature, instr, a->instrlen,
> +			replacement, a->replacementlen);
> +
> +		DUMP_WORDS(instr, nr_instr, "%px: old_insn: ", instr);
> +		DUMP_WORDS(replacement, nr_repl, "%px: rpl_insn: ", replacement);
> +
> +		copy_alt_insns(insnbuf, instr, replacement, nr_repl);
> +		nr_insnbuf = nr_repl;
> +
> +		if (nr_instr > nr_repl) {
> +			add_nops(insnbuf + nr_repl, nr_instr - nr_repl);
> +			nr_insnbuf += nr_instr - nr_repl;
> +		}
> +		DUMP_WORDS(insnbuf, nr_insnbuf, "%px: final_insn: ", instr);
> +
> +		text_poke_early(instr, insnbuf, nr_insnbuf);
> +	}
> +}
> +
> +void __init alternative_instructions(void)
> +{
> +	apply_alternatives(__alt_instructions, __alt_instructions_end);
> +
> +	alternatives_patched = 1;
> +}
> diff --git a/arch/loongarch/kernel/module.c b/arch/loongarch/kernel/module.c
> index 097595b2fc14..669e750917a3 100644
> --- a/arch/loongarch/kernel/module.c
> +++ b/arch/loongarch/kernel/module.c
> @@ -17,6 +17,7 @@
>   #include <linux/fs.h>
>   #include <linux/string.h>
>   #include <linux/kernel.h>
> +#include <asm/alternative.h>
>   
>   static int rela_stack_push(s64 stack_value, s64 *rela_stack, size_t *rela_stack_top)
>   {
> @@ -456,3 +457,18 @@ void *module_alloc(unsigned long size)
>   	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
>   			GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE, __builtin_return_address(0));
>   }
> +
> +int module_finalize(const Elf_Ehdr *hdr,
> +		    const Elf_Shdr *sechdrs,
> +		    struct module *mod)
> +{
> +	const Elf_Shdr *s, *se;
> +	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
> +
> +	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
> +		if (!strcmp(".altinstructions", secstrs + s->sh_name))
> +			apply_alternatives((void *)s->sh_addr, (void *)s->sh_addr + s->sh_size);
> +	}
> +
> +	return 0;
> +}
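
So loadable modules get the same treatment as the core kernel: any
.altinstructions section found in a module is patched via
apply_alternatives() from module_finalize(), i.e. before the module
starts executing.
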
> diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
> index 1eb63fa9bc81..96b6cb5db004 100644
> --- a/arch/loongarch/kernel/setup.c
> +++ b/arch/loongarch/kernel/setup.c
> @@ -31,7 +31,9 @@
>   #include <linux/swiotlb.h>
>   
>   #include <asm/addrspace.h>
> +#include <asm/alternative.h>
>   #include <asm/bootinfo.h>
> +#include <asm/bugs.h>
>   #include <asm/cache.h>
>   #include <asm/cpu.h>
>   #include <asm/dma.h>
> @@ -80,6 +82,11 @@ const char *get_system_type(void)
>   	return "generic-loongson-machine";
>   }
>   
> +void __init check_bugs(void)
> +{
> +	alternative_instructions();
> +}
> +
>   static const char *dmi_string_parse(const struct dmi_header *dm, u8 s)
>   {
>   	const u8 *bp = ((u8 *) dm) + dm->length;
> diff --git a/arch/loongarch/kernel/vmlinux.lds.S b/arch/loongarch/kernel/vmlinux.lds.S
> index efecda0c2361..733b16e8d55d 100644
> --- a/arch/loongarch/kernel/vmlinux.lds.S
> +++ b/arch/loongarch/kernel/vmlinux.lds.S
> @@ -54,6 +54,18 @@ SECTIONS
>   	. = ALIGN(PECOFF_SEGMENT_ALIGN);
>   	_etext = .;
>   
> +	/*
> +	 * struct alt_instr entries. From the header (alternative.h):
> +	 * "Alternative instructions for different CPU types or capabilities"
> +	 * Think locking instructions on spinlocks.
> +	 */
> +	. = ALIGN(4);
> +	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
> +		__alt_instructions = .;
> +		*(.altinstructions)
> +		__alt_instructions_end = .;
> +	}
> +
>   	.got : ALIGN(16) { *(.got) }
>   	.plt : ALIGN(16) { *(.plt) }
>   	.got.plt : ALIGN(16) { *(.got.plt) }



* Re: [PATCH 1/2] LoongArch: Add alternative runtime patching mechanism
  2022-10-24 12:51 ` [PATCH 1/2] LoongArch: Add alternative runtime patching mechanism Jinyang He
@ 2022-10-24 14:57   ` Huacai Chen
  0 siblings, 0 replies; 4+ messages in thread
From: Huacai Chen @ 2022-10-24 14:57 UTC (permalink / raw)
  To: Jinyang He
  Cc: Huacai Chen, Arnd Bergmann, loongarch, linux-arch, Xuefeng Li,
	Guo Ren, Xuerui Wang, Jiaxun Yang, linux-kernel, Jun Yi

Hi, Jinyang,

On Mon, Oct 24, 2022 at 8:51 PM Jinyang He <hejinyang@loongson.cn> wrote:
>
> Hi, Huacai,
>
>
On 2022/10/24 3:04 PM, Huacai Chen wrote:
> > Introduce the "alternative" mechanism from ARM64 and x86 for LoongArch
> > to apply runtime patching. The main purpose of this patch is to provide
> > a framework. In future we can use this mechanism (i.e., the ALTERNATIVE
> > and ALTERNATIVE_2 macros) to optimize hotspot functions according to cpu
> > features.
> >
> > Signed-off-by: Jun Yi <yijun@loongson.cn>
> > Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
> > ---
> >   arch/loongarch/include/asm/alternative-asm.h |  82 ++++++
> >   arch/loongarch/include/asm/alternative.h     | 176 ++++++++++++
> >   arch/loongarch/include/asm/bugs.h            |  15 ++
> >   arch/loongarch/include/asm/inst.h            |  10 +
> >   arch/loongarch/kernel/Makefile               |   2 +-
> >   arch/loongarch/kernel/alternative.c          | 266 +++++++++++++++++++
> >   arch/loongarch/kernel/module.c               |  16 ++
> >   arch/loongarch/kernel/setup.c                |   7 +
> >   arch/loongarch/kernel/vmlinux.lds.S          |  12 +
> >   9 files changed, 585 insertions(+), 1 deletion(-)
> >   create mode 100644 arch/loongarch/include/asm/alternative-asm.h
> >   create mode 100644 arch/loongarch/include/asm/alternative.h
> >   create mode 100644 arch/loongarch/include/asm/bugs.h
> >   create mode 100644 arch/loongarch/kernel/alternative.c
> >
> > diff --git a/arch/loongarch/include/asm/alternative-asm.h b/arch/loongarch/include/asm/alternative-asm.h
> > new file mode 100644
> > index 000000000000..f0f32ace29b1
> > --- /dev/null
> > +++ b/arch/loongarch/include/asm/alternative-asm.h
> > @@ -0,0 +1,82 @@
> > +/* SPDX-License-Identifier: GPL-2.0 */
> > +#ifndef _ASM_ALTERNATIVE_ASM_H
> > +#define _ASM_ALTERNATIVE_ASM_H
> > +
> > +#ifdef __ASSEMBLY__
> > +
> > +#include <asm/asm.h>
> > +
> > +/*
> > + * Issue one struct alt_instr descriptor entry (need to put it into
> > + * the section .altinstructions, see below). This entry contains
> > + * enough information for the alternatives patching code to patch an
> > + * instruction. See apply_alternatives().
> > + */
> > +.macro altinstruction_entry orig alt feature orig_len alt_len
> > +     .long \orig - .
> > +     .long \alt - .
> > +     .2byte \feature
> > +     .byte \orig_len
> > +     .byte \alt_len
> I tried '.byte 256' and gas warned but still finished compiling.
>   " warning: value 0x0000000000000100 truncated to 0x0000000000000000 "
> How about adding an '.if ... .error ... .endif' check here to validate the length?
I think a warning is just enough.

> > +.endm
> > +
> > +/*
> > + * Define an alternative between two instructions. If @feature is
> > + * present, early code in apply_alternatives() replaces @oldinstr with
> > + * @newinstr. ".fill" directive takes care of proper instruction padding
> > + * in case @newinstr is longer than @oldinstr.
> > + */
> > +.macro ALTERNATIVE oldinstr, newinstr, feature
> > +140 :
> > +     \oldinstr
> > +141 :
> > +     .fill - (((144f-143f)-(141b-140b)) > 0) * ((144f-143f)-(141b-140b)) / 4, 4, 0x03400000
> > +142 :
> > +
> > +     .pushsection .altinstructions, "a"
> > +     altinstruction_entry 140b, 143f, \feature, 142b-140b, 144f-143f
> > +     .popsection
> > +
> > +     .subsection 1
> > +143 :
> > +     \newinstr
> > +144 :
> > +     .previous
> > +.endm
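
A note on the .fill expression: 0x03400000 encodes "andi $zero, $zero, 0",
the canonical LoongArch nop (INSN_NOP in inst.h), and the leading minus
works because GAS comparisons evaluate to -1 when true. The expression
therefore reduces to ".fill max(0, (new_len - old_len) / 4), 4, nop",
padding @oldinstr with nops whenever @newinstr is longer.
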
> > +
> > +#define old_len                      (141b-140b)
> > +#define new_len1             (144f-143f)
> > +#define new_len2             (145f-144f)
> > +
> > +#define alt_max_short(a, b)  ((a) ^ (((a) ^ (b)) & -(-((a) < (b)))))
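
alt_max_short() is the classic branchless max, adapted for GAS, where a
true comparison yields -1 rather than 1. Evaluating it with new_len1 = 8
and new_len2 = 12 under GAS semantics:

	8 ^ ((8 ^ 12) & -(-(8 < 12))) = 8 ^ (4 & -1) = 12

With C's 0/1 truth value the mask would only keep bit 0, so the
expression is only valid in assembler arithmetic.
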
> > +
> > +/*
> > + * Same as ALTERNATIVE macro above but for two alternatives. If CPU
> > + * has @feature1, it replaces @oldinstr with @newinstr1. If CPU has
> > + * @feature2, it replaces @oldinstr with @newinstr2.
> > + */
> > +.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2
> > +140 :
> > +     \oldinstr
> > +141 :
> > +     .fill - ((alt_max_short(new_len1, new_len2) - (old_len)) > 0) * \
> > +             (alt_max_short(new_len1, new_len2) - (old_len)) / 4, 4, 0x03400000
> > +142 :
> > +
> > +     .pushsection .altinstructions, "a"
> > +     altinstruction_entry 140b, 143f, \feature1, 142b-140b, 144f-143f, 142b-141b
> > +     altinstruction_entry 140b, 144f, \feature2, 142b-140b, 145f-144f, 142b-141b
> > +     .popsection
> > +
> > +     .subsection 1
> > +143 :
> > +     \newinstr1
> > +144 :
> > +     \newinstr2
> > +145 :
> > +     .previous
> > +.endm
> > +
> > +#endif  /*  __ASSEMBLY__  */
> > +
> > +#endif /* _ASM_ALTERNATIVE_ASM_H */
> > diff --git a/arch/loongarch/include/asm/alternative.h b/arch/loongarch/include/asm/alternative.h
> > new file mode 100644
> > index 000000000000..b4fe66c7067e
> > --- /dev/null
> > +++ b/arch/loongarch/include/asm/alternative.h
> > @@ -0,0 +1,176 @@
> > +/* SPDX-License-Identifier: GPL-2.0 */
> > +#ifndef _ASM_ALTERNATIVE_H
> > +#define _ASM_ALTERNATIVE_H
> > +
> > +#ifndef __ASSEMBLY__
> > +
> > +#include <linux/types.h>
> > +#include <linux/stddef.h>
> > +#include <linux/stringify.h>
> > +#include <asm/asm.h>
> > +
> > +struct alt_instr {
> > +     s32 instr_offset;       /* offset to original instruction */
> > +     s32 replace_offset;     /* offset to replacement instruction */
> > +     u16 feature;            /* feature bit set for replacement */
> > +     u8  instrlen;           /* length of original instruction */
> > +     u8  replacementlen;     /* length of new instruction */
> > +} __packed;
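
Both s32 offsets are self-relative, which keeps .altinstructions entries
position-independent. A hypothetical helper (not in the patch) illustrates
how apply_alternatives() recovers the addresses:

	static inline void *alt_instr_site(struct alt_instr *a)
	{
		/* the entry stores (target - &field); add the field address back */
		return (void *)&a->instr_offset + a->instr_offset;
	}
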
> > +
> > +/*
> > + * Debug flag that can be tested to see whether alternative
> > + * instructions were patched in already:
> > + */
> > +extern int alternatives_patched;
> > +extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
> > +
> > +extern void alternative_instructions(void);
> > +extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
> > +
> > +#define b_replacement(num)   "664"#num
> > +#define e_replacement(num)   "665"#num
> > +
> > +#define alt_end_marker               "663"
> > +#define alt_slen             "662b-661b"
> > +#define alt_total_slen               alt_end_marker"b-661b"
> > +#define alt_rlen(num)                e_replacement(num)"f-"b_replacement(num)"f"
> > +
> > +#define __OLDINSTR(oldinstr, num)                                    \
> > +     "661:\n\t" oldinstr "\n662:\n"                                  \
> > +     ".fill -(((" alt_rlen(num) ")-(" alt_slen ")) > 0) * "          \
> > +             "((" alt_rlen(num) ")-(" alt_slen ")) / 4, 4, 0x03400000\n"
> > +
> > +#define OLDINSTR(oldinstr, num)                                              \
> > +     __OLDINSTR(oldinstr, num)                                       \
> > +     alt_end_marker ":\n"
> > +
> > +#define alt_max_short(a, b)  "((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") < (" b ")))))"
> > +
> > +/*
> > + * Pad the second replacement alternative with additional NOPs if it is
> > + * additionally longer than the first replacement alternative.
> > + */
> > +#define OLDINSTR_2(oldinstr, num1, num2) \
> > +     "661:\n\t" oldinstr "\n662:\n"                                                          \
> > +     ".fill -((" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")) > 0) * "  \
> > +             "(" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")) / 4, "    \
> > +             "4, 0x03400000\n"       \
> > +     alt_end_marker ":\n"
> > +
> > +#define ALTINSTR_ENTRY(feature, num)                                       \
> > +     " .long 661b - .\n"                             /* label           */ \
> > +     " .long " b_replacement(num)"f - .\n"           /* new instruction */ \
> > +     " .2byte " __stringify(feature) "\n"            /* feature bit     */ \
> > +     " .byte " alt_total_slen "\n"                   /* source len      */ \
> > +     " .byte " alt_rlen(num) "\n"                    /* replacement len */
> > +
> > +#define ALTINSTR_REPLACEMENT(newinstr, feature, num) /* replacement */     \
> > +     b_replacement(num)":\n\t" newinstr "\n" e_replacement(num) ":\n\t"
> > +
> > +/* alternative assembly primitive: */
> > +#define ALTERNATIVE(oldinstr, newinstr, feature)                     \
> > +     OLDINSTR(oldinstr, 1)                                           \
> > +     ".pushsection .altinstructions,\"a\"\n"                         \
> > +     ALTINSTR_ENTRY(feature, 1)                                      \
> > +     ".popsection\n"                                                 \
> > +     ".subsection 1\n" \
> > +     ALTINSTR_REPLACEMENT(newinstr, feature, 1)                      \
> > +     ".previous\n"
> > +
> > +#define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\
> > +     OLDINSTR_2(oldinstr, 1, 2)                                      \
> > +     ".pushsection .altinstructions,\"a\"\n"                         \
> > +     ALTINSTR_ENTRY(feature1, 1)                                     \
> > +     ALTINSTR_ENTRY(feature2, 2)                                     \
> > +     ".popsection\n"                                                 \
> > +     ".subsection 1\n" \
> > +     ALTINSTR_REPLACEMENT(newinstr1, feature1, 1)                    \
> > +     ALTINSTR_REPLACEMENT(newinstr2, feature2, 2)                    \
> > +     ".previous\n"
> > +
> > +/*
> > + * Alternative instructions for different CPU types or capabilities.
> > + *
> > + * This allows the use of optimized instructions even on generic binary
> > + * kernels.
> > + *
> > + * The length of oldinstr must be greater than or equal to the length of
> > + * newinstr; it is padded with nops as needed to ensure this.
> > + *
> > + * For non-barrier-like inlines, please define new variants
> > + * without volatile and the memory clobber.
> > + */
> > +#define alternative(oldinstr, newinstr, feature)                     \
> > +     (asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) : : : "memory"))
> > +
> > +#define alternative_2(oldinstr, newinstr1, feature1, newinstr2, feature2) \
> > +     (asm volatile(ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2) ::: "memory"))
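
For illustration, a caller might look like this (a minimal sketch;
CPU_FEATURE_FOO is a made-up feature bit and "dbar 0" merely stands in
for whatever instruction the feature enables):

	static inline void foo_sync(void)
	{
		/* executes a plain nop unless the CPU advertises the feature */
		alternative("nop", "dbar 0", CPU_FEATURE_FOO);
	}
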
> > +
> > +/*
> > + * Alternative inline assembly with input.
> > + *
> > + * Peculiarities:
> > + * No memory clobber here.
> > + * Argument numbers start with 1.
> > + * Best is to use constraints that are fixed size (like (%1) ... "r")
> > + * If you use variable sized constraints like "m" or "g" in the
> > + * replacement make sure to pad to the worst case length.
> > + * Leaving an unused argument 0 to keep API compatibility.
> > + */
> > +#define alternative_input(oldinstr, newinstr, feature, input...)     \
> > +     (asm volatile (ALTERNATIVE(oldinstr, newinstr, feature)         \
> > +             : : "i" (0), ## input))
> > +
> > +/*
> > + * This is similar to alternative_input. But it has two features and
> > + * respective instructions.
> > + *
> > + * If CPU has feature2, newinstr2 is used.
> > + * Otherwise, if CPU has feature1, newinstr1 is used.
> > + * Otherwise, oldinstr is used.
> > + */
> > +#define alternative_input_2(oldinstr, newinstr1, feature1, newinstr2,             \
> > +                        feature2, input...)                               \
> > +     (asm volatile(ALTERNATIVE_2(oldinstr, newinstr1, feature1,           \
> > +             newinstr2, feature2)                                         \
> > +             : : "i" (0), ## input))
> > +
> > +/* Like alternative_input, but with a single output argument */
> > +#define alternative_io(oldinstr, newinstr, feature, output, input...)        \
> > +     (asm volatile (ALTERNATIVE(oldinstr, newinstr, feature)         \
> > +             : output : "i" (0), ## input))
> > +
> > +/* Like alternative_io, but for replacing a direct call with another one. */
> > +#define alternative_call(oldfunc, newfunc, feature, output, input...)        \
> > +     (asm volatile (ALTERNATIVE("call %P[old]", "call %P[new]", feature) \
>
> Shouldn't this be 'bl' rather than 'call'? And IMHO it would be better
> to split this into several patches, which is more convenient for code
> review. For example, in this patch 'ALTERNATIVE_2' and the C inline
> assembly are not used by [patch2/2]; these features could be added
> after [patch2/2] or in the future.
OK, the unused macros will be removed for now and added back in the future.

>
>
> > +             : output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input))
> > +
> > +/*
> > + * Like alternative_call, but there are two features and respective functions.
> > + * If CPU has feature2, function2 is used.
> > + * Otherwise, if CPU has feature1, function1 is used.
> > + * Otherwise, old function is used.
> > + */
> > +#define alternative_call_2(oldfunc, newfunc1, feature1, newfunc2, feature2,   \
> > +                        output, input...)                                  \
> > +     (asm volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", feature1,\
> > +             "call %P[new2]", feature2)                                    \
> > +             : output, ASM_CALL_CONSTRAINT                                 \
> > +             : [old] "i" (oldfunc), [new1] "i" (newfunc1),                 \
> > +               [new2] "i" (newfunc2), ## input))
> > +
> > +/*
> > + * use this macro(s) if you need more than one output parameter
> > + * in alternative_io
> > + */
> > +#define ASM_OUTPUT2(a...) a
> > +
> > +/*
> > + * use this macro if you need clobbers but no inputs in
> > + * alternative_{input,io,call}()
> > + */
> > +#define ASM_NO_INPUT_CLOBBER(clbr...) "i" (0) : clbr
> > +
> > +#endif /* __ASSEMBLY__ */
> > +
> > +#endif /* _ASM_ALTERNATIVE_H */
> > diff --git a/arch/loongarch/include/asm/bugs.h b/arch/loongarch/include/asm/bugs.h
> > new file mode 100644
> > index 000000000000..651fffe1f743
> > --- /dev/null
> > +++ b/arch/loongarch/include/asm/bugs.h
> > @@ -0,0 +1,15 @@
> > +/* SPDX-License-Identifier: GPL-2.0 */
> > +/*
> > + * This is included by init/main.c to check for architecture-dependent bugs.
> > + *
> > + * Copyright (C) 2020-2021 Loongson Technology Corporation Limited
> > + */
> > +#ifndef _ASM_BUGS_H
> > +#define _ASM_BUGS_H
> > +
> > +#include <asm/cpu.h>
> > +#include <asm/cpu-info.h>
> > +
> > +extern void check_bugs(void);
> > +
> > +#endif /* _ASM_BUGS_H */
> > diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h
> > index 889d6c9fc2b6..bd4c116aa73d 100644
> > --- a/arch/loongarch/include/asm/inst.h
> > +++ b/arch/loongarch/include/asm/inst.h
> > @@ -8,6 +8,7 @@
> >   #include <linux/types.h>
> >   #include <asm/asm.h>
> >
> > +#define INSN_NOP             0x03400000
> >   #define INSN_BREAK          0x002a0000
> >
> >   #define ADDR_IMMMASK_LU52ID 0xFFF0000000000000
> > @@ -28,6 +29,7 @@ enum reg0i26_op {
> >   enum reg1i20_op {
> >       lu12iw_op       = 0x0a,
> >       lu32id_op       = 0x0b,
> > +     pcaddi_op       = 0x0c,
> >       pcaddu12i_op    = 0x0e,
> >       pcaddu18i_op    = 0x0f,
> >   };
> > @@ -35,6 +37,8 @@ enum reg1i20_op {
> >   enum reg1i21_op {
> >       beqz_op         = 0x10,
> >       bnez_op         = 0x11,
> > +     bceqz_op        = 0x12,
> > +     bcnez_op        = 0x12,
> >   };
> >
> >   enum reg2_op {
> > @@ -315,6 +319,12 @@ static inline bool is_imm_negative(unsigned long val, unsigned int bit)
> >       return val & (1UL << (bit - 1));
> >   }
> >
> > +static inline bool is_pc_ins(union loongarch_instruction *ip)
> > +{
> > +     return ip->reg1i20_format.opcode >= pcaddi_op &&
> > +                     ip->reg1i20_format.opcode <= pcaddu18i_op;
> > +}
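
Since the reg1i20 opcodes are consecutive (pcaddi 0x0c, pcalau12i 0x0d,
pcaddu12i 0x0e, pcaddu18i 0x0f), this range check also matches pcalau12i
even though it is not listed in the enum.
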
> > +
> >   static inline bool is_branch_ins(union loongarch_instruction *ip)
> >   {
> >       return ip->reg1i21_format.opcode >= beqz_op &&
> > diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
> > index 2ad2555b53ea..86744531b100 100644
> > --- a/arch/loongarch/kernel/Makefile
> > +++ b/arch/loongarch/kernel/Makefile
> > @@ -8,7 +8,7 @@ extra-y               := vmlinux.lds
> >   obj-y               += head.o cpu-probe.o cacheinfo.o env.o setup.o entry.o genex.o \
> >                  traps.o irq.o idle.o process.o dma.o mem.o io.o reset.o switch.o \
> >                  elf.o syscall.o signal.o time.o topology.o inst.o ptrace.o vdso.o \
> > -                unaligned.o
> > +                alternative.o unaligned.o
> >
> >   obj-$(CONFIG_ACPI)          += acpi.o
> >   obj-$(CONFIG_EFI)           += efi.o
> > diff --git a/arch/loongarch/kernel/alternative.c b/arch/loongarch/kernel/alternative.c
> > new file mode 100644
> > index 000000000000..43434150b853
> > --- /dev/null
> > +++ b/arch/loongarch/kernel/alternative.c
> > @@ -0,0 +1,263 @@
> > +// SPDX-License-Identifier: GPL-2.0-only
> > +#include <linux/mm.h>
> > +#include <linux/module.h>
> > +#include <asm/alternative.h>
> > +#include <asm/cacheflush.h>
> > +#include <asm/inst.h>
> > +#include <asm/sections.h>
> > +
> > +int __read_mostly alternatives_patched;
> > +
> > +EXPORT_SYMBOL_GPL(alternatives_patched);
> > +
> > +#define MAX_PATCH_SIZE (((u8)(-1)) / LOONGARCH_INSN_SIZE)
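
Because instrlen and replacementlen are u8, a single alternative can
cover at most 255 bytes, so MAX_PATCH_SIZE works out to 255 / 4 = 63
instructions; the on-stack insnbuf in apply_alternatives() is sized to
match.
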
> > +
> > +static int __initdata_or_module debug_alternative;
> > +
> > +static int __init debug_alt(char *str)
> > +{
> > +     debug_alternative = 1;
> > +     return 1;
> > +}
> > +__setup("debug-alternative", debug_alt);
> > +
> > +#define DPRINTK(fmt, args...)                                                \
> > +do {                                                                 \
> > +     if (debug_alternative)                                          \
> > +             printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args);   \
> > +} while (0)
> > +
> > +#define DUMP_WORDS(buf, count, fmt, args...)                         \
> > +do {                                                                 \
> > +     if (unlikely(debug_alternative)) {                              \
> > +             int _j;                                                 \
> > +             union loongarch_instruction *_buf = buf;                \
> > +                                                                     \
> > +             if (!(count))                                           \
> > +                     break;                                          \
> > +                                                                     \
> > +             printk(KERN_DEBUG fmt, ##args);                         \
> > +             for (_j = 0; _j < count - 1; _j++)                      \
> > +                     printk(KERN_CONT "<%08x> ", _buf[_j].word);     \
> > +             printk(KERN_CONT "<%08x>\n", _buf[_j].word);            \
> > +     }                                                               \
> > +} while (0)
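
In other words, booting with "debug-alternative" on the kernel command
line enables the DPRINTK/DUMP_WORDS tracing, printing each patch site
together with its old, replacement and final instruction words.
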
> > +
> > +#define __SIGNEX(X, SIDX) ((X) >= (1 << (SIDX)) ? ~((1 << (SIDX)) - 1) | (X) : (X))
> > +#define SIGNEX16(X) __SIGNEX(((unsigned long)(X)), 15)
> > +#define SIGNEX20(X) __SIGNEX(((unsigned long)(X)), 19)
> > +#define SIGNEX21(X) __SIGNEX(((unsigned long)(X)), 20)
> > +#define SIGNEX26(X) __SIGNEX(((unsigned long)(X)), 25)
> > +
> > +static inline unsigned long bs_dest_16(unsigned long now, unsigned int si)
> > +{
> > +     return now + (SIGNEX16(si) << 2);
> > +}
> > +
> > +static inline unsigned long bs_dest_21(unsigned long now, unsigned int h, unsigned int l)
> > +{
> > +     return now + (SIGNEX21(h << 16 | l) << 2);
> > +}
> > +
> > +static inline unsigned long bs_dest_26(unsigned long now, unsigned int h, unsigned int l)
> > +{
> > +     return now + (SIGNEX26(h << 16 | l) << 2);
> > +}
> > +
> > +/* Use this to add nops to a buffer, then text_poke the whole buffer. */
> > +static void __init_or_module add_nops(union loongarch_instruction *insn, int count)
> > +{
> > +     while (count--) {
> > +             insn->word = INSN_NOP;
> > +             insn++;
> > +     }
> > +}
> > +
> > +/* Is the jump target inside the local replacement sequence being copied? */
> > +static inline bool in_alt_jump(unsigned long jump, void *start, void *end)
> > +{
> > +     return jump >= (unsigned long)start && jump < (unsigned long)end;
> > +}
> > +
> > +static void __init_or_module recompute_jump(union loongarch_instruction *buf,
> > +             union loongarch_instruction *dest, union loongarch_instruction *src,
> > +             void *start, void *end)
> > +{
> > +     unsigned int si, si_l, si_h;
> > +     unsigned long cur_pc, jump_addr, pc;
> > +     long offset;
> > +
> > +     cur_pc = (unsigned long)src;
> > +     pc = (unsigned long)dest;
> > +
> > +     si_l = src->reg0i26_format.immediate_l;
> > +     si_h = src->reg0i26_format.immediate_h;
> > +     switch (src->reg0i26_format.opcode) {
> > +     case b_op:
> > +     case bl_op:
> > +             jump_addr = bs_dest_26(cur_pc, si_h, si_l);
> > +             if (in_alt_jump(jump_addr, start, end))
> > +                     return;
> > +             offset = jump_addr - pc;
> > +             BUG_ON(offset < -SZ_128M || offset >= SZ_128M);
> > +             offset >>= 2;
> > +             buf->reg0i26_format.immediate_h = offset >> 16;
> > +             buf->reg0i26_format.immediate_l = offset;
> > +             return;
> > +     }
> > +
> > +     si_l = src->reg1i21_format.immediate_l;
> > +     si_h = src->reg1i21_format.immediate_h;
> > +     switch (src->reg1i21_format.opcode) {
> > +     case beqz_op:
> > +     case bnez_op:
> > +     case bceqz_op:
> > +             jump_addr = bs_dest_21(cur_pc, si_h, si_l);
> > +             if (in_alt_jump(jump_addr, start, end))
> > +                     return;
> > +             offset = jump_addr - pc;
> > +             BUG_ON(offset < -SZ_4M || offset >= SZ_4M);
> > +             offset >>= 2;
> > +             buf->reg1i21_format.immediate_h = offset >> 16;
> > +             buf->reg1i21_format.immediate_l = offset;
> > +             return;
> > +     }
> > +
> > +     si = src->reg2i16_format.immediate;
> > +     switch (src->reg2i16_format.opcode) {
> > +     case beq_op:
> > +     case bne_op:
> > +     case blt_op:
> > +     case bge_op:
> > +     case bltu_op:
> > +     case bgeu_op:
> > +             jump_addr = bs_dest_16(cur_pc, si);
> > +             if (in_alt_jump(jump_addr, start, end))
> > +                     return;
> > +             offset = jump_addr - pc;
> > +             BUG_ON(offset < -SZ_128K || offset >= SZ_128K);
> > +             offset >>= 2;
> > +                buf->reg2i16_format.immediate = offset;
>
> code indent should use tabs where possible
OK, thanks.

>
>
> > +             return;
> > +     }
> > +}
> > +
> > +static int __init_or_module copy_alt_insns(union loongarch_instruction *buf,
> > +     union loongarch_instruction *dest, union loongarch_instruction *src, int nr)
> > +{
> > +     int i;
> > +
> > +     for (i = 0; i < nr; i++) {
> > +             buf[i].word = src[i].word;
> > +
> > +             if (is_branch_ins(&src[i]) &&
> > +                 src[i].reg2i16_format.opcode != jirl_op) {
> > +                     recompute_jump(&buf[i], &dest[i], &src[i], src, src + nr);
> > +             } else if (is_pc_ins(&src[i])) {
> > +                     pr_err("pcrel instructions are not supported at present!\n");
> > +                     return -EINVAL;
> > +             }
> > +     }
> > +
> > +     return 0;
> > +}
> > +
> > +/*
> > + * text_poke_early - Update instructions on a live kernel at boot time
> > + *
> > + * When you use this code to patch more than one byte of an instruction
> > + * you need to make sure that other CPUs cannot execute this code in parallel.
> > + * Also no thread must be currently preempted in the middle of these
> > + * instructions. And on the local CPU you need to be protected against NMI or MCE
> > + * handlers seeing an inconsistent instruction while you patch.
> > + */
> > +static void *__init_or_module text_poke_early(union loongarch_instruction *insn,
> > +                           union loongarch_instruction *buf, unsigned int nr)
> > +{
> > +     int i;
> > +     unsigned long flags;
> > +
> > +     local_irq_save(flags);
> > +
> > +     for (i = 0; i < nr; i++)
> > +             insn[i].word = buf[i].word;
> > +
> > +     local_irq_restore(flags);
> > +
> > +     wbflush();
> > +     flush_icache_range((unsigned long)insn, (unsigned long)(insn + nr));
>
> Shouldn't the length here be nr * LOONGARCH_INSN_SIZE?
insn is a pointer to union loongarch_instruction, so (insn + nr) already
advances by nr * LOONGARCH_INSN_SIZE bytes; the flush range is correct as-is.

Huacai
>
>
> Thanks,
>
> Jinyang
>
>
> > +
> > +     return insn;
> > +}
> > +
> > +/*
> > + * Replace instructions with better alternatives for this CPU type. This runs
> > + * before SMP is initialized to avoid SMP problems with self modifying code.
> > + * This implies that asymmetric systems where APs have fewer capabilities than
> > + * the boot processor are not handled. Tough. Make sure you disable such
> > + * features by hand.
> > + */
> > +void __init_or_module apply_alternatives(struct alt_instr *start, struct alt_instr *end)
> > +{
> > +     struct alt_instr *a;
> > +     unsigned int nr_instr, nr_repl, nr_insnbuf;
> > +     union loongarch_instruction *instr, *replacement;
> > +     union loongarch_instruction insnbuf[MAX_PATCH_SIZE];
> > +
> > +     DPRINTK("alt table %px, -> %px", start, end);
> > +     /*
> > +      * The scan order should be from start to end. A later scanned
> > +      * alternative code can overwrite previously scanned alternative code.
> > +      * Some kernel functions (e.g. memcpy, memset, etc) use this order to
> > +      * patch code.
> > +      *
> > +      * So be careful if you want to change the scan order to any other
> > +      * order.
> > +      */
> > +     for (a = start; a < end; a++) {
> > +             nr_insnbuf = 0;
> > +
> > +             instr = (void *)&a->instr_offset + a->instr_offset;
> > +             replacement = (void *)&a->replace_offset + a->replace_offset;
> > +
> > +             BUG_ON(a->instrlen > sizeof(insnbuf));
> > +             BUG_ON(a->instrlen & 0x3);
> > +             BUG_ON(a->replacementlen & 0x3);
> > +
> > +             nr_instr = a->instrlen / LOONGARCH_INSN_SIZE;
> > +             nr_repl = a->replacementlen / LOONGARCH_INSN_SIZE;
> > +
> > +             if (!cpu_has(a->feature)) {
> > +                     DPRINTK("feat not exist: %d, old: (%px len: %d), repl: (%px, len: %d)",
> > +                             a->feature, instr, a->instrlen,
> > +                             replacement, a->replacementlen);
> > +
> > +                     continue;
> > +             }
> > +
> > +             DPRINTK("feat: %d, old: (%px len: %d), repl: (%px, len: %d)",
> > +                     a->feature, instr, a->instrlen,
> > +                     replacement, a->replacementlen);
> > +
> > +             DUMP_WORDS(instr, nr_instr, "%px: old_insn: ", instr);
> > +             DUMP_WORDS(replacement, nr_repl, "%px: rpl_insn: ", replacement);
> > +
> > +             copy_alt_insns(insnbuf, instr, replacement, nr_repl);
> > +             nr_insnbuf = nr_repl;
> > +
> > +             if (nr_instr > nr_repl) {
> > +                     add_nops(insnbuf + nr_repl, nr_instr - nr_repl);
> > +                     nr_insnbuf += nr_instr - nr_repl;
> > +             }
> > +             DUMP_WORDS(insnbuf, nr_insnbuf, "%px: final_insn: ", instr);
> > +
> > +             text_poke_early(instr, insnbuf, nr_insnbuf);
> > +     }
> > +}
> > +
> > +void __init alternative_instructions(void)
> > +{
> > +     apply_alternatives(__alt_instructions, __alt_instructions_end);
> > +
> > +     alternatives_patched = 1;
> > +}
> > diff --git a/arch/loongarch/kernel/module.c b/arch/loongarch/kernel/module.c
> > index 097595b2fc14..669e750917a3 100644
> > --- a/arch/loongarch/kernel/module.c
> > +++ b/arch/loongarch/kernel/module.c
> > @@ -17,6 +17,7 @@
> >   #include <linux/fs.h>
> >   #include <linux/string.h>
> >   #include <linux/kernel.h>
> > +#include <asm/alternative.h>
> >
> >   static int rela_stack_push(s64 stack_value, s64 *rela_stack, size_t *rela_stack_top)
> >   {
> > @@ -456,3 +457,18 @@ void *module_alloc(unsigned long size)
> >       return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
> >                       GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE, __builtin_return_address(0));
> >   }
> > +
> > +int module_finalize(const Elf_Ehdr *hdr,
> > +                 const Elf_Shdr *sechdrs,
> > +                 struct module *mod)
> > +{
> > +     const Elf_Shdr *s, *se;
> > +     const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
> > +
> > +     for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
> > +             if (!strcmp(".altinstructions", secstrs + s->sh_name))
> > +                     apply_alternatives((void *)s->sh_addr, (void *)s->sh_addr + s->sh_size);
> > +     }
> > +
> > +     return 0;
> > +}
> > diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
> > index 1eb63fa9bc81..96b6cb5db004 100644
> > --- a/arch/loongarch/kernel/setup.c
> > +++ b/arch/loongarch/kernel/setup.c
> > @@ -31,7 +31,9 @@
> >   #include <linux/swiotlb.h>
> >
> >   #include <asm/addrspace.h>
> > +#include <asm/alternative.h>
> >   #include <asm/bootinfo.h>
> > +#include <asm/bugs.h>
> >   #include <asm/cache.h>
> >   #include <asm/cpu.h>
> >   #include <asm/dma.h>
> > @@ -80,6 +82,11 @@ const char *get_system_type(void)
> >       return "generic-loongson-machine";
> >   }
> >
> > +void __init check_bugs(void)
> > +{
> > +     alternative_instructions();
> > +}
> > +
> >   static const char *dmi_string_parse(const struct dmi_header *dm, u8 s)
> >   {
> >       const u8 *bp = ((u8 *) dm) + dm->length;
> > diff --git a/arch/loongarch/kernel/vmlinux.lds.S b/arch/loongarch/kernel/vmlinux.lds.S
> > index efecda0c2361..733b16e8d55d 100644
> > --- a/arch/loongarch/kernel/vmlinux.lds.S
> > +++ b/arch/loongarch/kernel/vmlinux.lds.S
> > @@ -54,6 +54,18 @@ SECTIONS
> >       . = ALIGN(PECOFF_SEGMENT_ALIGN);
> >       _etext = .;
> >
> > +     /*
> > +      * struct alt_instr entries. From the header (alternative.h):
> > +      * "Alternative instructions for different CPU types or capabilities"
> > +      * Think locking instructions on spinlocks.
> > +      */
> > +     . = ALIGN(4);
> > +     .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
> > +             __alt_instructions = .;
> > +             *(.altinstructions)
> > +             __alt_instructions_end = .;
> > +     }
> > +
> >       .got : ALIGN(16) { *(.got) }
> >       .plt : ALIGN(16) { *(.plt) }
> >       .got.plt : ALIGN(16) { *(.got.plt) }
>
