* [PATCH v7 0/4] Add kprobe and kretprobe support for LoongArch
@ 2022-12-02 13:08 Tiezhu Yang
  2022-12-02 13:08 ` [PATCH v7 1/4] LoongArch: Simulate branch and PC instructions Tiezhu Yang
                   ` (3 more replies)
  0 siblings, 4 replies; 9+ messages in thread
From: Tiezhu Yang @ 2022-12-02 13:08 UTC (permalink / raw)
  To: Huacai Chen, WANG Xuerui, Masami Hiramatsu; +Cc: loongarch, linux-kernel

v7:
  -- Remove stop_machine_cpuslocked() related code

v6:
  -- Add a new patch to redefine larch_insn_patch_text() with
     stop_machine_cpuslocked()
  -- Modify kprobe_breakpoint_handler() to handle the case where the
     original insn is a break instruction and return the correct value
  -- Modify do_bp() to refresh bcode when the original insn is a break

v5:
  -- Rebase on the latest code
  -- Use stop_machine_cpuslocked() to modify insn to avoid CPU race

v4:
  -- Remove kprobe_exceptions_notify() in kprobes.c
  -- Call kprobe_breakpoint_handler() and kprobe_singlestep_handler()
     in do_bp()

v3:
  -- Rebase on the latest code
  -- Check the alignment of PC in simu_branch() and simu_pc()
  -- Add ibar in flush_insn_slot()
  -- Rename kprobe_{pre,post}_handler() to {post_}kprobe_handler
  -- Add preempt_disable() and preempt_enable_no_resched()
  -- Remove r0 save/restore and do some minor changes
     in kprobes_trampoline.S
  -- Do not enable CONFIG_KPROBES by default

v2:
  -- Split simu_branch() and simu_pc() out into a separate patch
  -- Call kprobe_page_fault() in do_page_fault()
  -- Add kprobes_trampoline.S for kretprobe

Tiezhu Yang (4):
  LoongArch: Simulate branch and PC instructions
  LoongArch: Add kprobe support
  LoongArch: Add kretprobe support
  samples/kprobes: Add LoongArch support

 arch/loongarch/Kconfig                     |   2 +
 arch/loongarch/include/asm/inst.h          |  27 +++
 arch/loongarch/include/asm/kprobes.h       |  59 +++++
 arch/loongarch/include/asm/ptrace.h        |   1 +
 arch/loongarch/kernel/Makefile             |   2 +
 arch/loongarch/kernel/inst.c               | 123 +++++++++++
 arch/loongarch/kernel/kprobes.c            | 335 +++++++++++++++++++++++++++++
 arch/loongarch/kernel/kprobes_trampoline.S |  96 +++++++++
 arch/loongarch/kernel/traps.c              |  13 +-
 arch/loongarch/mm/fault.c                  |   4 +
 samples/kprobes/kprobe_example.c           |   8 +
 11 files changed, 666 insertions(+), 4 deletions(-)
 create mode 100644 arch/loongarch/include/asm/kprobes.h
 create mode 100644 arch/loongarch/kernel/kprobes.c
 create mode 100644 arch/loongarch/kernel/kprobes_trampoline.S

-- 
2.1.0



* [PATCH v7 1/4] LoongArch: Simulate branch and PC instructions
  2022-12-02 13:08 [PATCH v7 0/4] Add kprobe and kretprobe support for LoongArch Tiezhu Yang
@ 2022-12-02 13:08 ` Tiezhu Yang
  2022-12-07  3:06   ` Huacai Chen
  2022-12-02 13:08 ` [PATCH v7 2/4] LoongArch: Add kprobe support Tiezhu Yang
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 9+ messages in thread
From: Tiezhu Yang @ 2022-12-02 13:08 UTC (permalink / raw)
  To: Huacai Chen, WANG Xuerui, Masami Hiramatsu; +Cc: loongarch, linux-kernel

According to the LoongArch Reference Manual, simulate the branch and
PC instructions. This is preparation for the kprobe patches that follow.

Link: https://loongson.github.io/LoongArch-Documentation/LoongArch-Vol1-EN.html#branch-instructions
Link: https://loongson.github.io/LoongArch-Documentation/LoongArch-Vol1-EN.html#_pcaddi_pcaddu121_pcaddu18l_pcalau12i
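
For illustration only (not part of this patch): a small user-space sketch
of the offset arithmetic used by the b_op/bl_op cases in simu_branch(),
reusing the sign_extended() helper added here. The encoded value below
corresponds to "b -8" (offs26 = -2 instruction words); the PC value is
made up for the example.

  #include <stdio.h>

  static inline unsigned long sign_extended(unsigned long val, unsigned int idx)
  {
          if (val & (1UL << idx))
                  return ~((1UL << (idx + 1)) - 1) | val;
          else
                  return ((1UL << (idx + 1)) - 1) & val;
  }

  int main(void)
  {
          /* offs26 = -2, split into immediate_h (10 bits) and immediate_l (16 bits) */
          unsigned int imm_h = 0x3ff, imm_l = 0xfffe;
          unsigned long pc = 0x9000000000200000UL;

          /* same arithmetic as the b_op/bl_op cases in simu_branch() */
          unsigned long target = pc + sign_extended((imm_h << 16 | imm_l) << 2, 27);

          printf("target = 0x%lx\n", target);  /* prints pc - 8 */
          return 0;
  }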

Co-developed-by: Jinyang He <hejinyang@loongson.cn>
Signed-off-by: Jinyang He <hejinyang@loongson.cn>
Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn>
---
 arch/loongarch/include/asm/inst.h   |  12 ++++
 arch/loongarch/include/asm/ptrace.h |   1 +
 arch/loongarch/kernel/inst.c        | 123 ++++++++++++++++++++++++++++++++++++
 3 files changed, 136 insertions(+)

diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h
index 6cd994d..a91798b 100644
--- a/arch/loongarch/include/asm/inst.h
+++ b/arch/loongarch/include/asm/inst.h
@@ -7,6 +7,7 @@
 
 #include <linux/types.h>
 #include <asm/asm.h>
+#include <asm/ptrace.h>
 
 #define INSN_NOP		0x03400000
 #define INSN_BREAK		0x002a0000
@@ -32,6 +33,7 @@ enum reg1i20_op {
 	lu12iw_op	= 0x0a,
 	lu32id_op	= 0x0b,
 	pcaddi_op	= 0x0c,
+	pcalau12i_op	= 0x0d,
 	pcaddu12i_op	= 0x0e,
 	pcaddu18i_op	= 0x0f,
 };
@@ -366,6 +368,16 @@ u32 larch_insn_gen_lu12iw(enum loongarch_gpr rd, int imm);
 u32 larch_insn_gen_lu32id(enum loongarch_gpr rd, int imm);
 u32 larch_insn_gen_lu52id(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm);
 u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, unsigned long pc, unsigned long dest);
+void simu_branch(struct pt_regs *regs, union loongarch_instruction insn);
+void simu_pc(struct pt_regs *regs, union loongarch_instruction insn);
+
+static inline unsigned long sign_extended(unsigned long val, unsigned int idx)
+{
+	if (val & (1UL << idx))
+		return ~((1UL << (idx + 1)) - 1) | val;
+	else
+		return ((1UL << (idx + 1)) - 1) & val;
+}
 
 static inline bool signed_imm_check(long val, unsigned int bit)
 {
diff --git a/arch/loongarch/include/asm/ptrace.h b/arch/loongarch/include/asm/ptrace.h
index 59c4608..58596c4 100644
--- a/arch/loongarch/include/asm/ptrace.h
+++ b/arch/loongarch/include/asm/ptrace.h
@@ -6,6 +6,7 @@
 #define _ASM_PTRACE_H
 
 #include <asm/page.h>
+#include <asm/irqflags.h>
 #include <asm/thread_info.h>
 #include <uapi/asm/ptrace.h>
 
diff --git a/arch/loongarch/kernel/inst.c b/arch/loongarch/kernel/inst.c
index 512579d..aaaf9de 100644
--- a/arch/loongarch/kernel/inst.c
+++ b/arch/loongarch/kernel/inst.c
@@ -165,3 +165,126 @@ u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, unsigned l
 
 	return insn.word;
 }
+
+void simu_branch(struct pt_regs *regs, union loongarch_instruction insn)
+{
+	unsigned int imm, imm_l, imm_h, rd, rj;
+	unsigned long pc = regs->csr_era;
+
+	if (pc & 3) {
+		pr_warn("%s: invalid pc 0x%lx\n", __func__, pc);
+		return;
+	}
+
+	imm_l = insn.reg0i26_format.immediate_l;
+	imm_h = insn.reg0i26_format.immediate_h;
+	switch (insn.reg0i26_format.opcode) {
+	case b_op:
+		regs->csr_era = pc + sign_extended((imm_h << 16 | imm_l) << 2, 27);
+		return;
+	case bl_op:
+		regs->csr_era = pc + sign_extended((imm_h << 16 | imm_l) << 2, 27);
+		regs->regs[1] = pc + LOONGARCH_INSN_SIZE;
+		return;
+	}
+
+	imm_l = insn.reg1i21_format.immediate_l;
+	imm_h = insn.reg1i21_format.immediate_h;
+	rj = insn.reg1i21_format.rj;
+	switch (insn.reg1i21_format.opcode) {
+	case beqz_op:
+		if (regs->regs[rj] == 0)
+			regs->csr_era = pc + sign_extended((imm_h << 16 | imm_l) << 2, 22);
+		else
+			regs->csr_era = pc + LOONGARCH_INSN_SIZE;
+		return;
+	case bnez_op:
+		if (regs->regs[rj] != 0)
+			regs->csr_era = pc + sign_extended((imm_h << 16 | imm_l) << 2, 22);
+		else
+			regs->csr_era = pc + LOONGARCH_INSN_SIZE;
+		return;
+	}
+
+	imm = insn.reg2i16_format.immediate;
+	rj = insn.reg2i16_format.rj;
+	rd = insn.reg2i16_format.rd;
+	switch (insn.reg2i16_format.opcode) {
+	case beq_op:
+		if (regs->regs[rj] == regs->regs[rd])
+			regs->csr_era = pc + sign_extended(imm << 2, 17);
+		else
+			regs->csr_era = pc + LOONGARCH_INSN_SIZE;
+		break;
+	case bne_op:
+		if (regs->regs[rj] != regs->regs[rd])
+			regs->csr_era = pc + sign_extended(imm << 2, 17);
+		else
+			regs->csr_era = pc + LOONGARCH_INSN_SIZE;
+		break;
+	case blt_op:
+		if ((long)regs->regs[rj] < (long)regs->regs[rd])
+			regs->csr_era = pc + sign_extended(imm << 2, 17);
+		else
+			regs->csr_era = pc + LOONGARCH_INSN_SIZE;
+		break;
+	case bge_op:
+		if ((long)regs->regs[rj] >= (long)regs->regs[rd])
+			regs->csr_era = pc + sign_extended(imm << 2, 17);
+		else
+			regs->csr_era = pc + LOONGARCH_INSN_SIZE;
+		break;
+	case bltu_op:
+		if (regs->regs[rj] < regs->regs[rd])
+			regs->csr_era = pc + sign_extended(imm << 2, 17);
+		else
+			regs->csr_era = pc + LOONGARCH_INSN_SIZE;
+		break;
+	case bgeu_op:
+		if (regs->regs[rj] >= regs->regs[rd])
+			regs->csr_era = pc + sign_extended(imm << 2, 17);
+		else
+			regs->csr_era = pc + LOONGARCH_INSN_SIZE;
+		break;
+	case jirl_op:
+		regs->csr_era = regs->regs[rj] + sign_extended(imm << 2, 17);
+		regs->regs[rd] = pc + LOONGARCH_INSN_SIZE;
+		break;
+	default:
+		pr_info("%s: unknown opcode\n", __func__);
+		return;
+	}
+}
+
+void simu_pc(struct pt_regs *regs, union loongarch_instruction insn)
+{
+	unsigned long pc = regs->csr_era;
+	unsigned int rd = insn.reg1i20_format.rd;
+	unsigned int imm = insn.reg1i20_format.immediate;
+
+	if (pc & 3) {
+		pr_warn("%s: invalid pc 0x%lx\n", __func__, pc);
+		return;
+	}
+
+	switch (insn.reg1i20_format.opcode) {
+	case pcaddi_op:
+		regs->regs[rd] = pc + sign_extended(imm << 2, 21);
+		break;
+	case pcaddu12i_op:
+		regs->regs[rd] = pc + sign_extended(imm << 12, 31);
+		break;
+	case pcaddu18i_op:
+		regs->regs[rd] = pc + sign_extended(imm << 18, 37);
+		break;
+	case pcalau12i_op:
+		regs->regs[rd] = pc + sign_extended(imm << 12, 31);
+		regs->regs[rd] &= ~((1 << 12) - 1);
+		break;
+	default:
+		pr_info("%s: unknown opcode\n", __func__);
+		return;
+	}
+
+	regs->csr_era += LOONGARCH_INSN_SIZE;
+}
-- 
2.1.0



* [PATCH v7 2/4] LoongArch: Add kprobe support
  2022-12-02 13:08 [PATCH v7 0/4] Add kprobe and kretprobe support for LoongArch Tiezhu Yang
  2022-12-02 13:08 ` [PATCH v7 1/4] LoongArch: Simulate branch and PC instructions Tiezhu Yang
@ 2022-12-02 13:08 ` Tiezhu Yang
  2022-12-07  3:08   ` Huacai Chen
  2022-12-02 13:08 ` [PATCH v7 3/4] LoongArch: Add kretprobe support Tiezhu Yang
  2022-12-02 13:08 ` [PATCH v7 4/4] samples/kprobes: Add LoongArch support Tiezhu Yang
  3 siblings, 1 reply; 9+ messages in thread
From: Tiezhu Yang @ 2022-12-02 13:08 UTC (permalink / raw)
  To: Huacai Chen, WANG Xuerui, Masami Hiramatsu; +Cc: loongarch, linux-kernel

Kprobes allows you to trap at almost any kernel address and
execute a callback function. This commit adds kprobe support
for LoongArch.
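
As a reminder of the consumer side, a minimal kprobes module on top of
this support uses the usual generic API. This is only a sketch, not part
of the patch; kernel_clone is an arbitrary example symbol.

  /* Minimal sketch of a kprobes user; kernel_clone is just an example symbol. */
  #include <linux/module.h>
  #include <linux/kprobes.h>

  static int handler_pre(struct kprobe *p, struct pt_regs *regs)
  {
          /* csr_era is the LoongArch exception return address (the probed PC) */
          pr_info("<%s> hit, era = 0x%lx\n", p->symbol_name, regs->csr_era);
          return 0;
  }

  static struct kprobe kp = {
          .symbol_name = "kernel_clone",
          .pre_handler = handler_pre,
  };

  static int __init kp_init(void)
  {
          return register_kprobe(&kp);
  }

  static void __exit kp_exit(void)
  {
          unregister_kprobe(&kp);
  }

  module_init(kp_init);
  module_exit(kp_exit);
  MODULE_LICENSE("GPL");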

Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn>
---
 arch/loongarch/Kconfig               |   1 +
 arch/loongarch/include/asm/inst.h    |  15 ++
 arch/loongarch/include/asm/kprobes.h |  59 +++++++
 arch/loongarch/kernel/Makefile       |   2 +
 arch/loongarch/kernel/kprobes.c      | 311 +++++++++++++++++++++++++++++++++++
 arch/loongarch/kernel/traps.c        |  13 +-
 arch/loongarch/mm/fault.c            |   4 +
 7 files changed, 401 insertions(+), 4 deletions(-)
 create mode 100644 arch/loongarch/include/asm/kprobes.h
 create mode 100644 arch/loongarch/kernel/kprobes.c

diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index 16bf1b6..f6fc156 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -102,6 +102,7 @@ config LOONGARCH
 	select HAVE_IOREMAP_PROT
 	select HAVE_IRQ_EXIT_ON_IRQ_STACK
 	select HAVE_IRQ_TIME_ACCOUNTING
+	select HAVE_KPROBES
 	select HAVE_MOD_ARCH_SPECIFIC
 	select HAVE_NMI
 	select HAVE_PCI
diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h
index a91798b..42984d5 100644
--- a/arch/loongarch/include/asm/inst.h
+++ b/arch/loongarch/include/asm/inst.h
@@ -24,6 +24,10 @@
 
 #define ADDR_IMM(addr, INSN)	((addr & ADDR_IMMMASK_##INSN) >> ADDR_IMMSHIFT_##INSN)
 
+enum reg0i15_op {
+	break_op	= 0x54,
+};
+
 enum reg0i26_op {
 	b_op		= 0x14,
 	bl_op		= 0x15,
@@ -180,6 +184,11 @@ enum reg3sa2_op {
 	alsld_op	= 0x16,
 };
 
+struct reg0i15_format {
+	unsigned int immediate : 15;
+	unsigned int opcode : 17;
+};
+
 struct reg0i26_format {
 	unsigned int immediate_h : 10;
 	unsigned int immediate_l : 16;
@@ -265,6 +274,7 @@ struct reg3sa2_format {
 
 union loongarch_instruction {
 	unsigned int word;
+	struct reg0i15_format	reg0i15_format;
 	struct reg0i26_format	reg0i26_format;
 	struct reg1i20_format	reg1i20_format;
 	struct reg1i21_format	reg1i21_format;
@@ -335,6 +345,11 @@ static inline bool is_branch_ins(union loongarch_instruction *ip)
 		ip->reg1i21_format.opcode <= bgeu_op;
 }
 
+static inline bool is_break_ins(union loongarch_instruction *ip)
+{
+	return ip->reg0i15_format.opcode == break_op;
+}
+
 static inline bool is_ra_save_ins(union loongarch_instruction *ip)
 {
 	/* st.d $ra, $sp, offset */
diff --git a/arch/loongarch/include/asm/kprobes.h b/arch/loongarch/include/asm/kprobes.h
new file mode 100644
index 0000000..d3903f3
--- /dev/null
+++ b/arch/loongarch/include/asm/kprobes.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_LOONGARCH_KPROBES_H
+#define __ASM_LOONGARCH_KPROBES_H
+
+#include <asm-generic/kprobes.h>
+#include <asm/cacheflush.h>
+
+#ifdef CONFIG_KPROBES
+
+#include <asm/inst.h>
+
+#define __ARCH_WANT_KPROBES_INSN_SLOT
+#define MAX_INSN_SIZE			2
+
+#define flush_insn_slot(p)						\
+do {									\
+	if (p->addr)							\
+		flush_icache_range((unsigned long)p->addr,		\
+			   (unsigned long)p->addr +			\
+			   (MAX_INSN_SIZE * sizeof(kprobe_opcode_t)));	\
+} while (0)
+
+#define kretprobe_blacklist_size	0
+
+typedef union loongarch_instruction kprobe_opcode_t;
+
+/* Architecture specific copy of original instruction */
+struct arch_specific_insn {
+	/* copy of the original instruction */
+	kprobe_opcode_t *insn;
+};
+
+struct prev_kprobe {
+	struct kprobe *kp;
+	unsigned long status;
+	unsigned long saved_irq;
+	unsigned long saved_era;
+};
+
+/* per-cpu kprobe control block */
+struct kprobe_ctlblk {
+	unsigned long kprobe_status;
+	unsigned long kprobe_saved_irq;
+	unsigned long kprobe_saved_era;
+	struct prev_kprobe prev_kprobe;
+};
+
+void arch_remove_kprobe(struct kprobe *p);
+bool kprobe_fault_handler(struct pt_regs *regs, int trapnr);
+bool kprobe_breakpoint_handler(struct pt_regs *regs);
+bool kprobe_singlestep_handler(struct pt_regs *regs);
+
+#else /* !CONFIG_KPROBES */
+
+static inline bool kprobe_breakpoint_handler(struct pt_regs *regs) { return 0; }
+static inline bool kprobe_singlestep_handler(struct pt_regs *regs) { return 0; }
+
+#endif /* CONFIG_KPROBES */
+#endif /* __ASM_LOONGARCH_KPROBES_H */
diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
index fcaa024..6fe4a4e 100644
--- a/arch/loongarch/kernel/Makefile
+++ b/arch/loongarch/kernel/Makefile
@@ -47,4 +47,6 @@ obj-$(CONFIG_UNWINDER_PROLOGUE) += unwind_prologue.o
 
 obj-$(CONFIG_PERF_EVENTS)	+= perf_event.o perf_regs.o
 
+obj-$(CONFIG_KPROBES)		+= kprobes.o
+
 CPPFLAGS_vmlinux.lds		:= $(KBUILD_CFLAGS)
diff --git a/arch/loongarch/kernel/kprobes.c b/arch/loongarch/kernel/kprobes.c
new file mode 100644
index 0000000..820a633
--- /dev/null
+++ b/arch/loongarch/kernel/kprobes.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kprobes.h>
+#include <linux/kdebug.h>
+#include <linux/preempt.h>
+#include <asm/break.h>
+
+static const union loongarch_instruction breakpoint_insn = {
+	.reg0i15_format = {
+		.opcode = break_op,
+		.immediate = BRK_KPROBE_BP,
+	}
+};
+
+static const union loongarch_instruction singlestep_insn = {
+	.reg0i15_format = {
+		.opcode = break_op,
+		.immediate = BRK_KPROBE_SSTEPBP,
+	}
+};
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe);
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+static bool insns_are_not_supported(union loongarch_instruction insn)
+{
+	switch (insn.reg2i14_format.opcode) {
+	case llw_op:
+	case lld_op:
+	case scw_op:
+	case scd_op:
+		pr_notice("kprobe: ll and sc instructions are not supported\n");
+		return true;
+	}
+
+	switch (insn.reg1i21_format.opcode) {
+	case bceqz_op:
+		pr_notice("kprobe: bceqz and bcnez instructions are not supported\n");
+		return true;
+	}
+
+	return false;
+}
+NOKPROBE_SYMBOL(insns_are_not_supported);
+
+int arch_prepare_kprobe(struct kprobe *p)
+{
+	union loongarch_instruction insn;
+
+	insn = p->addr[0];
+	if (insns_are_not_supported(insn))
+		return -EINVAL;
+
+	p->ainsn.insn = get_insn_slot();
+	if (!p->ainsn.insn)
+		return -ENOMEM;
+
+	p->ainsn.insn[0] = *p->addr;
+	p->ainsn.insn[1] = singlestep_insn;
+
+	p->opcode = *p->addr;
+
+	return 0;
+}
+NOKPROBE_SYMBOL(arch_prepare_kprobe);
+
+/* Install breakpoint in text */
+void arch_arm_kprobe(struct kprobe *p)
+{
+	*p->addr = breakpoint_insn;
+	flush_insn_slot(p);
+}
+NOKPROBE_SYMBOL(arch_arm_kprobe);
+
+/* Remove breakpoint from text */
+void arch_disarm_kprobe(struct kprobe *p)
+{
+	*p->addr = p->opcode;
+	flush_insn_slot(p);
+}
+NOKPROBE_SYMBOL(arch_disarm_kprobe);
+
+void arch_remove_kprobe(struct kprobe *p)
+{
+	if (p->ainsn.insn) {
+		free_insn_slot(p->ainsn.insn, 0);
+		p->ainsn.insn = NULL;
+	}
+}
+NOKPROBE_SYMBOL(arch_remove_kprobe);
+
+static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+	kcb->prev_kprobe.kp = kprobe_running();
+	kcb->prev_kprobe.status = kcb->kprobe_status;
+	kcb->prev_kprobe.saved_irq = kcb->kprobe_saved_irq;
+	kcb->prev_kprobe.saved_era = kcb->kprobe_saved_era;
+}
+NOKPROBE_SYMBOL(save_previous_kprobe);
+
+static void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
+	kcb->kprobe_status = kcb->prev_kprobe.status;
+	kcb->kprobe_saved_irq = kcb->prev_kprobe.saved_irq;
+	kcb->kprobe_saved_era = kcb->prev_kprobe.saved_era;
+}
+NOKPROBE_SYMBOL(restore_previous_kprobe);
+
+static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+			       struct kprobe_ctlblk *kcb)
+{
+	__this_cpu_write(current_kprobe, p);
+	kcb->kprobe_saved_irq = regs->csr_prmd & CSR_PRMD_PIE;
+	kcb->kprobe_saved_era = regs->csr_era;
+}
+NOKPROBE_SYMBOL(set_current_kprobe);
+
+static bool insns_are_not_simulated(struct kprobe *p, struct pt_regs *regs)
+{
+	if (is_branch_ins(&p->opcode)) {
+		simu_branch(regs, p->opcode);
+		return false;
+	} else if (is_pc_ins(&p->opcode)) {
+		simu_pc(regs, p->opcode);
+		return false;
+	} else {
+		return true;
+	}
+}
+NOKPROBE_SYMBOL(insns_are_not_simulated);
+
+static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
+			     struct kprobe_ctlblk *kcb, int reenter)
+{
+	if (reenter) {
+		save_previous_kprobe(kcb);
+		set_current_kprobe(p, regs, kcb);
+		kcb->kprobe_status = KPROBE_REENTER;
+	} else {
+		kcb->kprobe_status = KPROBE_HIT_SS;
+	}
+
+	if (p->ainsn.insn->word == breakpoint_insn.word) {
+		regs->csr_prmd &= ~CSR_PRMD_PIE;
+		regs->csr_prmd |= kcb->kprobe_saved_irq;
+		preempt_enable_no_resched();
+		return;
+	}
+
+	regs->csr_prmd &= ~CSR_PRMD_PIE;
+
+	if (insns_are_not_simulated(p, regs)) {
+		kcb->kprobe_status = KPROBE_HIT_SS;
+		regs->csr_era = (unsigned long)&p->ainsn.insn[0];
+	} else {
+		kcb->kprobe_status = KPROBE_HIT_SSDONE;
+		if (p->post_handler)
+			p->post_handler(p, regs, 0);
+		reset_current_kprobe();
+		preempt_enable_no_resched();
+	}
+}
+NOKPROBE_SYMBOL(setup_singlestep);
+
+static bool reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
+			  struct kprobe_ctlblk *kcb)
+{
+	switch (kcb->kprobe_status) {
+	case KPROBE_HIT_SSDONE:
+	case KPROBE_HIT_ACTIVE:
+		kprobes_inc_nmissed_count(p);
+		setup_singlestep(p, regs, kcb, 1);
+		break;
+	case KPROBE_HIT_SS:
+	case KPROBE_REENTER:
+		pr_warn("Failed to recover from reentered kprobes.\n");
+		dump_kprobe(p);
+		BUG();
+		break;
+	default:
+		WARN_ON(1);
+		return false;
+	}
+
+	return true;
+}
+NOKPROBE_SYMBOL(reenter_kprobe);
+
+bool kprobe_breakpoint_handler(struct pt_regs *regs)
+{
+	struct kprobe_ctlblk *kcb;
+	struct kprobe *p, *cur_kprobe;
+	kprobe_opcode_t *addr = (kprobe_opcode_t *)regs->csr_era;
+
+	preempt_disable();
+	kcb = get_kprobe_ctlblk();
+	cur_kprobe = kprobe_running();
+
+	p = get_kprobe(addr);
+	if (p) {
+		if (cur_kprobe) {
+			if (reenter_kprobe(p, regs, kcb))
+				return true;
+		} else {
+			/* Probe hit */
+			set_current_kprobe(p, regs, kcb);
+			kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+			/*
+			 * If we have no pre-handler or it returned 0, we
+			 * continue with normal processing.  If we have a
+			 * pre-handler and it returned non-zero, it will
+			 * modify the execution path and no need to single
+			 * stepping. Let's just reset current kprobe and exit.
+			 *
+			 * pre_handler can hit a breakpoint and can step thru
+			 * before return.
+			 */
+			if (!p->pre_handler || !p->pre_handler(p, regs)) {
+				setup_singlestep(p, regs, kcb, 0);
+				return true;
+			} else {
+				reset_current_kprobe();
+				preempt_enable_no_resched();
+				return true;
+			}
+		}
+	}
+
+	if (addr->word != breakpoint_insn.word) {
+		/* Handle the original "break" instruction. */
+		if (is_break_ins(addr))
+			goto out;
+
+		/*
+		 * The breakpoint instruction was removed right
+		 * after we hit it.  Another cpu has removed
+		 * either a probepoint or a debugger breakpoint
+		 * at this address.  In either case, no further
+		 * handling of this interrupt is appropriate.
+		 * Return back to original instruction, and continue.
+		 */
+		preempt_enable_no_resched();
+		return true;
+	}
+
+out:
+	preempt_enable_no_resched();
+	return false;
+}
+NOKPROBE_SYMBOL(kprobe_breakpoint_handler);
+
+bool kprobe_singlestep_handler(struct pt_regs *regs)
+{
+	struct kprobe *cur = kprobe_running();
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	if (!cur)
+		return false;
+
+	/* Restore back the original saved kprobes variables and continue */
+	if (kcb->kprobe_status == KPROBE_REENTER) {
+		restore_previous_kprobe(kcb);
+		goto out;
+	}
+
+	/* Call post handler */
+	kcb->kprobe_status = KPROBE_HIT_SSDONE;
+	if (cur->post_handler)
+		cur->post_handler(cur, regs, 0);
+
+	regs->csr_era = kcb->kprobe_saved_era + LOONGARCH_INSN_SIZE;
+	regs->csr_prmd |= kcb->kprobe_saved_irq;
+
+	reset_current_kprobe();
+out:
+	preempt_enable_no_resched();
+	return true;
+}
+NOKPROBE_SYMBOL(kprobe_singlestep_handler);
+
+bool kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+{
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	if (kcb->kprobe_status & KPROBE_HIT_SS) {
+		regs->csr_era = kcb->kprobe_saved_era + LOONGARCH_INSN_SIZE;
+		regs->csr_prmd |= kcb->kprobe_saved_irq;
+
+		reset_current_kprobe();
+		preempt_enable_no_resched();
+	}
+
+	return false;
+}
+NOKPROBE_SYMBOL(kprobe_fault_handler);
+
+/*
+ * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
+ * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
+ */
+int __init arch_populate_kprobe_blacklist(void)
+{
+	return kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
+					 (unsigned long)__irqentry_text_end);
+}
+
+int __init arch_init_kprobes(void)
+{
+	return 0;
+}
diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c
index a19bb32..4d9f775 100644
--- a/arch/loongarch/kernel/traps.c
+++ b/arch/loongarch/kernel/traps.c
@@ -448,14 +448,12 @@ asmlinkage void noinstr do_bp(struct pt_regs *regs)
 	 */
 	switch (bcode) {
 	case BRK_KPROBE_BP:
-		if (notify_die(DIE_BREAK, "Kprobe", regs, bcode,
-			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+		if (kprobe_breakpoint_handler(regs))
 			goto out;
 		else
 			break;
 	case BRK_KPROBE_SSTEPBP:
-		if (notify_die(DIE_SSTEPBP, "Kprobe_SingleStep", regs, bcode,
-			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+		if (kprobe_singlestep_handler(regs))
 			goto out;
 		else
 			break;
@@ -479,6 +477,13 @@ asmlinkage void noinstr do_bp(struct pt_regs *regs)
 			break;
 	}
 
+	if (bcode == BRK_KPROBE_BP) {
+		if (__get_inst(&opcode, (u32 *)era, user))
+			goto out_sigsegv;
+
+		bcode = (opcode & 0x7fff);
+	}
+
 	switch (bcode) {
 	case BRK_BUG:
 		bug_handler(regs);
diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c
index 1ccd536..fc9225a 100644
--- a/arch/loongarch/mm/fault.c
+++ b/arch/loongarch/mm/fault.c
@@ -253,12 +253,16 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 {
 	irqentry_state_t state = irqentry_enter(regs);
 
+	if (kprobe_page_fault(regs, current->thread.trap_nr))
+		goto out;
+
 	/* Enable interrupt if enabled in parent context */
 	if (likely(regs->csr_prmd & CSR_PRMD_PIE))
 		local_irq_enable();
 
 	__do_page_fault(regs, write, address);
 
+out:
 	local_irq_disable();
 
 	irqentry_exit(regs, state);
-- 
2.1.0



* [PATCH v7 3/4] LoongArch: Add kretprobe support
  2022-12-02 13:08 [PATCH v7 0/4] Add kprobe and kretprobe support for LoongArch Tiezhu Yang
  2022-12-02 13:08 ` [PATCH v7 1/4] LoongArch: Simulate branch and PC instructions Tiezhu Yang
  2022-12-02 13:08 ` [PATCH v7 2/4] LoongArch: Add kprobe support Tiezhu Yang
@ 2022-12-02 13:08 ` Tiezhu Yang
  2022-12-02 13:08 ` [PATCH v7 4/4] samples/kprobes: Add LoongArch support Tiezhu Yang
  3 siblings, 0 replies; 9+ messages in thread
From: Tiezhu Yang @ 2022-12-02 13:08 UTC (permalink / raw)
  To: Huacai Chen, WANG Xuerui, Masami Hiramatsu; +Cc: loongarch, linux-kernel

Use the generic kretprobe trampoline handler to add kretprobe
support for LoongArch.
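
For reference, a minimal kretprobe user on top of this support is again
just the generic API. This is only a sketch, not part of the patch;
kernel_clone is an arbitrary example symbol and regs_return_value() is
the generic accessor for the probed function's return value.

  /* Minimal sketch of a kretprobe user; kernel_clone is just an example symbol. */
  #include <linux/module.h>
  #include <linux/kprobes.h>

  static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
  {
          pr_info("kernel_clone returned %ld\n", regs_return_value(regs));
          return 0;
  }

  static struct kretprobe rp = {
          .kp.symbol_name = "kernel_clone",
          .handler = ret_handler,
          .maxactive = 20,
  };

  static int __init rp_init(void)
  {
          return register_kretprobe(&rp);
  }

  static void __exit rp_exit(void)
  {
          unregister_kretprobe(&rp);
  }

  module_init(rp_init);
  module_exit(rp_exit);
  MODULE_LICENSE("GPL");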

Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn>
---
 arch/loongarch/Kconfig                     |  1 +
 arch/loongarch/kernel/Makefile             |  2 +-
 arch/loongarch/kernel/kprobes.c            | 24 ++++++++
 arch/loongarch/kernel/kprobes_trampoline.S | 96 ++++++++++++++++++++++++++++++
 4 files changed, 122 insertions(+), 1 deletion(-)
 create mode 100644 arch/loongarch/kernel/kprobes_trampoline.S

diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index f6fc156..12571ee 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -103,6 +103,7 @@ config LOONGARCH
 	select HAVE_IRQ_EXIT_ON_IRQ_STACK
 	select HAVE_IRQ_TIME_ACCOUNTING
 	select HAVE_KPROBES
+	select HAVE_KRETPROBES
 	select HAVE_MOD_ARCH_SPECIFIC
 	select HAVE_NMI
 	select HAVE_PCI
diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
index 6fe4a4e..7ca6519 100644
--- a/arch/loongarch/kernel/Makefile
+++ b/arch/loongarch/kernel/Makefile
@@ -47,6 +47,6 @@ obj-$(CONFIG_UNWINDER_PROLOGUE) += unwind_prologue.o
 
 obj-$(CONFIG_PERF_EVENTS)	+= perf_event.o perf_regs.o
 
-obj-$(CONFIG_KPROBES)		+= kprobes.o
+obj-$(CONFIG_KPROBES)		+= kprobes.o kprobes_trampoline.o
 
 CPPFLAGS_vmlinux.lds		:= $(KBUILD_CFLAGS)
diff --git a/arch/loongarch/kernel/kprobes.c b/arch/loongarch/kernel/kprobes.c
index 820a633..8abd8b9 100644
--- a/arch/loongarch/kernel/kprobes.c
+++ b/arch/loongarch/kernel/kprobes.c
@@ -305,6 +305,30 @@ int __init arch_populate_kprobe_blacklist(void)
 					 (unsigned long)__irqentry_text_end);
 }
 
+/* Called from __kretprobe_trampoline */
+void __used *trampoline_probe_handler(struct pt_regs *regs)
+{
+	return (void *)kretprobe_trampoline_handler(regs, NULL);
+}
+NOKPROBE_SYMBOL(trampoline_probe_handler);
+
+void arch_prepare_kretprobe(struct kretprobe_instance *ri,
+			    struct pt_regs *regs)
+{
+	ri->ret_addr = (kprobe_opcode_t *)regs->regs[1];
+	ri->fp = NULL;
+
+	/* Replace the return addr with trampoline addr */
+	regs->regs[1] = (unsigned long)&__kretprobe_trampoline;
+}
+NOKPROBE_SYMBOL(arch_prepare_kretprobe);
+
+int arch_trampoline_kprobe(struct kprobe *p)
+{
+	return 0;
+}
+NOKPROBE_SYMBOL(arch_trampoline_kprobe);
+
 int __init arch_init_kprobes(void)
 {
 	return 0;
diff --git a/arch/loongarch/kernel/kprobes_trampoline.S b/arch/loongarch/kernel/kprobes_trampoline.S
new file mode 100644
index 0000000..af94b0d
--- /dev/null
+++ b/arch/loongarch/kernel/kprobes_trampoline.S
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#include <linux/linkage.h>
+#include <asm/stackframe.h>
+
+	.text
+
+	.macro save_all_base_regs
+	cfi_st  ra, PT_R1
+	cfi_st	tp, PT_R2
+	cfi_st	a0, PT_R4
+	cfi_st	a1, PT_R5
+	cfi_st	a2, PT_R6
+	cfi_st	a3, PT_R7
+	cfi_st	a4, PT_R8
+	cfi_st	a5, PT_R9
+	cfi_st	a6, PT_R10
+	cfi_st	a7, PT_R11
+	cfi_st	t0, PT_R12
+	cfi_st	t1, PT_R13
+	cfi_st	t2, PT_R14
+	cfi_st	t3, PT_R15
+	cfi_st	t4, PT_R16
+	cfi_st	t5, PT_R17
+	cfi_st	t6, PT_R18
+	cfi_st	t7, PT_R19
+	cfi_st	t8, PT_R20
+	cfi_st	u0, PT_R21
+	cfi_st	fp, PT_R22
+	cfi_st	s0, PT_R23
+	cfi_st	s1, PT_R24
+	cfi_st	s2, PT_R25
+	cfi_st	s3, PT_R26
+	cfi_st	s4, PT_R27
+	cfi_st	s5, PT_R28
+	cfi_st	s6, PT_R29
+	cfi_st	s7, PT_R30
+	cfi_st	s8, PT_R31
+	csrrd	t0, LOONGARCH_CSR_CRMD
+	andi	t0, t0, 0x7 /* extract bit[1:0] PLV, bit[2] IE */
+	LONG_S	t0, sp, PT_CRMD
+	.endm
+
+	.macro restore_all_base_regs
+	cfi_ld	tp, PT_R2
+	cfi_ld	a0, PT_R4
+	cfi_ld	a1, PT_R5
+	cfi_ld	a2, PT_R6
+	cfi_ld	a3, PT_R7
+	cfi_ld	a4, PT_R8
+	cfi_ld	a5, PT_R9
+	cfi_ld	a6, PT_R10
+	cfi_ld	a7, PT_R11
+	cfi_ld	t0, PT_R12
+	cfi_ld	t1, PT_R13
+	cfi_ld	t2, PT_R14
+	cfi_ld	t3, PT_R15
+	cfi_ld	t4, PT_R16
+	cfi_ld	t5, PT_R17
+	cfi_ld	t6, PT_R18
+	cfi_ld	t7, PT_R19
+	cfi_ld	t8, PT_R20
+	cfi_ld	u0, PT_R21
+	cfi_ld	fp, PT_R22
+	cfi_ld	s0, PT_R23
+	cfi_ld	s1, PT_R24
+	cfi_ld	s2, PT_R25
+	cfi_ld	s3, PT_R26
+	cfi_ld	s4, PT_R27
+	cfi_ld	s5, PT_R28
+	cfi_ld	s6, PT_R29
+	cfi_ld	s7, PT_R30
+	cfi_ld	s8, PT_R31
+	LONG_L  t0, sp, PT_CRMD
+	li.d	t1, 0x7 /* mask bit[1:0] PLV, bit[2] IE */
+	csrxchg t0, t1, LOONGARCH_CSR_CRMD
+	.endm
+
+SYM_CODE_START(__kretprobe_trampoline)
+	addi.d	sp, sp, -PT_SIZE
+	save_all_base_regs
+
+	addi.d	t0, sp, PT_SIZE
+	LONG_S	t0, sp, PT_R3
+
+	move a0, sp /* pt_regs */
+
+	bl trampoline_probe_handler
+
+	/* use the result as the return-address */
+	move ra, a0
+
+	restore_all_base_regs
+	addi.d	sp, sp, PT_SIZE
+
+	jr ra
+SYM_CODE_END(__kretprobe_trampoline)
-- 
2.1.0



* [PATCH v7 4/4] samples/kprobes: Add LoongArch support
  2022-12-02 13:08 [PATCH v7 0/4] Add kprobe and kretprobe support for LoongArch Tiezhu Yang
                   ` (2 preceding siblings ...)
  2022-12-02 13:08 ` [PATCH v7 3/4] LoongArch: Add kretprobe support Tiezhu Yang
@ 2022-12-02 13:08 ` Tiezhu Yang
  3 siblings, 0 replies; 9+ messages in thread
From: Tiezhu Yang @ 2022-12-02 13:08 UTC (permalink / raw)
  To: Huacai Chen, WANG Xuerui, Masami Hiramatsu; +Cc: loongarch, linux-kernel

Add LoongArch-specific info in handler_pre() and handler_post().

Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn>
---
 samples/kprobes/kprobe_example.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/samples/kprobes/kprobe_example.c b/samples/kprobes/kprobe_example.c
index fd346f5..ef44c61 100644
--- a/samples/kprobes/kprobe_example.c
+++ b/samples/kprobes/kprobe_example.c
@@ -55,6 +55,10 @@ static int __kprobes handler_pre(struct kprobe *p, struct pt_regs *regs)
 	pr_info("<%s> p->addr, 0x%p, ip = 0x%lx, flags = 0x%lx\n",
 		p->symbol_name, p->addr, regs->psw.addr, regs->flags);
 #endif
+#ifdef CONFIG_LOONGARCH
+	pr_info("<%s> p->addr = 0x%p, era = 0x%lx, estat = 0x%lx\n",
+		p->symbol_name, p->addr, regs->csr_era, regs->csr_estat);
+#endif
 
 	/* A dump_stack() here will give a stack backtrace */
 	return 0;
@@ -92,6 +96,10 @@ static void __kprobes handler_post(struct kprobe *p, struct pt_regs *regs,
 	pr_info("<%s> p->addr, 0x%p, flags = 0x%lx\n",
 		p->symbol_name, p->addr, regs->flags);
 #endif
+#ifdef CONFIG_LOONGARCH
+	pr_info("<%s> p->addr = 0x%p, estat = 0x%lx\n",
+		p->symbol_name, p->addr, regs->csr_estat);
+#endif
 }
 
 static int __init kprobe_init(void)
-- 
2.1.0



* Re: [PATCH v7 1/4] LoongArch: Simulate branch and PC instructions
  2022-12-02 13:08 ` [PATCH v7 1/4] LoongArch: Simulate branch and PC instructions Tiezhu Yang
@ 2022-12-07  3:06   ` Huacai Chen
  2022-12-07  9:13     ` Tiezhu Yang
  0 siblings, 1 reply; 9+ messages in thread
From: Huacai Chen @ 2022-12-07  3:06 UTC (permalink / raw)
  To: Tiezhu Yang; +Cc: WANG Xuerui, Masami Hiramatsu, loongarch, linux-kernel

Hi, Tiezhu,

On Fri, Dec 2, 2022 at 9:08 PM Tiezhu Yang <yangtiezhu@loongson.cn> wrote:
>
> According to the LoongArch Reference Manual, simulate the branch and
> PC instructions. This is preparation for the kprobe patches that follow.
>
> Link: https://loongson.github.io/LoongArch-Documentation/LoongArch-Vol1-EN.html#branch-instructions
> Link: https://loongson.github.io/LoongArch-Documentation/LoongArch-Vol1-EN.html#_pcaddi_pcaddu121_pcaddu18l_pcalau12i
>
> Co-developed-by: Jinyang He <hejinyang@loongson.cn>
> Signed-off-by: Jinyang He <hejinyang@loongson.cn>
> Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn>
> ---
>  arch/loongarch/include/asm/inst.h   |  12 ++++
>  arch/loongarch/include/asm/ptrace.h |   1 +
>  arch/loongarch/kernel/inst.c        | 123 ++++++++++++++++++++++++++++++++++++
>  3 files changed, 136 insertions(+)
>
> diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h
> index 6cd994d..a91798b 100644
> --- a/arch/loongarch/include/asm/inst.h
> +++ b/arch/loongarch/include/asm/inst.h
> @@ -7,6 +7,7 @@
>
>  #include <linux/types.h>
>  #include <asm/asm.h>
> +#include <asm/ptrace.h>
>
>  #define INSN_NOP               0x03400000
>  #define INSN_BREAK             0x002a0000
> @@ -32,6 +33,7 @@ enum reg1i20_op {
>         lu12iw_op       = 0x0a,
>         lu32id_op       = 0x0b,
>         pcaddi_op       = 0x0c,
> +       pcalau12i_op    = 0x0d,
>         pcaddu12i_op    = 0x0e,
>         pcaddu18i_op    = 0x0f,
>  };
> @@ -366,6 +368,16 @@ u32 larch_insn_gen_lu12iw(enum loongarch_gpr rd, int imm);
>  u32 larch_insn_gen_lu32id(enum loongarch_gpr rd, int imm);
>  u32 larch_insn_gen_lu52id(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm);
>  u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, unsigned long pc, unsigned long dest);
> +void simu_branch(struct pt_regs *regs, union loongarch_instruction insn);
> +void simu_pc(struct pt_regs *regs, union loongarch_instruction insn);
> +
> +static inline unsigned long sign_extended(unsigned long val, unsigned int idx)
> +{
> +       if (val & (1UL << idx))
> +               return ~((1UL << (idx + 1)) - 1) | val;
> +       else
> +               return ((1UL << (idx + 1)) - 1) & val;
> +}
You can use existing __SIGNEX and its friends rather than reinvent them.

Huacai

>
>  static inline bool signed_imm_check(long val, unsigned int bit)
>  {
> diff --git a/arch/loongarch/include/asm/ptrace.h b/arch/loongarch/include/asm/ptrace.h
> index 59c4608..58596c4 100644
> --- a/arch/loongarch/include/asm/ptrace.h
> +++ b/arch/loongarch/include/asm/ptrace.h
> @@ -6,6 +6,7 @@
>  #define _ASM_PTRACE_H
>
>  #include <asm/page.h>
> +#include <asm/irqflags.h>
>  #include <asm/thread_info.h>
>  #include <uapi/asm/ptrace.h>
>
> diff --git a/arch/loongarch/kernel/inst.c b/arch/loongarch/kernel/inst.c
> index 512579d..aaaf9de 100644
> --- a/arch/loongarch/kernel/inst.c
> +++ b/arch/loongarch/kernel/inst.c
> @@ -165,3 +165,126 @@ u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, unsigned l
>
>         return insn.word;
>  }
> +
> +void simu_branch(struct pt_regs *regs, union loongarch_instruction insn)
> +{
> +       unsigned int imm, imm_l, imm_h, rd, rj;
> +       unsigned long pc = regs->csr_era;
> +
> +       if (pc & 3) {
> +               pr_warn("%s: invalid pc 0x%lx\n", __func__, pc);
> +               return;
> +       }
> +
> +       imm_l = insn.reg0i26_format.immediate_l;
> +       imm_h = insn.reg0i26_format.immediate_h;
> +       switch (insn.reg0i26_format.opcode) {
> +       case b_op:
> +               regs->csr_era = pc + sign_extended((imm_h << 16 | imm_l) << 2, 27);
> +               return;
> +       case bl_op:
> +               regs->csr_era = pc + sign_extended((imm_h << 16 | imm_l) << 2, 27);
> +               regs->regs[1] = pc + LOONGARCH_INSN_SIZE;
> +               return;
> +       }
> +
> +       imm_l = insn.reg1i21_format.immediate_l;
> +       imm_h = insn.reg1i21_format.immediate_h;
> +       rj = insn.reg1i21_format.rj;
> +       switch (insn.reg1i21_format.opcode) {
> +       case beqz_op:
> +               if (regs->regs[rj] == 0)
> +                       regs->csr_era = pc + sign_extended((imm_h << 16 | imm_l) << 2, 22);
> +               else
> +                       regs->csr_era = pc + LOONGARCH_INSN_SIZE;
> +               return;
> +       case bnez_op:
> +               if (regs->regs[rj] != 0)
> +                       regs->csr_era = pc + sign_extended((imm_h << 16 | imm_l) << 2, 22);
> +               else
> +                       regs->csr_era = pc + LOONGARCH_INSN_SIZE;
> +               return;
> +       }
> +
> +       imm = insn.reg2i16_format.immediate;
> +       rj = insn.reg2i16_format.rj;
> +       rd = insn.reg2i16_format.rd;
> +       switch (insn.reg2i16_format.opcode) {
> +       case beq_op:
> +               if (regs->regs[rj] == regs->regs[rd])
> +                       regs->csr_era = pc + sign_extended(imm << 2, 17);
> +               else
> +                       regs->csr_era = pc + LOONGARCH_INSN_SIZE;
> +               break;
> +       case bne_op:
> +               if (regs->regs[rj] != regs->regs[rd])
> +                       regs->csr_era = pc + sign_extended(imm << 2, 17);
> +               else
> +                       regs->csr_era = pc + LOONGARCH_INSN_SIZE;
> +               break;
> +       case blt_op:
> +               if ((long)regs->regs[rj] < (long)regs->regs[rd])
> +                       regs->csr_era = pc + sign_extended(imm << 2, 17);
> +               else
> +                       regs->csr_era = pc + LOONGARCH_INSN_SIZE;
> +               break;
> +       case bge_op:
> +               if ((long)regs->regs[rj] >= (long)regs->regs[rd])
> +                       regs->csr_era = pc + sign_extended(imm << 2, 17);
> +               else
> +                       regs->csr_era = pc + LOONGARCH_INSN_SIZE;
> +               break;
> +       case bltu_op:
> +               if (regs->regs[rj] < regs->regs[rd])
> +                       regs->csr_era = pc + sign_extended(imm << 2, 17);
> +               else
> +                       regs->csr_era = pc + LOONGARCH_INSN_SIZE;
> +               break;
> +       case bgeu_op:
> +               if (regs->regs[rj] >= regs->regs[rd])
> +                       regs->csr_era = pc + sign_extended(imm << 2, 17);
> +               else
> +                       regs->csr_era = pc + LOONGARCH_INSN_SIZE;
> +               break;
> +       case jirl_op:
> +               regs->csr_era = regs->regs[rj] + sign_extended(imm << 2, 17);
> +               regs->regs[rd] = pc + LOONGARCH_INSN_SIZE;
> +               break;
> +       default:
> +               pr_info("%s: unknown opcode\n", __func__);
> +               return;
> +       }
> +}
> +
> +void simu_pc(struct pt_regs *regs, union loongarch_instruction insn)
> +{
> +       unsigned long pc = regs->csr_era;
> +       unsigned int rd = insn.reg1i20_format.rd;
> +       unsigned int imm = insn.reg1i20_format.immediate;
> +
> +       if (pc & 3) {
> +               pr_warn("%s: invalid pc 0x%lx\n", __func__, pc);
> +               return;
> +       }
> +
> +       switch (insn.reg1i20_format.opcode) {
> +       case pcaddi_op:
> +               regs->regs[rd] = pc + sign_extended(imm << 2, 21);
> +               break;
> +       case pcaddu12i_op:
> +               regs->regs[rd] = pc + sign_extended(imm << 12, 31);
> +               break;
> +       case pcaddu18i_op:
> +               regs->regs[rd] = pc + sign_extended(imm << 18, 37);
> +               break;
> +       case pcalau12i_op:
> +               regs->regs[rd] = pc + sign_extended(imm << 12, 31);
> +               regs->regs[rd] &= ~((1 << 12) - 1);
> +               break;
> +       default:
> +               pr_info("%s: unknown opcode\n", __func__);
> +               return;
> +       }
> +
> +       regs->csr_era += LOONGARCH_INSN_SIZE;
> +}
> --
> 2.1.0
>


* Re: [PATCH v7 2/4] LoongArch: Add kprobe support
  2022-12-02 13:08 ` [PATCH v7 2/4] LoongArch: Add kprobe support Tiezhu Yang
@ 2022-12-07  3:08   ` Huacai Chen
  2022-12-07  9:17     ` Tiezhu Yang
  0 siblings, 1 reply; 9+ messages in thread
From: Huacai Chen @ 2022-12-07  3:08 UTC (permalink / raw)
  To: Tiezhu Yang; +Cc: WANG Xuerui, Masami Hiramatsu, loongarch, linux-kernel

Hi, Tiezhu,

On Fri, Dec 2, 2022 at 9:08 PM Tiezhu Yang <yangtiezhu@loongson.cn> wrote:
>
> Kprobes allows you to trap at almost any kernel address and
> execute a callback function. This commit adds kprobe support
> for LoongArch.
>
> Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn>
> ---
>  arch/loongarch/Kconfig               |   1 +
>  arch/loongarch/include/asm/inst.h    |  15 ++
>  arch/loongarch/include/asm/kprobes.h |  59 +++++++
>  arch/loongarch/kernel/Makefile       |   2 +
>  arch/loongarch/kernel/kprobes.c      | 311 +++++++++++++++++++++++++++++++++++
>  arch/loongarch/kernel/traps.c        |  13 +-
>  arch/loongarch/mm/fault.c            |   4 +
>  7 files changed, 401 insertions(+), 4 deletions(-)
>  create mode 100644 arch/loongarch/include/asm/kprobes.h
>  create mode 100644 arch/loongarch/kernel/kprobes.c
>
> diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
> index 16bf1b6..f6fc156 100644
> --- a/arch/loongarch/Kconfig
> +++ b/arch/loongarch/Kconfig
> @@ -102,6 +102,7 @@ config LOONGARCH
>         select HAVE_IOREMAP_PROT
>         select HAVE_IRQ_EXIT_ON_IRQ_STACK
>         select HAVE_IRQ_TIME_ACCOUNTING
> +       select HAVE_KPROBES
>         select HAVE_MOD_ARCH_SPECIFIC
>         select HAVE_NMI
>         select HAVE_PCI
> diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h
> index a91798b..42984d5 100644
> --- a/arch/loongarch/include/asm/inst.h
> +++ b/arch/loongarch/include/asm/inst.h
> @@ -24,6 +24,10 @@
>
>  #define ADDR_IMM(addr, INSN)   ((addr & ADDR_IMMMASK_##INSN) >> ADDR_IMMSHIFT_##INSN)
>
> +enum reg0i15_op {
> +       break_op        = 0x54,
> +};
> +
>  enum reg0i26_op {
>         b_op            = 0x14,
>         bl_op           = 0x15,
> @@ -180,6 +184,11 @@ enum reg3sa2_op {
>         alsld_op        = 0x16,
>  };
>
> +struct reg0i15_format {
> +       unsigned int immediate : 15;
> +       unsigned int opcode : 17;
> +};
> +
>  struct reg0i26_format {
>         unsigned int immediate_h : 10;
>         unsigned int immediate_l : 16;
> @@ -265,6 +274,7 @@ struct reg3sa2_format {
>
>  union loongarch_instruction {
>         unsigned int word;
> +       struct reg0i15_format   reg0i15_format;
>         struct reg0i26_format   reg0i26_format;
>         struct reg1i20_format   reg1i20_format;
>         struct reg1i21_format   reg1i21_format;
> @@ -335,6 +345,11 @@ static inline bool is_branch_ins(union loongarch_instruction *ip)
>                 ip->reg1i21_format.opcode <= bgeu_op;
>  }
>
> +static inline bool is_break_ins(union loongarch_instruction *ip)
> +{
> +       return ip->reg0i15_format.opcode == break_op;
> +}
> +
>  static inline bool is_ra_save_ins(union loongarch_instruction *ip)
>  {
>         /* st.d $ra, $sp, offset */
> diff --git a/arch/loongarch/include/asm/kprobes.h b/arch/loongarch/include/asm/kprobes.h
> new file mode 100644
> index 0000000..d3903f3
> --- /dev/null
> +++ b/arch/loongarch/include/asm/kprobes.h
> @@ -0,0 +1,59 @@
> +/* SPDX-License-Identifier: GPL-2.0-only */
> +#ifndef __ASM_LOONGARCH_KPROBES_H
> +#define __ASM_LOONGARCH_KPROBES_H
> +
> +#include <asm-generic/kprobes.h>
> +#include <asm/cacheflush.h>
> +
> +#ifdef CONFIG_KPROBES
> +
> +#include <asm/inst.h>
> +
> +#define __ARCH_WANT_KPROBES_INSN_SLOT
> +#define MAX_INSN_SIZE                  2
> +
> +#define flush_insn_slot(p)                                             \
> +do {                                                                   \
> +       if (p->addr)                                                    \
> +               flush_icache_range((unsigned long)p->addr,              \
> +                          (unsigned long)p->addr +                     \
> +                          (MAX_INSN_SIZE * sizeof(kprobe_opcode_t)));  \
> +} while (0)
> +
> +#define kretprobe_blacklist_size       0
> +
> +typedef union loongarch_instruction kprobe_opcode_t;
> +
> +/* Architecture specific copy of original instruction */
> +struct arch_specific_insn {
> +       /* copy of the original instruction */
> +       kprobe_opcode_t *insn;
> +};
> +
> +struct prev_kprobe {
> +       struct kprobe *kp;
> +       unsigned long status;
> +       unsigned long saved_irq;
> +       unsigned long saved_era;
> +};
> +
> +/* per-cpu kprobe control block */
> +struct kprobe_ctlblk {
> +       unsigned long kprobe_status;
> +       unsigned long kprobe_saved_irq;
> +       unsigned long kprobe_saved_era;
> +       struct prev_kprobe prev_kprobe;
> +};
> +
> +void arch_remove_kprobe(struct kprobe *p);
> +bool kprobe_fault_handler(struct pt_regs *regs, int trapnr);
> +bool kprobe_breakpoint_handler(struct pt_regs *regs);
> +bool kprobe_singlestep_handler(struct pt_regs *regs);
> +
> +#else /* !CONFIG_KPROBES */
> +
> +static inline bool kprobe_breakpoint_handler(struct pt_regs *regs) { return 0; }
> +static inline bool kprobe_singlestep_handler(struct pt_regs *regs) { return 0; }
> +
> +#endif /* CONFIG_KPROBES */
> +#endif /* __ASM_LOONGARCH_KPROBES_H */
> diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
> index fcaa024..6fe4a4e 100644
> --- a/arch/loongarch/kernel/Makefile
> +++ b/arch/loongarch/kernel/Makefile
> @@ -47,4 +47,6 @@ obj-$(CONFIG_UNWINDER_PROLOGUE) += unwind_prologue.o
>
>  obj-$(CONFIG_PERF_EVENTS)      += perf_event.o perf_regs.o
>
> +obj-$(CONFIG_KPROBES)          += kprobes.o
> +
>  CPPFLAGS_vmlinux.lds           := $(KBUILD_CFLAGS)
> diff --git a/arch/loongarch/kernel/kprobes.c b/arch/loongarch/kernel/kprobes.c
> new file mode 100644
> index 0000000..820a633
> --- /dev/null
> +++ b/arch/loongarch/kernel/kprobes.c
> @@ -0,0 +1,311 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +#include <linux/kprobes.h>
> +#include <linux/kdebug.h>
> +#include <linux/preempt.h>
> +#include <asm/break.h>
> +
> +static const union loongarch_instruction breakpoint_insn = {
> +       .reg0i15_format = {
> +               .opcode = break_op,
> +               .immediate = BRK_KPROBE_BP,
> +       }
> +};
> +
> +static const union loongarch_instruction singlestep_insn = {
> +       .reg0i15_format = {
> +               .opcode = break_op,
> +               .immediate = BRK_KPROBE_SSTEPBP,
> +       }
> +};
> +
> +DEFINE_PER_CPU(struct kprobe *, current_kprobe);
> +DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
> +
> +static bool insns_are_not_supported(union loongarch_instruction insn)
> +{
> +       switch (insn.reg2i14_format.opcode) {
> +       case llw_op:
> +       case lld_op:
> +       case scw_op:
> +       case scd_op:
> +               pr_notice("kprobe: ll and sc instructions are not supported\n");
> +               return true;
> +       }
> +
> +       switch (insn.reg1i21_format.opcode) {
> +       case bceqz_op:
> +               pr_notice("kprobe: bceqz and bcnez instructions are not supported\n");
> +               return true;
> +       }
> +
> +       return false;
> +}
> +NOKPROBE_SYMBOL(insns_are_not_supported);
> +
> +int arch_prepare_kprobe(struct kprobe *p)
> +{
> +       union loongarch_instruction insn;
> +
> +       insn = p->addr[0];
> +       if (insns_are_not_supported(insn))
> +               return -EINVAL;
> +
> +       p->ainsn.insn = get_insn_slot();
> +       if (!p->ainsn.insn)
> +               return -ENOMEM;
> +
> +       p->ainsn.insn[0] = *p->addr;
> +       p->ainsn.insn[1] = singlestep_insn;
> +
> +       p->opcode = *p->addr;
> +
> +       return 0;
> +}
> +NOKPROBE_SYMBOL(arch_prepare_kprobe);
> +
> +/* Install breakpoint in text */
> +void arch_arm_kprobe(struct kprobe *p)
> +{
> +       *p->addr = breakpoint_insn;
> +       flush_insn_slot(p);
> +}
> +NOKPROBE_SYMBOL(arch_arm_kprobe);
> +
> +/* Remove breakpoint from text */
> +void arch_disarm_kprobe(struct kprobe *p)
> +{
> +       *p->addr = p->opcode;
> +       flush_insn_slot(p);
> +}
> +NOKPROBE_SYMBOL(arch_disarm_kprobe);
> +
> +void arch_remove_kprobe(struct kprobe *p)
> +{
> +       if (p->ainsn.insn) {
> +               free_insn_slot(p->ainsn.insn, 0);
> +               p->ainsn.insn = NULL;
> +       }
> +}
> +NOKPROBE_SYMBOL(arch_remove_kprobe);
> +
> +static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
> +{
> +       kcb->prev_kprobe.kp = kprobe_running();
> +       kcb->prev_kprobe.status = kcb->kprobe_status;
> +       kcb->prev_kprobe.saved_irq = kcb->kprobe_saved_irq;
> +       kcb->prev_kprobe.saved_era = kcb->kprobe_saved_era;
> +}
> +NOKPROBE_SYMBOL(save_previous_kprobe);
> +
> +static void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
> +{
> +       __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
> +       kcb->kprobe_status = kcb->prev_kprobe.status;
> +       kcb->kprobe_saved_irq = kcb->prev_kprobe.saved_irq;
> +       kcb->kprobe_saved_era = kcb->prev_kprobe.saved_era;
> +}
> +NOKPROBE_SYMBOL(restore_previous_kprobe);
> +
> +static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
> +                              struct kprobe_ctlblk *kcb)
> +{
> +       __this_cpu_write(current_kprobe, p);
> +       kcb->kprobe_saved_irq = regs->csr_prmd & CSR_PRMD_PIE;
> +       kcb->kprobe_saved_era = regs->csr_era;
> +}
> +NOKPROBE_SYMBOL(set_current_kprobe);
> +
> +static bool insns_are_not_simulated(struct kprobe *p, struct pt_regs *regs)
> +{
> +       if (is_branch_ins(&p->opcode)) {
> +               simu_branch(regs, p->opcode);
> +               return false;
> +       } else if (is_pc_ins(&p->opcode)) {
> +               simu_pc(regs, p->opcode);
> +               return false;
> +       } else {
> +               return true;
> +       }
> +}
> +NOKPROBE_SYMBOL(insns_are_not_simulated);
> +
> +static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
> +                            struct kprobe_ctlblk *kcb, int reenter)
> +{
> +       if (reenter) {
> +               save_previous_kprobe(kcb);
> +               set_current_kprobe(p, regs, kcb);
> +               kcb->kprobe_status = KPROBE_REENTER;
> +       } else {
> +               kcb->kprobe_status = KPROBE_HIT_SS;
> +       }
> +
> +       if (p->ainsn.insn->word == breakpoint_insn.word) {
> +               regs->csr_prmd &= ~CSR_PRMD_PIE;
> +               regs->csr_prmd |= kcb->kprobe_saved_irq;
> +               preempt_enable_no_resched();
> +               return;
> +       }
> +
> +       regs->csr_prmd &= ~CSR_PRMD_PIE;
You can put this line before the previous if; then the line inside the if
can be removed.
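
(For clarity, a sketch of that rearrangement, based on the hunk quoted
above:)

	regs->csr_prmd &= ~CSR_PRMD_PIE;

	if (p->ainsn.insn->word == breakpoint_insn.word) {
		regs->csr_prmd |= kcb->kprobe_saved_irq;
		preempt_enable_no_resched();
		return;
	}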

> +
> +       if (insns_are_not_simulated(p, regs)) {
> +               kcb->kprobe_status = KPROBE_HIT_SS;
> +               regs->csr_era = (unsigned long)&p->ainsn.insn[0];
> +       } else {
> +               kcb->kprobe_status = KPROBE_HIT_SSDONE;
> +               if (p->post_handler)
> +                       p->post_handler(p, regs, 0);
> +               reset_current_kprobe();
> +               preempt_enable_no_resched();
> +       }
> +}
> +NOKPROBE_SYMBOL(setup_singlestep);
> +
> +static bool reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
> +                         struct kprobe_ctlblk *kcb)
> +{
> +       switch (kcb->kprobe_status) {
> +       case KPROBE_HIT_SSDONE:
> +       case KPROBE_HIT_ACTIVE:
> +               kprobes_inc_nmissed_count(p);
> +               setup_singlestep(p, regs, kcb, 1);
> +               break;
> +       case KPROBE_HIT_SS:
> +       case KPROBE_REENTER:
> +               pr_warn("Failed to recover from reentered kprobes.\n");
> +               dump_kprobe(p);
> +               BUG();
> +               break;
> +       default:
> +               WARN_ON(1);
> +               return false;
> +       }
> +
> +       return true;
> +}
> +NOKPROBE_SYMBOL(reenter_kprobe);
> +
> +bool kprobe_breakpoint_handler(struct pt_regs *regs)
> +{
> +       struct kprobe_ctlblk *kcb;
> +       struct kprobe *p, *cur_kprobe;
> +       kprobe_opcode_t *addr = (kprobe_opcode_t *)regs->csr_era;
> +
> +       preempt_disable();
> +       kcb = get_kprobe_ctlblk();
> +       cur_kprobe = kprobe_running();
> +
> +       p = get_kprobe(addr);
> +       if (p) {
> +               if (cur_kprobe) {
> +                       if (reenter_kprobe(p, regs, kcb))
> +                               return true;
> +               } else {
> +                       /* Probe hit */
> +                       set_current_kprobe(p, regs, kcb);
> +                       kcb->kprobe_status = KPROBE_HIT_ACTIVE;
> +
> +                       /*
> +                        * If we have no pre-handler or it returned 0, we
> +                        * continue with normal processing.  If we have a
> +                        * pre-handler and it returned non-zero, it will
> +                        * modify the execution path and no need to single
> +                        * stepping. Let's just reset current kprobe and exit.
> +                        *
> +                        * pre_handler can hit a breakpoint and can step thru
> +                        * before return.
> +                        */
> +                       if (!p->pre_handler || !p->pre_handler(p, regs)) {
> +                               setup_singlestep(p, regs, kcb, 0);
> +                               return true;
> +                       } else {
> +                               reset_current_kprobe();
> +                               preempt_enable_no_resched();
> +                               return true;
> +                       }
> +               }
> +       }
> +
> +       if (addr->word != breakpoint_insn.word) {
> +               /* Handle the original "break" instruction. */
> +               if (is_break_ins(addr))
> +                       goto out;
> +
> +               /*
> +                * The breakpoint instruction was removed right
> +                * after we hit it.  Another cpu has removed
> +                * either a probepoint or a debugger breakpoint
> +                * at this address.  In either case, no further
> +                * handling of this interrupt is appropriate.
> +                * Return back to original instruction, and continue.
> +                */
> +               preempt_enable_no_resched();
> +               return true;
> +       }
> +
> +out:
> +       preempt_enable_no_resched();
> +       return false;
> +}
> +NOKPROBE_SYMBOL(kprobe_breakpoint_handler);
> +
> +bool kprobe_singlestep_handler(struct pt_regs *regs)
> +{
> +       struct kprobe *cur = kprobe_running();
> +       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
> +
> +       if (!cur)
> +               return false;
> +
> +       /* Restore back the original saved kprobes variables and continue */
> +       if (kcb->kprobe_status == KPROBE_REENTER) {
> +               restore_previous_kprobe(kcb);
> +               goto out;
> +       }
> +
> +       /* Call post handler */
> +       kcb->kprobe_status = KPROBE_HIT_SSDONE;
> +       if (cur->post_handler)
> +               cur->post_handler(cur, regs, 0);
> +
> +       regs->csr_era = kcb->kprobe_saved_era + LOONGARCH_INSN_SIZE;
> +       regs->csr_prmd |= kcb->kprobe_saved_irq;
> +
> +       reset_current_kprobe();
> +out:
> +       preempt_enable_no_resched();
You didn't disable preemption, so why enable it here? I think you
should do something similar to what kprobe_breakpoint_handler() does.

> +       return true;
> +}
> +NOKPROBE_SYMBOL(kprobe_singlestep_handler);
> +
> +bool kprobe_fault_handler(struct pt_regs *regs, int trapnr)
> +{
> +       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
> +
> +       if (kcb->kprobe_status & KPROBE_HIT_SS) {
> +               regs->csr_era = kcb->kprobe_saved_era + LOONGARCH_INSN_SIZE;
> +               regs->csr_prmd |= kcb->kprobe_saved_irq;
> +
> +               reset_current_kprobe();
> +               preempt_enable_no_resched();
This has the same problem as before. I suspect you haven't tested your
code (or at least not sufficiently), and this is completely unacceptable
to me.

> +       }
> +
> +       return false;
> +}
> +NOKPROBE_SYMBOL(kprobe_fault_handler);
> +
> +/*
> + * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
> + * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
> + */
> +int __init arch_populate_kprobe_blacklist(void)
> +{
> +       return kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
> +                                        (unsigned long)__irqentry_text_end);
> +}
> +
> +int __init arch_init_kprobes(void)
> +{
> +       return 0;
> +}
> diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c
> index a19bb32..4d9f775 100644
> --- a/arch/loongarch/kernel/traps.c
> +++ b/arch/loongarch/kernel/traps.c
> @@ -448,14 +448,12 @@ asmlinkage void noinstr do_bp(struct pt_regs *regs)
>          */
>         switch (bcode) {
>         case BRK_KPROBE_BP:
> -               if (notify_die(DIE_BREAK, "Kprobe", regs, bcode,
> -                              current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
> +               if (kprobe_breakpoint_handler(regs))
>                         goto out;
>                 else
>                         break;
>         case BRK_KPROBE_SSTEPBP:
> -               if (notify_die(DIE_SSTEPBP, "Kprobe_SingleStep", regs, bcode,
> -                              current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
> +               if (kprobe_singlestep_handler(regs))
>                         goto out;
>                 else
>                         break;
> @@ -479,6 +477,13 @@ asmlinkage void noinstr do_bp(struct pt_regs *regs)
>                         break;
>         }
>
> +       if (bcode == BRK_KPROBE_BP) {
> +               if (__get_inst(&opcode, (u32 *)era, user))
> +                       goto out_sigsegv;
> +
> +               bcode = (opcode & 0x7fff);
> +       }
> +
>         switch (bcode) {
>         case BRK_BUG:
>                 bug_handler(regs);
> diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c
> index 1ccd536..fc9225a 100644
> --- a/arch/loongarch/mm/fault.c
> +++ b/arch/loongarch/mm/fault.c
> @@ -253,12 +253,16 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
>  {
>         irqentry_state_t state = irqentry_enter(regs);
>
> +       if (kprobe_page_fault(regs, current->thread.trap_nr))
> +               goto out;
> +
This should be after the conditional local_irq_enable(), I think.


Huacai
>         /* Enable interrupt if enabled in parent context */
>         if (likely(regs->csr_prmd & CSR_PRMD_PIE))
>                 local_irq_enable();
>
>         __do_page_fault(regs, write, address);
>
> +out:
>         local_irq_disable();
>
>         irqentry_exit(regs, state);
> --
> 2.1.0
>

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH v7 1/4] LoongArch: Simulate branch and PC instructions
  2022-12-07  3:06   ` Huacai Chen
@ 2022-12-07  9:13     ` Tiezhu Yang
  0 siblings, 0 replies; 9+ messages in thread
From: Tiezhu Yang @ 2022-12-07  9:13 UTC (permalink / raw)
  To: Huacai Chen; +Cc: WANG Xuerui, Masami Hiramatsu, loongarch, linux-kernel



On 12/07/2022 11:06 AM, Huacai Chen wrote:
> Hi, Tiezhu,
>
> On Fri, Dec 2, 2022 at 9:08 PM Tiezhu Yang <yangtiezhu@loongson.cn> wrote:
>>
>> According to LoongArch Reference Manual, simulate branch and
>> PC instructions, this is preparation for later patch.
>>

...

>> +static inline unsigned long sign_extended(unsigned long val, unsigned int idx)
>> +{
>> +       if (val & (1UL << idx))
>> +               return ~((1UL << (idx + 1)) - 1) | val;
>> +       else
>> +               return ((1UL << (idx + 1)) - 1) & val;
>> +}
> You can use existing __SIGNEX and its friends rather than reinvent them.

Thanks for your reminder.

In my opinion, this static inline function sign_extended()
is much clearer and more readable than the macro __SIGNEX()
defined in alternative.c, and the bs_dest_*() helper functions
seem redundant too. Using "pc + sign_extended()" is a more
straightforward way to simulate an instruction according to
the ISA manual, so I prefer to keep it as is here.
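
For example, with sign_extended() the target of a 16-bit-offset
branch such as beq falls out directly from the manual's description.
A minimal sketch (not the exact patch code; it assumes the 16-bit
offset field has already been extracted from the instruction word):

static unsigned long branch_target(unsigned long pc, unsigned int offs16)
{
	/* offsets count 4-byte instructions, so the sign bit ends up at bit 17 */
	return pc + sign_extended((unsigned long)offs16 << 2, 17);
}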

Additionally, we can use sign_extended() instead of __SIGNEX()
in alternative.c, and the __SIGNEX() related code can then be
removed in a separate patch some day.

Thanks,
Tiezhu


^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH v7 2/4] LoongArch: Add kprobe support
  2022-12-07  3:08   ` Huacai Chen
@ 2022-12-07  9:17     ` Tiezhu Yang
  0 siblings, 0 replies; 9+ messages in thread
From: Tiezhu Yang @ 2022-12-07  9:17 UTC (permalink / raw)
  To: Huacai Chen; +Cc: WANG Xuerui, Masami Hiramatsu, loongarch, linux-kernel



On 12/07/2022 11:08 AM, Huacai Chen wrote:
> Hi, Tiezhu,
>
> On Fri, Dec 2, 2022 at 9:08 PM Tiezhu Yang <yangtiezhu@loongson.cn> wrote:
>>
>> Kprobes allows you to trap at almost any kernel address and
>> execute a callback function, this commit adds kprobe support
>> for LoongArch.

...

>> +
>> +       if (p->ainsn.insn->word == breakpoint_insn.word) {
>> +               regs->csr_prmd &= ~CSR_PRMD_PIE;
>> +               regs->csr_prmd |= kcb->kprobe_saved_irq;
>> +               preempt_enable_no_resched();
>> +               return;
>> +       }
>> +
>> +       regs->csr_prmd &= ~CSR_PRMD_PIE;
> You can put this line before the previous if, then the line in the if
> can be removed.

OK, will modify it.
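
That is, roughly (a sketch of the intended change, not the final
patch):

	/* Mask interrupts once, before the special-case check. */
	regs->csr_prmd &= ~CSR_PRMD_PIE;

	if (p->ainsn.insn->word == breakpoint_insn.word) {
		regs->csr_prmd |= kcb->kprobe_saved_irq;
		preempt_enable_no_resched();
		return;
	}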

>> +
>> +       if (insns_are_not_simulated(p, regs)) {
>> +               kcb->kprobe_status = KPROBE_HIT_SS;
>> +               regs->csr_era = (unsigned long)&p->ainsn.insn[0];
>> +       } else {
>> +               kcb->kprobe_status = KPROBE_HIT_SSDONE;
>> +               if (p->post_handler)
>> +                       p->post_handler(p, regs, 0);
>> +               reset_current_kprobe();
>> +               preempt_enable_no_resched();
>> +       }
>> +}
>> +NOKPROBE_SYMBOL(setup_singlestep);

...

>> +bool kprobe_singlestep_handler(struct pt_regs *regs)
>> +{
>> +       struct kprobe *cur = kprobe_running();
>> +       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
>> +
>> +       if (!cur)
>> +               return false;
>> +
>> +       /* Restore back the original saved kprobes variables and continue */
>> +       if (kcb->kprobe_status == KPROBE_REENTER) {
>> +               restore_previous_kprobe(kcb);
>> +               goto out;
>> +       }
>> +
>> +       /* Call post handler */
>> +       kcb->kprobe_status = KPROBE_HIT_SSDONE;
>> +       if (cur->post_handler)
>> +               cur->post_handler(cur, regs, 0);
>> +
>> +       regs->csr_era = kcb->kprobe_saved_era + LOONGARCH_INSN_SIZE;
>> +       regs->csr_prmd |= kcb->kprobe_saved_irq;
>> +
>> +       reset_current_kprobe();
>> +out:
>> +       preempt_enable_no_resched();
> You didn't disable preemption, so why enable it here? I think you
> should do something similar to what kprobe_breakpoint_handler() does.
>
>> +       return true;
>> +}
>> +NOKPROBE_SYMBOL(kprobe_singlestep_handler);
>> +
>> +bool kprobe_fault_handler(struct pt_regs *regs, int trapnr)
>> +{
>> +       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
>> +
>> +       if (kcb->kprobe_status & KPROBE_HIT_SS) {
>> +               regs->csr_era = kcb->kprobe_saved_era + LOONGARCH_INSN_SIZE;
>> +               regs->csr_prmd |= kcb->kprobe_saved_irq;
>> +
>> +               reset_current_kprobe();
>> +               preempt_enable_no_resched();
> This has the same problem as before. I suspect you haven't tested your
> code (or at least not sufficiently), and that is completely
> unacceptable to me.

The preempt_disable() in kprobe_breakpoint_handler() stays in
effect for the entire duration of kprobe processing, so calling
preempt_enable_no_resched() in kprobe_singlestep_handler() and
kprobe_fault_handler() is not a problem. Let me add a code
comment before preempt_disable(), like this:

+       /*
+        * We don't want to be preempted for the entire
+        * duration of kprobe processing.
+        */
         preempt_disable();
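
To make the pairing explicit, the intended flow across the handlers
is roughly as follows (a simplified sketch, not the patch code):

	/* kprobe_breakpoint_handler(): start of kprobe processing */
	preempt_disable();
	/* ... set up single-stepping and return to the insn slot ... */

	/* kprobe_singlestep_handler() or kprobe_fault_handler(): */
	/* ... restore csr_era/csr_prmd, reset_current_kprobe() ... */
	preempt_enable_no_resched();	/* matches the disable above */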

>> +       }
>> +
>> +       return false;
>> +}
>> +NOKPROBE_SYMBOL(kprobe_fault_handler);

...

>> diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c
>> index 1ccd536..fc9225a 100644
>> --- a/arch/loongarch/mm/fault.c
>> +++ b/arch/loongarch/mm/fault.c
>> @@ -253,12 +253,16 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
>>  {
>>         irqentry_state_t state = irqentry_enter(regs);
>>
>> +       if (kprobe_page_fault(regs, current->thread.trap_nr))
>> +               goto out;
>> +
> This should be after the conditional local_irq_enable(), I think.

OK, will modify it.
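
That is, something like this (a sketch of the intended reordering,
not the final patch):

	irqentry_state_t state = irqentry_enter(regs);

	/* Enable interrupt if enabled in parent context */
	if (likely(regs->csr_prmd & CSR_PRMD_PIE))
		local_irq_enable();

	if (kprobe_page_fault(regs, current->thread.trap_nr))
		goto out;

	__do_page_fault(regs, write, address);
out:
	local_irq_disable();

	irqentry_exit(regs, state);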

Thanks,
Tiezhu


^ permalink raw reply	[flat|nested] 9+ messages in thread

end of thread, other threads:[~2022-12-07  9:17 UTC | newest]

Thread overview: 9+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-12-02 13:08 [PATCH v7 0/4] Add kprobe and kretprobe support for LoongArch Tiezhu Yang
2022-12-02 13:08 ` [PATCH v7 1/4] LoongArch: Simulate branch and PC instructions Tiezhu Yang
2022-12-07  3:06   ` Huacai Chen
2022-12-07  9:13     ` Tiezhu Yang
2022-12-02 13:08 ` [PATCH v7 2/4] LoongArch: Add kprobe support Tiezhu Yang
2022-12-07  3:08   ` Huacai Chen
2022-12-07  9:17     ` Tiezhu Yang
2022-12-02 13:08 ` [PATCH v7 3/4] LoongArch: Add kretprobe support Tiezhu Yang
2022-12-02 13:08 ` [PATCH v7 4/4] samples/kprobes: Add LoongArch support Tiezhu Yang
