* [PATCH] kprobes for s390 architecture
@ 2006-06-12 13:15 Mike Grundy
  2006-06-12 19:40 ` Martin Schwidefsky
  2006-06-21 16:23 ` Jan Glauber
  0 siblings, 2 replies; 31+ messages in thread
From: Mike Grundy @ 2006-06-12 13:15 UTC (permalink / raw)
  To: linux-kernel; +Cc: schwidefsky, systemtap

Hi Folks -

This patch provides kprobes support for s390 architecture

Thanks
Mike
--

Signed-off-by: Michael Grundy <grundym@us.ibm.com>

 arch/s390/Kconfig              |   14
 arch/s390/kernel/Makefile      |    1
 arch/s390/kernel/entry.S       |    4
 arch/s390/kernel/entry64.S     |   18 +
 arch/s390/kernel/kprobes.c     |  648 +++++++++++++++++++++++++++++++++++++++++
 arch/s390/kernel/traps.c       |   42 ++
 arch/s390/kernel/vmlinux.lds.S |    1
 arch/s390/mm/fault.c           |    8
 include/asm-s390/kdebug.h      |   57 +++
 include/asm-s390/kprobes.h     |  208 +++++++++++++
 10 files changed, 995 insertions(+), 6 deletions(-)

diff -urNp linux-2.6.17-rc6/arch/s390/Kconfig linux-2.6.17-rc6-kp390/arch/s390/Kconfig
--- linux-2.6.17-rc6/arch/s390/Kconfig	2006-06-05 20:57:02.000000000 -0400
+++ linux-2.6.17-rc6-kp390/arch/s390/Kconfig	2006-06-12 05:13:05.000000000 -0400
@@ -474,8 +474,22 @@ source "drivers/net/Kconfig"
 
 source "fs/Kconfig"
 
+menu "Instrumentation Support"
+	depends on EXPERIMENTAL
+
 source "arch/s390/oprofile/Kconfig"
 
+config KPROBES
+	bool "Kprobes (EXPERIMENTAL)"
+	depends on EXPERIMENTAL && MODULES
+	help
+	  Kprobes allows you to trap at almost any kernel address and
+	  execute a callback function.  register_kprobe() establishes
+	  a probepoint and specifies the callback.  Kprobes is useful
+	  for kernel debugging, non-intrusive instrumentation and testing.
+	  If in doubt, say "N".
+endmenu
+
 source "arch/s390/Kconfig.debug"
 
 source "security/Kconfig"
diff -urNp linux-2.6.17-rc6/arch/s390/kernel/entry64.S linux-2.6.17-rc6-kp390/arch/s390/kernel/entry64.S
--- linux-2.6.17-rc6/arch/s390/kernel/entry64.S	2006-06-05 20:57:02.000000000 -0400
+++ linux-2.6.17-rc6-kp390/arch/s390/kernel/entry64.S	2006-06-12 05:13:05.000000000 -0400
@@ -478,6 +478,8 @@ pgm_per:
         clc     __LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW
         je      pgm_svcper
 # no interesting special case, ignore PER event
+	tm	__LC_PGM_OLD_PSW+1(%r15),0x01	# kernel per event ?
+	jz	kernel_per
 	lmg	%r12,%r15,__LC_SAVE_AREA
 	lpswe   __LC_PGM_OLD_PSW
 
@@ -497,6 +499,8 @@ pgm_no_vtime2:
 #endif
 	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
 	lg	%r1,__TI_task(%r9)
+	tm	__LC_PGM_OLD_PSW+1(%r15),0x01	# kernel per event ?
+	jz	kernel_per
 	mvc	__THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
 	mvc	__THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
 	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
@@ -531,6 +535,20 @@ pgm_no_vtime3:
 	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
 	j	sysc_do_svc
 
+#
+# per was called from kernel, must be kprobes
+#
+kernel_per:
+	lgf     %r3,__LC_PGM_ILC	 # load program interruption code
+	lghi	%r8,0x7f
+	ngr	%r8,%r3			 # clear per-event-bit and ilc
+	j	sysc_singlestep
+	lhi	%r0,__LC_PGM_OLD_PSW
+	sth	%r0,SP_TRAP(%r15)	# set trap indication to pgm check
+	la	%r2,SP_PTREGS(%r15)	# address of register-save area
+	larl	%r14,sysc_leave		# load adr. of system ret, no work
+	brcl	15,do_single_step		# branch to do_single_step
+
 /*
  * IO interrupt handler routine
  */
diff -urNp linux-2.6.17-rc6/arch/s390/kernel/entry.S linux-2.6.17-rc6-kp390/arch/s390/kernel/entry.S
--- linux-2.6.17-rc6/arch/s390/kernel/entry.S	2006-06-05 20:57:02.000000000 -0400
+++ linux-2.6.17-rc6-kp390/arch/s390/kernel/entry.S	2006-06-12 05:13:05.000000000 -0400
@@ -456,6 +456,8 @@ pgm_per:
 # ok its one of the special cases, now we need to find out which one
         clc     __LC_PGM_OLD_PSW(8),__LC_SVC_NEW_PSW
         be      BASED(pgm_svcper)
+	tm	__LC_PGM_OLD_PSW+1(%r15),0x01	# kernel per event ?
+	bz	BASED(sysc_singlestep)
 # no interesting special case, ignore PER event
         lm      %r12,%r15,__LC_SAVE_AREA
 	lpsw    0x28
@@ -480,6 +482,8 @@ pgm_no_vtime2:
 	mvc	__THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
 	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
 	oi	__TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
+	tm	__LC_PGM_OLD_PSW+1(%r15),0x01	# kernel per event ?
+	bz	BASED(sysc_singlestep)
 	l	%r3,__LC_PGM_ILC	 # load program interruption code
 	la	%r8,0x7f
 	nr	%r8,%r3                  # clear per-event-bit and ilc
diff -urNp linux-2.6.17-rc6/arch/s390/kernel/kprobes.c linux-2.6.17-rc6-kp390/arch/s390/kernel/kprobes.c
--- linux-2.6.17-rc6/arch/s390/kernel/kprobes.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.17-rc6-kp390/arch/s390/kernel/kprobes.c	2006-06-12 08:27:27.000000000 -0400
@@ -0,0 +1,648 @@
+/*
+ *  Kernel Probes (KProbes)
+ *  arch/s390/kernel/kprobes.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2006
+ *
+ * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
+ */
+
+#include <linux/config.h>
+#include <linux/kprobes.h>
+#include <linux/ptrace.h>
+#include <linux/preempt.h>
+#include <asm/cacheflush.h>
+#include <asm/kdebug.h>
+#include <asm/sections.h>
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+	int ret = 0;
+
+	/* Make sure the probe isn't going on a difficult instruction */
+	if (is_prohibited_opcode((kprobe_opcode_t *) p->addr))
+		ret = -EINVAL;
+
+	/* Use the get_insn_slot() facility for correctness */
+	if (!ret) {
+		p->ainsn.insn = get_insn_slot();
+		if (!p->ainsn.insn) {
+			ret = -ENOMEM;
+		} else {
+			/* this should only happen if you got the slot */
+			memcpy(p->ainsn.insn, p->addr,
+			       MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+			p->ainsn.inst_type =
+			    get_instruction_type(p->ainsn.insn);
+		}
+	}
+	p->opcode = *p->addr;
+	return ret;
+}
+
+int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction)
+{
+	__u8 opcode[6];
+	int ret = 0;
+
+	memcpy(opcode, instruction, 6 * sizeof(__u8));
+
+	switch (opcode[0]) {
+	case OPCODE_BASSM:
+	case OPCODE_BSM:
+	case OPCODE_DIAG:
+	case OPCODE_EX:
+		ret = -EINVAL;
+		break;
+	case OPCODE_PR:
+		if (opcode[1] == OPCODE_PR)
+			ret = -EINVAL;
+		break;
+	case 0xB2:
+		switch (opcode[1]) {
+		case OPCODE_BSA:
+		case OPCODE_BAKR:
+		case OPCODE_BSG:
+		case OPCODE_PC:
+		case OPCODE_PT:
+			ret = -EINVAL;
+			break;
+		}
+		break;
+	}
+	return ret;
+}
+
+/* get_instruction_type will return 0 if only the regular offset adjustments
+ * after out-of-line single-step are required. If a register needs to be fixed,
+ * bits 24-27 will contain the register number and bits 28-31 the length
+ * of the instruction unit. If fixup is only required when the branch is not
+ * taken, bits 0-15 will all be set.
+ */
+int __kprobes get_instruction_type(kprobe_opcode_t * instruction)
+{
+	__u8 opcode[6];
+	int ret = 0;
+
+	memcpy(opcode, instruction, 6 * sizeof(__u8));
+
+	switch (opcode[0]) {
+		/* RR Format - instruction unit length = 2
+		 *  ________ ____ ____
+		 * |Op Code | R1 | R2 |
+		 * |________|_M1_|____|
+		 * 0         8   12  15
+		 */
+	case BALR:	/* PSW addr saved in R1, branch address in R2 */
+		ret = (opcode[1] & 0xf0) + 2;
+		/* Special non branching use of BALR */
+		if ((opcode[1] & 0x0f) == 0)
+			ret &= FIXUP_NOBRANCH;
+		break;
+	case BASR:	/* PSW addr saved in R1, branch address in R2 */
+		ret = (opcode[1] & 0xf0) + 2;
+		/* Special non branching use of BASR */
+		if ((opcode[1] & 0x0f) == 0)
+			ret &= FIXUP_NOBRANCH;
+		break;
+	case BCR:	/* M1 is mask val (condition), branch addr in R2 */
+		ret = FIXUP_NOBRANCH & 2;
+		break;
+	case BCTR:	/* R1 is count down, R2 is branch addr until R1 = 0 */
+		ret = FIXUP_NOBRANCH & 2;
+		break;
+		/* RX Format - instruction unit length = 4
+		 *  ________ ____ ____ ____ ____________
+		 * |Op Code | R1 | X2 | B2 |     D2     |
+		 * |________|_M1_|____|____|____________|
+		 * 0         8   12   16   20          31
+		 */
+	case BAL:	/* PSW addr saved in R1, branch addr D2(X2,B2) */
+		ret = (opcode[1] & 0xf0) + 4;
+		break;
+	case BAS:	/* PSW addr saved in R1, branch addr D2(X2,B2) */
+		ret = (opcode[1] & 0xf0) + 4;
+		break;
+	case BC:	/* M1 is mask val (condition), branch addr D2(X2,B2) */
+		ret = FIXUP_NOBRANCH & 4;
+		break;
+	case BCT:	/* R1 is count down, D2(X2,B2) is branch addr */
+		ret = FIXUP_NOBRANCH & 4;
+		break;
+		/* RI Format - instruction unit length = 4
+		 *  ________ ____ ____ _________________
+		 * |Op Code | R1 |OpCd|       I2        |
+		 * |________|____|____|_________________|
+		 * 0         8   12   16               31
+		 */
+	case 0xA7:	/* first byte (multiple ops have same 1st byte) */
+		if ((opcode[1] & 0x0f) == BRAS) {
+			ret = (opcode[1] & 0xf0) + 4;
+		}
+		break;
+		/* RS Format - instruction unit length = 4
+		 *  ________ ____ ____ ____ ____________
+		 * |Op Code | R1 | R3 | B2 |     D2     |
+		 * |________|____|_M3_|____|____________|
+		 * 0         8   12   16   20          31
+		 */
+	case BXH:
+		ret = FIXUP_NOBRANCH & 4;
+		break;
+	case BXLE:
+		ret = FIXUP_NOBRANCH & 4;
+		break;
+		/* RIL Format - instruction unit length = 6
+		 *  ________ ____ ____ _____________/______________
+		 * |Op Code | R1 |OpCd|            I2              |
+		 * |________|_M1_|____|_____________/______________|
+		 * 0         8   12   16                          47
+		 */
+	case 0xC0:
+		if ((opcode[1] & 0x0f) == BRASL) {
+			ret = (opcode[1] & 0xf0) + 6;
+		} else if ((opcode[1] & 0x0f) == BRCL) {
+			ret = FIXUP_NOBRANCH & 6;
+		}
+		break;
+		/* RSY Format - instruction unit length = 6
+		 *  ________ ____ ____ ____ __/__ ________ ________
+		 * |Op Code | R1 | R3 | B2 | DL2 |  DH2   |Op Code |
+		 * |________|____|_M3_|____|__/__|________|________|
+		 * 0         8   12   16   20    32       40      47
+		 */
+	case 0xEB:
+		if (opcode[5] == BXHG || opcode[5] == BXLEG) {
+			ret = FIXUP_NOBRANCH & 6;
+		}
+		break;
+		/* RXY Format - instruction unit length = 6
+		 *  ________ ____ ____ ____ __/__ ________ ________
+		 * |Op Code | R1 | X2 | B2 | DL2 |  DH2   |Op Code |
+		 * |________|____|____|____|__/__|________|________|
+		 * 0         8   12   16   20    32       40      47
+		 */
+	case 0xE3:
+		if (opcode[5] == BCTG) {
+			ret = FIXUP_NOBRANCH & 6;
+		}
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+	*p->addr = BREAKPOINT_INSTRUCTION;
+}
+
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
+{
+	*p->addr = p->opcode;
+}
+
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+	mutex_lock(&kprobe_mutex);
+	free_insn_slot(p->ainsn.insn);
+	mutex_unlock(&kprobe_mutex);
+}
+
+static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+{
+	per_cr_bits kprobe_per_regs[1];
+
+	memset(kprobe_per_regs, 0, sizeof(per_cr_bits));
+	regs->psw.addr = (unsigned long)p->ainsn.insn;
+
+	/* Just make sure this gets done */
+	regs->psw.addr |= PSW_ADDR_AMODE;
+
+	/* Set up the per control reg info, will pass to lctl */
+	kprobe_per_regs[0].em_instruction_fetch = 1;
+	kprobe_per_regs[0].starting_addr = regs->psw.addr;
+	kprobe_per_regs[0].ending_addr = regs->psw.addr + 4;
+
+	/* Set the PER control regs, turns on single step for this address */
+	__ctl_load(kprobe_per_regs, 9, 11);
+	regs->psw.mask |= PSW_MASK_PER;
+	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
+}
+
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+	kcb->prev_kprobe.kp = kprobe_running();
+	kcb->prev_kprobe.status = kcb->kprobe_status;
+	kcb->prev_kprobe.kprobe_saved_imask = kcb->kprobe_saved_imask;
+	memcpy(kcb->prev_kprobe.kprobe_saved_ctl, kcb->kprobe_saved_ctl,
+	       sizeof(kcb->kprobe_saved_ctl));
+}
+
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+	kcb->kprobe_status = kcb->prev_kprobe.status;
+	kcb->kprobe_saved_imask = kcb->prev_kprobe.kprobe_saved_imask;
+	memcpy(kcb->kprobe_saved_ctl, kcb->prev_kprobe.kprobe_saved_ctl,
+	       sizeof(kcb->kprobe_saved_ctl));
+}
+
+static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+				      struct kprobe_ctlblk *kcb)
+{
+	__get_cpu_var(current_kprobe) = p;
+	/* Save the interrupt and per flags */
+	kcb->kprobe_saved_imask = regs->psw.mask &
+	    (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
+	/* Save the control regs that govern PER */
+	__ctl_store(kcb->kprobe_saved_ctl, 9, 11);
+}
+
+/* Called with kretprobe_lock held */
+void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
+				      struct pt_regs *regs)
+{
+	struct kretprobe_instance *ri;
+
+	if ((ri = get_free_rp_inst(rp)) != NULL) {
+		ri->rp = rp;
+		ri->task = current;
+		ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];
+
+		/* Replace the return addr with trampoline addr */
+		regs->gprs[14] = (unsigned long)&kretprobe_trampoline;
+
+		add_rp_inst(ri);
+	} else {
+		rp->nmissed++;
+	}
+}
+
+static int __kprobes kprobe_handler(struct pt_regs *regs)
+{
+	struct kprobe *p;
+	int ret = 0;
+	unsigned long *addr = (unsigned long *)
+	    ((regs->psw.addr & PSW_ADDR_INSN) - 2);
+	struct kprobe_ctlblk *kcb;
+
+	/*
+	 * We don't want to be preempted for the entire
+	 * duration of kprobe processing
+	 */
+	preempt_disable();
+	kcb = get_kprobe_ctlblk();
+
+	/* Check we're not actually recursing */
+	if (kprobe_running()) {
+		p = get_kprobe(addr);
+		if (p) {
+			if (kcb->kprobe_status == KPROBE_HIT_SS &&
+			    *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
+				regs->psw.mask &= ~PSW_MASK_PER;
+				regs->psw.mask |= kcb->kprobe_saved_imask;
+				goto no_kprobe;
+			}
+			/* We have reentered the kprobe_handler(), since
+			 * another probe was hit while within the handler.
+			 * We here save the original kprobes variables and
+			 * just single step on the instruction of the new probe
+			 * without calling any user handlers.
+			 */
+			save_previous_kprobe(kcb);
+			set_current_kprobe(p, regs, kcb);
+			kprobes_inc_nmissed_count(p);
+			prepare_singlestep(p, regs);
+			kcb->kprobe_status = KPROBE_REENTER;
+			return 1;
+		} else {
+			p = __get_cpu_var(current_kprobe);
+			if (p->break_handler && p->break_handler(p, regs)) {
+				goto ss_probe;
+			}
+		}
+		goto no_kprobe;
+	}
+
+	p = get_kprobe(addr);
+	if (!p) {
+		if (*addr != BREAKPOINT_INSTRUCTION) {
+			/*
+			 * The breakpoint instruction was removed right
+			 * after we hit it.  Another cpu has removed
+			 * either a probepoint or a debugger breakpoint
+			 * at this address.  In either case, no further
+			 * handling of this interrupt is appropriate.
+			 *
+			 */
+			ret = 1;
+		}
+		/* Not one of ours: let kernel handle it */
+		goto no_kprobe;
+	}
+
+	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+	set_current_kprobe(p, regs, kcb);
+	if (p->pre_handler && p->pre_handler(p, regs))
+		/* handler has already set things up, so skip ss setup */
+		return 1;
+
+	ss_probe:
+	prepare_singlestep(p, regs);
+	kcb->kprobe_status = KPROBE_HIT_SS;
+	return 1;
+
+	no_kprobe:
+	preempt_enable_no_resched();
+	return ret;
+}
+
+/*
+ * Function return probe trampoline:
+ * 	- init_kprobes() establishes a probepoint here
+ * 	- When the probed function returns, this probe
+ * 		causes the handlers to fire
+ */
+void kretprobe_trampoline_holder(void)
+{
+	asm volatile (".global kretprobe_trampoline\n"
+		      "kretprobe_trampoline:\n" "bcr 0,0\n");
+}
+
+/*
+ * Called when the probe at kretprobe trampoline is hit
+ */
+int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kretprobe_instance *ri = NULL;
+	struct hlist_head *head;
+	struct hlist_node *node, *tmp;
+	unsigned long flags, orig_ret_address = 0;
+	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+
+	spin_lock_irqsave(&kretprobe_lock, flags);
+	head = kretprobe_inst_table_head(current);
+
+	/*
+	 * It is possible to have multiple instances associated with a given
+ * task, either because multiple functions in the call path
+ * have a return probe installed on them, and/or more than one
+ * return probe was registered for a target function.
+	 *
+	 * We can handle this because:
+	 *     - instances are always inserted at the head of the list
+	 *     - when multiple return probes are registered for the same
+	 *       function, the first instance's ret_addr will point to the
+	 *       real return address, and all the rest will point to
+	 *       kretprobe_trampoline
+	 */
+	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+		if (ri->task != current)
+			/* another task is sharing our hash bucket */
+			continue;
+
+		if (ri->rp && ri->rp->handler)
+			ri->rp->handler(ri, regs);
+
+		orig_ret_address = (unsigned long)ri->ret_addr;
+		recycle_rp_inst(ri);
+
+		if (orig_ret_address != trampoline_address) {
+			/*
+			 * This is the real return address. Any other
+			 * instances associated with this task are for
+			 * other calls deeper on the call stack
+			 */
+			break;
+		}
+	}
+	BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
+	regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;
+
+	reset_current_kprobe();
+	spin_unlock_irqrestore(&kretprobe_lock, flags);
+	preempt_enable_no_resched();
+
+	/*
+	 * By returning a non-zero value, we are telling
+	 * kprobe_handler() that we don't want the post_handler
+	 * to run (and have re-enabled preemption)
+	 */
+	return 1;
+}
+
+/*
+ * Called after single-stepping.  p->addr is the address of the
+ * instruction whose first byte has been replaced by the "breakpoint"
+ * instruction.  To avoid the SMP problems that can occur when we
+ * temporarily put back the original opcode to single-step, we
+ * single-stepped a copy of the instruction.  The address of this
+ * copy is p->ainsn.insn.
+ */
+static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
+{
+	int ilen, reg;
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	/* regular fixup, just apply the offset */
+	if (p->ainsn.inst_type == 0)
+		regs->psw.addr = fix_offset((unsigned long)p->addr,
+					    (unsigned long)p->ainsn.insn,
+					    (unsigned long)regs->psw.addr);
+	/* only apply the offset if the branch wasn't taken */
+	else if (p->ainsn.inst_type < 0) {
+		ilen = p->ainsn.inst_type & 0x0f;
+		reg = (p->ainsn.inst_type & 0xf0) >> 4;
+		if ((unsigned long)regs->psw.addr -
+		    (unsigned long)p->ainsn.insn == ilen) {
+			/* reg slot is only nonzero here on basr
+			 * and balr special cases, fixup reg too
+			 */
+			if (reg != 0)
+				regs->gprs[reg] = (unsigned long)p->addr + ilen;
+			regs->psw.addr = (unsigned long)p->addr + ilen;
+		}
+	} else {
+		ilen = p->ainsn.inst_type & 0x0f;
+		reg = (p->ainsn.inst_type & 0xf0) >> 4;
+		regs->gprs[reg] = (unsigned long)p->addr + ilen;
+
+		regs->psw.addr = fix_offset((unsigned long)p->addr,
+					    (unsigned long)p->ainsn.insn,
+					    (unsigned long)regs->psw.addr);
+	}
+
+	/* set amode for 31bit */
+	regs->psw.addr |= PSW_ADDR_AMODE;
+	/* turn off PER mode */
+	regs->psw.mask &= ~PSW_MASK_PER;
+	/* Restore the original per control regs */
+	__ctl_load(kcb->kprobe_saved_ctl, 9, 11);
+	regs->psw.mask |= kcb->kprobe_saved_imask;
+}
+
+/* if this isn't getting any more complicated, turn into macro? */
+unsigned long __kprobes fix_offset(unsigned long orig_addr,
+			 unsigned long offset_start, unsigned long offset)
+{
+	return (orig_addr + (offset - offset_start));
+}
+static int __kprobes post_kprobe_handler(struct pt_regs *regs)
+{
+	struct kprobe *cur = kprobe_running();
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	if (!cur)
+		return 0;
+
+	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+		kcb->kprobe_status = KPROBE_HIT_SSDONE;
+		cur->post_handler(cur, regs, 0);
+	}
+
+	resume_execution(cur, regs);
+
+	/*Restore back the original saved kprobes variables and continue. */
+	if (kcb->kprobe_status == KPROBE_REENTER) {
+		restore_previous_kprobe(kcb);
+		goto out;
+	}
+	reset_current_kprobe();
+	out:
+	preempt_enable_no_resched();
+
+	/*
+	 * if somebody else is singlestepping across a probe point, psw mask
+	 * will have PER set, in which case, continue the remaining processing
+	 * of do_single_step, as if this is not a probe hit.
+	 */
+	if (regs->psw.mask & PSW_MASK_PER) {
+		return 0;
+	}
+
+	return 1;
+}
+
+static int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+{
+	struct kprobe *cur = kprobe_running();
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+		return 1;
+
+	if (kcb->kprobe_status & KPROBE_HIT_SS) {
+		resume_execution(cur, regs);
+		regs->psw.mask |= kcb->kprobe_saved_imask;
+
+		reset_current_kprobe();
+		preempt_enable_no_resched();
+	}
+	return 0;
+}
+
+/*
+ * Wrapper routine for handling exceptions.
+ */
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+				       unsigned long val, void *data)
+{
+	struct die_args *args = (struct die_args *)data;
+	int ret = NOTIFY_DONE;
+
+	switch (val) {
+	case DIE_BPT:
+		if (kprobe_handler(args->regs))
+			ret = NOTIFY_STOP;
+		break;
+	case DIE_SSTEP:
+		if (post_kprobe_handler(args->regs))
+			ret = NOTIFY_STOP;
+		break;
+	case DIE_PAGE_FAULT:
+		/* kprobe_running() needs smp_processor_id() */
+		preempt_disable();
+		if (kprobe_running() &&
+		    kprobe_fault_handler(args->regs, args->trapnr))
+			ret = NOTIFY_STOP;
+		preempt_enable();
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct jprobe *jp = container_of(p, struct jprobe, kp);
+	unsigned long addr;
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));
+
+	/* setup return addr to the jprobe handler routine */
+	regs->psw.addr = (unsigned long)(jp->entry) | PSW_ADDR_AMODE;
+
+	/* r14 is the function return address */
+	kcb->jprobe_saved_r14 = (unsigned long)regs->gprs[14];
+	/* r15 is the stack pointer */
+	kcb->jprobe_saved_r15 = (unsigned long)regs->gprs[15];
+	addr = (unsigned long)kcb->jprobe_saved_r15;
+
+	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr,
+	       MIN_STACK_SIZE(addr));
+	return 1;
+}
+
+void __kprobes jprobe_return(void)
+{
+	asm volatile (".long 0x00020000");
+}
+
+void __kprobes jprobe_return_end(void)
+{
+	asm volatile ("bcr 0,0");
+}
+
+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_r15);
+
+	/* Put the regs back */
+	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
+	/* put the stack back */
+	memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
+	       MIN_STACK_SIZE(stack_addr));
+	preempt_enable_no_resched();
+	return 1;
+}
+
+static struct kprobe trampoline_p = {
+	.addr = (kprobe_opcode_t *) & kretprobe_trampoline,
+	.pre_handler = trampoline_probe_handler
+};
+
+int __init arch_init_kprobes(void)
+{
+	return register_kprobe(&trampoline_p);
+}
diff -urNp linux-2.6.17-rc6/arch/s390/kernel/Makefile linux-2.6.17-rc6-kp390/arch/s390/kernel/Makefile
--- linux-2.6.17-rc6/arch/s390/kernel/Makefile	2006-06-05 20:57:02.000000000 -0400
+++ linux-2.6.17-rc6-kp390/arch/s390/kernel/Makefile	2006-06-12 05:13:05.000000000 -0400
@@ -21,6 +21,7 @@ obj-$(CONFIG_COMPAT)		+= compat_linux.o 
 obj-$(CONFIG_BINFMT_ELF32)	+= binfmt_elf32.o
 
 obj-$(CONFIG_VIRT_TIMER)	+= vtime.o
+obj-$(CONFIG_KPROBES)		+= kprobes.o
 
 # Kexec part
 S390_KEXEC_OBJS := machine_kexec.o crash.o
diff -urNp linux-2.6.17-rc6/arch/s390/kernel/traps.c linux-2.6.17-rc6-kp390/arch/s390/kernel/traps.c
--- linux-2.6.17-rc6/arch/s390/kernel/traps.c	2006-06-05 20:57:02.000000000 -0400
+++ linux-2.6.17-rc6-kp390/arch/s390/kernel/traps.c	2006-06-12 06:30:24.000000000 -0400
@@ -30,6 +30,7 @@
 #include <linux/module.h>
 #include <linux/kallsyms.h>
 #include <linux/reboot.h>
+#include <linux/kprobes.h>
 
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -40,6 +41,7 @@
 #include <asm/s390_ext.h>
 #include <asm/lowcore.h>
 #include <asm/debug.h>
+#include <asm/kdebug.h>
 
 /* Called from entry.S only */
 extern void handle_per_exception(struct pt_regs *regs);
@@ -75,6 +77,20 @@ static int kstack_depth_to_print = 12;
 static int kstack_depth_to_print = 20;
 #endif /* CONFIG_64BIT */
 
+ATOMIC_NOTIFIER_HEAD(s390die_chain);
+
+int register_die_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&s390die_chain, nb);
+}
+EXPORT_SYMBOL(register_die_notifier);
+
+int unregister_die_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&s390die_chain, nb);
+}
+EXPORT_SYMBOL(unregister_die_notifier);
+
 /*
  * For show_trace we have tree different stack to consider:
  *   - the panic stack which is used if the kernel stack has overflown
@@ -308,8 +324,9 @@ report_user_fault(long interruption_code
 #endif
 }
 
-static void inline do_trap(long interruption_code, int signr, char *str,
-                           struct pt_regs *regs, siginfo_t *info)
+static void __kprobes inline do_trap(long interruption_code, int signr,
+					char *str, struct pt_regs *regs,
+					siginfo_t *info)
 {
 	/*
 	 * We got all needed information from the lowcore and can
@@ -318,6 +335,10 @@ static void inline do_trap(long interrup
         if (regs->psw.mask & PSW_MASK_PSTATE)
 		local_irq_enable();
 
+	if (notify_die(DIE_TRAP, str, regs, interruption_code,
+				interruption_code, signr) == NOTIFY_STOP)
+		return;
+
         if (regs->psw.mask & PSW_MASK_PSTATE) {
                 struct task_struct *tsk = current;
 
@@ -339,8 +360,12 @@ static inline void *get_check_address(st
 	return (void *)((regs->psw.addr-S390_lowcore.pgm_ilc) & PSW_ADDR_INSN);
 }
 
-void do_single_step(struct pt_regs *regs)
+void __kprobes do_single_step(struct pt_regs *regs)
 {
+	if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0,
+					SIGTRAP) == NOTIFY_STOP){
+		return;
+	}
 	if ((current->ptrace & PT_PTRACED) != 0)
 		force_sig(SIGTRAP, current);
 }
@@ -466,8 +491,15 @@ asmlinkage void illegal_op(struct pt_reg
 #endif
 		} else
 			signal = SIGILL;
-	} else
-		signal = SIGILL;
+	} else {
+		/*
+		 * If we get an illegal op in kernel mode, send it through the
+		 * kprobes notifier. If kprobes doesn't pick it up, SIGILL
+		 */
+		if (notify_die(DIE_BPT, "bpt", regs, interruption_code,
+					3, SIGTRAP) != NOTIFY_STOP)
+			signal = SIGILL;
+	}
 
 #ifdef CONFIG_MATHEMU
         if (signal == SIGFPE)
diff -urNp linux-2.6.17-rc6/arch/s390/kernel/vmlinux.lds.S linux-2.6.17-rc6-kp390/arch/s390/kernel/vmlinux.lds.S
--- linux-2.6.17-rc6/arch/s390/kernel/vmlinux.lds.S	2006-06-05 20:57:02.000000000 -0400
+++ linux-2.6.17-rc6-kp390/arch/s390/kernel/vmlinux.lds.S	2006-06-12 05:13:05.000000000 -0400
@@ -25,6 +25,7 @@ SECTIONS
 	*(.text)
 	SCHED_TEXT
 	LOCK_TEXT
+	KPROBES_TEXT
 	*(.fixup)
 	*(.gnu.warning)
 	} = 0x0700
diff -urNp linux-2.6.17-rc6/arch/s390/mm/fault.c linux-2.6.17-rc6-kp390/arch/s390/mm/fault.c
--- linux-2.6.17-rc6/arch/s390/mm/fault.c	2006-06-05 20:57:02.000000000 -0400
+++ linux-2.6.17-rc6-kp390/arch/s390/mm/fault.c	2006-06-12 05:13:05.000000000 -0400
@@ -26,10 +26,12 @@
 #include <linux/console.h>
 #include <linux/module.h>
 #include <linux/hardirq.h>
+#include <linux/kprobes.h>
 
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
+#include <asm/kdebug.h>
 
 #ifndef CONFIG_64BIT
 #define __FAIL_ADDR_MASK 0x7ffff000
@@ -160,7 +162,7 @@ static void do_sigsegv(struct pt_regs *r
  *   11       Page translation     ->  Not present       (nullification)
  *   3b       Region third trans.  ->  Not present       (nullification)
  */
-static inline void
+static inline void __kprobes
 do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
 {
         struct task_struct *tsk;
@@ -174,6 +176,10 @@ do_exception(struct pt_regs *regs, unsig
         tsk = current;
         mm = tsk->mm;
 	
+	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
+					SIGSEGV) == NOTIFY_STOP)
+		return;
+
 	/* 
          * Check for low-address protection.  This needs to be treated
 	 * as a special case because the translation exception code 
diff -urNp linux-2.6.17-rc6/include/asm-s390/kdebug.h linux-2.6.17-rc6-kp390/include/asm-s390/kdebug.h
--- linux-2.6.17-rc6/include/asm-s390/kdebug.h	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.17-rc6-kp390/include/asm-s390/kdebug.h	2006-06-12 06:27:25.000000000 -0400
@@ -0,0 +1,57 @@
+#ifndef _S390_KDEBUG_H
+#define _S390_KDEBUG_H 1
+
+/*
+ * Feb 2006 Ported to s390 <grundym@us.ibm.com>
+ */
+#include <linux/notifier.h>
+
+struct pt_regs;
+
+struct die_args {
+	struct pt_regs *regs;
+	const char *str;
+	long err;
+	int trapnr;
+	int signr;
+};
+
+/* Note - you should never unregister because that can race with NMIs.
+ * If you really want to do it first unregister - then synchronize_sched
+ *  - then free.
+ */
+extern int register_die_notifier(struct notifier_block *);
+extern int unregister_die_notifier(struct notifier_block *);
+extern struct atomic_notifier_head s390die_chain;
+
+
+enum die_val {
+	DIE_OOPS = 1,
+	DIE_BPT,
+	DIE_SSTEP,
+	DIE_PANIC,
+	DIE_NMI,
+	DIE_DIE,
+	DIE_NMIWATCHDOG,
+	DIE_KERNELDEBUG,
+	DIE_TRAP,
+	DIE_GPF,
+	DIE_CALL,
+	DIE_NMI_IPI,
+	DIE_PAGE_FAULT,
+};
+
+static inline int notify_die(enum die_val val, const char *str,
+			struct pt_regs *regs, long err, int trap, int sig)
+{
+	struct die_args args = {
+		.regs = regs,
+		.str = str,
+		.err = err,
+		.trapnr = trap,
+		.signr = sig
+	};
+	return atomic_notifier_call_chain(&s390die_chain, val, &args);
+}
+
+#endif
diff -urNp linux-2.6.17-rc6/include/asm-s390/kprobes.h linux-2.6.17-rc6-kp390/include/asm-s390/kprobes.h
--- linux-2.6.17-rc6/include/asm-s390/kprobes.h	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.17-rc6-kp390/include/asm-s390/kprobes.h	2006-06-12 08:28:13.000000000 -0400
@@ -0,0 +1,208 @@
+#ifndef _ASM_S390_KPROBES_H
+#define _ASM_S390_KPROBES_H
+/*
+ *  Kernel Probes (KProbes)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2006
+ *
+ * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
+ *		Probes initial implementation ( includes suggestions from
+ *		Rusty Russell).
+ * 2004-Nov	Modified for PPC64 by Ananth N Mavinakayanahalli
+ *		<ananth@in.ibm.com>
+ * 2005-Dec	Used as a template for s390 by Mike Grundy
+ * 		<grundym@us.ibm.com>
+ */
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/percpu.h>
+
+#define  __ARCH_WANT_KPROBES_INSN_SLOT
+struct pt_regs;
+struct kprobe;
+
+typedef u16 kprobe_opcode_t;
+#define BREAKPOINT_INSTRUCTION	0x0002
+
+/* Maximum instruction size is 3 (16bit) halfwords: */
+#define MAX_INSN_SIZE		0x0003
+#define MAX_STACK_SIZE 		64
+#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \
+	(((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) \
+	? (MAX_STACK_SIZE) \
+	: (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
+
+#define FIXUP_NOBRANCH 0xFFFF0000
+
+/* These are the s390 opcodes that could be troublesome with probes */
+#define OPCODE_BASSM	0x0C
+#define OPCODE_BSM	0x0B
+#define OPCODE_DIAG	0x83
+#define OPCODE_EX	0x44
+#define OPCODE_PR	0x01
+/* These all have a first byte of 0xB2, second byte defined */
+#define OPCODE_BSA	0x5A
+#define OPCODE_BAKR	0x40
+#define OPCODE_BSG	0x58
+#define OPCODE_PC	0x18
+#define OPCODE_PT	0x28
+
+/* These are the various branch (jump) instructions available on the s390 in
+ * 32 and 64bit mode. Most of the instruction formats are pretty straight
+ * forward, but some have the opcode split in different places.
+ */
+/* RR Format
+*  ________ ____ ____
+* |Op Code | R1 | R2 |
+* |________|_M1_|____|
+* 0         8   12  15
+*/
+#define BALR 	0x05
+#define BASR 	0x0D
+#define BCR  	0x07
+#define BCTR 	0x06
+/* RX Format
+*  ________ ____ ____ ____ ____________
+* |Op Code | R1 | X2 | B2 |     D2     |
+* |________|_M1_|____|____|____________|
+* 0         8   12   16   20          31
+*/
+#define BAL  	0x45
+#define BAS  	0x4D
+#define BC   	0x47
+#define BCT  	0x46
+/* RI Format
+*  ________ ____ ____ _________________
+* |Op Code | R1 |OpCd|       I2        |
+* |________|____|____|_________________|
+* 0         8   12   16               31
+*/
+/* First byte is 0xA7 */
+#define BRC  	0x4
+#define BRAS 	0x5
+#define BRCT 	0x6
+#define BRCTG	0x7
+/* RIE Format
+*  ________ ____ ____ ____/_____ ________ ________
+* |Op Code | R1 | R3 |    I2    |////////|Op Code |
+* |________|____|____|____/_____|________|________|
+* 0         8   12   16         32       40      47
+*/
+/* First byte is 0xEC */
+#define BRXHG	0x44
+#define BRXLG	0x45
+/* RRE Format
+*  _________________ ________ ____ ____
+* |     Op Code     |////////| R1 | R2 |
+* |_________________|________|____|____|
+* 0                 16       24   28  31
+*/
+/* First byte is 0xB9 */
+#define BCTGR	0x46
+/* RS Format
+*  ________ ____ ____ ____ ____________
+* |Op Code | R1 | R3 | B2 |     D2     |
+* |________|____|_M3_|____|____________|
+* 0         8   12   16   20          31
+*/
+#define BXH  	0x86
+#define BXLE 	0x87
+/* RSI Format
+*  ________ ____ ____ _________________
+* |Op Code | R1 | R3 |       I2        |
+* |________|____|____|_________________|
+* 0         8   12   16               31
+*/
+#define BRXH 	0x84
+#define BRXLE	0x85
+/* RIL Format
+*  ________ ____ ____ _____________/______________
+* |Op Code | R1 |OpCd|            I2              |
+* |________|_M1_|____|_____________/______________|
+* 0         8   12   16                          47
+*/
+/* First byte is 0xC0 */
+#define BRASL	0x5
+#define BRCL 	0x4
+/* RSY Format
+*  ________ ____ ____ ____ __/__ ________ ________
+* |Op Code | R1 | R3 | B2 | DL2 |  DH2   |Op Code |
+* |________|____|_M3_|____|__/__|________|________|
+* 0         8   12   16   20    32       40      47
+*/
+/* First byte is 0xEB */
+#define BXHG 	0x44
+#define BXLEG	0x45
+/* RXY Format
+*  ________ ____ ____ ____ __/__ ________ ________
+* |Op Code | R1 | X2 | B2 | DL2 |  DH2   |Op Code |
+* |________|____|____|____|__/__|________|________|
+* 0         8   12   16   20    32       40      47
+*/
+/* First byte is 0xE3 */
+#define BCTG 	0x46
+
+#define JPROBE_ENTRY(pentry)	(kprobe_opcode_t *)((func_descr_t *)pentry)
+
+#define ARCH_SUPPORTS_KRETPROBES
+
+void arch_remove_kprobe(struct kprobe *p);
+void kretprobe_trampoline(void);
+int  is_prohibited_opcode(kprobe_opcode_t *instruction);
+int  get_instruction_type(kprobe_opcode_t *instruction);
+unsigned long fix_offset( unsigned long orig_addr,
+			  unsigned long offset_start,
+			  unsigned long offset);
+
+/* Architecture specific copy of original instruction */
+struct arch_specific_insn {
+	/* copy of original instruction */
+	kprobe_opcode_t *insn;
+	int inst_type;
+};
+
+struct prev_kprobe {
+	struct kprobe *kp;
+	unsigned long status;
+	unsigned long saved_psw;
+	unsigned long kprobe_saved_imask;
+	unsigned long kprobe_saved_ctl[3];
+};
+
+/* per-cpu kprobe control block */
+struct kprobe_ctlblk {
+	unsigned long kprobe_status;
+	unsigned long kprobe_saved_imask;
+	unsigned long kprobe_saved_ctl[3];
+	struct pt_regs jprobe_saved_regs;
+	unsigned long jprobe_saved_r14;
+	unsigned long jprobe_saved_r15;
+	struct prev_kprobe prev_kprobe;
+	kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
+};
+#ifdef CONFIG_KPROBES
+
+extern int kprobe_exceptions_notify(struct notifier_block *self,
+				    unsigned long val, void *data);
+#else	/* !CONFIG_KPROBES */
+static inline int kprobe_exceptions_notify(struct notifier_block *self,
+					   unsigned long val, void *data)
+{
+	return 0;
+}
+#endif
+#endif	/* _ASM_S390_KPROBES_H */


* Re: [PATCH] kprobes for s390 architecture
  2006-06-12 13:15 [PATCH] kprobes for s390 architecture Mike Grundy
@ 2006-06-12 19:40 ` Martin Schwidefsky
  2006-06-21  4:28   ` Mike Grundy
  2006-06-21  9:40   ` Jan Glauber
  2006-06-21 16:23 ` Jan Glauber
  1 sibling, 2 replies; 31+ messages in thread
From: Martin Schwidefsky @ 2006-06-12 19:40 UTC (permalink / raw)
  To: Mike Grundy, jan.glauber; +Cc: linux-kernel, systemtap

On Mon, 2006-06-12 at 09:15 -0400, Mike Grundy wrote:
> This patch provides kprobes support for s390 architecture

Looks like a good start. There are some bugs though and I have a few
suggestions:

> diff -urNp linux-2.6.17-rc6/arch/s390/kernel/entry64.S linux-2.6.17-rc6-kp390/arch/s390/kernel/entry64.S
> --- linux-2.6.17-rc6/arch/s390/kernel/entry64.S	2006-06-05 20:57:02.000000000 -0400
> +++ linux-2.6.17-rc6-kp390/arch/s390/kernel/entry64.S	2006-06-12 05:13:05.000000000 -0400
> @@ -478,6 +478,8 @@ pgm_per:
>          clc     __LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW
>          je      pgm_svcper
>  # no interesting special case, ignore PER event
> +	tm	__LC_PGM_OLD_PSW+1(%r15),0x01	# kernel per event ?
> +	jz	kernel_per
>  	lmg	%r12,%r15,__LC_SAVE_AREA
>  	lpswe   __LC_PGM_OLD_PSW
>  

If this branch is ever taken it will crash at least the currently
running process. The program check handler branches to pgm_per, after
having done only SAVE_ALL_BASE, if the per bit in the psw is set.
SAVE_ALL_SYNC and CREATE_STACK_FRAME have not been called yet, and
neither kernel_per nor sysc_singlestep will do it. That means no stack
frame has been set up for the kernel per event, which will crash.

> @@ -497,6 +499,8 @@ pgm_no_vtime2:
>  #endif
>  	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
>  	lg	%r1,__TI_task(%r9)
> +	tm	__LC_PGM_OLD_PSW+1(%r15),0x01	# kernel per event ?
> +	jz	kernel_per
>  	mvc	__THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
>  	mvc	__THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
>  	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID

This branch is ok and is imho the only one we need.

> @@ -531,6 +535,20 @@ pgm_no_vtime3:
>  	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
>  	j	sysc_do_svc
>  
> +#
> +# per was called from kernel, must be kprobes
> +#
> +kernel_per:
> +	lgf     %r3,__LC_PGM_ILC	 # load program interruption code
> +	lghi	%r8,0x7f
> +	ngr	%r8,%r3			 # clear per-event-bit and ilc
> +	j	sysc_singlestep
> +	lhi	%r0,__LC_PGM_OLD_PSW
> +	sth	%r0,SP_TRAP(%r15)	# set trap indication to pgm check
> +	la	%r2,SP_PTREGS(%r15)	# address of register-save area
> +	larl	%r14,sysc_leave		# load adr. of system ret, no work
> +	brcl	15,do_single_step		# branch to do_single_step
> +
>  /*
>   * IO interrupt handler routine
>   */

The code after "j sysc_singlestep" is unreachable. Either you can remove
it or there is a branch missing.


> diff -urNp linux-2.6.17-rc6/arch/s390/kernel/entry.S linux-2.6.17-rc6-kp390/arch/s390/kernel/entry.S
> --- linux-2.6.17-rc6/arch/s390/kernel/entry.S	2006-06-05 20:57:02.000000000 -0400
> +++ linux-2.6.17-rc6-kp390/arch/s390/kernel/entry.S	2006-06-12 05:13:05.000000000 -0400
> @@ -456,6 +456,8 @@ pgm_per:
>  # ok its one of the special cases, now we need to find out which one
>          clc     __LC_PGM_OLD_PSW(8),__LC_SVC_NEW_PSW
>          be      BASED(pgm_svcper)
> +	tm	__LC_PGM_OLD_PSW+1(%r15),0x01	# kernel per event ?
> +	bz	BASED(sysc_singlestep)
>  # no interesting special case, ignore PER event
>          lm      %r12,%r15,__LC_SAVE_AREA
>  	lpsw    0x28

Same as for 64 bit, if the branch is taken it will crash.

> @@ -480,6 +482,8 @@ pgm_no_vtime2:
>  	mvc	__THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
>  	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
>  	oi	__TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
> +	tm	__LC_PGM_OLD_PSW+1(%r15),0x01	# kernel per event ?
> +	bz	BASED(sysc_singlestep)
>  	l	%r3,__LC_PGM_ILC	 # load program interruption code
>  	la	%r8,0x7f
>  	nr	%r8,%r3                  # clear per-event-bit and ilc

The 31 bit code branches to sysc_singlestep directly. Why is kernel_per
only needed for 64 bit? I think 31 bit needs kernel_per as well.

> diff -urNp linux-2.6.17-rc6/arch/s390/kernel/kprobes.c linux-2.6.17-rc6-kp390/arch/s390/kernel/kprobes.c
> --- linux-2.6.17-rc6/arch/s390/kernel/kprobes.c	1969-12-31 19:00:00.000000000 -0500
> +++ linux-2.6.17-rc6-kp390/arch/s390/kernel/kprobes.c	2006-06-12 08:27:27.000000000 -0400
> @@ -0,0 +1,648 @@
> +/*
> + *  Kernel Probes (KProbes)
> + *  arch/s390/kernel/kprobes.c
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License as published by
> + * the Free Software Foundation; either version 2 of the License, or
> + * (at your option) any later version.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
> + * GNU General Public License for more details.
> + *
> + * You should have received a copy of the GNU General Public License
> + * along with this program; if not, write to the Free Software
> + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
> + *
> + * Copyright (C) IBM Corporation, 2002, 2006
> + *
> + * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
> + */
> +
> +#include <linux/config.h>
> +#include <linux/kprobes.h>
> +#include <linux/ptrace.h>
> +#include <linux/preempt.h>
> +#include <asm/cacheflush.h>
> +#include <asm/kdebug.h>
> +#include <asm/sections.h>
> +
> +DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
> +DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
> +
> +int __kprobes arch_prepare_kprobe(struct kprobe *p)
> +{
> +	int ret = 0;
> +
> +	/* Make sure the probe isn't going on a difficult instruction */
> +	if (is_prohibited_opcode((kprobe_opcode_t *) p->addr))
> +		ret = -EINVAL;
> +
> +	/* Use the get_insn_slot() facility for correctness */
> +	if (!ret) {
> +		p->ainsn.insn = get_insn_slot();
> +		if (!p->ainsn.insn) {
> +			ret = -ENOMEM;
> +		} else {
> +			/* this should only happen if you got the slot */
> +			memcpy(p->ainsn.insn, p->addr,
> +			       MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
> +			p->ainsn.inst_type =
> +			    get_instruction_type(p->ainsn.insn);
> +		}
> +	}
> +	p->opcode = *p->addr;
> +	return ret;
> +}
> +

Hmm, you are assigning to p->opcode even if the function returns an
error. I have the vague feeling that this is not a good idea. And I
suggest that you use early returns; that way you get rid of two levels
of indentation.
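
Roughly like this (untested sketch, using only the helpers already in the
patch):

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	/* Refuse instructions that cannot be single-stepped out of line. */
	if (is_prohibited_opcode((kprobe_opcode_t *) p->addr))
		return -EINVAL;

	/* Get an out-of-line slot and copy the instruction into it. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;

	memcpy(p->ainsn.insn, p->addr,
	       MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	p->ainsn.inst_type = get_instruction_type(p->ainsn.insn);
	/* remember the original opcode only on success */
	p->opcode = *p->addr;
	return 0;
}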

> +int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction)
> +{
> +	__u8 opcode[6];
> +	int ret = 0;
> +
> +	memcpy(opcode, instruction, 6 * sizeof(__u8));
> +
> +	switch (opcode[0]) {
> +	case OPCODE_BASSM:
> +	case OPCODE_BSM:
> +	case OPCODE_DIAG:
> +	case OPCODE_EX:
> +		ret = -EINVAL;
> +		break;
> +	case OPCODE_PR:
> +		if (opcode[1] == OPCODE_PR)
> +			ret = -EINVAL;
> +		break;
> +	case 0xB2:
> +		switch (opcode[1]) {
> +		case OPCODE_BSA:
> +		case OPCODE_BAKR:
> +		case OPCODE_BSG:
> +		case OPCODE_PC:
> +		case OPCODE_PT:
> +			ret = -EINVAL;
> +			break;
> +		}
> +		break;
> +	}
> +	return ret;
> +}
> +

I would write the function like this:

int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction)
{
        switch (*(__u8 *) instruction) {
        case 0x0c:      /* bassm */
        case 0x0b:      /* bsm   */
        case 0x83:      /* diag  */
        case 0x44:      /* ex    */
                return -EINVAL;
        }
        switch (*(__u16 *) instruction) {
        case 0x0101:    /* pr    */
        case 0xb25a:    /* bsa   */
        case 0xb240:    /* bakr  */
        case 0xb258:    /* bsg   */
        case 0xb218:    /* pc    */
        case 0xb228:    /* pt    */
                return -EINVAL;
        }
        return 0;
}

Remove the OPCODE_xxx #defines. Having one-byte opcode defines for 2-, 4-
and 6-byte instructions is quite confusing. The full-blown opcode
masking solution as implemented e.g. in binutils would be overkill.
A "stand-alone" function like the above is preferable because it is
simple and you don't have to look at several files to figure out what it
does.

> +/* get_instruction_type will return 0 if only the regular offset adjustments
> + * after out-of-line single-step are required. If a register needs to be fixed,
> + * bits 24-27 will contain the register number and bits 28-31 the length
> + * of the instruction unit. If fixup is only required when the branch is not
> + * taken, bits 0-15 will all be set.
> + */
> +int __kprobes get_instruction_type(kprobe_opcode_t * instruction)
> +{
> +	__u8 opcode[6];
> +	int ret = 0;
> +
> +	memcpy(opcode, instruction, 6 * sizeof(__u8));

Again that memcpy. Why don't you just cast the instruction pointer to
__u8 and __u16 and dereference it?
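
I.e. something like this (just a sketch), reading the bytes in place:

	__u8 op0 = *(__u8 *) instruction;	/* opcode byte 0 */
	__u16 op01 = *(__u16 *) instruction;	/* bytes 0-1, halfword aligned */
	__u8 op5 = ((__u8 *) instruction)[5];	/* byte 5 of the 6-byte formats */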

The following switch deals with all the instructions that need special
handling. Please get rid of the BALR/BASR/BCR/BCTR/... defines and use
the opcode number directly. Add a comment saying which instruction it is,
like in the new is_prohibited_opcode. All these defines are only used in
get_instruction_type and the opcodes will certainly not change, only new
ones will get added. No point in all those #defines.

> +
> +	switch (opcode[0]) {
> +		/* RR Format - instruction unit length = 2
> +		 *  ________ ____ ____
> +		 * |Op Code | R1 | R2 |
> +		 * |________|_M1_|____|
> +		 * 0         8   12  15
> +		 */
> +	case BALR:	/* PSW addr saved in R1, branch address in R2 */
> +		ret = (opcode[1] & 0xf0) + 2;
> +		/* Special non branching use of BALR */
> +		if ((opcode[1] & 0x0f) == 0)
> +			ret &= FIXUP_NOBRANCH;
> +		break;

((opcode[1] & 0xf0) + 2) & FIXUP_NOBRANCH is always 0. If the target
register is 0 no branch takes place but R1 still needs fixup.
resume_execution will just fixup the psw address in that case. I think
you meant "ret |= FIXUP_NOBRANCH".


> +	case BASR:	/* PSW addr saved in R1, branch address in R2 */
> +		ret = (opcode[1] & 0xf0) + 2;
> +		/* Special non branching use of BASR */
> +		if ((opcode[1] & 0x0f) == 0)
> +			ret &= FIXUP_NOBRANCH;
> +		break;

Same here..

> +	case BCR:	/* M1 is mask val (condition), branch addr in R2 */
> +		ret = FIXUP_NOBRANCH & 2;
> +		break;

..here..

> +	case BCTR:	/* R1 is count down, R2 is branch addr until R1 = 0 */
> +		ret = FIXUP_NOBRANCH & 2;
> +		break;

..here..

> +		/* RX Format - instruction unit length = 4
> +		 *  ________ ____ ____ ____ ____________
> +		 * |Op Code | R1 | X2 | B2 |     D2     |
> +		 * |________|_M1_|____|____|____________|
> +		 * 0         8   12   16   20          31
> +		 */
> +	case BAL:	/* PSW addr saved in R1, branch addr D2(X2,B2) */
> +		ret = (opcode[1] & 0xf0) + 4;
> +		break;
> +	case BAS:	/* PSW addr saved in R1, branch addr D2(X2,B2) */
> +		ret = (opcode[1] & 0xf0) + 4;
> +		break;
> +	case BC:	/* M1 is mask val (condition), branch addr D2(X2,B2) */
> +		ret = FIXUP_NOBRANCH & 4;
> +		break;

..here..

> +	case BCT:	/* R1 is count down, D2(X2,B2) is branch addr */
> +		ret = FIXUP_NOBRANCH & 4;
> +		break;

..here..

> +		/* RI Format - instruction unit length = 4
> +		 *  ________ ____ ____ _________________
> +		 * |Op Code | R1 |OpCd|       I2        |
> +		 * |________|____|____|_________________|
> +		 * 0         8   12   16               31
> +		 */
> +	case 0xA7:	/* first byte (multiple ops have same 1st byte) */
> +		if ((opcode[1] & 0x0f) == BRAS) {
> +			ret = (opcode[1] & 0xf0) + 4;
> +		}
> +		break;
> +		/* RS Format - instruction unit length = 4
> +		 *  ________ ____ ____ ____ ____________
> +		 * |Op Code | R1 | R3 | B2 |     D2     |
> +		 * |________|____|_M3_|____|____________|
> +		 * 0         8   12   16   20          31
> +		 */
> +	case BXH:
> +		ret = FIXUP_NOBRANCH & 4;
> +		break;

..here..

> +	case BXLE:
> +		ret = FIXUP_NOBRANCH & 4;
> +		break;

..here..

> +		/* RIL Format - instruction unit length = 6
> +		 *  ________ ____ ____ _____________/______________
> +		 * |Op Code | R1 |OpCd|            I2              |
> +		 * |________|_M1_|____|_____________/______________|
> +		 * 0         8   12   16                          47
> +		 */
> +	case 0xC0:
> +		if ((opcode[1] & 0x0f) == BRASL) {
> +			ret = (opcode[1] & 0xf0) + 6;
> +		} else if ((opcode[1] & 0x0f) == BRCL) {
> +			ret = FIXUP_NOBRANCH & 6;
> +		}
> +		break;

..here..

> +		/* RSY Format - instruction unit length = 6
> +		 *  ________ ____ ____ ____ __/__ ________ ________
> +		 * |Op Code | R1 | R3 | B2 | DL2 |  DH2   |Op Code |
> +		 * |________|____|_M3_|____|__/__|________|________|
> +		 * 0         8   12   16   20    32       40      47
> +		 */
> +	case 0xEB:
> +		if (opcode[5] == BXHG || opcode[5] == BXLEG) {
> +			ret = FIXUP_NOBRANCH & 6;
> +		}
> +		break;

..here..

> +		/* RXY Format - instruction unit length = 6
> +		 *  ________ ____ ____ ____ __/__ ________ ________
> +		 * |Op Code | R1 | X2 | B2 | DL2 |  DH2   |Op Code |
> +		 * |________|____|____|____|__/__|________|________|
> +		 * 0         8   12   16   20    32       40      47
> +		 */
> +	case 0xE3:
> +		if (opcode[5] == BCTG) {
> +			ret = FIXUP_NOBRANCH & 6;
> +		}
> +		break;

..and here.

> +	default:
> +		break;
> +	}
> +	return ret;
> +}
> +

There are some more instructions missing that need fixup:
"brxh" 0x84??????, "brxle" 0x85??????, "brc" 0xa7?4????,
"brct" 0xa7?6????, "brctg" 0xa7?7????, "bctgr" 0xb946????,
"brxhg" 0xec????????44 and "brxlg" 0xec??????45.

A suggestion: I think the code would be easier to understand if you'd
use three bits for the three actions that are needed after single
stepping the out-of-line instruction:
1) fixup psw.addr to point to the instruction after the breakpoint
   (FIXUP_PSW_NORMAL 0x80000000)
2) fixup psw.addr to point to the instruction after the breakpoint if
the branch has not been taken (FIXUP_PSW_BRANCH 0x40000000)
3) update register with the return address (FIXUP_RETURN_REGISTER).

1) and 2) are mutually exclusive, 3) can be combined with either 1) or 2).
In addition a structure instead of an int for inst_type might improve
readability.
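
Roughly (a sketch only; the FIXUP_RETURN_REGISTER value is made up here,
and ilen/reg are assumed to be decoded along with the fixup bits):

#define FIXUP_PSW_NORMAL	0x80000000
#define FIXUP_PSW_BRANCH	0x40000000
#define FIXUP_RETURN_REGISTER	0x20000000	/* invented value */

	if (fixup & FIXUP_PSW_NORMAL)
		/* translate an address in (or relative to) the copy back */
		regs->psw.addr = (unsigned long) p->addr +
			(regs->psw.addr - (unsigned long) p->ainsn.insn);
	else if (fixup & FIXUP_PSW_BRANCH) {
		/* branch not taken: psw stopped right after the copy */
		if (regs->psw.addr == (unsigned long) p->ainsn.insn + ilen)
			regs->psw.addr = (unsigned long) p->addr + ilen;
	}
	if (fixup & FIXUP_RETURN_REGISTER)
		regs->gprs[reg] = (unsigned long) p->addr + ilen;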

> +void __kprobes arch_arm_kprobe(struct kprobe *p)
> +{
> +	*p->addr = BREAKPOINT_INSTRUCTION;
> +}
> +
> +void __kprobes arch_disarm_kprobe(struct kprobe *p)
> +{
> +	*p->addr = p->opcode;
> +}
> +

I would feel better if the kernel code is changed in a more controlled
manner. Do an smp_call_function to avoid concurrent execution of the
instruction you are changing.
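
E.g. (just a sketch, with the current four-argument smp_call_function; an
empty callback relies on the interrupt itself to serialize each cpu):

static void __kprobes kprobe_sync_cpu(void *arg)
{
	/* nothing to do, taking the interrupt serializes the cpu */
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	/* rendezvous with the other cpus before returning */
	smp_call_function(kprobe_sync_cpu, NULL, 0, 1);	/* retry=0, wait=1 */
}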

> +void __kprobes arch_remove_kprobe(struct kprobe *p)
> +{
> +	mutex_lock(&kprobe_mutex);
> +	free_insn_slot(p->ainsn.insn);
> +	mutex_unlock(&kprobe_mutex);
> +}
> +
> +static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
> +{
> +	per_cr_bits kprobe_per_regs[1];
> +
> +	memset(kprobe_per_regs, 0, sizeof(per_cr_bits));
> +	regs->psw.addr = (unsigned long)p->ainsn.insn;
> +
> +	/* Just make sure this gets done */
> +	regs->psw.addr |= PSW_ADDR_AMODE;
> +

Just make sure this gets done... I don't think that comment helps much.
Please collapse the two regs->psw.addr lines into a single one and
remove the comment.

> +	/* Set up the per control reg info, will pass to lctl */
> +	kprobe_per_regs[0].em_instruction_fetch = 1;
> +	kprobe_per_regs[0].starting_addr = regs->psw.addr;
> +	kprobe_per_regs[0].ending_addr = regs->psw.addr + 4;
> +

"ending_addr = regs->psw.addr + 1" is enough, see pop 4-31:
an instruction-fetching event occurs if the first byte of the
instruction is within the storage area designated by control
registers 10 and 11.

> +	/* Set the PER control regs, turns on single step for this address */
> +	__ctl_load(kprobe_per_regs, 9, 11);
> +	regs->psw.mask |= PSW_MASK_PER;
> +	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
> +}
> +

Why do you disable the interrupts and the machine checks?

> +void __kprobes jprobe_return(void)
> +{
> +	asm volatile (".long 0x00020000");
> +}
> +

0x0002 is a two-byte instruction; any specific reason why you
use .long?
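
I would have expected just the halfword, e.g. (sketch):

	asm volatile (".word 0x0002");	/* emits exactly the two bytes */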

So much for now. I skipped over a lot of code quite quickly and can't
comment on it yet. Hope I will find some more time tomorrow to review
these parts as well.

-- 
blue skies,
  Martin.

Martin Schwidefsky
Linux for zSeries Development & Services
IBM Deutschland Entwicklung GmbH

"Reality continues to ruin my life." - Calvin.




* Re: [PATCH] kprobes for s390 architecture
  2006-06-12 19:40 ` Martin Schwidefsky
@ 2006-06-21  4:28   ` Mike Grundy
  2006-06-21 16:38     ` Martin Schwidefsky
  2006-06-21  9:40   ` Jan Glauber
  1 sibling, 1 reply; 31+ messages in thread
From: Mike Grundy @ 2006-06-21  4:28 UTC (permalink / raw)
  To: Martin Schwidefsky; +Cc: jan.glauber, linux-kernel, systemtap

Hi Martin - This patch implements the suggestions from your review. There were
a couple of points I wanted to go over:
> There are some more instructions missing that need fixup:
> "brxh" 0x84??????, "brxle" 0x85??????, "brc" 0xa7?4????,
> "brct" 0xa7?6????, "brctg" 0xa7?7????, "bctgr" 0xb946????,
> "brxhg" 0xec????????44 and "brxlg" 0xec??????45.
Since all of these are relative branches, and they don't save the psw, the
standard cleanup of adjusting the original psw by its offset from the
out-of-line address after the single step should cover them. Unless I'm
just being dense :-)
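
(In other words, for a taken relative branch the displacement from the copy
equals the displacement from the original, so the fix_offset() arithmetic
already in the patch lands on the right target either way; a sketch:)

	/* psw now points into, or relative to, the out-of-line copy */
	regs->psw.addr = (unsigned long) p->addr +
		((unsigned long) regs->psw.addr -
		 (unsigned long) p->ainsn.insn);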

> > +void __kprobes arch_arm_kprobe(struct kprobe *p)
> > +{
> > +	*p->addr = BREAKPOINT_INSTRUCTION;
> > +}
> > +
> > +void __kprobes arch_disarm_kprobe(struct kprobe *p)
> > +{
> > +	*p->addr = p->opcode;
> > +}
> > +
> 
> I would feel better if the kernel code is changed in a more controlled
> manner. Do an smp_call_function to avoid concurrent execution of the
> instruction you are changing.
I'm not sure that repeatedly executing the same operation on the same area of
memory on multiple CPUs is the answer. I'm not sure there is a right answer:
if the instruction is brought into the pipeline while it is being changed
on another cpu, it will either execute the original instruction and that probe
opportunity will be missed, execute the modified opcode and cause a probe hit
(which is ok, everything is ready for it), or stall the pipeline while it
figures out why the heck someone is modifying something in the I-cache (IIRC
that will slow things down a tad). I'd really like to plead "all the other
architectures do it this way", but instead I'll read every paper I can find
on I-cache operations and zSeries processors over dinner.

> Why do you disable the interrupts and the machine checks?
No longer sure it is necessary. Originally it was so that an interrupt
wouldn't cause recursion on a probe point (e.g. you are handling a probe
that was in the machine check code and you get a machine check interrupt).
So now I wonder too; I will get back to you on that one.

Thanks
Mike

 arch/s390/Kconfig              |   14 +
 arch/s390/kernel/Makefile      |    1
 arch/s390/kernel/entry.S       |   15 +
 arch/s390/kernel/entry64.S     |   16 +
 arch/s390/kernel/kprobes.c     |  543 +++++++++++++++++++++++++++++++++++++++++
 arch/s390/kernel/traps.c       |   42 ++-
 arch/s390/kernel/vmlinux.lds.S |    1
 arch/s390/mm/fault.c           |    8
 include/asm-s390/kdebug.h      |   57 ++++
 include/asm-s390/kprobes.h     |  105 +++++++
 10 files changed, 796 insertions(+), 6 deletions(-)

diff -urNp linux-2.6.17-rc6/arch/s390/Kconfig linux-2.6.17-rc6-kp390/arch/s390/Kconfig
--- linux-2.6.17-rc6/arch/s390/Kconfig	2006-06-05 17:57:02.000000000 -0700
+++ linux-2.6.17-rc6-kp390/arch/s390/Kconfig	2006-06-12 02:13:05.000000000 -0700
@@ -474,8 +474,22 @@ source "drivers/net/Kconfig"
 
 source "fs/Kconfig"
 
+menu "Instrumentation Support"
+	depends on EXPERIMENTAL
+
 source "arch/s390/oprofile/Kconfig"
 
+config KPROBES
+	bool "Kprobes (EXPERIMENTAL)"
+	depends on EXPERIMENTAL && MODULES
+	help
+	  Kprobes allows you to trap at almost any kernel address and
+	  execute a callback function.  register_kprobe() establishes
+	  a probepoint and specifies the callback.  Kprobes is useful
+	  for kernel debugging, non-intrusive instrumentation and testing.
+	  If in doubt, say "N".
+endmenu
+
 source "arch/s390/Kconfig.debug"
 
 source "security/Kconfig"
diff -urNp linux-2.6.17-rc6/arch/s390/kernel/entry64.S linux-2.6.17-rc6-kp390/arch/s390/kernel/entry64.S
--- linux-2.6.17-rc6/arch/s390/kernel/entry64.S	2006-06-05 17:57:02.000000000 -0700
+++ linux-2.6.17-rc6-kp390/arch/s390/kernel/entry64.S	2006-06-20 11:25:11.000000000 -0700
@@ -497,6 +497,8 @@ pgm_no_vtime2:
 #endif
 	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
 	lg	%r1,__TI_task(%r9)
+	tm	__LC_PGM_OLD_PSW+1(%r15),0x01	# kernel per event ?
+	jz	kernel_per
 	mvc	__THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
 	mvc	__THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
 	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
@@ -531,6 +533,20 @@ pgm_no_vtime3:
 	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
 	j	sysc_do_svc
 
+#
+# per was called from kernel, must be kprobes
+#
+kernel_per:
+	lgf     %r3,__LC_PGM_ILC	 # load program interruption code
+	lghi	%r8,0x7f
+	ngr	%r8,%r3			 # clear per-event-bit and ilc
+#	j	sysc_singlestep
+	lhi	%r0,__LC_PGM_OLD_PSW
+	sth	%r0,SP_TRAP(%r15)	# set trap indication to pgm check
+	la	%r2,SP_PTREGS(%r15)	# address of register-save area
+	larl	%r14,sysc_leave		# load adr. of system ret, no work
+	brcl	15,do_single_step	# branch to do_single_step
+
 /*
  * IO interrupt handler routine
  */
diff -urNp linux-2.6.17-rc6/arch/s390/kernel/entry.S linux-2.6.17-rc6-kp390/arch/s390/kernel/entry.S
--- linux-2.6.17-rc6/arch/s390/kernel/entry.S	2006-06-05 17:57:02.000000000 -0700
+++ linux-2.6.17-rc6-kp390/arch/s390/kernel/entry.S	2006-06-20 20:24:47.000000000 -0700
@@ -480,6 +480,8 @@ pgm_no_vtime2:
 	mvc	__THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
 	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
 	oi	__TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
+	tm	__LC_PGM_OLD_PSW+1(%r15),0x01	# kernel per event ?
+	bz	kernel_per
 	l	%r3,__LC_PGM_ILC	 # load program interruption code
 	la	%r8,0x7f
 	nr	%r8,%r3                  # clear per-event-bit and ilc
@@ -510,6 +512,19 @@ pgm_no_vtime3:
 	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
 	b	BASED(sysc_do_svc)
 
+#
+# per was called from kernel, must be kprobes
+#
+kernel_per:
+	l	%r3,__LC_PGM_ILC	# load program interruption code
+	la	%r8,0x7f
+	nr	%r8,%r3			# clear per-event-bit and ilc
+	mvi	SP_TRAP+1(%r15),0x28	# set trap indication to pgm check
+	la	%r2,SP_PTREGS(%r15)	# address of register-save area
+	l	%r1,BASED(.Lhandle_per)	# load adr. of per handler
+	la	%r14,BASED(sysc_leave)	# load adr. of system return
+	br	%r1			# branch to do_single_step
+
 /*
  * IO interrupt handler routine
  */
diff -urNp linux-2.6.17-rc6/arch/s390/kernel/kprobes.c linux-2.6.17-rc6-kp390/arch/s390/kernel/kprobes.c
--- linux-2.6.17-rc6/arch/s390/kernel/kprobes.c	1969-12-31 16:00:00.000000000 -0800
+++ linux-2.6.17-rc6-kp390/arch/s390/kernel/kprobes.c	2006-06-20 21:16:30.000000000 -0700
@@ -0,0 +1,543 @@
+/*
+ *  Kernel Probes (KProbes)
+ *  arch/s390/kernel/kprobes.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2006
+ *
+ * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
+ */
+
+#include <linux/config.h>
+#include <linux/kprobes.h>
+#include <linux/ptrace.h>
+#include <linux/preempt.h>
+#include <asm/cacheflush.h>
+#include <asm/kdebug.h>
+#include <asm/sections.h>
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+	/* Make sure the probe isn't going on a difficult instruction */
+	if (is_prohibited_opcode((kprobe_opcode_t *) p->addr))
+		return -EINVAL;
+
+	/* Use the get_insn_slot() facility for correctness */
+	if (!(p->ainsn.insn = get_insn_slot()))
+		return -ENOMEM;
+
+	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+	get_instruction_type(&p->ainsn);
+	p->opcode = *p->addr;
+	return 0;
+}
+
+int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction)
+{
+	switch (*(__u8 *) instruction) {
+	case 0x0c:      /* bassm */
+	case 0x0b:      /* bsm   */
+	case 0x83:      /* diag  */
+	case 0x44:      /* ex    */
+		return -EINVAL;
+	}
+	switch (*(__u16 *) instruction) {
+	case 0x0101:    /* pr    */
+	case 0xb25a:    /* bsa   */
+	case 0xb240:    /* bakr  */
+	case 0xb258:    /* bsg   */
+	case 0xb218:    /* pc    */
+	case 0xb228:    /* pt    */
+		return -EINVAL;
+	}
+	return 0;
+}
+
+void __kprobes get_instruction_type(struct arch_specific_insn *ainsn)
+{
+	ainsn->fixup = FIXUP_PSW_NORMAL;	/* default fixup method */
+	ainsn->reg = (*((__u8 *) ainsn->insn + 1) & 0xf0) >> 4; /* r1 operand */
+
+	/* save the instruction length in bytes: per pop 5-5, the first
+	 * two bits of the opcode encode the length as one, two or
+	 * three halfwords */
+	ainsn->ilen = ((*(__u8 *) ainsn->insn >> 6) + 3) & ~1;
+
+	switch (*(__u8 *) ainsn->insn) {
+	case 0x05:	/* balr	*/
+	case 0x0d:	/* basr */
+		ainsn->fixup = FIXUP_RETURN_REGISTER;
+		/* if r2 = 0, no branch will be taken */
+		if ((*((__u8 *) ainsn->insn + 1) & 0x0f) == 0)
+			ainsn->fixup |= FIXUP_BRANCH_NOT_TAKEN;
+		break;
+	case 0x06:	/* bctr	*/
+	case 0x07:	/* bcr	*/
+		ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
+		break;
+	case 0x45:	/* bal	*/
+	case 0x4D:	/* bas	*/
+		ainsn->fixup = FIXUP_RETURN_REGISTER;
+		break;
+	case 0x47:	/* bc	*/
+	case 0x46:	/* bct	*/
+	case 0x86:	/* bxh	*/
+	case 0x87:	/* bxle	*/
+		ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
+		break;
+	case 0xA7:	/* bras	*/
+		if ((*((__u8 *) ainsn->insn + 1) & 0x0f) == 0x05) {
+			ainsn->fixup = FIXUP_RETURN_REGISTER;
+		}
+		break;
+	case 0xC0:	/* brasl */
+		if ((*((__u8 *) ainsn->insn + 1) & 0x0f) == 0x05) {
+			ainsn->fixup = FIXUP_RETURN_REGISTER;
+		}
+		break;
+	case 0xEB:
+		if (*((__u8 *) ainsn->insn + 5) == 0x44 ||	/* bxhg  */
+			*((__u8 *) ainsn->insn + 5) == 0x45) {	/* bxleg */
+			ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
+		}
+		break;
+	case 0xE3:	/* bctg	*/
+		if (*((__u8 *) ainsn->insn + 5) == 0x46) {
+			ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
+		}
+		break;
+	default:
+		ainsn->fixup = FIXUP_PSW_NORMAL;
+	}
+}
+
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+	*p->addr = BREAKPOINT_INSTRUCTION;
+}
+
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
+{
+	*p->addr = p->opcode;
+}
+
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+	mutex_lock(&kprobe_mutex);
+	free_insn_slot(p->ainsn.insn);
+	mutex_unlock(&kprobe_mutex);
+}
+
+static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+{
+	per_cr_bits kprobe_per_regs[1];
+
+	memset(kprobe_per_regs, 0, sizeof(per_cr_bits));
+	regs->psw.addr = (unsigned long)p->ainsn.insn | PSW_ADDR_AMODE;
+
+	/* Set up the per control reg info, will pass to lctl */
+	kprobe_per_regs[0].em_instruction_fetch = 1;
+	kprobe_per_regs[0].starting_addr = (unsigned long)p->ainsn.insn;
+	kprobe_per_regs[0].ending_addr = (unsigned long)p->ainsn.insn + 1;
+
+	/* Set the PER control regs, turns on single step for this address */
+	__ctl_load(kprobe_per_regs, 9, 11);
+	regs->psw.mask |= PSW_MASK_PER;
+	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
+}
+
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+	kcb->prev_kprobe.kp = kprobe_running();
+	kcb->prev_kprobe.status = kcb->kprobe_status;
+	kcb->prev_kprobe.kprobe_saved_imask = kcb->kprobe_saved_imask;
+	memcpy(kcb->prev_kprobe.kprobe_saved_ctl, kcb->kprobe_saved_ctl,
+	       sizeof(kcb->kprobe_saved_ctl));
+}
+
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+	kcb->kprobe_status = kcb->prev_kprobe.status;
+	kcb->kprobe_saved_imask = kcb->prev_kprobe.kprobe_saved_imask;
+	memcpy(kcb->kprobe_saved_ctl, kcb->prev_kprobe.kprobe_saved_ctl,
+	       sizeof(kcb->kprobe_saved_ctl));
+}
+
+static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+						struct kprobe_ctlblk *kcb)
+{
+	__get_cpu_var(current_kprobe) = p;
+	/* Save the interrupt and per flags */
+	kcb->kprobe_saved_imask = regs->psw.mask &
+	    (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
+	/* Save the control regs that govern PER */
+	__ctl_store(kcb->kprobe_saved_ctl, 9, 11);
+}
+
+/* Called with kretprobe_lock held */
+void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
+					struct pt_regs *regs)
+{
+	struct kretprobe_instance *ri;
+
+	if ((ri = get_free_rp_inst(rp)) != NULL) {
+		ri->rp = rp;
+		ri->task = current;
+		ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];
+
+		/* Replace the return addr with trampoline addr */
+		regs->gprs[14] = (unsigned long)&kretprobe_trampoline;
+
+		add_rp_inst(ri);
+	} else {
+		rp->nmissed++;
+	}
+}
+
+static int __kprobes kprobe_handler(struct pt_regs *regs)
+{
+	struct kprobe *p;
+	int ret = 0;
+	unsigned long *addr = (unsigned long *)
+		((regs->psw.addr & PSW_ADDR_INSN) - 2);
+	struct kprobe_ctlblk *kcb;
+
+	/*
+	 * We don't want to be preempted for the entire
+	 * duration of kprobe processing
+	 */
+	preempt_disable();
+	kcb = get_kprobe_ctlblk();
+
+	/* Check we're not actually recursing */
+	if (kprobe_running()) {
+		p = get_kprobe(addr);
+		if (p) {
+			if (kcb->kprobe_status == KPROBE_HIT_SS &&
+			    *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
+				regs->psw.mask &= ~PSW_MASK_PER;
+				regs->psw.mask |= kcb->kprobe_saved_imask;
+				goto no_kprobe;
+			}
+			/* We have reentered the kprobe_handler(), since
+			 * another probe was hit while within the handler.
+			 * We here save the original kprobes variables and
+			 * just single step on the instruction of the new probe
+			 * without calling any user handlers.
+			 */
+			save_previous_kprobe(kcb);
+			set_current_kprobe(p, regs, kcb);
+			kprobes_inc_nmissed_count(p);
+			prepare_singlestep(p, regs);
+			kcb->kprobe_status = KPROBE_REENTER;
+			return 1;
+		} else {
+			p = __get_cpu_var(current_kprobe);
+			if (p->break_handler && p->break_handler(p, regs)) {
+				goto ss_probe;
+			}
+		}
+		goto no_kprobe;
+	}
+
+	p = get_kprobe(addr);
+	if (!p) {
+		if (*addr != BREAKPOINT_INSTRUCTION) {
+			/*
+			 * The breakpoint instruction was removed right
+			 * after we hit it.  Another cpu has removed
+			 * either a probepoint or a debugger breakpoint
+			 * at this address.  In either case, no further
+			 * handling of this interrupt is appropriate.
+			 *
+			 */
+			ret = 1;
+		}
+		/* Not one of ours: let kernel handle it */
+		goto no_kprobe;
+	}
+
+	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+	set_current_kprobe(p, regs, kcb);
+	if (p->pre_handler && p->pre_handler(p, regs))
+		/* handler has already set things up, so skip ss setup */
+		return 1;
+
+	ss_probe:
+	prepare_singlestep(p, regs);
+	kcb->kprobe_status = KPROBE_HIT_SS;
+	return 1;
+
+	no_kprobe:
+	preempt_enable_no_resched();
+	return ret;
+}
+
+/*
+ * Function return probe trampoline:
+ * 	- init_kprobes() establishes a probepoint here
+ * 	- When the probed function returns, this probe
+ * 		causes the handlers to fire
+ */
+void kretprobe_trampoline_holder(void)
+{
+	asm volatile (".global kretprobe_trampoline\n"
+		      "kretprobe_trampoline:\n" "bcr 0,0\n");
+}
+
+/*
+ * Called when the probe at kretprobe trampoline is hit
+ */
+int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kretprobe_instance *ri = NULL;
+	struct hlist_head *head;
+	struct hlist_node *node, *tmp;
+	unsigned long flags, orig_ret_address = 0;
+	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+
+	spin_lock_irqsave(&kretprobe_lock, flags);
+	head = kretprobe_inst_table_head(current);
+
+	/*
+	 * It is possible to have multiple instances associated with a given
+	 * task either because multiple functions in the call path
+	 * have a return probe installed on them, and/or more than one
+	 * return probe was registered for a target function.
+	 *
+	 * We can handle this because:
+	 *     - instances are always inserted at the head of the list
+	 *     - when multiple return probes are registered for the same
+	 *       function, the first instance's ret_addr will point to the
+	 *       real return address, and all the rest will point to
+	 *       kretprobe_trampoline
+	 */
+	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+		if (ri->task != current)
+			/* another task is sharing our hash bucket */
+			continue;
+
+		if (ri->rp && ri->rp->handler)
+			ri->rp->handler(ri, regs);
+
+		orig_ret_address = (unsigned long)ri->ret_addr;
+		recycle_rp_inst(ri);
+
+		if (orig_ret_address != trampoline_address) {
+			/*
+			 * This is the real return address. Any other
+			 * instances associated with this task are for
+			 * other calls deeper on the call stack
+			 */
+			break;
+		}
+	}
+	BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
+	regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;
+
+	reset_current_kprobe();
+	spin_unlock_irqrestore(&kretprobe_lock, flags);
+	preempt_enable_no_resched();
+
+	/*
+	 * By returning a non-zero value, we are telling
+	 * kprobe_handler() that we don't want the post_handler
+	 * to run (and have re-enabled preemption)
+	 */
+	return 1;
+}
+
+/*
+ * Called after single-stepping.  p->addr is the address of the
+ * instruction whose first byte has been replaced by the "breakpoint"
+ * instruction.  To avoid the SMP problems that can occur when we
+ * temporarily put back the original opcode to single-step, we
+ * single-stepped a copy of the instruction.  The address of this
+ * copy is p->ainsn.insn.
+ */
+static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	regs->psw.addr &= PSW_ADDR_INSN;
+
+	if (p->ainsn.fixup & FIXUP_PSW_NORMAL)
+		regs->psw.addr = (unsigned long)p->addr +
+				((unsigned long)p->ainsn.insn -
+				 (unsigned long)regs->psw.addr);
+
+	if (p->ainsn.fixup & FIXUP_BRANCH_NOT_TAKEN)
+		if ((unsigned long)regs->psw.addr -
+		    (unsigned long)p->ainsn.insn == p->ainsn.ilen)
+			regs->psw.addr = (unsigned long)p->addr + p->ainsn.ilen;
+
+	if (p->ainsn.fixup & FIXUP_RETURN_REGISTER)
+		regs->gprs[p->ainsn.reg] = (unsigned long)p->addr + p->ainsn.ilen;
+
+	regs->psw.addr |= PSW_ADDR_AMODE;
+	/* turn off PER mode */
+	regs->psw.mask &= ~PSW_MASK_PER;
+	/* Restore the original per control regs */
+	__ctl_load(kcb->kprobe_saved_ctl, 9, 11);
+	regs->psw.mask |= kcb->kprobe_saved_imask;
+}
+
+static int __kprobes post_kprobe_handler(struct pt_regs *regs)
+{
+	struct kprobe *cur = kprobe_running();
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	if (!cur)
+		return 0;
+
+	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+		kcb->kprobe_status = KPROBE_HIT_SSDONE;
+		cur->post_handler(cur, regs, 0);
+	}
+
+	resume_execution(cur, regs);
+
+	/* Restore the original saved kprobes variables and continue. */
+	if (kcb->kprobe_status == KPROBE_REENTER) {
+		restore_previous_kprobe(kcb);
+		goto out;
+	}
+	reset_current_kprobe();
+	out:
+	preempt_enable_no_resched();
+
+	/*
+	 * if somebody else is singlestepping across a probe point, psw mask
+	 * will have PER set, in which case, continue the remaining processing
+	 * of do_single_step, as if this is not a probe hit.
+	 */
+	if (regs->psw.mask & PSW_MASK_PER) {
+		return 0;
+	}
+
+	return 1;
+}
+
+static int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+{
+	struct kprobe *cur = kprobe_running();
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+		return 1;
+
+	if (kcb->kprobe_status & KPROBE_HIT_SS) {
+		resume_execution(cur, regs);
+		regs->psw.mask |= kcb->kprobe_saved_imask;
+
+		reset_current_kprobe();
+		preempt_enable_no_resched();
+	}
+	return 0;
+}
+
+/*
+ * Wrapper routine for handling exceptions.
+ */
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+				       unsigned long val, void *data)
+{
+	struct die_args *args = (struct die_args *)data;
+	int ret = NOTIFY_DONE;
+
+	switch (val) {
+	case DIE_BPT:
+		if (kprobe_handler(args->regs))
+			ret = NOTIFY_STOP;
+		break;
+	case DIE_SSTEP:
+		if (post_kprobe_handler(args->regs))
+			ret = NOTIFY_STOP;
+		break;
+	case DIE_TRAP:
+	case DIE_PAGE_FAULT:
+		/* kprobe_running() needs smp_processor_id() */
+		preempt_disable();
+		if (kprobe_running() &&
+		    kprobe_fault_handler(args->regs, args->trapnr))
+			ret = NOTIFY_STOP;
+		preempt_enable();
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct jprobe *jp = container_of(p, struct jprobe, kp);
+	unsigned long addr;
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));
+
+	/* setup return addr to the jprobe handler routine */
+	regs->psw.addr = (unsigned long)(jp->entry) | PSW_ADDR_AMODE;
+
+	/* r14 is the function return address */
+	kcb->jprobe_saved_r14 = (unsigned long)regs->gprs[14];
+	/* r15 is the stack pointer */
+	kcb->jprobe_saved_r15 = (unsigned long)regs->gprs[15];
+	addr = (unsigned long)kcb->jprobe_saved_r15;
+
+	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr,
+	       MIN_STACK_SIZE(addr));
+	return 1;
+}
+
+void __kprobes jprobe_return(void)
+{
+	asm volatile (".long 0x00020000");
+}
+
+void __kprobes jprobe_return_end(void)
+{
+	asm volatile ("bcr 0,0");
+}
+
+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_r15);
+
+	/* Put the regs back */
+	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
+	/* put the stack back */
+	memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
+	       MIN_STACK_SIZE(stack_addr));
+	preempt_enable_no_resched();
+	return 1;
+}
+
+static struct kprobe trampoline_p = {
+	.addr = (kprobe_opcode_t *) & kretprobe_trampoline,
+	.pre_handler = trampoline_probe_handler
+};
+
+int __init arch_init_kprobes(void)
+{
+	return register_kprobe(&trampoline_p);
+}
diff -urNp linux-2.6.17-rc6/arch/s390/kernel/Makefile linux-2.6.17-rc6-kp390/arch/s390/kernel/Makefile
--- linux-2.6.17-rc6/arch/s390/kernel/Makefile	2006-06-05 17:57:02.000000000 -0700
+++ linux-2.6.17-rc6-kp390/arch/s390/kernel/Makefile	2006-06-12 02:13:05.000000000 -0700
@@ -21,6 +21,7 @@ obj-$(CONFIG_COMPAT)		+= compat_linux.o 
 obj-$(CONFIG_BINFMT_ELF32)	+= binfmt_elf32.o
 
 obj-$(CONFIG_VIRT_TIMER)	+= vtime.o
+obj-$(CONFIG_KPROBES)		+= kprobes.o
 
 # Kexec part
 S390_KEXEC_OBJS := machine_kexec.o crash.o
diff -urNp linux-2.6.17-rc6/arch/s390/kernel/traps.c linux-2.6.17-rc6-kp390/arch/s390/kernel/traps.c
--- linux-2.6.17-rc6/arch/s390/kernel/traps.c	2006-06-05 17:57:02.000000000 -0700
+++ linux-2.6.17-rc6-kp390/arch/s390/kernel/traps.c	2006-06-12 03:30:24.000000000 -0700
@@ -30,6 +30,7 @@
 #include <linux/module.h>
 #include <linux/kallsyms.h>
 #include <linux/reboot.h>
+#include <linux/kprobes.h>
 
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -40,6 +41,7 @@
 #include <asm/s390_ext.h>
 #include <asm/lowcore.h>
 #include <asm/debug.h>
+#include <asm/kdebug.h>
 
 /* Called from entry.S only */
 extern void handle_per_exception(struct pt_regs *regs);
@@ -75,6 +77,20 @@ static int kstack_depth_to_print = 12;
 static int kstack_depth_to_print = 20;
 #endif /* CONFIG_64BIT */
 
+ATOMIC_NOTIFIER_HEAD(s390die_chain);
+
+int register_die_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&s390die_chain, nb);
+}
+EXPORT_SYMBOL(register_die_notifier);
+
+int unregister_die_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&s390die_chain, nb);
+}
+EXPORT_SYMBOL(unregister_die_notifier);
+
 /*
  * For show_trace we have three different stacks to consider:
  *   - the panic stack which is used if the kernel stack has overflown
@@ -308,8 +324,9 @@ report_user_fault(long interruption_code
 #endif
 }
 
-static void inline do_trap(long interruption_code, int signr, char *str,
-                           struct pt_regs *regs, siginfo_t *info)
+static void __kprobes inline do_trap(long interruption_code, int signr,
+					char *str, struct pt_regs *regs,
+					siginfo_t *info)
 {
 	/*
 	 * We got all needed information from the lowcore and can
@@ -318,6 +335,10 @@ static void inline do_trap(long interrup
         if (regs->psw.mask & PSW_MASK_PSTATE)
 		local_irq_enable();
 
+	if (notify_die(DIE_TRAP, str, regs, interruption_code,
+				interruption_code, signr) == NOTIFY_STOP)
+		return;
+
         if (regs->psw.mask & PSW_MASK_PSTATE) {
                 struct task_struct *tsk = current;
 
@@ -339,8 +360,12 @@ static inline void *get_check_address(st
 	return (void *)((regs->psw.addr-S390_lowcore.pgm_ilc) & PSW_ADDR_INSN);
 }
 
-void do_single_step(struct pt_regs *regs)
+void __kprobes do_single_step(struct pt_regs *regs)
 {
+	if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0,
+					SIGTRAP) == NOTIFY_STOP){
+		return;
+	}
 	if ((current->ptrace & PT_PTRACED) != 0)
 		force_sig(SIGTRAP, current);
 }
@@ -466,8 +491,15 @@ asmlinkage void illegal_op(struct pt_reg
 #endif
 		} else
 			signal = SIGILL;
-	} else
-		signal = SIGILL;
+	} else {
+		/*
+		 * If we get an illegal op in kernel mode, send it through the
+		 * kprobes notifier. If kprobes doesn't pick it up, SIGILL
+		 */
+		if (notify_die(DIE_BPT, "bpt", regs, interruption_code,
+					3, SIGTRAP) != NOTIFY_STOP)
+			signal = SIGILL;
+	}
 
 #ifdef CONFIG_MATHEMU
         if (signal == SIGFPE)
diff -urNp linux-2.6.17-rc6/arch/s390/kernel/vmlinux.lds.S linux-2.6.17-rc6-kp390/arch/s390/kernel/vmlinux.lds.S
--- linux-2.6.17-rc6/arch/s390/kernel/vmlinux.lds.S	2006-06-05 17:57:02.000000000 -0700
+++ linux-2.6.17-rc6-kp390/arch/s390/kernel/vmlinux.lds.S	2006-06-12 02:13:05.000000000 -0700
@@ -25,6 +25,7 @@ SECTIONS
 	*(.text)
 	SCHED_TEXT
 	LOCK_TEXT
+	KPROBES_TEXT
 	*(.fixup)
 	*(.gnu.warning)
 	} = 0x0700
diff -urNp linux-2.6.17-rc6/arch/s390/mm/fault.c linux-2.6.17-rc6-kp390/arch/s390/mm/fault.c
--- linux-2.6.17-rc6/arch/s390/mm/fault.c	2006-06-05 17:57:02.000000000 -0700
+++ linux-2.6.17-rc6-kp390/arch/s390/mm/fault.c	2006-06-12 02:13:05.000000000 -0700
@@ -26,10 +26,12 @@
 #include <linux/console.h>
 #include <linux/module.h>
 #include <linux/hardirq.h>
+#include <linux/kprobes.h>
 
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
+#include <asm/kdebug.h>
 
 #ifndef CONFIG_64BIT
 #define __FAIL_ADDR_MASK 0x7ffff000
@@ -160,7 +162,7 @@ static void do_sigsegv(struct pt_regs *r
  *   11       Page translation     ->  Not present       (nullification)
  *   3b       Region third trans.  ->  Not present       (nullification)
  */
-static inline void
+static inline void __kprobes
 do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
 {
         struct task_struct *tsk;
@@ -174,6 +176,10 @@ do_exception(struct pt_regs *regs, unsig
         tsk = current;
         mm = tsk->mm;
 	
+	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
+					SIGSEGV) == NOTIFY_STOP)
+		return;
+
 	/* 
          * Check for low-address protection.  This needs to be treated
 	 * as a special case because the translation exception code 
diff -urNp linux-2.6.17-rc6/include/asm-s390/kdebug.h linux-2.6.17-rc6-kp390/include/asm-s390/kdebug.h
--- linux-2.6.17-rc6/include/asm-s390/kdebug.h	1969-12-31 16:00:00.000000000 -0800
+++ linux-2.6.17-rc6-kp390/include/asm-s390/kdebug.h	2006-06-12 03:27:25.000000000 -0700
@@ -0,0 +1,57 @@
+#ifndef _S390_KDEBUG_H
+#define _S390_KDEBUG_H 1
+
+/*
+ * Feb 2006 Ported to s390 <grundym@us.ibm.com>
+ */
+#include <linux/notifier.h>
+
+struct pt_regs;
+
+struct die_args {
+	struct pt_regs *regs;
+	const char *str;
+	long err;
+	int trapnr;
+	int signr;
+};
+
+/* Note - you should never unregister because that can race with NMIs.
+ * If you really want to do it first unregister - then synchronize_sched
+ *  - then free.
+ */
+extern int register_die_notifier(struct notifier_block *);
+extern int unregister_die_notifier(struct notifier_block *);
+extern struct atomic_notifier_head s390die_chain;
+
+
+enum die_val {
+	DIE_OOPS = 1,
+	DIE_BPT,
+	DIE_SSTEP,
+	DIE_PANIC,
+	DIE_NMI,
+	DIE_DIE,
+	DIE_NMIWATCHDOG,
+	DIE_KERNELDEBUG,
+	DIE_TRAP,
+	DIE_GPF,
+	DIE_CALL,
+	DIE_NMI_IPI,
+	DIE_PAGE_FAULT,
+};
+
+static inline int notify_die(enum die_val val, const char *str,
+			struct pt_regs *regs, long err, int trap, int sig)
+{
+	struct die_args args = {
+		.regs = regs,
+		.str = str,
+		.err = err,
+		.trapnr = trap,
+		.signr = sig
+	};
+	return atomic_notifier_call_chain(&s390die_chain, val, &args);
+}
+
+#endif
diff -urNp linux-2.6.17-rc6/include/asm-s390/kprobes.h linux-2.6.17-rc6-kp390/include/asm-s390/kprobes.h
--- linux-2.6.17-rc6/include/asm-s390/kprobes.h	1969-12-31 16:00:00.000000000 -0800
+++ linux-2.6.17-rc6-kp390/include/asm-s390/kprobes.h	2006-06-20 20:41:58.000000000 -0700
@@ -0,0 +1,105 @@
+#ifndef _ASM_S390_KPROBES_H
+#define _ASM_S390_KPROBES_H
+/*
+ *  Kernel Probes (KProbes)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2004
+ *
+ * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
+ *		Probes initial implementation ( includes suggestions from
+ *		Rusty Russell).
+ * 2004-Nov	Modified for PPC64 by Ananth N Mavinakayanahalli
+ *		<ananth@in.ibm.com>
+ * 2005-Dec	Used as a template for s390 by Mike Grundy
+ * 		<grundym@us.ibm.com>
+ */
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/percpu.h>
+
+#define  __ARCH_WANT_KPROBES_INSN_SLOT
+struct pt_regs;
+struct kprobe;
+
+typedef u16 kprobe_opcode_t;
+#define BREAKPOINT_INSTRUCTION	0x0002
+
+/* Maximum instruction size is 3 (16bit) halfwords: */
+#define MAX_INSN_SIZE		0x0003
+#define MAX_STACK_SIZE 		64
+#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \
+	(((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) \
+	? (MAX_STACK_SIZE) \
+	: (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
+
+#define FIXUP_NOBRANCH 0xFFFF0000
+
+#define JPROBE_ENTRY(pentry)	(kprobe_opcode_t *)(pentry)
+
+#define ARCH_SUPPORTS_KRETPROBES
+
+#define	FIXUP_PSW_NORMAL 0x08
+#define FIXUP_BRANCH_NOT_TAKEN	0x04
+#define FIXUP_PSW_BRANCH 0x04
+#define FIXUP_RETURN_REGISTER	0x02
+
+/* Architecture specific copy of original instruction */
+struct arch_specific_insn {
+	/* copy of original instruction */
+	kprobe_opcode_t *insn;
+	int fixup;
+	int ilen;
+	int reg;
+};
+
+struct prev_kprobe {
+	struct kprobe *kp;
+	unsigned long status;
+	unsigned long saved_psw;
+	unsigned long kprobe_saved_imask;
+	unsigned long kprobe_saved_ctl[3];
+};
+
+/* per-cpu kprobe control block */
+struct kprobe_ctlblk {
+	unsigned long kprobe_status;
+	unsigned long kprobe_saved_imask;
+	unsigned long kprobe_saved_ctl[3];
+	struct pt_regs jprobe_saved_regs;
+	unsigned long jprobe_saved_r14;
+	unsigned long jprobe_saved_r15;
+	struct prev_kprobe prev_kprobe;
+	kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
+};
+
+void arch_remove_kprobe(struct kprobe *p);
+void kretprobe_trampoline(void);
+int  is_prohibited_opcode(kprobe_opcode_t *instruction);
+void  get_instruction_type(struct arch_specific_insn *ainsn);
+
+#ifdef CONFIG_KPROBES
+
+extern int kprobe_exceptions_notify(struct notifier_block *self,
+				    unsigned long val, void *data);
+#else	/* !CONFIG_KPROBES */
+static inline int kprobe_exceptions_notify(struct notifier_block *self,
+					   unsigned long val, void *data)
+{
+	return 0;
+}
+#endif
+#endif	/* _ASM_S390_KPROBES_H */

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH] kprobes for s390 architecture
  2006-06-12 19:40 ` Martin Schwidefsky
  2006-06-21  4:28   ` Mike Grundy
@ 2006-06-21  9:40   ` Jan Glauber
  1 sibling, 0 replies; 31+ messages in thread
From: Jan Glauber @ 2006-06-21  9:40 UTC (permalink / raw)
  To: schwidefsky; +Cc: Mike Grundy, linux-kernel, systemtap

On Mon, 2006-06-12 at 21:40 +0200, Martin Schwidefsky wrote:
> On Mon, 2006-06-12 at 09:15 -0400, Mike Grundy wrote:
> > +/* get_instruction_type will return 0 if only the regular offset adjustments
> > + * after out of line singlestep are required. If a register needs to be fixed,
> > + * bits 16-23 will contain the register number, bits 24-31 contain the length
> > + * of the instruction unit. If fixup is only required when the branch is not
> > + * taken, bits 0-15 will all be set.
> > + */
> > +int __kprobes get_instruction_type(kprobe_opcode_t * instruction)
> > +{
> > +	__u8 opcode[6];
> > +	int ret = 0;
> > +
> > +	memcpy(opcode, instruction, 6 * sizeof(__u8));
> 
> Again that memcpy. Why don't you just cast the instruction pointer to
> __u8 and __u16 and dereference it? 
> 
> The following switch deals with all the instructions that need special
> handling. Please get rid of the BALR/BASR/BCR/BCTR/... defines and use
> the opcode number directly. Add a comment which instruction it is like
> in the new is_prohibited_opcode. All these defines are only used in
> get_instruction_type and the opcodes will certainly not change, only new
> ones will get added. No point in all those #defines.
> 
> > +
> > +	switch (opcode[0]) {
> > +		/* RR Format - instruction unit length = 2
> > +		 *  ________ ____ ____
> > +		 * |Op Code | R1 | R2 |
> > +		 * |________|_M1_|____|
> > +		 * 0         8   12  15
> > +		 */
> > +	case BALR:	/* PSW addr saved in R1, branch address in R2 */
> > +		ret = (opcode[1] & 0xf0) + 2;
> > +		/* Special non branching use of BALR */
> > +		if ((opcode[1] & 0x0f) == 0)
> > +			ret &= FIXUP_NOBRANCH;
> > +		break;
> 
> ((opcode[1] & 0xf0) + 2) & FIXUP_NOBRANCH is always 0. If the target
> register is 0 no branch takes place but R1 still needs fixup.
> resume_execution will just fixup the psw address in that case. I think
> you meant "ret |= FIXUP_NOBRANCH".
> 
> 
> > +	case BASR:	/* PSW addr saved in R1, branch address in R2 */
> > +		ret = (opcode[1] & 0xf0) + 2;
> > +		/* Special non branching use of BASR */
> > +		if ((opcode[1] & 0x0f) == 0)
> > +			ret &= FIXUP_NOBRANCH;
> > +		break;
> 
> Same here..
> 
> > +	case BCR:	/* M1 is mask val (condition), branch addr in R2 */
> > +		ret = FIXUP_NOBRANCH & 2;
> > +		break;
> 
> ..here..
> 
> > +	case BCTR:	/* R1 is count down, R2 is branch addr until R1 = 0 */
> > +		ret = FIXUP_NOBRANCH & 2;
> > +		break;
> 
> ..here..
> 
> > +		/* RX Format - instruction unit length = 4
> > +		 *  ________ ____ ____ ____ ____________
> > +		 * |Op Code | R1 | X2 | B2 |     D2     |
> > +		 * |________|_M1_|____|____|____________|
> > +		 * 0         8   12   16   20          31
> > +		 */
> > +	case BAL:	/* PSW addr saved in R1, branch addr D2(X2,B2) */
> > +		ret = (opcode[1] & 0xf0) + 4;
> > +		break;
> > +	case BAS:	/* PSW addr saved in R1, branch addr D2(X2,B2) */
> > +		ret = (opcode[1] & 0xf0) + 4;
> > +		break;
> > +	case BC:	/* M1 is mask val (condition), branch addr D2(X2,B2) */
> > +		ret = FIXUP_NOBRANCH & 4;
> > +		break;
> 
> ..here..
> 
> > +	case BCT:	/* R1 is count down, D2(X2,B2) is branch addr */
> > +		ret = FIXUP_NOBRANCH & 4;
> > +		break;
> 
> ..here..
> 
> > +		/* RI Format - instruction unit length = 4
> > +		 *  ________ ____ ____ _________________
> > +		 * |Op Code | R1 |OpCd|       I2        |
> > +		 * |________|____|____|_________________|
> > +		 * 0         8   12   16               31
> > +		 */
> > +	case 0xA7:	/* first byte (multiple ops have same 1st byte) */
> > +		if ((opcode[1] & 0x0f) == BRAS) {
> > +			ret = (opcode[1] & 0xf0) + 4;
> > +		}
> > +		break;
> > +		/* RS Format - instruction unit length = 4
> > +		 *  ________ ____ ____ ____ ____________
> > +		 * |Op Code | R1 | R3 | B2 |     D2     |
> > +		 * |________|____|_M3_|____|____________|
> > +		 * 0         8   12   16   20          31
> > +		 */
> > +	case BXH:
> > +		ret = FIXUP_NOBRANCH & 4;
> > +		break;
> 
> ..here..
> 
> > +	case BXLE:
> > +		ret = FIXUP_NOBRANCH & 4;
> > +		break;
> 
> ..here..
> 
> > +		/* RIL Format - instruction unit length = 6
> > +		 *  ________ ____ ____ _____________/______________
> > +		 * |Op Code | R1 |OpCd|            I2              |
> > +		 * |________|_M1_|____|_____________/______________|
> > +		 * 0         8   12   16                          47
> > +		 */
> > +	case 0xC0:
> > +		if ((opcode[1] & 0x0f) == BRASL) {
> > +			ret = (opcode[1] & 0xf0) + 6;
> > +		} else if ((opcode[1] & 0x0f) == BRCL) {
> > +			ret = FIXUP_NOBRANCH & 6;
> > +		}
> > +		break;
> 
> ..here..
> 
> > +		/* RSY Format - instruction unit length = 6
> > +		 *  ________ ____ ____ ____ __/__ ________ ________
> > +		 * |Op Code | R1 | R3 | B2 | DL2 |  DH2   |Op Code |
> > +		 * |________|____|_M3_|____|__/__|________|________|
> > +		 * 0         8   12   16   20    32       40      47
> > +		 */
> > +	case 0xEB:
> > +		if (opcode[5] == BXHG || opcode[5] == BXLEG) {
> > +			ret = FIXUP_NOBRANCH & 6;
> > +		}
> > +		break;
> 
> ..here..
> 
> > +		/* RXY Format - instruction unit length = 6
> > +		 *  ________ ____ ____ ____ __/__ ________ ________
> > +		 * |Op Code | R1 | X2 | B2 | DL2 |  DH2   |Op Code |
> > +		 * |________|____|____|____|__/__|________|________|
> > +		 * 0         8   12   16   20    32       40      47
> > +		 */
> > +	case 0xE3:
> > +		if (opcode[5] == BCTG) {
> > +			ret = FIXUP_NOBRANCH & 6;
> > +		}
> > +		break;
> 
> ..and here.
> 
> > +	default:
> > +		break;
> > +	}
> > +	return ret;
> > +}
> > +
> 
> There are some more instructions missing that need fixup:
> "brxh" 0x84??????, "brxle" 0x85??????, "brc" 0xa7?4????,
> "brct" 0xa7?6????, "brctg" 0xa7?7????, "bctgr" 0xb946????,
> "brxhg" 0xec????????44 and "brxlg" 0xec??????45.

We need to handle lpsw and larl too, since they change the instruction
pointer.

Jan

---
Jan Glauber
IBM Linux Technology Center
Linux on zSeries Development, Boeblingen


^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH] kprobes for s390 architecture
  2006-06-12 13:15 [PATCH] kprobes for s390 architecture Mike Grundy
  2006-06-12 19:40 ` Martin Schwidefsky
@ 2006-06-21 16:23 ` Jan Glauber
  1 sibling, 0 replies; 31+ messages in thread
From: Jan Glauber @ 2006-06-21 16:23 UTC (permalink / raw)
  To: Mike Grundy; +Cc: linux-kernel, schwidefsky, systemtap

On Mon, 2006-06-12 at 09:15 -0400, Mike Grundy wrote:
> +int __kprobes arch_prepare_kprobe(struct kprobe *p)
> +{
> +	int ret = 0;
> +
> +	/* Make sure the probe isn't going on a difficult instruction */
> +	if (is_prohibited_opcode((kprobe_opcode_t *) p->addr))
> +		ret = -EINVAL;
> +
> +	/* Use the get_insn_slot() facility for correctness */
> +	if (!ret) {
> +		p->ainsn.insn = get_insn_slot();
> +		if (!p->ainsn.insn) {
> +			ret = -ENOMEM;
> +		} else {
> +			/* this should only happen if you got the slot */
> +			memcpy(p->ainsn.insn, p->addr,
> +			       MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
> +			p->ainsn.inst_type =
> +			    get_instruction_type(p->ainsn.insn);
> +		}
> +	}
> +	p->opcode = *p->addr;
> +	return ret;

I think we should also check for correct instruction alignment in this
function (2 bytes on s390), like:

if ((unsigned long)p->addr & 0x01) {
	printk("Attempt to register kprobe at an unaligned address\n");
	return -EINVAL;
}
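
(All s390 instructions start on a halfword boundary, so a set low-order
address bit can never be the start of a valid instruction.)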

Jan


---
Jan Glauber
IBM Linux Technology Center
Linux on zSeries Development, Boeblingen


^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH] kprobes for s390 architecture
  2006-06-21  4:28   ` Mike Grundy
@ 2006-06-21 16:38     ` Martin Schwidefsky
  2006-06-21 17:15       ` Mike Grundy
                         ` (2 more replies)
  0 siblings, 3 replies; 31+ messages in thread
From: Martin Schwidefsky @ 2006-06-21 16:38 UTC (permalink / raw)
  To: Mike Grundy; +Cc: jan.glauber, linux-kernel, systemtap

On Tue, 2006-06-20 at 21:28 -0700, Mike Grundy wrote:
> Hi Martin - This patch implements the suggestions from your review. There were
> a couple points I wanted to go over:
> > There are some more instructions missing that need fixup:
> > "brxh" 0x84??????, "brxle" 0x85??????, "brc" 0xa7?4????,
> > "brct" 0xa7?6????, "brctg" 0xa7?7????, "bctgr" 0xb946????,
> > "brxhg" 0xec????????44 and "brxlg" 0xec??????45.
> Since all of these are relative branches, and they don't save the psw, the
> standard cleanup of adjusting the original psw by the offset from the out of
> line address after the single step should be enough. Unless I'm just being
> dense :-) 

All of these are conditional branches, if the branch is not taken you
have to do a cleanup.
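
The not-taken case is detectable after the single step: the psw has then
advanced by exactly the instruction length within the out-of-line copy.
Roughly, in the terms of your resume_execution (sketch):

	if (regs->psw.addr - (unsigned long) p->ainsn.insn == p->ainsn.ilen)
		/* branch not taken: resume after the original instruction */
		regs->psw.addr = (unsigned long) p->addr + p->ainsn.ilen;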

> > > +void __kprobes arch_arm_kprobe(struct kprobe *p)
> > > +{
> > > +	*p->addr = BREAKPOINT_INSTRUCTION;
> > > +}
> > > +
> > > +void __kprobes arch_disarm_kprobe(struct kprobe *p)
> > > +{
> > > +	*p->addr = p->opcode;
> > > +}
> > > +
> > 
> > I would feel better if the kernel code is changed in a more controlled
> > manner. Do an smp_call_function to avoid concurrent execution of the
> > instruction you are changing.
> I'm not sure that repeatedly executing the same operation on the same area of
> memory on multiple CPUs is the answer. I'm not sure there is a right answer:
> if the instruction is brought into the pipeline while it is being changed
> on another cpu, it will either execute the original instruction and that probe
> opportunity will be missed, execute the modified opcode and cause a probe hit
> (which is ok, everything is ready for it), or stall the pipeline while it
> figures out why the heck someone is modifying something in the I-cache (IIRC
> that will slow things down a tad). I'd really like to plead "all the other
> architectures do it this way", but instead I'll read every paper I can find
> on I-cache operations and zSeries processors over dinner.

You misunderstood me here. I'm not talking about storing the same piece
of data to memory on each processor. I'm talking about isolating all
other cpus so that the initiating cpu can store the breakpoint to memory
without running into the danger that another cpu is trying to execute it
at the same time. But probably the store should be atomic in regard to
instruction fetching on the other cpus. It is only two bytes and it
should be aligned.
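
Since the opcode is a single aligned halfword, a plain store like

	*(kprobe_opcode_t *) p->addr = BREAKPOINT_INSTRUCTION;

should already be atomic with respect to instruction fetching (sketch,
assuming aligned halfword stores are indivisible on all models).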

-- 
blue skies,
  Martin.

Martin Schwidefsky
Linux for zSeries Development & Services
IBM Deutschland Entwicklung GmbH

"Reality continues to ruin my life." - Calvin.



^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH] kprobes for s390 architecture
  2006-06-21 16:38     ` Martin Schwidefsky
@ 2006-06-21 17:15       ` Mike Grundy
  2006-06-27 11:56         ` Martin Schwidefsky
  2006-06-21 17:34       ` Mike Grundy
  2006-06-22  1:38       ` Mike Grundy
  2 siblings, 1 reply; 31+ messages in thread
From: Mike Grundy @ 2006-06-21 17:15 UTC (permalink / raw)
  To: Martin Schwidefsky; +Cc: jan.glauber, linux-kernel, systemtap

On Wed, Jun 21, 2006 at 06:38:40PM +0200, Martin Schwidefsky wrote:
> On Tue, 2006-06-20 at 21:28 -0700, Mike Grundy wrote:
> > Hi Martin - This patch implements the suggestions from your review. There were
> > a couple points I wanted to go over:
> > > There are some more instructions missing that need fixup:
> > > "brxh" 0x84??????, "brxle" 0x85??????, "brc" 0xa7?4????,
> > > "brct" 0xa7?6????, "brctg" 0xa7?7????, "bctgr" 0xb946????,
> > > "brxhg" 0xec????????44 and "brxlg" 0xec??????45.
> > Since all of these are relative branches, and they don't save the psw, the
> > standard cleanup of adjusting the original psw by the offset from the out of
> > line address after the single step should be enough. Unless I'm just being
> > dense :-) 
> 
> All of these are conditional branches, if the branch is not taken you
> have to do a cleanup.
The reason I have a special cleanup for the other branches is that the easy
way to tell the branch wasn't taken is to check pswa = orig pswa + instruction
length. The relative branches get cleaned up the same way whether the branch
was taken or not: pswa = probe_addr + (out of line end psw - out of line
start psw). These are all relative branches, and while they need cleanup,
they don't get treated differently based on the branch status.
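
A worked example (sketch): take a brc with a branch offset of -6 bytes.
Taken out of line, the psw ends up at copy_addr - 6 and the fixup yields
probe_addr - 6, the correct target. Not taken, the psw ends up at
copy_addr + 4 (the instruction length) and the same fixup yields
probe_addr + 4, the next instruction. One formula covers both cases.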

> You misunderstood me here. I'm not talking about storing the same piece
> of data to memory on each processor. I'm talking about isolating all
> other cpus so that the initiating cpu can store the breakpoint to memory
Yep, I misunderstood that. The serialization is the point, not the replacement
of a word in memory.

-- 
Thanks
Mike

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH] kprobes for s390 architecture
  2006-06-21 16:38     ` Martin Schwidefsky
  2006-06-21 17:15       ` Mike Grundy
@ 2006-06-21 17:34       ` Mike Grundy
  2006-06-22 11:28         ` Jan Glauber
  2006-06-22  1:38       ` Mike Grundy
  2 siblings, 1 reply; 31+ messages in thread
From: Mike Grundy @ 2006-06-21 17:34 UTC (permalink / raw)
  To: Martin Schwidefsky; +Cc: jan.glauber, linux-kernel, systemtap

On Wed, Jun 21, 2006 at 06:38:40PM +0200, Martin Schwidefsky wrote:
> You misunderstood me here. I'm not talking about storing the same piece
> of data to memory on each processor. I'm talking about isolating all
> other cpus so that the initiating cpu can store the breakpoint to memory
> without running into the danger that another cpu is trying to execute it
> at the same time. But probably the store should be atomic in regard to
> instruction fetching on the other cpus. It is only two bytes and it
> should be aligned.

So maybe something like this:

void smp_replace_instruction(void *info)
{
        struct ins_replace_args *parms;
        parms = (struct ins_replace_args *) info;
        *parms->addr = parms->insn;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
        struct ins_replace_args parms;
        parms.addr = p->addr;
        parms.insn = BREAKPOINT_INSTRUCTION;

        preempt_disable();
        smp_call_function(smp_replace_instruction, &parms, 0, 1);
        preempt_enable();
}
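
(One caveat, assuming the usual 2.6 smp_call_function semantics: the
callback runs on every cpu except the caller, so the initiating cpu
should also do the store itself, e.g.

        smp_replace_instruction(&parms);

right after the smp_call_function and before the preempt_enable.)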

Thanks
Mike

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH] kprobes for s390 architecture
  2006-06-21 16:38     ` Martin Schwidefsky
  2006-06-21 17:15       ` Mike Grundy
  2006-06-21 17:34       ` Mike Grundy
@ 2006-06-22  1:38       ` Mike Grundy
  2 siblings, 0 replies; 31+ messages in thread
From: Mike Grundy @ 2006-06-22  1:38 UTC (permalink / raw)
  To: Martin Schwidefsky; +Cc: jan.glauber, linux-kernel, systemtap

Hi Martin - New patch: it has the smp_call_function for the instruction
replacement and the alignment check Jan suggested. Also put in the lpsw, lpswe
and larl checks. A few other cleanups included. Happy reading...

Thanks
Mike

 arch/s390/Kconfig              |   14
 arch/s390/kernel/Makefile      |    1
 arch/s390/kernel/entry.S       |   15 +
 arch/s390/kernel/entry64.S     |   15 +
 arch/s390/kernel/kprobes.c     |  587 +++++++++++++++++++++++++++++++++++++++++
 arch/s390/kernel/traps.c       |   42 ++
 arch/s390/kernel/vmlinux.lds.S |    1
 arch/s390/mm/fault.c           |    8
 include/asm-s390/kdebug.h      |   57 +++
 include/asm-s390/kprobes.h     |  107 +++++++
 10 files changed, 841 insertions(+), 6 deletions(-)


diff -urNp linux-2.6.17.1/arch/s390/Kconfig linux-2.6.17.1-kp390/arch/s390/Kconfig
--- linux-2.6.17.1/arch/s390/Kconfig	2006-06-20 02:31:55.000000000 -0700
+++ linux-2.6.17.1-kp390/arch/s390/Kconfig	2006-06-21 13:43:06.000000000 -0700
@@ -474,8 +474,22 @@ source "drivers/net/Kconfig"
 
 source "fs/Kconfig"
 
+menu "Instrumentation Support"
+	depends on EXPERIMENTAL
+
 source "arch/s390/oprofile/Kconfig"
 
+config KPROBES
+	bool "Kprobes (EXPERIMENTAL)"
+	depends on EXPERIMENTAL && MODULES
+	help
+	  Kprobes allows you to trap at almost any kernel address and
+	  execute a callback function.  register_kprobe() establishes
+	  a probepoint and specifies the callback.  Kprobes is useful
+	  for kernel debugging, non-intrusive instrumentation and testing.
+	  If in doubt, say "N".
+endmenu
+
 source "arch/s390/Kconfig.debug"
 
 source "security/Kconfig"
diff -urNp linux-2.6.17.1/arch/s390/kernel/entry64.S linux-2.6.17.1-kp390/arch/s390/kernel/entry64.S
--- linux-2.6.17.1/arch/s390/kernel/entry64.S	2006-06-20 02:31:55.000000000 -0700
+++ linux-2.6.17.1-kp390/arch/s390/kernel/entry64.S	2006-06-21 18:29:33.000000000 -0700
@@ -497,6 +497,8 @@ pgm_no_vtime2:
 #endif
 	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
 	lg	%r1,__TI_task(%r9)
+	tm	__LC_PGM_OLD_PSW+1(%r15),0x01	# kernel per event ?
+	jz	kernel_per
 	mvc	__THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
 	mvc	__THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
 	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
@@ -531,6 +533,19 @@ pgm_no_vtime3:
 	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
 	j	sysc_do_svc
 
+#
+# per was called from kernel, must be kprobes
+#
+kernel_per:
+	lgf     %r3,__LC_PGM_ILC	 # load program interruption code
+	lghi	%r8,0x7f
+	ngr	%r8,%r3			 # clear per-event-bit and ilc
+	lhi	%r0,__LC_PGM_OLD_PSW
+	sth	%r0,SP_TRAP(%r15)	# set trap indication to pgm check
+	la	%r2,SP_PTREGS(%r15)	# address of register-save area
+	larl	%r14,sysc_leave		# load adr. of system ret, no work
+	brcl	15,do_single_step	# branch to do_single_step
+
 /*
  * IO interrupt handler routine
  */
diff -urNp linux-2.6.17.1/arch/s390/kernel/entry.S linux-2.6.17.1-kp390/arch/s390/kernel/entry.S
--- linux-2.6.17.1/arch/s390/kernel/entry.S	2006-06-20 02:31:55.000000000 -0700
+++ linux-2.6.17.1-kp390/arch/s390/kernel/entry.S	2006-06-21 13:43:06.000000000 -0700
@@ -480,6 +480,8 @@ pgm_no_vtime2:
 	mvc	__THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
 	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
 	oi	__TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
+	tm	__LC_PGM_OLD_PSW+1(%r15),0x01	# kernel per event ?
+	bz	kernel_per
 	l	%r3,__LC_PGM_ILC	 # load program interruption code
 	la	%r8,0x7f
 	nr	%r8,%r3                  # clear per-event-bit and ilc
@@ -510,6 +512,19 @@ pgm_no_vtime3:
 	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
 	b	BASED(sysc_do_svc)
 
+#
+# per was called from kernel, must be kprobes
+#
+kernel_per:
+	l	%r3,__LC_PGM_ILC	# load program interruption code
+	la	%r8,0x7f
+	nr	%r8,%r3			# clear per-event-bit and ilc
+	mvi	SP_TRAP+1(%r15),0x28	# set trap indication to pgm check
+	la	%r2,SP_PTREGS(%r15)	# address of register-save area
+	l	%r1,BASED(.Lhandle_per)	# load adr. of per handler
+	la	%r14,BASED(sysc_leave)	# load adr. of system return
+	br	%r1			# branch to do_single_step
+
 /*
  * IO interrupt handler routine
  */
diff -urNp linux-2.6.17.1/arch/s390/kernel/kprobes.c linux-2.6.17.1-kp390/arch/s390/kernel/kprobes.c
--- linux-2.6.17.1/arch/s390/kernel/kprobes.c	1969-12-31 16:00:00.000000000 -0800
+++ linux-2.6.17.1-kp390/arch/s390/kernel/kprobes.c	2006-06-21 15:31:41.000000000 -0700
@@ -0,0 +1,587 @@
+/*
+ *  Kernel Probes (KProbes)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2006
+ *
+ * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
+ */
+
+#include <linux/config.h>
+#include <linux/kprobes.h>
+#include <linux/ptrace.h>
+#include <linux/preempt.h>
+#include <asm/cacheflush.h>
+#include <asm/kdebug.h>
+#include <asm/sections.h>
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+	/* Make sure the probe isn't going on a difficult instruction */
+	if (is_prohibited_opcode((kprobe_opcode_t *) p->addr))
+		return -EINVAL;
+
+	if ((unsigned long)p->addr & 0x01) {
+		printk("Attempt to register kprobe at an unaligned address\n");
+		return -EINVAL;
+	}
+
+	/* Use the get_insn_slot() facility for correctness */
+	if (!(p->ainsn.insn = get_insn_slot()))
+		return -ENOMEM;
+
+	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+	get_instruction_type(&p->ainsn);
+	p->opcode = *p->addr;
+	return 0;
+}
+
+int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction)
+{
+	switch (*(__u8 *) instruction) {
+	case 0x0c:      /* bassm */
+	case 0x0b:      /* bsm   */
+	case 0x83:      /* diag  */
+	case 0x44:      /* ex    */
+		return -EINVAL;
+	}
+	switch (*(__u16 *) instruction) {
+	case 0x0101:    /* pr    */
+	case 0xb25a:    /* bsa   */
+	case 0xb240:    /* bakr  */
+	case 0xb258:    /* bsg   */
+	case 0xb218:    /* pc    */
+	case 0xb228:    /* pt    */
+		return -EINVAL;
+	}
+	return 0;
+}
+
+void __kprobes get_instruction_type(struct arch_specific_insn *ainsn)
+{
+	/* default fixup method */
+	ainsn->fixup = FIXUP_PSW_NORMAL;
+
+	/* save r1 operand */
+	ainsn->reg = (*((__u8 *) ainsn->insn + 1) & 0xf0) >> 4;
+
+	/* save the instruction length (pop 5-5) in bytes */
+	switch (*(__u8 *) (ainsn->insn) >> 6) {
+	case 0:
+		ainsn->ilen = 2;
+		break;
+	case 1:
+	case 2:
+		ainsn->ilen = 4;
+		break;
+	case 3:
+		ainsn->ilen = 6;
+		break;
+	}
+
+	switch (*(__u8 *) ainsn->insn) {
+	case 0x05:	/* balr	*/
+	case 0x0d:	/* basr */
+		ainsn->fixup = FIXUP_RETURN_REGISTER;
+		/* if r2 = 0, no branch will be taken */
+		if ((*((__u8 *) ainsn->insn + 1) & 0x0f) == 0)
+			ainsn->fixup |= FIXUP_BRANCH_NOT_TAKEN;
+		break;
+	case 0x06:	/* bctr	*/
+	case 0x07:	/* bcr	*/
+		ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
+		break;
+	case 0x45:	/* bal	*/
+	case 0x4d:	/* bas	*/
+		ainsn->fixup = FIXUP_RETURN_REGISTER;
+		break;
+	case 0x47:	/* bc	*/
+	case 0x46:	/* bct	*/
+	case 0x86:	/* bxh	*/
+	case 0x87:	/* bxle	*/
+		ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
+		break;
+	case 0x82:	/* lpsw	*/
+		ainsn->fixup = FIXUP_NOT_REQUIRED;
+		break;
+	case 0xb2:	/* lpswe */
+		if (*((__u8 *) ainsn->insn + 1) == 0xb2) {
+			ainsn->fixup = FIXUP_NOT_REQUIRED;
+		}
+		break;
+	case 0xa7:	/* bras	*/
+		if ((*((__u8 *) ainsn->insn + 1) & 0x0f) == 0x05) {
+			ainsn->fixup = FIXUP_RETURN_REGISTER;
+		}
+		break;
+	case 0xc0:
+		if ((*((__u8 *) ainsn->insn + 1) & 0x0f) == 0x00 ||    /*larl */
+			(*((__u8 *) ainsn->insn + 1) & 0x0f) == 0x05) { /*brasl*/
+			ainsn->fixup = FIXUP_RETURN_REGISTER;
+		}
+		break;
+	case 0xeb:
+		if (*((__u8 *) ainsn->insn + 5) == 0x44 ||	/* bxhg  */
+			*((__u8 *) ainsn->insn + 5) == 0x45) {	/* bxleg */
+			ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
+		}
+		break;
+	case 0xe3:	/* bctg	*/
+		if (*((__u8 *) ainsn->insn + 5) == 0x46) {
+			ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
+		}
+		break;
+	}
+}
+
+void smp_replace_instruction(void *info)
+{
+	struct ins_replace_args *parms;
+	parms = (struct ins_replace_args *) info;
+	*parms->addr = parms->insn;
+}
+
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+	struct ins_replace_args parms;
+	parms.addr = p->addr;
+	parms.insn = BREAKPOINT_INSTRUCTION;
+
+	preempt_disable();
+	smp_call_function(smp_replace_instruction, &parms, 0, 1);
+	preempt_enable();
+}
+
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
+{
+	struct ins_replace_args parms;
+	parms.addr = p->addr;
+	parms.insn = p->opcode;
+
+	preempt_disable();
+	smp_call_function(smp_replace_instruction, &parms, 0, 1);
+	preempt_enable();
+}
+
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+	mutex_lock(&kprobe_mutex);
+	free_insn_slot(p->ainsn.insn);
+	mutex_unlock(&kprobe_mutex);
+}
+
+static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+{
+	per_cr_bits kprobe_per_regs[1];
+
+	memset(kprobe_per_regs, 0, sizeof(per_cr_bits));
+	regs->psw.addr = (unsigned long)p->ainsn.insn | PSW_ADDR_AMODE;
+
+	/* Set up the per control reg info, will pass to lctl */
+	kprobe_per_regs[0].em_instruction_fetch = 1;
+	kprobe_per_regs[0].starting_addr = (unsigned long)p->ainsn.insn;
+	kprobe_per_regs[0].ending_addr = (unsigned long)p->ainsn.insn + 1;
+
+	/* Set the PER control regs, turns on single step for this address */
+	__ctl_load(kprobe_per_regs, 9, 11);
+	regs->psw.mask |= PSW_MASK_PER;
+	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
+}
+
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+	kcb->prev_kprobe.kp = kprobe_running();
+	kcb->prev_kprobe.status = kcb->kprobe_status;
+	kcb->prev_kprobe.kprobe_saved_imask = kcb->kprobe_saved_imask;
+	memcpy(kcb->prev_kprobe.kprobe_saved_ctl, kcb->kprobe_saved_ctl,
+	       sizeof(kcb->kprobe_saved_ctl));
+}
+
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+	kcb->kprobe_status = kcb->prev_kprobe.status;
+	kcb->kprobe_saved_imask = kcb->prev_kprobe.kprobe_saved_imask;
+	memcpy(kcb->kprobe_saved_ctl, kcb->prev_kprobe.kprobe_saved_ctl,
+	       sizeof(kcb->kprobe_saved_ctl));
+}
+
+static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+						struct kprobe_ctlblk *kcb)
+{
+	__get_cpu_var(current_kprobe) = p;
+	/* Save the interrupt and per flags */
+	kcb->kprobe_saved_imask = regs->psw.mask &
+	    (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
+	/* Save the control regs that govern PER */
+	__ctl_store(kcb->kprobe_saved_ctl, 9, 11);
+}
+
+/* Called with kretprobe_lock held */
+void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
+					struct pt_regs *regs)
+{
+	struct kretprobe_instance *ri;
+
+	if ((ri = get_free_rp_inst(rp)) != NULL) {
+		ri->rp = rp;
+		ri->task = current;
+		ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];
+
+		/* Replace the return addr with trampoline addr */
+		regs->gprs[14] = (unsigned long)&kretprobe_trampoline;
+
+		add_rp_inst(ri);
+	} else {
+		rp->nmissed++;
+	}
+}
+
+static int __kprobes kprobe_handler(struct pt_regs *regs)
+{
+	struct kprobe *p;
+	int ret = 0;
+	unsigned long *addr = (unsigned long *)
+		((regs->psw.addr & PSW_ADDR_INSN) - 2);
+	struct kprobe_ctlblk *kcb;
+
+	/*
+	 * We don't want to be preempted for the entire
+	 * duration of kprobe processing
+	 */
+	preempt_disable();
+	kcb = get_kprobe_ctlblk();
+
+	/* Check we're not actually recursing */
+	if (kprobe_running()) {
+		p = get_kprobe(addr);
+		if (p) {
+			if (kcb->kprobe_status == KPROBE_HIT_SS &&
+			    *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
+				regs->psw.mask &= ~PSW_MASK_PER;
+				regs->psw.mask |= kcb->kprobe_saved_imask;
+				goto no_kprobe;
+			}
+			/* We have re-entered the kprobe_handler(), since
+			 * another probe was hit while within the handler.
+			 * Here we save the original kprobe variables and
+			 * just single-step on the instruction of the new
+			 * probe, without calling any user handlers.
+			 */
+			save_previous_kprobe(kcb);
+			set_current_kprobe(p, regs, kcb);
+			kprobes_inc_nmissed_count(p);
+			prepare_singlestep(p, regs);
+			kcb->kprobe_status = KPROBE_REENTER;
+			return 1;
+		} else {
+			p = __get_cpu_var(current_kprobe);
+			if (p->break_handler && p->break_handler(p, regs)) {
+				goto ss_probe;
+			}
+		}
+		goto no_kprobe;
+	}
+
+	p = get_kprobe(addr);
+	if (!p) {
+		if (*addr != BREAKPOINT_INSTRUCTION) {
+			/*
+			 * The breakpoint instruction was removed right
+			 * after we hit it.  Another cpu has removed
+			 * either a probepoint or a debugger breakpoint
+			 * at this address.  In either case, no further
+			 * handling of this interrupt is appropriate.
+			 */
+			ret = 1;
+		}
+		/* Not one of ours: let kernel handle it */
+		goto no_kprobe;
+	}
+
+	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+	set_current_kprobe(p, regs, kcb);
+	if (p->pre_handler && p->pre_handler(p, regs))
+		/* handler has already set things up, so skip ss setup */
+		return 1;
+
+ss_probe:
+	prepare_singlestep(p, regs);
+	kcb->kprobe_status = KPROBE_HIT_SS;
+	return 1;
+
+no_kprobe:
+	preempt_enable_no_resched();
+	return ret;
+}
+
+/*
+ * Function return probe trampoline:
+ * 	- init_kprobes() establishes a probepoint here
+ * 	- When the probed function returns, this probe
+ * 		causes the handlers to fire
+ */
+void kretprobe_trampoline_holder(void)
+{
+	asm volatile (".global kretprobe_trampoline\n"
+		      "kretprobe_trampoline:\n" "bcr 0,0\n");
+}
+
+/*
+ * Called when the probe at kretprobe trampoline is hit
+ */
+int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kretprobe_instance *ri = NULL;
+	struct hlist_head *head;
+	struct hlist_node *node, *tmp;
+	unsigned long flags, orig_ret_address = 0;
+	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+
+	spin_lock_irqsave(&kretprobe_lock, flags);
+	head = kretprobe_inst_table_head(current);
+
+	/*
+	 * It is possible to have multiple instances associated with a given
+	 * task either because multiple functions in the call path
+	 * have a return probe installed on them, and/or more than one
+	 * return probe was registered for a target function.
+	 *
+	 * We can handle this because:
+	 *     - instances are always inserted at the head of the list
+	 *     - when multiple return probes are registered for the same
+	 *       function, the first instance's ret_addr will point to the
+	 *       real return address, and all the rest will point to
+	 *       kretprobe_trampoline
+	 */
+	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+		if (ri->task != current)
+			/* another task is sharing our hash bucket */
+			continue;
+
+		if (ri->rp && ri->rp->handler)
+			ri->rp->handler(ri, regs);
+
+		orig_ret_address = (unsigned long)ri->ret_addr;
+		recycle_rp_inst(ri);
+
+		if (orig_ret_address != trampoline_address) {
+			/*
+			 * This is the real return address. Any other
+			 * instances associated with this task are for
+			 * other calls deeper on the call stack
+			 */
+			break;
+		}
+	}
+	BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
+	regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;
+
+	reset_current_kprobe();
+	spin_unlock_irqrestore(&kretprobe_lock, flags);
+	preempt_enable_no_resched();
+
+	/*
+	 * By returning a non-zero value, we are telling
+	 * kprobe_handler() that we don't want the post_handler
+	 * to run (and have re-enabled preemption)
+	 */
+	return 1;
+}
+
+/*
+ * Called after single-stepping.  p->addr is the address of the
+ * instruction whose first byte has been replaced by the "breakpoint"
+ * instruction.  To avoid the SMP problems that can occur when we
+ * temporarily put back the original opcode to single-step, we
+ * single-stepped a copy of the instruction.  The address of this
+ * copy is p->ainsn.insn.
+ */
+static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	regs->psw.addr &= PSW_ADDR_INSN;
+
+	if (p->ainsn.fixup & FIXUP_PSW_NORMAL)
+		regs->psw.addr = (unsigned long)p->addr +
+				((unsigned long)regs->psw.addr -
+				 (unsigned long)p->ainsn.insn);
+
+	if (p->ainsn.fixup & FIXUP_BRANCH_NOT_TAKEN)
+		if ((unsigned long)regs->psw.addr -
+		    (unsigned long)p->ainsn.insn == p->ainsn.ilen)
+			regs->psw.addr = (unsigned long)p->addr + p->ainsn.ilen;
+
+	if (p->ainsn.fixup & FIXUP_RETURN_REGISTER)
+		regs->gprs[p->ainsn.reg] = (unsigned long)p->addr +
+						(regs->gprs[p->ainsn.reg] -
+						(unsigned long)p->ainsn.insn);
+
+	regs->psw.addr |= PSW_ADDR_AMODE;
+	/* turn off PER mode */
+	regs->psw.mask &= ~PSW_MASK_PER;
+	/* Restore the original per control regs */
+	__ctl_load(kcb->kprobe_saved_ctl, 9, 11);
+	regs->psw.mask |= kcb->kprobe_saved_imask;
+}
+
+static int __kprobes post_kprobe_handler(struct pt_regs *regs)
+{
+	struct kprobe *cur = kprobe_running();
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	if (!cur)
+		return 0;
+
+	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+		kcb->kprobe_status = KPROBE_HIT_SSDONE;
+		cur->post_handler(cur, regs, 0);
+	}
+
+	resume_execution(cur, regs);
+
+	/* Restore the original saved kprobe variables and continue. */
+	if (kcb->kprobe_status == KPROBE_REENTER) {
+		restore_previous_kprobe(kcb);
+		goto out;
+	}
+	reset_current_kprobe();
+out:
+	preempt_enable_no_resched();
+
+	/*
+	 * if somebody else is singlestepping across a probe point, psw mask
+	 * will have PER set, in which case, continue the remaining processing
+	 * of do_single_step, as if this is not a probe hit.
+	 */
+	if (regs->psw.mask & PSW_MASK_PER) {
+		return 0;
+	}
+
+	return 1;
+}
+
+static int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+{
+	struct kprobe *cur = kprobe_running();
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+		return 1;
+
+	if (kcb->kprobe_status & KPROBE_HIT_SS) {
+		resume_execution(cur, regs);
+		regs->psw.mask |= kcb->kprobe_saved_imask;
+
+		reset_current_kprobe();
+		preempt_enable_no_resched();
+	}
+	return 0;
+}
+
+/*
+ * Wrapper routine for handling exceptions.
+ */
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+				       unsigned long val, void *data)
+{
+	struct die_args *args = (struct die_args *)data;
+	int ret = NOTIFY_DONE;
+
+	switch (val) {
+	case DIE_BPT:
+		if (kprobe_handler(args->regs))
+			ret = NOTIFY_STOP;
+		break;
+	case DIE_SSTEP:
+		if (post_kprobe_handler(args->regs))
+			ret = NOTIFY_STOP;
+		break;
+	case DIE_TRAP:
+	case DIE_PAGE_FAULT:
+		/* kprobe_running() needs smp_processor_id() */
+		preempt_disable();
+		if (kprobe_running() &&
+		    kprobe_fault_handler(args->regs, args->trapnr))
+			ret = NOTIFY_STOP;
+		preempt_enable();
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct jprobe *jp = container_of(p, struct jprobe, kp);
+	unsigned long addr;
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));
+
+	/* setup return addr to the jprobe handler routine */
+	regs->psw.addr = (unsigned long)(jp->entry) | PSW_ADDR_AMODE;
+
+	/* r14 is the function return address */
+	kcb->jprobe_saved_r14 = (unsigned long)regs->gprs[14];
+	/* r15 is the stack pointer */
+	kcb->jprobe_saved_r15 = (unsigned long)regs->gprs[15];
+	addr = (unsigned long)kcb->jprobe_saved_r15;
+
+	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr,
+	       MIN_STACK_SIZE(addr));
+	return 1;
+}
+
+void __kprobes jprobe_return(void)
+{
+	asm volatile (".word 0x0002");
+}
+
+void __kprobes jprobe_return_end(void)
+{
+	asm volatile ("bcr 0,0");
+}
+
+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_r15);
+
+	/* Put the regs back */
+	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
+	/* put the stack back */
+	memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
+	       MIN_STACK_SIZE(stack_addr));
+	preempt_enable_no_resched();
+	return 1;
+}
+
+static struct kprobe trampoline_p = {
+	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
+	.pre_handler = trampoline_probe_handler
+};
+
+int __init arch_init_kprobes(void)
+{
+	return register_kprobe(&trampoline_p);
+}
diff -urNp linux-2.6.17.1/arch/s390/kernel/Makefile linux-2.6.17.1-kp390/arch/s390/kernel/Makefile
--- linux-2.6.17.1/arch/s390/kernel/Makefile	2006-06-20 02:31:55.000000000 -0700
+++ linux-2.6.17.1-kp390/arch/s390/kernel/Makefile	2006-06-21 13:43:06.000000000 -0700
@@ -21,6 +21,7 @@ obj-$(CONFIG_COMPAT)		+= compat_linux.o 
 obj-$(CONFIG_BINFMT_ELF32)	+= binfmt_elf32.o
 
 obj-$(CONFIG_VIRT_TIMER)	+= vtime.o
+obj-$(CONFIG_KPROBES)		+= kprobes.o
 
 # Kexec part
 S390_KEXEC_OBJS := machine_kexec.o crash.o
diff -urNp linux-2.6.17.1/arch/s390/kernel/traps.c linux-2.6.17.1-kp390/arch/s390/kernel/traps.c
--- linux-2.6.17.1/arch/s390/kernel/traps.c	2006-06-20 02:31:55.000000000 -0700
+++ linux-2.6.17.1-kp390/arch/s390/kernel/traps.c	2006-06-21 13:43:06.000000000 -0700
@@ -30,6 +30,7 @@
 #include <linux/module.h>
 #include <linux/kallsyms.h>
 #include <linux/reboot.h>
+#include <linux/kprobes.h>
 
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -40,6 +41,7 @@
 #include <asm/s390_ext.h>
 #include <asm/lowcore.h>
 #include <asm/debug.h>
+#include <asm/kdebug.h>
 
 /* Called from entry.S only */
 extern void handle_per_exception(struct pt_regs *regs);
@@ -75,6 +77,20 @@ static int kstack_depth_to_print = 12;
 static int kstack_depth_to_print = 20;
 #endif /* CONFIG_64BIT */
 
+ATOMIC_NOTIFIER_HEAD(s390die_chain);
+
+int register_die_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&s390die_chain, nb);
+}
+EXPORT_SYMBOL(register_die_notifier);
+
+int unregister_die_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&s390die_chain, nb);
+}
+EXPORT_SYMBOL(unregister_die_notifier);
+
 /*
  * For show_trace we have three different stacks to consider:
  *   - the panic stack which is used if the kernel stack has overflown
@@ -308,8 +324,9 @@ report_user_fault(long interruption_code
 #endif
 }
 
-static void inline do_trap(long interruption_code, int signr, char *str,
-                           struct pt_regs *regs, siginfo_t *info)
+static void __kprobes inline do_trap(long interruption_code, int signr,
+					char *str, struct pt_regs *regs,
+					siginfo_t *info)
 {
 	/*
 	 * We got all needed information from the lowcore and can
@@ -318,6 +335,10 @@ static void inline do_trap(long interrup
         if (regs->psw.mask & PSW_MASK_PSTATE)
 		local_irq_enable();
 
+	if (notify_die(DIE_TRAP, str, regs, interruption_code,
+				interruption_code, signr) == NOTIFY_STOP)
+		return;
+
         if (regs->psw.mask & PSW_MASK_PSTATE) {
                 struct task_struct *tsk = current;
 
@@ -339,8 +360,12 @@ static inline void *get_check_address(st
 	return (void *)((regs->psw.addr-S390_lowcore.pgm_ilc) & PSW_ADDR_INSN);
 }
 
-void do_single_step(struct pt_regs *regs)
+void __kprobes do_single_step(struct pt_regs *regs)
 {
+	if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0,
+					SIGTRAP) == NOTIFY_STOP) {
+		return;
+	}
 	if ((current->ptrace & PT_PTRACED) != 0)
 		force_sig(SIGTRAP, current);
 }
@@ -466,8 +491,15 @@ asmlinkage void illegal_op(struct pt_reg
 #endif
 		} else
 			signal = SIGILL;
-	} else
-		signal = SIGILL;
+	} else {
+		/*
+		 * If we get an illegal op in kernel mode, send it through the
+		 * kprobes notifier. If kprobes doesn't pick it up, send SIGILL.
+		 */
+		if (notify_die(DIE_BPT, "bpt", regs, interruption_code,
+					3, SIGTRAP) != NOTIFY_STOP)
+			signal = SIGILL;
+	}
 
 #ifdef CONFIG_MATHEMU
         if (signal == SIGFPE)
diff -urNp linux-2.6.17.1/arch/s390/kernel/vmlinux.lds.S linux-2.6.17.1-kp390/arch/s390/kernel/vmlinux.lds.S
--- linux-2.6.17.1/arch/s390/kernel/vmlinux.lds.S	2006-06-20 02:31:55.000000000 -0700
+++ linux-2.6.17.1-kp390/arch/s390/kernel/vmlinux.lds.S	2006-06-21 13:43:06.000000000 -0700
@@ -25,6 +25,7 @@ SECTIONS
 	*(.text)
 	SCHED_TEXT
 	LOCK_TEXT
+	KPROBES_TEXT
 	*(.fixup)
 	*(.gnu.warning)
 	} = 0x0700
diff -urNp linux-2.6.17.1/arch/s390/mm/fault.c linux-2.6.17.1-kp390/arch/s390/mm/fault.c
--- linux-2.6.17.1/arch/s390/mm/fault.c	2006-06-20 02:31:55.000000000 -0700
+++ linux-2.6.17.1-kp390/arch/s390/mm/fault.c	2006-06-21 13:43:06.000000000 -0700
@@ -26,10 +26,12 @@
 #include <linux/console.h>
 #include <linux/module.h>
 #include <linux/hardirq.h>
+#include <linux/kprobes.h>
 
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
+#include <asm/kdebug.h>
 
 #ifndef CONFIG_64BIT
 #define __FAIL_ADDR_MASK 0x7ffff000
@@ -160,7 +162,7 @@ static void do_sigsegv(struct pt_regs *r
  *   11       Page translation     ->  Not present       (nullification)
  *   3b       Region third trans.  ->  Not present       (nullification)
  */
-static inline void
+static inline void __kprobes
 do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
 {
         struct task_struct *tsk;
@@ -174,6 +176,10 @@ do_exception(struct pt_regs *regs, unsig
         tsk = current;
         mm = tsk->mm;
 	
+	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
+					SIGSEGV) == NOTIFY_STOP)
+		return;
+
 	/* 
          * Check for low-address protection.  This needs to be treated
 	 * as a special case because the translation exception code 
diff -urNp linux-2.6.17.1/include/asm-s390/kdebug.h linux-2.6.17.1-kp390/include/asm-s390/kdebug.h
--- linux-2.6.17.1/include/asm-s390/kdebug.h	1969-12-31 16:00:00.000000000 -0800
+++ linux-2.6.17.1-kp390/include/asm-s390/kdebug.h	2006-06-21 13:43:06.000000000 -0700
@@ -0,0 +1,57 @@
+#ifndef _S390_KDEBUG_H
+#define _S390_KDEBUG_H 1
+
+/*
+ * Feb 2006 Ported to s390 <grundym@us.ibm.com>
+ */
+#include <linux/notifier.h>
+
+struct pt_regs;
+
+struct die_args {
+	struct pt_regs *regs;
+	const char *str;
+	long err;
+	int trapnr;
+	int signr;
+};
+
+/* Note - you should never unregister because that can race with NMIs.
+ * If you really want to do it anyway: first unregister, then call
+ * synchronize_sched(), then free.
+ */
+extern int register_die_notifier(struct notifier_block *);
+extern int unregister_die_notifier(struct notifier_block *);
+extern struct atomic_notifier_head s390die_chain;
+
+
+enum die_val {
+	DIE_OOPS = 1,
+	DIE_BPT,
+	DIE_SSTEP,
+	DIE_PANIC,
+	DIE_NMI,
+	DIE_DIE,
+	DIE_NMIWATCHDOG,
+	DIE_KERNELDEBUG,
+	DIE_TRAP,
+	DIE_GPF,
+	DIE_CALL,
+	DIE_NMI_IPI,
+	DIE_PAGE_FAULT,
+};
+
+static inline int notify_die(enum die_val val, const char *str,
+			struct pt_regs *regs, long err, int trap, int sig)
+{
+	struct die_args args = {
+		.regs = regs,
+		.str = str,
+		.err = err,
+		.trapnr = trap,
+		.signr = sig
+	};
+	return atomic_notifier_call_chain(&s390die_chain, val, &args);
+}
+
+#endif
diff -urNp linux-2.6.17.1/include/asm-s390/kprobes.h linux-2.6.17.1-kp390/include/asm-s390/kprobes.h
--- linux-2.6.17.1/include/asm-s390/kprobes.h	1969-12-31 16:00:00.000000000 -0800
+++ linux-2.6.17.1-kp390/include/asm-s390/kprobes.h	2006-06-21 13:48:19.000000000 -0700
@@ -0,0 +1,107 @@
+#ifndef _ASM_S390_KPROBES_H
+#define _ASM_S390_KPROBES_H
+/*
+ *  Kernel Probes (KProbes)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2004
+ *
+ * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
+ *		Probes initial implementation ( includes suggestions from
+ *		Rusty Russell).
+ * 2004-Nov	Modified for PPC64 by Ananth N Mavinakayanahalli
+ *		<ananth@in.ibm.com>
+ * 2005-Dec	Used as a template for s390 by Mike Grundy
+ * 		<grundym@us.ibm.com>
+ */
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/percpu.h>
+
+#define  __ARCH_WANT_KPROBES_INSN_SLOT
+struct pt_regs;
+struct kprobe;
+
+typedef u16 kprobe_opcode_t;
+#define BREAKPOINT_INSTRUCTION	0x0002
+
+/* Maximum instruction size is 3 (16bit) halfwords: */
+#define MAX_INSN_SIZE		0x0003
+#define MAX_STACK_SIZE 		64
+#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \
+	(((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) \
+	? (MAX_STACK_SIZE) \
+	: (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
+
+#define JPROBE_ENTRY(pentry)	(kprobe_opcode_t *)((func_descr_t *)pentry)
+
+#define ARCH_SUPPORTS_KRETPROBES
+
+#define FIXUP_PSW_NORMAL 	0x08
+#define FIXUP_BRANCH_NOT_TAKEN	0x04
+#define FIXUP_RETURN_REGISTER	0x02
+#define FIXUP_NOT_REQUIRED	0x01
+
+/* Architecture specific copy of original instruction */
+struct arch_specific_insn {
+	/* copy of original instruction */
+	kprobe_opcode_t *insn;
+	int fixup;
+	int ilen;
+	int reg;
+};
+
+struct ins_replace_args {
+	kprobe_opcode_t insn;
+	kprobe_opcode_t *addr;
+};
+struct prev_kprobe {
+	struct kprobe *kp;
+	unsigned long status;
+	unsigned long saved_psw;
+	unsigned long kprobe_saved_imask;
+	unsigned long kprobe_saved_ctl[3];
+};
+
+/* per-cpu kprobe control block */
+struct kprobe_ctlblk {
+	unsigned long kprobe_status;
+	unsigned long kprobe_saved_imask;
+	unsigned long kprobe_saved_ctl[3];
+	struct pt_regs jprobe_saved_regs;
+	unsigned long jprobe_saved_r14;
+	unsigned long jprobe_saved_r15;
+	struct prev_kprobe prev_kprobe;
+	kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
+};
+
+void arch_remove_kprobe(struct kprobe *p);
+void kretprobe_trampoline(void);
+int is_prohibited_opcode(kprobe_opcode_t *instruction);
+void get_instruction_type(struct arch_specific_insn *ainsn);
+
+#ifdef CONFIG_KPROBES
+
+extern int kprobe_exceptions_notify(struct notifier_block *self,
+				    unsigned long val, void *data);
+#else	/* !CONFIG_KPROBES */
+static inline int kprobe_exceptions_notify(struct notifier_block *self,
+					   unsigned long val, void *data)
+{
+	return 0;
+}
+#endif	/* CONFIG_KPROBES */
+#endif	/* _ASM_S390_KPROBES_H */

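For readers who want to try the patch, a minimal usage sketch of the interface
it provides (not part of the patch itself; the probed symbol "do_fork" is just
an example, and kallsyms_lookup_name() is not exported to modules in every
tree, so a hardcoded address from System.map may be needed instead):

#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/kallsyms.h>

static int pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	printk("kprobe hit at %p\n", p->addr);
	return 0;	/* 0: let kprobes do the single-step setup */
}

static struct kprobe kp = {
	.pre_handler = pre_handler,
};

static int __init kp_init(void)
{
	/* assumption: kallsyms_lookup_name() is reachable from here;
	 * 2.6.17-era struct kprobe has no .symbol_name member */
	kp.addr = (kprobe_opcode_t *) kallsyms_lookup_name("do_fork");
	if (!kp.addr)
		return -EINVAL;
	return register_kprobe(&kp);
}

static void __exit kp_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kp_init);
module_exit(kp_exit);
MODULE_LICENSE("GPL");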

* Re: [PATCH] kprobes for s390 architecture
  2006-06-21 17:34       ` Mike Grundy
@ 2006-06-22 11:28         ` Jan Glauber
  2006-06-22 16:36           ` Mike Grundy
  0 siblings, 1 reply; 31+ messages in thread
From: Jan Glauber @ 2006-06-22 11:28 UTC (permalink / raw)
  To: Mike Grundy; +Cc: Martin Schwidefsky, linux-kernel, systemtap

On Wed, 2006-06-21 at 10:34 -0700, Mike Grundy wrote:
> On Wed, Jun 21, 2006 at 06:38:40PM +0200, Martin Schwidefsky wrote:
> > You misunderstood me here. I'm not talking about storing the same piece
> > of data to memory on each processor. I'm talking about isolating all
> > other cpus so that the initiating cpu can store the breakpoint to memory
> > without running into the danger that another cpu is trying to execute it
> > at the same time. But probably the store should be atomic in regard to
> > instruction fetching on the other cpus. It is only two bytes and it
> > should be aligned.
> 
> So maybe something like this:
> 
> void smp_replace_instruction(void *info) {
>         struct ins_replace_args *parms;
>         parms = (struct ins_replace_args *) info;
>         *parms->addr = parms->insn;
> }
> 
> void __kprobes arch_arm_kprobe(struct kprobe *p)
> {
>         struct ins_replace_args parms;
>         parms.addr = p->addr;
>         parms.insn = BREAKPOINT_INSTRUCTION;
> 
>         preempt_disable();
>         smp_call_function(smp_replace_instruction, &parms, 0, 1);
>         preempt_enable();
> }

Preemption disabling is not necessary around smp_call_function(), since
smp_call_function() takes a spin lock. But smp_call_function() is wrong
here, it calls the code on all other CPUs but not on our own. Please use
on_each_cpu() instead.

Jan

---
Jan Glauber
IBM Linux Technology Center
Linux on zSeries Development, Boeblingen



* Re: [PATCH] kprobes for s390 architecture
  2006-06-22 11:28         ` Jan Glauber
@ 2006-06-22 16:36           ` Mike Grundy
  2006-06-23  8:50             ` Jan Glauber
  2006-06-23 14:38             ` Heiko Carstens
  0 siblings, 2 replies; 31+ messages in thread
From: Mike Grundy @ 2006-06-22 16:36 UTC (permalink / raw)
  To: Jan Glauber; +Cc: Martin Schwidefsky, linux-kernel, systemtap

On Thu, Jun 22, 2006 at 01:28:36PM +0200, Jan Glauber wrote:
> On Wed, 2006-06-21 at 10:34 -0700, Mike Grundy wrote:
> > On Wed, Jun 21, 2006 at 06:38:40PM +0200, Martin Schwidefsky wrote:
> > > You misunderstood me here. I'm not talking about storing the same piece
> > > of data to memory on each processor. I'm talking about isolating all
> > > other cpus so that the initiating cpu can store the breakpoint to memory
> > > without running into the danger that another cpu is trying to execute it
> > > at the same time. But probably the store should be atomic in regard to
> > > instruction fetching on the other cpus. It is only two bytes and it
> > > should be aligned.
> 
> Preemption disabling is not necessary around smp_call_function(), since
> smp_call_function() takes a spin lock. But smp_call_function() is wrong
> here, it calls the code on all other CPUs but not on our own. Please use
> on_each_cpu() instead.

But on_each_cpu() does:

        preempt_disable();
        ret = smp_call_function(func, info, retry, wait);
        local_irq_disable();
        func(info);
        local_irq_enable();
        preempt_enable();
 
I'm confused. I really don't need to swap the instruction on each cpu. I really
need to make sure each cpu is not fetching that instruction while I change it.
s390 doesn't have a flush_icache_range() (which the other arches use after the 
swap). I thought that the synchronization that smp_call_function() does was the
primary reason for using it here, not repeatedly changing the same area of 
memory.  If you'd prefer I use on_each_cpu() instead of smp_call_function(), 
no problem.  

Thanks
Mike



* Re: [PATCH] kprobes for s390 architecture
  2006-06-22 16:36           ` Mike Grundy
@ 2006-06-23  8:50             ` Jan Glauber
  2006-06-23 14:38             ` Heiko Carstens
  1 sibling, 0 replies; 31+ messages in thread
From: Jan Glauber @ 2006-06-23  8:50 UTC (permalink / raw)
  To: Mike Grundy; +Cc: Martin Schwidefsky, linux-kernel, systemtap

On Thu, 2006-06-22 at 09:36 -0700, Mike Grundy wrote:
> On Thu, Jun 22, 2006 at 01:28:36PM +0200, Jan Glauber wrote:
> > On Wed, 2006-06-21 at 10:34 -0700, Mike Grundy wrote:
> > > On Wed, Jun 21, 2006 at 06:38:40PM +0200, Martin Schwidefsky wrote:
> > > > You misunderstood me here. I'm not talking about storing the same piece
> > > > of data to memory on each processor. I'm talking about isolating all
> > > > other cpus so that the initiating cpu can store the breakpoint to memory
> > > > without running into the danger that another cpu is trying to execute it
> > > > at the same time. But probably the store should be atomic in regard to
> > > > instruction fetching on the other cpus. It is only two bytes and it
> > > > should be aligned.
> > 
> > Preemption disabling is not necessary around smp_call_function(), since
> > smp_call_function() takes a spin lock. But smp_call_function() is wrong
> > here, it calls the code on all other CPUs but not on our own. Please use
> > on_each_cpu() instead.
> 
> But on_each_cpu() does:
> 
>         preempt_disable();
>         ret = smp_call_function(func, info, retry, wait);
>         local_irq_disable();
>         func(info);
>         local_irq_enable();
>         preempt_enable();
>  
> I'm confused. I really don't need to swap the instruction on each cpu. I really
> need to make sure each cpu is not fetching that instruction while I change it.
> s390 doesn't have a flush_icache_range() (which the other arches use after the 
> swap). I thought that the synchronization that smp_call_function() does was the
> primary reason for using it here, not repeatedly changing the same area of 
> memory.  If you'd prefer I use on_each_cpu() instead of smp_call_function(), 
> no problem.  

If I'm not completely off-track you _do_ swap the instruction on all
other CPUs with the smp_call_function(). But since we don't have a
flush_icache_range() interface on s390 we must understand how the
instruction cache works and then we will know whether we need the smp
call at all.



* Re: [PATCH] kprobes for s390 architecture
  2006-06-22 16:36           ` Mike Grundy
  2006-06-23  8:50             ` Jan Glauber
@ 2006-06-23 14:38             ` Heiko Carstens
  1 sibling, 0 replies; 31+ messages in thread
From: Heiko Carstens @ 2006-06-23 14:38 UTC (permalink / raw)
  To: Jan Glauber, Martin Schwidefsky, linux-kernel, systemtap

On Thu, Jun 22, 2006 at 09:36:43AM -0700, Mike Grundy wrote:
> On Thu, Jun 22, 2006 at 01:28:36PM +0200, Jan Glauber wrote:
> > On Wed, 2006-06-21 at 10:34 -0700, Mike Grundy wrote:
> > > On Wed, Jun 21, 2006 at 06:38:40PM +0200, Martin Schwidefsky wrote:
> > > > You misunderstood me here. I'm not talking about storing the same piece
> > > > of data to memory on each processor. I'm talking about isolating all
> > > > other cpus so that the initiating cpu can store the breakpoint to memory
> > > > without running into the danger that another cpu is trying to execute it
> > > > at the same time. But probably the store should be atomic in regard to
> > > > instruction fetching on the other cpus. It is only two bytes and it
> > > > should be aligned.
> > 
> > Preemption disabling is not necessary around smp_call_function(), since
> > smp_call_function() takes a spin lock. But smp_call_function() is wrong
> > here, it calls the code on all other CPUs but not on our own. Please use
> > on_each_cpu() instead.
> 
> But on_each_cpu() does:
> 
>         preempt_disable();
>         ret = smp_call_function(func, info, retry, wait);
>         local_irq_disable();
>         func(info);
>         local_irq_enable();
>         preempt_enable();
>  
> I'm confused. I really don't need to swap the instruction on each cpu. I really
> need to make sure each cpu is not fetching that instruction while I change it.
> s390 doesn't have a flush_icache_range() (which the other arches use after the 
> swap). I thought that the synchronization that smp_call_function() does was the
> primary reason for using it here, not repeatedly changing the same area of 
> memory.  If you'd prefer I use on_each_cpu() instead of smp_call_function(), 
> no problem.  

This won't solve anything. What Martin probably meant is something like a poor
man's stop_machine_run() implemented by using smp_call_function(). This way
you synchronize all cpus and when all cpus are in a known state, you change
the instruction in question and make sure that serialization happens before
cpus leave the handler again... Except for the cpu that called
smp_call_function() you get the serialization for free, since the last
instruction of the handler is always an lpsw/lpswe instruction.

Otherwise there is still the possibility that a different cpu is fetching the
instruction concurrently while you change it. This doesn't sound very good,
especially if you take this paragraph of the Principles of Operation into
account (p.5-89 of SA22-7832-04):

"It is possible, if another CPU or a channel program concurrently modifies
the instruction, for one CPU to recognize the changes to some but not all bit
positions of an instruction."
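
For concreteness, such a poor man's stop_machine_run() might look roughly like
the sketch below (the protocol and names like wait_for_swap are invented for
illustration, not taken from any posted patch; it lives in arch code where
<linux/smp.h> and <asm/atomic.h> are available, and must not be called with
interrupts disabled or the other cpus never take the IPI):

static atomic_t parked_cpus;
static atomic_t swap_done;

static void wait_for_swap(void *unused)
{
	atomic_inc(&parked_cpus);	/* report in: this cpu is parked */
	while (!atomic_read(&swap_done))
		cpu_relax();		/* spin while the opcode is stored */
	/* the lpsw/lpswe executed on return from the interrupt
	 * serializes this cpu, as described above */
}

static void swap_opcode(kprobe_opcode_t *addr, kprobe_opcode_t opcode)
{
	atomic_set(&parked_cpus, 0);
	atomic_set(&swap_done, 0);
	preempt_disable();
	/* wait == 0: return as soon as the call is queued */
	smp_call_function(wait_for_swap, NULL, 0, 0);
	while (atomic_read(&parked_cpus) != num_online_cpus() - 1)
		cpu_relax();		/* every other cpu is now parked */
	*addr = opcode;			/* aligned two-byte store */
	atomic_set(&swap_done, 1);	/* release the parked cpus */
	preempt_enable();
}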


* Re: [PATCH] kprobes for s390 architecture
  2006-06-21 17:15       ` Mike Grundy
@ 2006-06-27 11:56         ` Martin Schwidefsky
  0 siblings, 0 replies; 31+ messages in thread
From: Martin Schwidefsky @ 2006-06-27 11:56 UTC (permalink / raw)
  To: Mike Grundy; +Cc: jan.glauber, linux-kernel, systemtap

On Wed, 2006-06-21 at 10:15 -0700, Mike Grundy wrote:
> On Wed, Jun 21, 2006 at 06:38:40PM +0200, Martin Schwidefsky wrote:
> > On Tue, 2006-06-20 at 21:28 -0700, Mike Grundy wrote:
> > > Hi Martin - This patch implements the suggestions from your review. There were
> > > a couple points I wanted to go over:
> > > > There are some more instructions missing that need fixup:
> > > > "brxh" 0x84??????, "brxle" 0x85??????, "brc" 0xa7?4????,
> > > > "brct" 0xa7?6????, "brctg" 0xa7?7????, "bctgr" 0xb946????,
> > > > "brxhg" 0xec????????44 and "brxlg" 0xec??????45.
> > > Since all of these are relative branches, and they don't save the psw, the
> > > standard clean up of adjusting the original psw by the offset from the out of
> > > line address after single step. Unless I'm just being dense :-) 
> > 
> > All of these are conditional branches, if the branch is not taken you
> > have to do a cleanup.
> The reason I have a special cleanup for the other branches is that the easy
> way to tell the branch wasn't taken is to check pswa = orig pswa + instruction
> length. The relative branches get cleaned up the same way whether the branch
> was taken or not: pswa = probe_addr + (out of line end psw - out of line
> start psw). These are all relative branches and while they need cleanup, they
> don't get treated differently based on the branch status.

So you are always doing a sort of branch cleanup, even for non-branch
instructions. Seems reasonable, since non-branch instructions don't
branch and the standard cleanup logic can deal with them.
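
A worked example of that arithmetic (addresses invented for illustration):
take a relative branch probed at 0x10000 whose out-of-line copy sits at
0x80000, with a branch offset that resolves to +0x200. If the branch is taken
during the single step, the psw ends up at 0x80200, and the standard fixup

	pswa = probe_addr + (pswa - out of line addr)
	     = 0x10000 + 0x200 = 0x10200

is exactly the target the original instruction would have reached, because the
offset is relative wherever the copy executes. If it is not taken, the psw is
the out-of-line address plus the instruction length, and the same formula
yields the instruction after the probe. Absolute branches (basr, bcr, ...)
land on the real target directly when taken, which is why only they need the
separate taken/not-taken cleanup.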

> > You misunderstood me here. I'm not talking about storing the same piece
> > of data to memory on each processor. I'm talking about isolating all
> > other cpus so that the initiating cpu can store the breakpoint to memory
> Yep, I misunderstood that. The serialization is the point, not the replacement
> of a word in memory.

Exactly.

-- 
blue skies,
  Martin.

Martin Schwidefsky
Linux for zSeries Development & Services
IBM Deutschland Entwicklung GmbH

"Reality continues to ruin my life." - Calvin.




* Re: [PATCH] kprobes for s390 architecture
  2006-07-11 13:54               ` Mike Grundy
@ 2006-07-11 14:13                 ` Martin Schwidefsky
  0 siblings, 0 replies; 31+ messages in thread
From: Martin Schwidefsky @ 2006-07-11 14:13 UTC (permalink / raw)
  To: Mike Grundy; +Cc: Heiko Carstens, Jan Glauber, linux-kernel, systemtap

On Tue, 2006-07-11 at 09:54 -0400, Mike Grundy wrote:
> I did a little measuring. On average stop_machine_run() adds 8.7 msec of
> overhead on a 4-way config. Of that %57 was sub-msec overhead. For the times
> where overhead was measurable, the average was 20.2 msec, lowest at 10msec
> highest at 100msec. That's on a z800 under vm and I have no idea how many real
> cpus the machine has :-)

So adding 100 probes will take roughly 1 second (100 x ~8.7 msec). Not too bad, I expected it
to take longer.

-- 
blue skies,
  Martin.

Martin Schwidefsky
Linux for zSeries Development & Services
IBM Deutschland Entwicklung GmbH

"Reality continues to ruin my life." - Calvin.




* Re: [PATCH] kprobes for s390 architecture
  2006-07-07 17:25             ` Heiko Carstens
  2006-07-08 18:54               ` Mike Grundy
@ 2006-07-11 13:54               ` Mike Grundy
  2006-07-11 14:13                 ` Martin Schwidefsky
  1 sibling, 1 reply; 31+ messages in thread
From: Mike Grundy @ 2006-07-11 13:54 UTC (permalink / raw)
  To: Heiko Carstens; +Cc: Martin Schwidefsky, Jan Glauber, linux-kernel, systemtap

On Fri, Jul 07, 2006 at 07:25:55PM +0200, Heiko Carstens wrote:
> > ok, I tried, but my "better ideas" made things worse. stop_machine_run() wins:
> How fast is this if you have to exchange several hundred instructions?

I did a little measuring. On average stop_machine_run() adds 8.7 msec of
overhead on a 4-way config. Of that, 57% was sub-msec overhead. For the times
where overhead was measurable, the average was 20.2 msec, lowest at 10msec
highest at 100msec. That's on a z800 under vm and I have no idea how many real
cpus the machine has :-)

-- 
Thanks
Mike

=========================================
Michael Grundy - grundym@us.ibm.com
Advanced Linux Response Team (ALRT)
http://ltc.linux.ibm.com/teamweb/alrt/
845-435-8842 (T/L 295)

If at first you don't succeed, call in an air strike.



* Re: [PATCH] kprobes for s390 architecture
  2006-07-10  9:28                   ` Heiko Carstens
@ 2006-07-10 22:20                     ` Mike Grundy
  0 siblings, 0 replies; 31+ messages in thread
From: Mike Grundy @ 2006-07-10 22:20 UTC (permalink / raw)
  To: Heiko Carstens
  Cc: Martin Schwidefsky, Jan Glauber, linux-kernel, systemtap, dwilder

On Mon, Jul 10, 2006 at 11:28:52AM +0200, Heiko Carstens wrote:
> Whitespace :)
d'oh.
> You need a label behind the cs instruction and put that into the __ex_table,
> since the PSW will point to the instruction after cs if it fails.
Yeah, thought that was a nullify not a terminate. d'oh. d'oh.

> Also, on failure this function seems to return -EFAULT >> shift, which
> seems to be wrong.
Yeah. I think just returning the value without the shift would be ok. kprobes
never checks to see if the instruction swap was successful (which seems even
> more wrong).

> > +EXPORT_SYMBOL(register_die_notifier);
> > +EXPORT_SYMBOL(unregister_die_notifier);
> _GPL?
Makes sense, but I kept it consistent with the rest of kprobes.

-- 
Thanks
Mike

=========================================
Michael Grundy - grundym@us.ibm.com

If at first you don't succeed, call in an air strike.



* Re: [PATCH] kprobes for s390 architecture
  2006-07-08 19:58                 ` Mike Grundy
@ 2006-07-10  9:28                   ` Heiko Carstens
  2006-07-10 22:20                     ` Mike Grundy
  0 siblings, 1 reply; 31+ messages in thread
From: Heiko Carstens @ 2006-07-10  9:28 UTC (permalink / raw)
  To: Martin Schwidefsky, Jan Glauber, linux-kernel, systemtap,
	dwilder, Mike Grundy

> +static int __kprobes swap_instruction(void *aref)
> +{
> +	unsigned long addr, prev, tmp;
> +	int shift;
> +	struct ins_replace_args *args = aref;
> +
> +	addr = (unsigned long) args->ptr;
> +	shift = (2 ^ (addr & 2)) << 3;
> +	addr ^= addr & 2;
> +	asm volatile(
> +		"    l   %0,0(%4)\n"
> +		"    nr  %0,%5\n"
> +                "    lr  %1,%0\n"

Whitespace :)

> +		"    or  %0,%2\n"
> +		"    or  %1,%3\n"
> +		"0:  cs  %0,%1,0(%4)\n"
> +		"    jnl 1f\n"
> +		"    xr  %1,%0\n"
> +		"    nr  %1,%5\n"
> +		"    jnz 0b\n"
> +		"1:"
> +#ifndef __s390x__
> +		".section .fixup,\"ax\"\n"
> +		"2: lhi    %0,%6\n"
> +		"   bras   1,3f\n"
> +		"   .long  1b\n"
> +		"3: l      1,0(1)\n"
> +		"   br     1\n"
> +		".previous\n"
> +		".section __ex_table,\"a\"\n"
> +		"   .align 4\n"
> +		"   .long  0b,2b\n"
> +		".previous"
> +#else /* __s390x__ */
> +		".section .fixup,\"ax\"\n"
> +		"2: lghi   %0,%6\n"
> +		"   jg     1b\n"
> +		".previous\n"
> +		".section __ex_table,\"a\"\n"
> +		"   .align 8\n"
> +		"   .quad  0b,2b\n"
> +		".previous"
> +#endif /* __s390x__ */
> +		: "=&d" (prev), "=&d" (tmp)
> +		: "d" (args->old << shift), "d" (args->new << shift),
> +		  "a" (args->ptr), "d" (~(65535 << shift)), "K" (-EFAULT)
> +		: "memory", "cc" );
> +	return prev >> shift;

You need a label behind the cs instruction and put that into the __ex_table,
since the PSW will point to the instruction after cs if it fails.

Also, on failure this function seems to return -EFAULT >> shift, which
seems to be wrong.
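
In other words, the shape of the fix would be roughly the following (a sketch,
not the final code - only the 64-bit variant is shown, and the -EFAULT loaded
in the fixup would still have to bypass the final "return prev >> shift;",
e.g. via a separate error variable):

		"0:  cs  %0,%1,0(%4)\n"
		"1:  jnl 2f\n"		/* label placed behind the cs */
		"    xr  %1,%0\n"
		"    nr  %1,%5\n"
		"    jnz 0b\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:  lghi  %0,%6\n"	/* load -EFAULT */
		"    jg    2b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"    .align 8\n"
		"    .quad  1b,3b\n"	/* 1b, not 0b: the psw points
					   behind the cs when it faults */
		".previous"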

> +EXPORT_SYMBOL(register_die_notifier);
> +EXPORT_SYMBOL(unregister_die_notifier);

_GPL?


* Re: [PATCH] kprobes for s390 architecture
  2006-07-08 18:54               ` Mike Grundy
@ 2006-07-08 19:58                 ` Mike Grundy
  2006-07-10  9:28                   ` Heiko Carstens
  0 siblings, 1 reply; 31+ messages in thread
From: Mike Grundy @ 2006-07-08 19:58 UTC (permalink / raw)
  To: Martin Schwidefsky
  Cc: Heiko Carstens, Jan Glauber, linux-kernel, systemtap, dwilder

On Sat, Jul 08, 2006 at 02:54:28PM -0400, Michael Grundy wrote:
> The latest patch is attached.

Yeah, I wasn't paying attention, the 2.6.18-rc1 version is attached, no 
difference really (other than testing it with the -rc1 code)

Thanks
Mike

--

Signed-off-by: Michael Grundy <grundym@us.ibm.com>

 arch/s390/Kconfig              |   14
 arch/s390/kernel/Makefile      |    1
 arch/s390/kernel/entry.S       |   12
 arch/s390/kernel/entry64.S     |   12
 arch/s390/kernel/kprobes.c     |  692 +++++++++++++++++++++++++++++++++++++++++
 arch/s390/kernel/traps.c       |   42 ++
 arch/s390/kernel/vmlinux.lds.S |    1
 arch/s390/mm/fault.c           |   40 ++
 include/asm-s390/kdebug.h      |   57 +++
 include/asm-s390/kprobes.h     |  111 ++++++
 10 files changed, 976 insertions(+), 6 deletions(-)

diff -Nurp linux-2.6.18-rc1/arch/s390/Kconfig linux-2.6.18-rc1-kp390/arch/s390/Kconfig
--- linux-2.6.18-rc1/arch/s390/Kconfig	2006-07-06 00:09:49.000000000 -0400
+++ linux-2.6.18-rc1-kp390/arch/s390/Kconfig	2006-07-08 15:07:32.000000000 -0400
@@ -490,8 +490,22 @@ source "drivers/net/Kconfig"
 
 source "fs/Kconfig"
 
+menu "Instrumentation Support"
+	depends on EXPERIMENTAL
+
 source "arch/s390/oprofile/Kconfig"
 
+config KPROBES
+	bool "Kprobes (EXPERIMENTAL)"
+	depends on EXPERIMENTAL && MODULES
+	help
+	  Kprobes allows you to trap at almost any kernel address and
+	  execute a callback function.  register_kprobe() establishes
+	  a probepoint and specifies the callback.  Kprobes is useful
+	  for kernel debugging, non-intrusive instrumentation and testing.
+	  If in doubt, say "N".
+endmenu
+
 source "arch/s390/Kconfig.debug"
 
 source "security/Kconfig"
diff -Nurp linux-2.6.18-rc1/arch/s390/kernel/entry64.S linux-2.6.18-rc1-kp390/arch/s390/kernel/entry64.S
--- linux-2.6.18-rc1/arch/s390/kernel/entry64.S	2006-07-06 00:09:49.000000000 -0400
+++ linux-2.6.18-rc1-kp390/arch/s390/kernel/entry64.S	2006-07-08 15:07:32.000000000 -0400
@@ -518,6 +518,8 @@ pgm_no_vtime2:
 #endif
 	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
 	lg	%r1,__TI_task(%r9)
+	tm	__LC_PGM_OLD_PSW+1(%r15),0x01	# kernel per event ?
+	jz	kernel_per
 	mvc	__THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
 	mvc	__THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
 	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
@@ -553,6 +555,16 @@ pgm_no_vtime3:
 	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
 	j	sysc_do_svc
 
+#
+# per was called from kernel, must be kprobes
+#
+kernel_per:
+	lhi	%r0,__LC_PGM_OLD_PSW
+	sth	%r0,SP_TRAP(%r15)	# set trap indication to pgm check
+	la	%r2,SP_PTREGS(%r15)	# address of register-save area
+	larl	%r14,sysc_leave		# load adr. of system ret, no work
+	jg	do_single_step		# branch to do_single_step
+
 /*
  * IO interrupt handler routine
  */
diff -Nurp linux-2.6.18-rc1/arch/s390/kernel/entry.S linux-2.6.18-rc1-kp390/arch/s390/kernel/entry.S
--- linux-2.6.18-rc1/arch/s390/kernel/entry.S	2006-07-06 00:09:49.000000000 -0400
+++ linux-2.6.18-rc1-kp390/arch/s390/kernel/entry.S	2006-07-08 15:07:32.000000000 -0400
@@ -505,6 +505,8 @@ pgm_no_vtime2:
 	mvc	__THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
 	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
 	oi	__TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
+	tm	__LC_PGM_OLD_PSW+1(%r15),0x01	# kernel per event ?
+	bz	BASED(kernel_per)
 	l	%r3,__LC_PGM_ILC	 # load program interruption code
 	la	%r8,0x7f
 	nr	%r8,%r3                  # clear per-event-bit and ilc
@@ -536,6 +538,16 @@ pgm_no_vtime3:
 	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
 	b	BASED(sysc_do_svc)
 
+#
+# per was called from kernel, must be kprobes
+#
+kernel_per:
+	mvi	SP_TRAP+1(%r15),0x28	# set trap indication to pgm check
+	la	%r2,SP_PTREGS(%r15)	# address of register-save area
+	l	%r1,BASED(.Lhandle_per)	# load adr. of per handler
+	la	%r14,BASED(sysc_leave)	# load adr. of system return
+	br	%r1			# branch to do_single_step
+
 /*
  * IO interrupt handler routine
  */
diff -Nurp linux-2.6.18-rc1/arch/s390/kernel/kprobes.c linux-2.6.18-rc1-kp390/arch/s390/kernel/kprobes.c
--- linux-2.6.18-rc1/arch/s390/kernel/kprobes.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18-rc1-kp390/arch/s390/kernel/kprobes.c	2006-07-08 15:07:32.000000000 -0400
@@ -0,0 +1,692 @@
+/*
+ *  Kernel Probes (KProbes)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2006
+ *
+ * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
+ */
+
+#include <linux/config.h>
+#include <linux/kprobes.h>
+#include <linux/ptrace.h>
+#include <linux/preempt.h>
+#include <linux/stop_machine.h>
+#include <asm/cacheflush.h>
+#include <asm/kdebug.h>
+#include <asm/sections.h>
+#include <asm/uaccess.h>
+#include <linux/module.h>
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+	/* Make sure the probe isn't going on a difficult instruction */
+	if (is_prohibited_opcode((kprobe_opcode_t *) p->addr))
+		return -EINVAL;
+
+	if ((unsigned long)p->addr & 0x01) {
+		printk("Attempt to register kprobe at an unaligned address\n");
+		return -EINVAL;
+	}
+
+	/* Use the get_insn_slot() facility for correctness */
+	if (!(p->ainsn.insn = get_insn_slot()))
+		return -ENOMEM;
+
+	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+
+	get_instruction_type(&p->ainsn);
+	p->opcode = *p->addr;
+	return 0;
+}
+
+int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction)
+{
+	switch (*(__u8 *) instruction) {
+	case 0x0c:      /* bassm */
+	case 0x0b:      /* bsm   */
+	case 0x83:      /* diag  */
+	case 0x44:      /* ex    */
+		return -EINVAL;
+	}
+	switch (*(__u16 *) instruction) {
+	case 0x0101:    /* pr    */
+	case 0xb25a:    /* bsa   */
+	case 0xb240:    /* bakr  */
+	case 0xb258:    /* bsg   */
+	case 0xb218:    /* pc    */
+	case 0xb228:    /* pt    */
+		return -EINVAL;
+	}
+	return 0;
+}
+
+void __kprobes get_instruction_type(struct arch_specific_insn *ainsn)
+{
+	/* default fixup method */
+	ainsn->fixup = FIXUP_PSW_NORMAL;
+
+	/* save r1 operand */
+	ainsn->reg = *(__u8 *) (ainsn->insn + 1) & 0xf0;
+
+	/* save the instruction length (pop 5-5) in bytes */
+	switch (*(__u8 *) (ainsn->insn) >> 4) {
+	case 0:
+		ainsn->ilen = 2;
+		break;
+	case 1:
+	case 2:
+		ainsn->ilen = 4;
+		break;
+	case 3:
+		ainsn->ilen = 6;
+		break;
+	}
+
+	switch (*(__u8 *) ainsn->insn) {
+	case 0x05:	/* balr	*/
+	case 0x0d:	/* basr */
+		ainsn->fixup = FIXUP_RETURN_REGISTER;
+		/* if r2 = 0, no branch will be taken */
+		if ((*(__u8 *) (ainsn->insn + 1) & 0x0f) == 0)
+			ainsn->fixup |= FIXUP_BRANCH_NOT_TAKEN;
+		break;
+	case 0x06:	/* bctr	*/
+	case 0x07:	/* bcr	*/
+		ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
+		break;
+	case 0x45:	/* bal	*/
+	case 0x4d:	/* bas	*/
+		ainsn->fixup = FIXUP_RETURN_REGISTER;
+		break;
+	case 0x47:	/* bc	*/
+	case 0x46:	/* bct	*/
+	case 0x86:	/* bxh	*/
+	case 0x87:	/* bxle	*/
+		ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
+		break;
+	case 0x82:	/* lpsw	*/
+		ainsn->fixup = FIXUP_NOT_REQUIRED;
+		break;
+	case 0xb2:	/* lpswe */
+		if (*(__u8 *) (ainsn->insn + 1) == 0xb2) {
+			ainsn->fixup = FIXUP_NOT_REQUIRED;
+		}
+		break;
+	case 0xa7:	/* bras	*/
+		if ((*(__u8 *) (ainsn->insn + 1) & 0x0f) == 0x05) {
+			ainsn->fixup = FIXUP_RETURN_REGISTER;
+		}
+		break;
+	case 0xc0:
+		if ((*(__u8 *) (ainsn->insn + 1) & 0x0f) == 0x00 ||	/* larl  */
+		    (*(__u8 *) (ainsn->insn + 1) & 0x0f) == 0x05) {	/* brasl */
+			ainsn->fixup = FIXUP_RETURN_REGISTER;
+		}
+		break;
+	case 0xeb:
+		if (*(__u8 *) (ainsn->insn + 5) == 0x44 ||	/* bxhg  */
+			*(__u8 *) (ainsn->insn + 5) == 0x45) {	/* bxleg */
+			ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
+		}
+		break;
+	case 0xe3:	/* bctg	*/
+		if (*(__u8 *) (ainsn->insn + 5) == 0x46) {
+			ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
+		}
+		break;
+	}
+}
+
+static int __kprobes swap_instruction(void *aref)
+{
+	unsigned long addr, prev, tmp;
+	int shift;
+	struct ins_replace_args *args = aref;
+
+	addr = (unsigned long) args->ptr;
+	shift = (2 ^ (addr & 2)) << 3;
+	addr ^= addr & 2;
+	asm volatile(
+		"    l   %0,0(%4)\n"
+		"    nr  %0,%5\n"
+                "    lr  %1,%0\n"
+		"    or  %0,%2\n"
+		"    or  %1,%3\n"
+		"0:  cs  %0,%1,0(%4)\n"
+		"    jnl 1f\n"
+		"    xr  %1,%0\n"
+		"    nr  %1,%5\n"
+		"    jnz 0b\n"
+		"1:"
+#ifndef __s390x__
+		".section .fixup,\"ax\"\n"
+		"2: lhi    %0,%6\n"
+		"   bras   1,3f\n"
+		"   .long  1b\n"
+		"3: l      1,0(1)\n"
+		"   br     1\n"
+		".previous\n"
+		".section __ex_table,\"a\"\n"
+		"   .align 4\n"
+		"   .long  0b,2b\n"
+		".previous"
+#else /* __s390x__ */
+		".section .fixup,\"ax\"\n"
+		"2: lghi   %0,%6\n"
+		"   jg     1b\n"
+		".previous\n"
+		".section __ex_table,\"a\"\n"
+		"   .align 8\n"
+		"   .quad  0b,2b\n"
+		".previous"
+#endif /* __s390x__ */
+		: "=&d" (prev), "=&d" (tmp)
+		: "d" (args->old << shift), "d" (args->new << shift),
+		  "a" (args->ptr), "d" (~(65535 << shift)), "K" (-EFAULT)
+		: "memory", "cc" );
+	return prev >> shift;
+}
+
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	unsigned long status = kcb->kprobe_status;
+	struct ins_replace_args args;
+
+	args.ptr = p->addr;
+	args.old = p->opcode;
+	args.new = BREAKPOINT_INSTRUCTION;
+
+	kcb->kprobe_status = KPROBE_SWAP_INST;
+	stop_machine_run(swap_instruction, &args, NR_CPUS);
+	kcb->kprobe_status = status;
+}
+
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
+{
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	unsigned long status = kcb->kprobe_status;
+	struct ins_replace_args args;
+
+	args.ptr = p->addr;
+	args.old = BREAKPOINT_INSTRUCTION;
+	args.new = p->opcode;
+
+	kcb->kprobe_status = KPROBE_SWAP_INST;
+	stop_machine_run(swap_instruction, &args, NR_CPUS);
+	kcb->kprobe_status = status;
+}
+
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+	mutex_lock(&kprobe_mutex);
+	free_insn_slot(p->ainsn.insn);
+	mutex_unlock(&kprobe_mutex);
+}
+
+static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+{
+	per_cr_bits kprobe_per_regs[1];
+
+	memset(kprobe_per_regs, 0, sizeof(per_cr_bits));
+	regs->psw.addr = (unsigned long)p->ainsn.insn | PSW_ADDR_AMODE;
+
+	/* Set up the per control reg info, will pass to lctl */
+	kprobe_per_regs[0].em_instruction_fetch = 1;
+	kprobe_per_regs[0].starting_addr = (unsigned long)p->ainsn.insn;
+	kprobe_per_regs[0].ending_addr = (unsigned long)p->ainsn.insn + 1;
+
+	/* Set the PER control regs, turns on single step for this address */
+	__ctl_load(kprobe_per_regs, 9, 11);
+	regs->psw.mask |= PSW_MASK_PER;
+	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
+}
+
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+	kcb->prev_kprobe.kp = kprobe_running();
+	kcb->prev_kprobe.status = kcb->kprobe_status;
+	kcb->prev_kprobe.kprobe_saved_imask = kcb->kprobe_saved_imask;
+	memcpy(kcb->prev_kprobe.kprobe_saved_ctl, kcb->kprobe_saved_ctl,
+					sizeof(kcb->kprobe_saved_ctl));
+}
+
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+	kcb->kprobe_status = kcb->prev_kprobe.status;
+	kcb->kprobe_saved_imask = kcb->prev_kprobe.kprobe_saved_imask;
+	memcpy(kcb->kprobe_saved_ctl, kcb->prev_kprobe.kprobe_saved_ctl,
+					sizeof(kcb->kprobe_saved_ctl));
+}
+
+static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+						struct kprobe_ctlblk *kcb)
+{
+	__get_cpu_var(current_kprobe) = p;
+	/* Save the interrupt and per flags */
+	kcb->kprobe_saved_imask = regs->psw.mask &
+	    (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
+	/* Save the control regs that govern PER */
+	__ctl_store(kcb->kprobe_saved_ctl, 9, 11);
+}
+
+/* Called with kretprobe_lock held */
+void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
+					struct pt_regs *regs)
+{
+	struct kretprobe_instance *ri;
+
+	if ((ri = get_free_rp_inst(rp)) != NULL) {
+		ri->rp = rp;
+		ri->task = current;
+		ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];
+
+		/* Replace the return addr with trampoline addr */
+		regs->gprs[14] = (unsigned long)&kretprobe_trampoline;
+
+		add_rp_inst(ri);
+	} else {
+		rp->nmissed++;
+	}
+}
+
+static int __kprobes kprobe_handler(struct pt_regs *regs)
+{
+	struct kprobe *p;
+	int ret = 0;
+	unsigned long *addr = (unsigned long *)
+		((regs->psw.addr & PSW_ADDR_INSN) - 2);
+	struct kprobe_ctlblk *kcb;
+
+	/*
+	 * We don't want to be preempted for the entire
+	 * duration of kprobe processing
+	 */
+	preempt_disable();
+	kcb = get_kprobe_ctlblk();
+
+	/* Check we're not actually recursing */
+	if (kprobe_running()) {
+		p = get_kprobe(addr);
+		if (p) {
+			if (kcb->kprobe_status == KPROBE_HIT_SS &&
+			    *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
+				regs->psw.mask &= ~PSW_MASK_PER;
+				regs->psw.mask |= kcb->kprobe_saved_imask;
+				goto no_kprobe;
+			}
+			/* We have re-entered the kprobe_handler(), since
+			 * another probe was hit while within the handler.
+			 * Here we save the original kprobe variables and
+			 * just single-step on the instruction of the new
+			 * probe, without calling any user handlers.
+			 */
+			save_previous_kprobe(kcb);
+			set_current_kprobe(p, regs, kcb);
+			kprobes_inc_nmissed_count(p);
+			prepare_singlestep(p, regs);
+			kcb->kprobe_status = KPROBE_REENTER;
+			return 1;
+		} else {
+			p = __get_cpu_var(current_kprobe);
+			if (p->break_handler && p->break_handler(p, regs)) {
+				goto ss_probe;
+			}
+		}
+		goto no_kprobe;
+	}
+
+	p = get_kprobe(addr);
+	if (!p) {
+		if (*addr != BREAKPOINT_INSTRUCTION) {
+			/*
+			 * The breakpoint instruction was removed right
+			 * after we hit it.  Another cpu has removed
+			 * either a probepoint or a debugger breakpoint
+			 * at this address.  In either case, no further
+			 * handling of this interrupt is appropriate.
+			 */
+			ret = 1;
+		}
+		/* Not one of ours: let kernel handle it */
+		goto no_kprobe;
+	}
+
+	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+	set_current_kprobe(p, regs, kcb);
+	if (p->pre_handler && p->pre_handler(p, regs))
+		/* handler has already set things up, so skip ss setup */
+		return 1;
+
+ss_probe:
+	prepare_singlestep(p, regs);
+	kcb->kprobe_status = KPROBE_HIT_SS;
+	return 1;
+
+no_kprobe:
+	preempt_enable_no_resched();
+	return ret;
+}
+
+/*
+ * Function return probe trampoline:
+ * 	- init_kprobes() establishes a probepoint here
+ * 	- When the probed function returns, this probe
+ * 		causes the handlers to fire
+ */
+void kretprobe_trampoline_holder(void)
+{
+	asm volatile (".global kretprobe_trampoline\n"
+		      "kretprobe_trampoline:\n" "bcr 0,0\n");
+}
+
+/*
+ * Called when the probe at kretprobe trampoline is hit
+ */
+int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kretprobe_instance *ri = NULL;
+	struct hlist_head *head;
+	struct hlist_node *node, *tmp;
+	unsigned long flags, orig_ret_address = 0;
+	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+
+	spin_lock_irqsave(&kretprobe_lock, flags);
+	head = kretprobe_inst_table_head(current);
+
+	/*
+	 * It is possible to have multiple instances associated with a given
+	 * task either because multiple functions in the call path
+	 * have a return probe installed on them, and/or more than one
+	 * return probe was registered for a target function.
+	 *
+	 * We can handle this because:
+	 *     - instances are always inserted at the head of the list
+	 *     - when multiple return probes are registered for the same
+	 *       function, the first instance's ret_addr will point to the
+	 *       real return address, and all the rest will point to
+	 *       kretprobe_trampoline
+	 */
+	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+		if (ri->task != current)
+			/* another task is sharing our hash bucket */
+			continue;
+
+		if (ri->rp && ri->rp->handler)
+			ri->rp->handler(ri, regs);
+
+		orig_ret_address = (unsigned long)ri->ret_addr;
+		recycle_rp_inst(ri);
+
+		if (orig_ret_address != trampoline_address) {
+			/*
+			 * This is the real return address. Any other
+			 * instances associated with this task are for
+			 * other calls deeper on the call stack
+			 */
+			break;
+		}
+	}
+	BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
+	regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;
+
+	reset_current_kprobe();
+	spin_unlock_irqrestore(&kretprobe_lock, flags);
+	preempt_enable_no_resched();
+
+	/*
+	 * By returning a non-zero value, we are telling
+	 * kprobe_handler() that we don't want the post_handler
+	 * to run (and have re-enabled preemption)
+	 */
+	return 1;
+}
+
+/*
+ * Called after single-stepping.  p->addr is the address of the
+ * instruction whose first byte has been replaced by the "breakpoint"
+ * instruction.  To avoid the SMP problems that can occur when we
+ * temporarily put back the original opcode to single-step, we
+ * single-stepped a copy of the instruction.  The address of this
+ * copy is p->ainsn.insn.
+ */
+static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	regs->psw.addr &= PSW_ADDR_INSN;
+
+	if (p->ainsn.fixup & FIXUP_PSW_NORMAL)
+		regs->psw.addr = (unsigned long)p->addr +
+				((unsigned long)regs->psw.addr -
+				 (unsigned long)p->ainsn.insn);
+
+	if (p->ainsn.fixup & FIXUP_BRANCH_NOT_TAKEN)
+		if ((unsigned long)regs->psw.addr -
+		    (unsigned long)p->ainsn.insn == p->ainsn.ilen)
+			regs->psw.addr = (unsigned long)p->addr + p->ainsn.ilen;
+
+	if (p->ainsn.fixup & FIXUP_RETURN_REGISTER)
+		regs->gprs[p->ainsn.reg] = ((unsigned long)p->addr +
+						(regs->gprs[p->ainsn.reg] -
+						(unsigned long)p->ainsn.insn))
+						| PSW_ADDR_AMODE;
+
+	regs->psw.addr |= PSW_ADDR_AMODE;
+	/* turn off PER mode */
+	regs->psw.mask &= ~PSW_MASK_PER;
+	/* Restore the original per control regs */
+	__ctl_load(kcb->kprobe_saved_ctl, 9, 11);
+	regs->psw.mask |= kcb->kprobe_saved_imask;
+}
+
+static int __kprobes post_kprobe_handler(struct pt_regs *regs)
+{
+	struct kprobe *cur = kprobe_running();
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	if (!cur)
+		return 0;
+
+	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+		kcb->kprobe_status = KPROBE_HIT_SSDONE;
+		cur->post_handler(cur, regs, 0);
+	}
+
+	resume_execution(cur, regs);
+
+	/* Restore the original saved kprobe variables and continue. */
+	if (kcb->kprobe_status == KPROBE_REENTER) {
+		restore_previous_kprobe(kcb);
+		goto out;
+	}
+	reset_current_kprobe();
+out:
+	preempt_enable_no_resched();
+
+	/*
+	 * if somebody else is singlestepping across a probe point, psw mask
+	 * will have PER set, in which case, continue the remaining processing
+	 * of do_single_step, as if this is not a probe hit.
+	 */
+	if (regs->psw.mask & PSW_MASK_PER) {
+		return 0;
+	}
+
+	return 1;
+}
+
+static int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+{
+	struct kprobe *cur = kprobe_running();
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	const struct exception_table_entry *entry;
+
+	switch (kcb->kprobe_status) {
+	case KPROBE_SWAP_INST:
+		/* We are here because the instruction replacement failed */
+		return 0;
+	case KPROBE_HIT_SS:
+	case KPROBE_REENTER:
+		/*
+		 * We are here because the instruction being single
+		 * stepped caused a page fault. We reset the current
+		 * kprobe, point the psw back to the probe address
+		 * and allow the page fault handler to continue as a
+		 * normal page fault.
+		 */
+		regs->psw.addr = (unsigned long)cur->addr | PSW_ADDR_AMODE;
+		regs->psw.mask &= ~PSW_MASK_PER;
+		regs->psw.mask |= kcb->kprobe_saved_imask;
+		if (kcb->kprobe_status == KPROBE_REENTER)
+			restore_previous_kprobe(kcb);
+		else
+			reset_current_kprobe();
+		preempt_enable_no_resched();
+		break;
+	case KPROBE_HIT_ACTIVE:
+	case KPROBE_HIT_SSDONE:
+		/*
+		 * We increment the nmissed count for accounting;
+		 * the npre/npostfault counts could also be used to
+		 * account for these specific fault cases.
+		 */
+		kprobes_inc_nmissed_count(cur);
+
+		/*
+		 * We come here because instructions in the pre/post
+		 * handler caused the page fault. This could happen
+		 * if the handler tries to access user space via
+		 * copy_from_user(), get_user() etc. Let the
+		 * user-specified handler try to fix it first.
+		 */
+		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+			return 1;
+
+		/*
+		 * In case the user-specified fault handler returned
+		 * zero, try to fix up.
+		 */
+		entry = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
+		if (entry) {
+			regs->psw.addr = entry->fixup | PSW_ADDR_AMODE;
+			return 1;
+		}
+
+		/*
+		 * fixup_exception() could not handle it;
+		 * let do_page_fault() fix it.
+		 */
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+/*
+ * Wrapper routine for handling exceptions.
+ */
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+				       unsigned long val, void *data)
+{
+	struct die_args *args = (struct die_args *)data;
+	int ret = NOTIFY_DONE;
+
+	switch (val) {
+	case DIE_BPT:
+		if (kprobe_handler(args->regs))
+			ret = NOTIFY_STOP;
+		break;
+	case DIE_SSTEP:
+		if (post_kprobe_handler(args->regs))
+			ret = NOTIFY_STOP;
+		break;
+	case DIE_TRAP:
+	case DIE_PAGE_FAULT:
+		/* kprobe_running() needs smp_processor_id() */
+		preempt_disable();
+		if (kprobe_running() &&
+		    kprobe_fault_handler(args->regs, args->trapnr))
+			ret = NOTIFY_STOP;
+		preempt_enable();
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct jprobe *jp = container_of(p, struct jprobe, kp);
+	unsigned long addr;
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));
+
+	/* setup return addr to the jprobe handler routine */
+	regs->psw.addr = (unsigned long)(jp->entry) | PSW_ADDR_AMODE;
+
+	/* r14 is the function return address */
+	kcb->jprobe_saved_r14 = (unsigned long)regs->gprs[14];
+	/* r15 is the stack pointer */
+	kcb->jprobe_saved_r15 = (unsigned long)regs->gprs[15];
+	addr = (unsigned long)kcb->jprobe_saved_r15;
+
+	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr,
+	       MIN_STACK_SIZE(addr));
+	return 1;
+}
+
+void __kprobes jprobe_return(void)
+{
+	asm volatile (".word 0x0002");
+}
+
+void __kprobes jprobe_return_end(void)
+{
+	asm volatile ("bcr 0,0");
+}
+
+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_r15);
+
+	/* Put the regs back */
+	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
+	/* put the stack back */
+	memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
+	       MIN_STACK_SIZE(stack_addr));
+	preempt_enable_no_resched();
+	return 1;
+}
+
+static struct kprobe trampoline_p = {
+	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
+	.pre_handler = trampoline_probe_handler
+};
+
+int __init arch_init_kprobes(void)
+{
+	return register_kprobe(&trampoline_p);
+}
diff -Nurp linux-2.6.18-rc1/arch/s390/kernel/Makefile linux-2.6.18-rc1-kp390/arch/s390/kernel/Makefile
--- linux-2.6.18-rc1/arch/s390/kernel/Makefile	2006-07-06 00:09:49.000000000 -0400
+++ linux-2.6.18-rc1-kp390/arch/s390/kernel/Makefile	2006-07-08 15:07:32.000000000 -0400
@@ -22,6 +22,7 @@ obj-$(CONFIG_BINFMT_ELF32)	+= binfmt_elf
 
 obj-$(CONFIG_VIRT_TIMER)	+= vtime.o
 obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
+obj-$(CONFIG_KPROBES)		+= kprobes.o
 
 # Kexec part
 S390_KEXEC_OBJS := machine_kexec.o crash.o
diff -Nurp linux-2.6.18-rc1/arch/s390/kernel/traps.c linux-2.6.18-rc1-kp390/arch/s390/kernel/traps.c
--- linux-2.6.18-rc1/arch/s390/kernel/traps.c	2006-07-06 00:09:49.000000000 -0400
+++ linux-2.6.18-rc1-kp390/arch/s390/kernel/traps.c	2006-07-08 15:07:32.000000000 -0400
@@ -29,6 +29,7 @@
 #include <linux/module.h>
 #include <linux/kallsyms.h>
 #include <linux/reboot.h>
+#include <linux/kprobes.h>
 
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -39,6 +40,7 @@
 #include <asm/s390_ext.h>
 #include <asm/lowcore.h>
 #include <asm/debug.h>
+#include <asm/kdebug.h>
 
 /* Called from entry.S only */
 extern void handle_per_exception(struct pt_regs *regs);
@@ -74,6 +76,20 @@ static int kstack_depth_to_print = 12;
 static int kstack_depth_to_print = 20;
 #endif /* CONFIG_64BIT */
 
+ATOMIC_NOTIFIER_HEAD(s390die_chain);
+
+int register_die_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&s390die_chain, nb);
+}
+EXPORT_SYMBOL(register_die_notifier);
+
+int unregister_die_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&s390die_chain, nb);
+}
+EXPORT_SYMBOL(unregister_die_notifier);
+
 /*
  * For show_trace we have three different stacks to consider:
  *   - the panic stack which is used if the kernel stack has overflown
@@ -305,8 +321,9 @@ report_user_fault(long interruption_code
 #endif
 }
 
-static void inline do_trap(long interruption_code, int signr, char *str,
-                           struct pt_regs *regs, siginfo_t *info)
+static void __kprobes inline do_trap(long interruption_code, int signr,
+					char *str, struct pt_regs *regs,
+					siginfo_t *info)
 {
 	/*
 	 * We got all needed information from the lowcore and can
@@ -315,6 +332,10 @@ static void inline do_trap(long interrup
         if (regs->psw.mask & PSW_MASK_PSTATE)
 		local_irq_enable();
 
+	if (notify_die(DIE_TRAP, str, regs, interruption_code,
+				interruption_code, signr) == NOTIFY_STOP)
+		return;
+
         if (regs->psw.mask & PSW_MASK_PSTATE) {
                 struct task_struct *tsk = current;
 
@@ -336,8 +357,12 @@ static inline void *get_check_address(st
 	return (void *)((regs->psw.addr-S390_lowcore.pgm_ilc) & PSW_ADDR_INSN);
 }
 
-void do_single_step(struct pt_regs *regs)
+void __kprobes do_single_step(struct pt_regs *regs)
 {
+	if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0,
+					SIGTRAP) == NOTIFY_STOP){
+		return;
+	}
 	if ((current->ptrace & PT_PTRACED) != 0)
 		force_sig(SIGTRAP, current);
 }
@@ -463,8 +488,15 @@ asmlinkage void illegal_op(struct pt_reg
 #endif
 		} else
 			signal = SIGILL;
-	} else
-		signal = SIGILL;
+	} else {
+		/*
+		 * If we get an illegal op in kernel mode, send it through the
+		 * kprobes notifier. If kprobes doesn't pick it up, SIGILL
+		 */
+		if (notify_die(DIE_BPT, "bpt", regs, interruption_code,
+					3, SIGTRAP) != NOTIFY_STOP)
+			signal = SIGILL;
+	}
 
 #ifdef CONFIG_MATHEMU
         if (signal == SIGFPE)
diff -Nurp linux-2.6.18-rc1/arch/s390/kernel/vmlinux.lds.S linux-2.6.18-rc1-kp390/arch/s390/kernel/vmlinux.lds.S
--- linux-2.6.18-rc1/arch/s390/kernel/vmlinux.lds.S	2006-07-06 00:09:49.000000000 -0400
+++ linux-2.6.18-rc1-kp390/arch/s390/kernel/vmlinux.lds.S	2006-07-08 15:07:32.000000000 -0400
@@ -24,6 +24,7 @@ SECTIONS
 	*(.text)
 	SCHED_TEXT
 	LOCK_TEXT
+	KPROBES_TEXT
 	*(.fixup)
 	*(.gnu.warning)
 	} = 0x0700
diff -Nurp linux-2.6.18-rc1/arch/s390/mm/fault.c linux-2.6.18-rc1-kp390/arch/s390/mm/fault.c
--- linux-2.6.18-rc1/arch/s390/mm/fault.c	2006-07-06 00:09:49.000000000 -0400
+++ linux-2.6.18-rc1-kp390/arch/s390/mm/fault.c	2006-07-08 15:07:32.000000000 -0400
@@ -25,10 +25,12 @@
 #include <linux/console.h>
 #include <linux/module.h>
 #include <linux/hardirq.h>
+#include <linux/kprobes.h>
 
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
+#include <asm/kdebug.h>
 
 #ifndef CONFIG_64BIT
 #define __FAIL_ADDR_MASK 0x7ffff000
@@ -48,6 +50,38 @@ extern int sysctl_userprocess_debug;
 
 extern void die(const char *,struct pt_regs *,long);
 
+#ifdef CONFIG_KPROBES
+ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
+int register_page_fault_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
+}
+
+int unregister_page_fault_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
+}
+
+static inline int notify_page_fault(enum die_val val, const char *str,
+			struct pt_regs *regs, long err, int trap, int sig)
+{
+	struct die_args args = {
+		.regs = regs,
+		.str = str,
+		.err = err,
+		.trapnr = trap,
+		.signr = sig
+	};
+	return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
+}
+#else
+static inline int notify_page_fault(enum die_val val, const char *str,
+			struct pt_regs *regs, long err, int trap, int sig)
+{
+	return NOTIFY_DONE;
+}
+#endif
+
 extern spinlock_t timerlist_lock;
 
 /*
@@ -159,7 +193,7 @@ static void do_sigsegv(struct pt_regs *r
  *   11       Page translation     ->  Not present       (nullification)
  *   3b       Region third trans.  ->  Not present       (nullification)
  */
-static inline void
+static inline void __kprobes
 do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
 {
         struct task_struct *tsk;
@@ -173,6 +207,10 @@ do_exception(struct pt_regs *regs, unsig
         tsk = current;
         mm = tsk->mm;
 	
+	if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
+					SIGSEGV) == NOTIFY_STOP)
+		return;
+
 	/* 
          * Check for low-address protection.  This needs to be treated
 	 * as a special case because the translation exception code 
diff -Nurp linux-2.6.18-rc1/include/asm-s390/kdebug.h linux-2.6.18-rc1-kp390/include/asm-s390/kdebug.h
--- linux-2.6.18-rc1/include/asm-s390/kdebug.h	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18-rc1-kp390/include/asm-s390/kdebug.h	2006-07-08 15:07:32.000000000 -0400
@@ -0,0 +1,57 @@
+#ifndef _S390_KDEBUG_H
+#define _S390_KDEBUG_H
+
+/*
+ * Feb 2006 Ported to s390 <grundym@us.ibm.com>
+ */
+#include <linux/notifier.h>
+
+struct pt_regs;
+
+struct die_args {
+	struct pt_regs *regs;
+	const char *str;
+	long err;
+	int trapnr;
+	int signr;
+};
+
+/* Note - you should never unregister because that can race with NMIs.
+ * If you really want to do it first unregister - then synchronize_sched
+ *  - then free.
+ */
+extern int register_die_notifier(struct notifier_block *);
+extern int unregister_die_notifier(struct notifier_block *);
+extern struct atomic_notifier_head s390die_chain;
+
+
+enum die_val {
+	DIE_OOPS = 1,
+	DIE_BPT,
+	DIE_SSTEP,
+	DIE_PANIC,
+	DIE_NMI,
+	DIE_DIE,
+	DIE_NMIWATCHDOG,
+	DIE_KERNELDEBUG,
+	DIE_TRAP,
+	DIE_GPF,
+	DIE_CALL,
+	DIE_NMI_IPI,
+	DIE_PAGE_FAULT,
+};
+
+static inline int notify_die(enum die_val val, const char *str,
+			struct pt_regs *regs, long err, int trap, int sig)
+{
+	struct die_args args = {
+		.regs = regs,
+		.str = str,
+		.err = err,
+		.trapnr = trap,
+		.signr = sig
+	};
+	return atomic_notifier_call_chain(&s390die_chain, val, &args);
+}
+
+#endif
diff -Nurp linux-2.6.18-rc1/include/asm-s390/kprobes.h linux-2.6.18-rc1-kp390/include/asm-s390/kprobes.h
--- linux-2.6.18-rc1/include/asm-s390/kprobes.h	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18-rc1-kp390/include/asm-s390/kprobes.h	2006-07-08 15:07:32.000000000 -0400
@@ -0,0 +1,111 @@
+#ifndef _ASM_S390_KPROBES_H
+#define _ASM_S390_KPROBES_H
+/*
+ *  Kernel Probes (KProbes)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2006
+ *
+ * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
+ *		Probes initial implementation ( includes suggestions from
+ *		Rusty Russell).
+ * 2004-Nov	Modified for PPC64 by Ananth N Mavinakayanahalli
+ *		<ananth@in.ibm.com>
+ * 2005-Dec	Used as a template for s390 by Mike Grundy
+ * 		<grundym@us.ibm.com>
+ */
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/percpu.h>
+
+#define  __ARCH_WANT_KPROBES_INSN_SLOT
+struct pt_regs;
+struct kprobe;
+
+typedef u16 kprobe_opcode_t;
+#define BREAKPOINT_INSTRUCTION	0x0002
+
+/* Maximum instruction size is 3 (16bit) halfwords: */
+#define MAX_INSN_SIZE		0x0003
+#define MAX_STACK_SIZE 		64
+#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \
+	(((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) \
+	? (MAX_STACK_SIZE) \
+	: (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
+
+#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)((func_descr_t *)pentry)
+
+#define ARCH_SUPPORTS_KRETPROBES
+#define  ARCH_INACTIVE_KPROBE_COUNT 0
+
+#define KPROBE_SWAP_INST	0x10
+
+#define FIXUP_PSW_NORMAL	0x08
+#define FIXUP_BRANCH_NOT_TAKEN	0x04
+#define FIXUP_RETURN_REGISTER	0x02
+#define FIXUP_NOT_REQUIRED	0x01
+
+/* Architecture specific copy of original instruction */
+struct arch_specific_insn {
+	/* copy of original instruction */
+	kprobe_opcode_t *insn;
+	int fixup;
+	int ilen;
+	int reg;
+};
+
+struct ins_replace_args {
+	kprobe_opcode_t *ptr;
+	kprobe_opcode_t old;
+	kprobe_opcode_t new;
+};
+struct prev_kprobe {
+	struct kprobe *kp;
+	unsigned long status;
+	unsigned long saved_psw;
+	unsigned long kprobe_saved_imask;
+	unsigned long kprobe_saved_ctl[3];
+};
+
+/* per-cpu kprobe control block */
+struct kprobe_ctlblk {
+	unsigned long kprobe_status;
+	unsigned long kprobe_saved_imask;
+	unsigned long kprobe_saved_ctl[3];
+	struct pt_regs jprobe_saved_regs;
+	unsigned long jprobe_saved_r14;
+	unsigned long jprobe_saved_r15;
+	struct prev_kprobe prev_kprobe;
+	kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
+};
+
+void arch_remove_kprobe(struct kprobe *p);
+void kretprobe_trampoline(void);
+int  is_prohibited_opcode(kprobe_opcode_t *instruction);
+void get_instruction_type(struct arch_specific_insn *ainsn);
+#endif	/* _ASM_S390_KPROBES_H */
+
+#ifdef CONFIG_KPROBES
+
+extern int kprobe_exceptions_notify(struct notifier_block *self,
+					unsigned long val, void *data);
+#else	/* !CONFIG_KPROBES */
+static inline int kprobe_exceptions_notify(struct notifier_block *self,
+						unsigned long val, void *data)
+{
+	return 0;
+}
+#endif

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH] kprobes for s390 architecture
  2006-07-07 17:25             ` Heiko Carstens
@ 2006-07-08 18:54               ` Mike Grundy
  2006-07-08 19:58                 ` Mike Grundy
  2006-07-11 13:54               ` Mike Grundy
  1 sibling, 1 reply; 31+ messages in thread
From: Mike Grundy @ 2006-07-08 18:54 UTC (permalink / raw)
  To: Heiko Carstens
  Cc: Martin Schwidefsky, Jan Glauber, linux-kernel, systemtap, dwilder

On Fri, Jul 07, 2006 at 07:25:55PM +0200, Heiko Carstens wrote:
> How fast is this if you have to exchange several hundred instructions?
I'll have to do some timing tests to measure how much overhead it puts on the
system. It's a trade-off of safety vs. speed: each increment of safety we've
added to the swap has carried an inherent performance penalty. The plus side is
that the swaps are only done on activation and deactivation, so they don't add
this kind of overhead during normal operation.
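
Something along these lines is what I have in mind - a rough, untested sketch
(NPROBES and the already-registered probes[] array are just placeholders):

#include <linux/time.h>
#include <linux/kprobes.h>
#include <linux/kernel.h>

#define NPROBES 500	/* placeholder batch size */

/* Untested sketch: time NPROBES arm operations, each of which does one
 * stop_machine_run() inside arch_arm_kprobe(). */
static void time_arm_batch(struct kprobe **probes)
{
	struct timespec start, end;
	int i;

	getnstimeofday(&start);
	for (i = 0; i < NPROBES; i++)
		arch_arm_kprobe(probes[i]);
	getnstimeofday(&end);

	printk(KERN_INFO "armed %d probes in %lld ns\n", NPROBES,
	       (long long)(timespec_to_ns(&end) - timespec_to_ns(&start)));
}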

The latest patch is attached.

Thanks
Mike


Signed-off-by: Michael Grundy <grundym@us.ibm.com>

 arch/s390/Kconfig              |   14
 arch/s390/kernel/Makefile      |    1
 arch/s390/kernel/entry.S       |   12
 arch/s390/kernel/entry64.S     |   12
 arch/s390/kernel/kprobes.c     |  692 +++++++++++++++++++++++++++++++++++++++++
 arch/s390/kernel/traps.c       |   42 ++
 arch/s390/kernel/vmlinux.lds.S |    1
 arch/s390/mm/fault.c           |   40 ++
 include/asm-s390/kdebug.h      |   57 +++
 include/asm-s390/kprobes.h     |  111 ++++++
 10 files changed, 976 insertions(+), 6 deletions(-)

diff -Nurp linux-2.6.17-git25/arch/s390/Kconfig linux-2.6.17-git25-kp390/arch/s390/Kconfig
--- linux-2.6.17-git25/arch/s390/Kconfig	2006-07-08 13:05:48.000000000 -0400
+++ linux-2.6.17-git25-kp390/arch/s390/Kconfig	2006-07-08 13:06:46.000000000 -0400
@@ -490,8 +490,22 @@ source "drivers/net/Kconfig"
 
 source "fs/Kconfig"
 
+menu "Instrumentation Support"
+	depends on EXPERIMENTAL
+
 source "arch/s390/oprofile/Kconfig"
 
+config KPROBES
+	bool "Kprobes (EXPERIMENTAL)"
+	depends on EXPERIMENTAL && MODULES
+	help
+	  Kprobes allows you to trap at almost any kernel address and
+	  execute a callback function.  register_kprobe() establishes
+	  a probepoint and specifies the callback.  Kprobes is useful
+	  for kernel debugging, non-intrusive instrumentation and testing.
+	  If in doubt, say "N".
+endmenu
+
 source "arch/s390/Kconfig.debug"
 
 source "security/Kconfig"
diff -Nurp linux-2.6.17-git25/arch/s390/kernel/entry64.S linux-2.6.17-git25-kp390/arch/s390/kernel/entry64.S
--- linux-2.6.17-git25/arch/s390/kernel/entry64.S	2006-07-08 13:05:48.000000000 -0400
+++ linux-2.6.17-git25-kp390/arch/s390/kernel/entry64.S	2006-07-08 13:06:46.000000000 -0400
@@ -518,6 +518,8 @@ pgm_no_vtime2:
 #endif
 	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
 	lg	%r1,__TI_task(%r9)
+	tm	SP_PSW+1(%r15),0x01	# kernel per event ?
+	jz	kernel_per
 	mvc	__THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
 	mvc	__THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
 	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
@@ -553,6 +555,16 @@ pgm_no_vtime3:
 	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
 	j	sysc_do_svc
 
+#
+# per was called from kernel, must be kprobes
+#
+kernel_per:
+	lhi	%r0,__LC_PGM_OLD_PSW
+	sth	%r0,SP_TRAP(%r15)	# set trap indication to pgm check
+	la	%r2,SP_PTREGS(%r15)	# address of register-save area
+	larl	%r14,sysc_leave		# load adr. of system ret, no work
+	jg	do_single_step		# branch to do_single_step
+
 /*
  * IO interrupt handler routine
  */
diff -Nurp linux-2.6.17-git25/arch/s390/kernel/entry.S linux-2.6.17-git25-kp390/arch/s390/kernel/entry.S
--- linux-2.6.17-git25/arch/s390/kernel/entry.S	2006-07-08 13:05:48.000000000 -0400
+++ linux-2.6.17-git25-kp390/arch/s390/kernel/entry.S	2006-07-08 13:06:46.000000000 -0400
@@ -505,6 +505,8 @@ pgm_no_vtime2:
 	mvc	__THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
 	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
 	oi	__TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
+	tm	SP_PSW+1(%r15),0x01	# kernel per event ?
+	bz	BASED(kernel_per)
 	l	%r3,__LC_PGM_ILC	 # load program interruption code
 	la	%r8,0x7f
 	nr	%r8,%r3                  # clear per-event-bit and ilc
@@ -536,6 +538,16 @@ pgm_no_vtime3:
 	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
 	b	BASED(sysc_do_svc)
 
+#
+# per was called from kernel, must be kprobes
+#
+kernel_per:
+	mvi	SP_TRAP+1(%r15),0x28	# set trap indication to pgm check
+	la	%r2,SP_PTREGS(%r15)	# address of register-save area
+	l	%r1,BASED(.Lhandle_per)	# load adr. of per handler
+	la	%r14,BASED(sysc_leave)	# load adr. of system return
+	br	%r1			# branch to do_single_step
+
 /*
  * IO interrupt handler routine
  */
diff -Nurp linux-2.6.17-git25/arch/s390/kernel/kprobes.c linux-2.6.17-git25-kp390/arch/s390/kernel/kprobes.c
--- linux-2.6.17-git25/arch/s390/kernel/kprobes.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.17-git25-kp390/arch/s390/kernel/kprobes.c	2006-07-08 13:06:46.000000000 -0400
@@ -0,0 +1,692 @@
+/*
+ *  Kernel Probes (KProbes)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2006
+ *
+ * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
+ */
+
+#include <linux/config.h>
+#include <linux/kprobes.h>
+#include <linux/ptrace.h>
+#include <linux/preempt.h>
+#include <linux/stop_machine.h>
+#include <asm/cacheflush.h>
+#include <asm/kdebug.h>
+#include <asm/sections.h>
+#include <asm/uaccess.h>
+#include <linux/module.h>
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+	/* Make sure the probe isn't going on a difficult instruction */
+	if (is_prohibited_opcode((kprobe_opcode_t *) p->addr))
+		return -EINVAL;
+
+	if ((unsigned long)p->addr & 0x01) {
+		printk("Attempt to register kprobe at an unaligned address\n");
+		return -EINVAL;
+	}
+
+	/* Use the get_insn_slot() facility for correctness */
+	if (!(p->ainsn.insn = get_insn_slot()))
+		return -ENOMEM;
+
+	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+
+	get_instruction_type(&p->ainsn);
+	p->opcode = *p->addr;
+	return 0;
+}
+
+int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction)
+{
+	switch (*(__u8 *) instruction) {
+	case 0x0c:      /* bassm */
+	case 0x0b:      /* bsm   */
+	case 0x83:      /* diag  */
+	case 0x44:      /* ex    */
+		return -EINVAL;
+	}
+	switch (*(__u16 *) instruction) {
+	case 0x0101:    /* pr    */
+	case 0xb25a:    /* bsa   */
+	case 0xb240:    /* bakr  */
+	case 0xb258:    /* bsg   */
+	case 0xb218:    /* pc    */
+	case 0xb228:    /* pt    */
+		return -EINVAL;
+	}
+	return 0;
+}
+
+void __kprobes get_instruction_type(struct arch_specific_insn *ainsn)
+{
+	/* default fixup method */
+	ainsn->fixup = FIXUP_PSW_NORMAL;
+
+	/* save r1 operand */
+	ainsn->reg = *(__u8 *) (ainsn->insn + 1) & 0xf0;
+
+	/* save the instruction length (pop 5-5) in bytes */
+	switch (*(__u8 *) (ainsn->insn) >> 4) {
+	case 0:
+		ainsn->ilen = 2;
+		break;
+	case 1:
+	case 2:
+		ainsn->ilen = 4;
+		break;
+	case 3:
+		ainsn->ilen = 6;
+		break;
+	}
+
+	switch (*(__u8 *) ainsn->insn) {
+	case 0x05:	/* balr	*/
+	case 0x0d:	/* basr */
+		ainsn->fixup = FIXUP_RETURN_REGISTER;
+		/* if r2 = 0, no branch will be taken */
+		if ((*(__u8 *) (ainsn->insn + 1) & 0x0f) == 0)
+			ainsn->fixup |= FIXUP_BRANCH_NOT_TAKEN;
+		break;
+	case 0x06:	/* bctr	*/
+	case 0x07:	/* bcr	*/
+		ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
+		break;
+	case 0x45:	/* bal	*/
+	case 0x4d:	/* bas	*/
+		ainsn->fixup = FIXUP_RETURN_REGISTER;
+		break;
+	case 0x47:	/* bc	*/
+	case 0x46:	/* bct	*/
+	case 0x86:	/* bxh	*/
+	case 0x87:	/* bxle	*/
+		ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
+		break;
+	case 0x82:	/* lpsw	*/
+		ainsn->fixup = FIXUP_NOT_REQUIRED;
+		break;
+	case 0xb2:	/* lpswe */
+		if (*(__u8 *) (ainsn->insn + 1) == 0xb2) {
+			ainsn->fixup = FIXUP_NOT_REQUIRED;
+		}
+		break;
+	case 0xa7:	/* bras	*/
+		if ((*(__u8 *) (ainsn->insn + 1) & 0x0f) == 0x05) {
+			ainsn->fixup = FIXUP_RETURN_REGISTER;
+		}
+		break;
+	case 0xc0:
+		if ((*(__u8 *) (ainsn->insn + 1) & 0x0f) == 0x00 ||    /*larl */
+			(*(__u8 *) (ainsn->insn + 1) & 0x0f) == 0x05){ /*brasl*/
+			ainsn->fixup = FIXUP_RETURN_REGISTER;
+		}
+		break;
+	case 0xeb:
+		if (*(__u8 *) (ainsn->insn + 5) == 0x44 ||	/* bxhg  */
+			*(__u8 *) (ainsn->insn + 5) == 0x45) {	/* bxleg */
+			ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
+		}
+		break;
+	case 0xe3:	/* bctg	*/
+		if (*(__u8 *) (ainsn->insn + 5) == 0x46) {
+			ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
+		}
+		break;
+	}
+}
+static int __kprobes swap_instruction(void *aref)
+{
+	unsigned long addr, prev, tmp;
+	int shift;
+	struct ins_replace_args *args = aref;
+
+	addr = (unsigned long) args->ptr;
+	shift = (2 ^ (addr & 2)) << 3;
+	addr ^= addr & 2;
+	asm volatile(
+		"    l   %0,0(%4)\n"
+		"    nr  %0,%5\n"
+		"    lr  %1,%0\n"
+		"    or  %0,%2\n"
+		"    or  %1,%3\n"
+		"0:  cs  %0,%1,0(%4)\n"
+		"    jnl 1f\n"
+		"    xr  %1,%0\n"
+		"    nr  %1,%5\n"
+		"    jnz 0b\n"
+		"1:"
+#ifndef __s390x__
+		".section .fixup,\"ax\"\n"
+		"2: lhi    %0,%6\n"
+		"   bras   1,3f\n"
+		"   .long  1b\n"
+		"3: l      1,0(1)\n"
+		"   br     1\n"
+		".previous\n"
+		".section __ex_table,\"a\"\n"
+		"   .align 4\n"
+		"   .long  0b,2b\n"
+		".previous"
+#else /* __s390x__ */
+		".section .fixup,\"ax\"\n"
+		"2: lghi   %0,%6\n"
+		"   jg     1b\n"
+		".previous\n"
+		".section __ex_table,\"a\"\n"
+		"   .align 8\n"
+		"   .quad  0b,2b\n"
+		".previous"
+#endif /* __s390x__ */
+		: "=&d" (prev), "=&d" (tmp)
+		: "d" (args->old << shift), "d" (args->new << shift),
+		  "a" (args->ptr), "d" (~(65535 << shift)), "K" (-EFAULT)
+		: "memory", "cc" );
+	return prev >> shift;
+}
+
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	unsigned long status = kcb->kprobe_status;
+	struct ins_replace_args args;
+
+	args.ptr = p->addr;
+	args.old = p->opcode;
+	args.new = BREAKPOINT_INSTRUCTION;
+
+	kcb->kprobe_status = KPROBE_SWAP_INST;
+	stop_machine_run(swap_instruction, &args, NR_CPUS);
+	kcb->kprobe_status = status;
+}
+
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
+{
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	unsigned long status = kcb->kprobe_status;
+	struct ins_replace_args args;
+
+	args.ptr = p->addr;
+	args.old = BREAKPOINT_INSTRUCTION;
+	args.new = p->opcode;
+
+	kcb->kprobe_status = KPROBE_SWAP_INST;
+	stop_machine_run(swap_instruction, &args, NR_CPUS);
+	kcb->kprobe_status = status;
+}
+
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+	mutex_lock(&kprobe_mutex);
+	free_insn_slot(p->ainsn.insn);
+	mutex_unlock(&kprobe_mutex);
+}
+
+static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+{
+	per_cr_bits kprobe_per_regs[1];
+
+	memset(kprobe_per_regs, 0, sizeof(per_cr_bits));
+	regs->psw.addr = (unsigned long)p->ainsn.insn | PSW_ADDR_AMODE;
+
+	/* Set up the per control reg info, will pass to lctl */
+	kprobe_per_regs[0].em_instruction_fetch = 1;
+	kprobe_per_regs[0].starting_addr = (unsigned long)p->ainsn.insn;
+	kprobe_per_regs[0].ending_addr = (unsigned long)p->ainsn.insn + 1;
+
+	/* Set the PER control regs, turns on single step for this address */
+	__ctl_load(kprobe_per_regs, 9, 11);
+	regs->psw.mask |= PSW_MASK_PER;
+	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
+}
+
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+	kcb->prev_kprobe.kp = kprobe_running();
+	kcb->prev_kprobe.status = kcb->kprobe_status;
+	kcb->prev_kprobe.kprobe_saved_imask = kcb->kprobe_saved_imask;
+	memcpy(kcb->prev_kprobe.kprobe_saved_ctl, kcb->kprobe_saved_ctl,
+					sizeof(kcb->kprobe_saved_ctl));
+}
+
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+	kcb->kprobe_status = kcb->prev_kprobe.status;
+	kcb->kprobe_saved_imask = kcb->prev_kprobe.kprobe_saved_imask;
+	memcpy(kcb->kprobe_saved_ctl, kcb->prev_kprobe.kprobe_saved_ctl,
+					sizeof(kcb->kprobe_saved_ctl));
+}
+
+static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+						struct kprobe_ctlblk *kcb)
+{
+	__get_cpu_var(current_kprobe) = p;
+	/* Save the interrupt and per flags */
+	kcb->kprobe_saved_imask = regs->psw.mask &
+	    (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
+	/* Save the control regs that govern PER */
+	__ctl_store(kcb->kprobe_saved_ctl, 9, 11);
+}
+
+/* Called with kretprobe_lock held */
+void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
+					struct pt_regs *regs)
+{
+	struct kretprobe_instance *ri;
+
+	if ((ri = get_free_rp_inst(rp)) != NULL) {
+		ri->rp = rp;
+		ri->task = current;
+		ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];
+
+		/* Replace the return addr with trampoline addr */
+		regs->gprs[14] = (unsigned long)&kretprobe_trampoline;
+
+		add_rp_inst(ri);
+	} else {
+		rp->nmissed++;
+	}
+}
+
+static int __kprobes kprobe_handler(struct pt_regs *regs)
+{
+	struct kprobe *p;
+	int ret = 0;
+	kprobe_opcode_t *addr = (kprobe_opcode_t *)
+		((regs->psw.addr & PSW_ADDR_INSN) - 2);
+	struct kprobe_ctlblk *kcb;
+
+	/*
+	 * We don't want to be preempted for the entire
+	 * duration of kprobe processing
+	 */
+	preempt_disable();
+	kcb = get_kprobe_ctlblk();
+
+	/* Check we're not actually recursing */
+	if (kprobe_running()) {
+		p = get_kprobe(addr);
+		if (p) {
+			if (kcb->kprobe_status == KPROBE_HIT_SS &&
+			    *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
+				regs->psw.mask &= ~PSW_MASK_PER;
+				regs->psw.mask |= kcb->kprobe_saved_imask;
+				goto no_kprobe;
+			}
+			/* We have reentered the kprobe_handler(), since
+			 * another probe was hit while within the handler.
+			 * Here we save the original kprobe variables and
+			 * just single step the instruction of the new probe,
+			 * without calling any user handlers.
+			 */
+			save_previous_kprobe(kcb);
+			set_current_kprobe(p, regs, kcb);
+			kprobes_inc_nmissed_count(p);
+			prepare_singlestep(p, regs);
+			kcb->kprobe_status = KPROBE_REENTER;
+			return 1;
+		} else {
+			p = __get_cpu_var(current_kprobe);
+			if (p->break_handler && p->break_handler(p, regs)) {
+				goto ss_probe;
+			}
+		}
+		goto no_kprobe;
+	}
+
+	p = get_kprobe(addr);
+	if (!p) {
+		if (*addr != BREAKPOINT_INSTRUCTION) {
+			/*
+			 * The breakpoint instruction was removed right
+			 * after we hit it.  Another cpu has removed
+			 * either a probepoint or a debugger breakpoint
+			 * at this address.  In either case, no further
+			 * handling of this interrupt is appropriate.
+			 *
+			 */
+			ret = 1;
+		}
+		/* Not one of ours: let kernel handle it */
+		goto no_kprobe;
+	}
+
+	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+	set_current_kprobe(p, regs, kcb);
+	if (p->pre_handler && p->pre_handler(p, regs))
+		/* handler has already set things up, so skip ss setup */
+		return 1;
+
+ss_probe:
+	prepare_singlestep(p, regs);
+	kcb->kprobe_status = KPROBE_HIT_SS;
+	return 1;
+
+no_kprobe:
+	preempt_enable_no_resched();
+	return ret;
+}
+
+/*
+ * Function return probe trampoline:
+ * 	- arch_init_kprobes() establishes a probepoint here
+ * 	- When the probed function returns, this probe
+ * 		causes the handlers to fire
+ */
+void kretprobe_trampoline_holder(void)
+{
+	asm volatile (".global kretprobe_trampoline\n"
+		      "kretprobe_trampoline:\n" "bcr 0,0\n");
+}
+
+/*
+ * Called when the probe at kretprobe trampoline is hit
+ */
+int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kretprobe_instance *ri = NULL;
+	struct hlist_head *head;
+	struct hlist_node *node, *tmp;
+	unsigned long flags, orig_ret_address = 0;
+	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+
+	spin_lock_irqsave(&kretprobe_lock, flags);
+	head = kretprobe_inst_table_head(current);
+
+	/*
+	 * It is possible to have multiple instances associated with a given
+	 * task either because multiple functions in the call path
+	 * have a return probe installed on them, and/or more than one
+	 * return probe was registered for a target function.
+	 *
+	 * We can handle this because:
+	 *     - instances are always inserted at the head of the list
+	 *     - when multiple return probes are registered for the same
+	 *       function, the first instance's ret_addr will point to the
+	 *       real return address, and all the rest will point to
+	 *       kretprobe_trampoline
+	 */
+	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+		if (ri->task != current)
+			/* another task is sharing our hash bucket */
+			continue;
+
+		if (ri->rp && ri->rp->handler)
+			ri->rp->handler(ri, regs);
+
+		orig_ret_address = (unsigned long)ri->ret_addr;
+		recycle_rp_inst(ri);
+
+		if (orig_ret_address != trampoline_address) {
+			/*
+			 * This is the real return address. Any other
+			 * instances associated with this task are for
+			 * other calls deeper on the call stack
+			 */
+			break;
+		}
+	}
+	BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
+	regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;
+
+	reset_current_kprobe();
+	spin_unlock_irqrestore(&kretprobe_lock, flags);
+	preempt_enable_no_resched();
+
+	/*
+	 * By returning a non-zero value, we are telling
+	 * kprobe_handler() that we don't want the post_handler
+	 * to run (and have re-enabled preemption)
+	 */
+	return 1;
+}
+
+/*
+ * Called after single-stepping.  p->addr is the address of the
+ * instruction whose first byte has been replaced by the "breakpoint"
+ * instruction.  To avoid the SMP problems that can occur when we
+ * temporarily put back the original opcode to single-step, we
+ * single-stepped a copy of the instruction.  The address of this
+ * copy is p->ainsn.insn.
+ */
+static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	regs->psw.addr &= PSW_ADDR_INSN;
+
+	if (p->ainsn.fixup & FIXUP_PSW_NORMAL)
+		regs->psw.addr = (unsigned long)p->addr +
+				((unsigned long)regs->psw.addr -
+				 (unsigned long)p->ainsn.insn);
+
+	if (p->ainsn.fixup & FIXUP_BRANCH_NOT_TAKEN)
+		if ((unsigned long)regs->psw.addr -
+		    (unsigned long)p->ainsn.insn == p->ainsn.ilen)
+			regs->psw.addr = (unsigned long)p->addr + p->ainsn.ilen;
+
+	if (p->ainsn.fixup & FIXUP_RETURN_REGISTER)
+		regs->gprs[p->ainsn.reg] = ((unsigned long)p->addr +
+						(regs->gprs[p->ainsn.reg] -
+						(unsigned long)p->ainsn.insn))
+						| PSW_ADDR_AMODE;
+
+	regs->psw.addr |= PSW_ADDR_AMODE;
+	/* turn off PER mode */
+	regs->psw.mask &= ~PSW_MASK_PER;
+	/* Restore the original per control regs */
+	__ctl_load(kcb->kprobe_saved_ctl, 9, 11);
+	regs->psw.mask |= kcb->kprobe_saved_imask;
+}
+
+static int __kprobes post_kprobe_handler(struct pt_regs *regs)
+{
+	struct kprobe *cur = kprobe_running();
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	if (!cur)
+		return 0;
+
+	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+		kcb->kprobe_status = KPROBE_HIT_SSDONE;
+		cur->post_handler(cur, regs, 0);
+	}
+
+	resume_execution(cur, regs);
+
+	/* Restore the original saved kprobe variables and continue. */
+	if (kcb->kprobe_status == KPROBE_REENTER) {
+		restore_previous_kprobe(kcb);
+		goto out;
+	}
+	reset_current_kprobe();
+out:
+	preempt_enable_no_resched();
+
+	/*
+	 * if somebody else is singlestepping across a probe point, psw mask
+	 * will have PER set, in which case, continue the remaining processing
+	 * of do_single_step, as if this is not a probe hit.
+	 */
+	if (regs->psw.mask & PSW_MASK_PER) {
+		return 0;
+	}
+
+	return 1;
+}
+
+static int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+{
+	struct kprobe *cur = kprobe_running();
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	const struct exception_table_entry *entry;
+
+	switch (kcb->kprobe_status) {
+	case KPROBE_SWAP_INST:
+		/* We are here because the instruction replacement failed */
+		return 0;
+	case KPROBE_HIT_SS:
+	case KPROBE_REENTER:
+		/*
+		 * We are here because the instruction being single
+		 * stepped caused a page fault. We reset the current
+		 * kprobe, point the psw back to the probe address
+		 * and allow the page fault handler to continue as a
+		 * normal page fault.
+		 */
+		regs->psw.addr = (unsigned long)cur->addr | PSW_ADDR_AMODE;
+		regs->psw.mask &= ~PSW_MASK_PER;
+		regs->psw.mask |= kcb->kprobe_saved_imask;
+		if (kcb->kprobe_status == KPROBE_REENTER)
+			restore_previous_kprobe(kcb);
+		else
+			reset_current_kprobe();
+		preempt_enable_no_resched();
+		break;
+	case KPROBE_HIT_ACTIVE:
+	case KPROBE_HIT_SSDONE:
+		/*
+		 * We increment the nmissed count for accounting;
+		 * the npre/npostfault counts could also be used to
+		 * account for these specific fault cases.
+		 */
+		kprobes_inc_nmissed_count(cur);
+
+		/*
+		 * We come here because instructions in the pre/post
+		 * handler caused the page fault. This could happen
+		 * if the handler tries to access user space via
+		 * copy_from_user(), get_user() etc. Let the
+		 * user-specified handler try to fix it first.
+		 */
+		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+			return 1;
+
+		/*
+		 * In case the user-specified fault handler returned
+		 * zero, try to fix up.
+		 */
+		entry = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
+		if (entry) {
+			regs->psw.addr = entry->fixup | PSW_ADDR_AMODE;
+			return 1;
+		}
+
+		/*
+		 * fixup_exception() could not handle it;
+		 * let do_page_fault() fix it.
+		 */
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+/*
+ * Wrapper routine for handling exceptions.
+ */
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+				       unsigned long val, void *data)
+{
+	struct die_args *args = (struct die_args *)data;
+	int ret = NOTIFY_DONE;
+
+	switch (val) {
+	case DIE_BPT:
+		if (kprobe_handler(args->regs))
+			ret = NOTIFY_STOP;
+		break;
+	case DIE_SSTEP:
+		if (post_kprobe_handler(args->regs))
+			ret = NOTIFY_STOP;
+		break;
+	case DIE_TRAP:
+	case DIE_PAGE_FAULT:
+		/* kprobe_running() needs smp_processor_id() */
+		preempt_disable();
+		if (kprobe_running() &&
+		    kprobe_fault_handler(args->regs, args->trapnr))
+			ret = NOTIFY_STOP;
+		preempt_enable();
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct jprobe *jp = container_of(p, struct jprobe, kp);
+	unsigned long addr;
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));
+
+	/* setup return addr to the jprobe handler routine */
+	regs->psw.addr = (unsigned long)(jp->entry) | PSW_ADDR_AMODE;
+
+	/* r14 is the function return address */
+	kcb->jprobe_saved_r14 = (unsigned long)regs->gprs[14];
+	/* r15 is the stack pointer */
+	kcb->jprobe_saved_r15 = (unsigned long)regs->gprs[15];
+	addr = (unsigned long)kcb->jprobe_saved_r15;
+
+	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr,
+	       MIN_STACK_SIZE(addr));
+	return 1;
+}
+
+void __kprobes jprobe_return(void)
+{
+	asm volatile (".word 0x0002");
+}
+
+void __kprobes jprobe_return_end(void)
+{
+	asm volatile ("bcr 0,0");
+}
+
+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_r15);
+
+	/* Put the regs back */
+	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
+	/* put the stack back */
+	memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
+	       MIN_STACK_SIZE(stack_addr));
+	preempt_enable_no_resched();
+	return 1;
+}
+
+static struct kprobe trampoline_p = {
+	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
+	.pre_handler = trampoline_probe_handler
+};
+
+int __init arch_init_kprobes(void)
+{
+	return register_kprobe(&trampoline_p);
+}
diff -Nurp linux-2.6.17-git25/arch/s390/kernel/Makefile linux-2.6.17-git25-kp390/arch/s390/kernel/Makefile
--- linux-2.6.17-git25/arch/s390/kernel/Makefile	2006-07-08 13:05:48.000000000 -0400
+++ linux-2.6.17-git25-kp390/arch/s390/kernel/Makefile	2006-07-08 13:08:41.000000000 -0400
@@ -22,6 +22,7 @@ obj-$(CONFIG_BINFMT_ELF32)	+= binfmt_elf
 
 obj-$(CONFIG_VIRT_TIMER)	+= vtime.o
 obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
+obj-$(CONFIG_KPROBES)		+= kprobes.o
 
 # Kexec part
 S390_KEXEC_OBJS := machine_kexec.o crash.o
diff -Nurp linux-2.6.17-git25/arch/s390/kernel/traps.c linux-2.6.17-git25-kp390/arch/s390/kernel/traps.c
--- linux-2.6.17-git25/arch/s390/kernel/traps.c	2006-07-08 13:05:48.000000000 -0400
+++ linux-2.6.17-git25-kp390/arch/s390/kernel/traps.c	2006-07-08 13:06:47.000000000 -0400
@@ -29,6 +29,7 @@
 #include <linux/module.h>
 #include <linux/kallsyms.h>
 #include <linux/reboot.h>
+#include <linux/kprobes.h>
 
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -39,6 +40,7 @@
 #include <asm/s390_ext.h>
 #include <asm/lowcore.h>
 #include <asm/debug.h>
+#include <asm/kdebug.h>
 
 /* Called from entry.S only */
 extern void handle_per_exception(struct pt_regs *regs);
@@ -74,6 +76,20 @@ static int kstack_depth_to_print = 12;
 static int kstack_depth_to_print = 20;
 #endif /* CONFIG_64BIT */
 
+ATOMIC_NOTIFIER_HEAD(s390die_chain);
+
+int register_die_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&s390die_chain, nb);
+}
+EXPORT_SYMBOL(register_die_notifier);
+
+int unregister_die_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&s390die_chain, nb);
+}
+EXPORT_SYMBOL(unregister_die_notifier);
+
 /*
  * For show_trace we have three different stacks to consider:
  *   - the panic stack which is used if the kernel stack has overflown
@@ -305,8 +321,9 @@ report_user_fault(long interruption_code
 #endif
 }
 
-static void inline do_trap(long interruption_code, int signr, char *str,
-                           struct pt_regs *regs, siginfo_t *info)
+static void __kprobes inline do_trap(long interruption_code, int signr,
+					char *str, struct pt_regs *regs,
+					siginfo_t *info)
 {
 	/*
 	 * We got all needed information from the lowcore and can
@@ -315,6 +332,10 @@ static void inline do_trap(long interrup
         if (regs->psw.mask & PSW_MASK_PSTATE)
 		local_irq_enable();
 
+	if (notify_die(DIE_TRAP, str, regs, interruption_code,
+				interruption_code, signr) == NOTIFY_STOP)
+		return;
+
         if (regs->psw.mask & PSW_MASK_PSTATE) {
                 struct task_struct *tsk = current;
 
@@ -336,8 +357,12 @@ static inline void *get_check_address(st
 	return (void *)((regs->psw.addr-S390_lowcore.pgm_ilc) & PSW_ADDR_INSN);
 }
 
-void do_single_step(struct pt_regs *regs)
+void __kprobes do_single_step(struct pt_regs *regs)
 {
+	if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0,
+					SIGTRAP) == NOTIFY_STOP){
+		return;
+	}
 	if ((current->ptrace & PT_PTRACED) != 0)
 		force_sig(SIGTRAP, current);
 }
@@ -463,8 +488,15 @@ asmlinkage void illegal_op(struct pt_reg
 #endif
 		} else
 			signal = SIGILL;
-	} else
-		signal = SIGILL;
+	} else {
+		/*
+		 * If we get an illegal op in kernel mode, send it through the
+		 * kprobes notifier. If kprobes doesn't pick it up, SIGILL
+		 */
+		if (notify_die(DIE_BPT, "bpt", regs, interruption_code,
+					3, SIGTRAP) != NOTIFY_STOP)
+			signal = SIGILL;
+	}
 
 #ifdef CONFIG_MATHEMU
         if (signal == SIGFPE)
diff -Nurp linux-2.6.17-git25/arch/s390/kernel/vmlinux.lds.S linux-2.6.17-git25-kp390/arch/s390/kernel/vmlinux.lds.S
--- linux-2.6.17-git25/arch/s390/kernel/vmlinux.lds.S	2006-07-08 13:05:48.000000000 -0400
+++ linux-2.6.17-git25-kp390/arch/s390/kernel/vmlinux.lds.S	2006-07-08 13:06:47.000000000 -0400
@@ -24,6 +24,7 @@ SECTIONS
 	*(.text)
 	SCHED_TEXT
 	LOCK_TEXT
+	KPROBES_TEXT
 	*(.fixup)
 	*(.gnu.warning)
 	} = 0x0700
diff -Nurp linux-2.6.17-git25/arch/s390/mm/fault.c linux-2.6.17-git25-kp390/arch/s390/mm/fault.c
--- linux-2.6.17-git25/arch/s390/mm/fault.c	2006-07-08 13:05:48.000000000 -0400
+++ linux-2.6.17-git25-kp390/arch/s390/mm/fault.c	2006-07-08 14:25:56.000000000 -0400
@@ -25,10 +25,12 @@
 #include <linux/console.h>
 #include <linux/module.h>
 #include <linux/hardirq.h>
+#include <linux/kprobes.h>
 
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
+#include <asm/kdebug.h>
 
 #ifndef CONFIG_64BIT
 #define __FAIL_ADDR_MASK 0x7ffff000
@@ -48,6 +50,38 @@ extern int sysctl_userprocess_debug;
 
 extern void die(const char *,struct pt_regs *,long);
 
+#ifdef CONFIG_KPROBES
+ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
+int register_page_fault_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
+}
+
+int unregister_page_fault_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
+}
+
+static inline int notify_page_fault(enum die_val val, const char *str,
+			struct pt_regs *regs, long err, int trap, int sig)
+{
+	struct die_args args = {
+		.regs = regs,
+		.str = str,
+		.err = err,
+		.trapnr = trap,
+		.signr = sig
+	};
+	return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
+}
+#else
+static inline int notify_page_fault(enum die_val val, const char *str,
+			struct pt_regs *regs, long err, int trap, int sig)
+{
+	return NOTIFY_DONE;
+}
+#endif
+
 extern spinlock_t timerlist_lock;
 
 /*
@@ -159,7 +193,7 @@ static void do_sigsegv(struct pt_regs *r
  *   11       Page translation     ->  Not present       (nullification)
  *   3b       Region third trans.  ->  Not present       (nullification)
  */
-static inline void
+static inline void __kprobes
 do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
 {
         struct task_struct *tsk;
@@ -173,6 +207,10 @@ do_exception(struct pt_regs *regs, unsig
         tsk = current;
         mm = tsk->mm;
 	
+	if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
+					SIGSEGV) == NOTIFY_STOP)
+		return;
+
 	/* 
          * Check for low-address protection.  This needs to be treated
 	 * as a special case because the translation exception code 
diff -Nurp linux-2.6.17-git25/include/asm-s390/kdebug.h linux-2.6.17-git25-kp390/include/asm-s390/kdebug.h
--- linux-2.6.17-git25/include/asm-s390/kdebug.h	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.17-git25-kp390/include/asm-s390/kdebug.h	2006-07-08 13:06:47.000000000 -0400
@@ -0,0 +1,57 @@
+#ifndef _S390_KDEBUG_H
+#define _S390_KDEBUG_H
+
+/*
+ * Feb 2006 Ported to s390 <grundym@us.ibm.com>
+ */
+#include <linux/notifier.h>
+
+struct pt_regs;
+
+struct die_args {
+	struct pt_regs *regs;
+	const char *str;
+	long err;
+	int trapnr;
+	int signr;
+};
+
+/* Note - you should never unregister because that can race with NMIs.
+ * If you really want to do it first unregister - then synchronize_sched
+ *  - then free.
+ */
+extern int register_die_notifier(struct notifier_block *);
+extern int unregister_die_notifier(struct notifier_block *);
+extern struct atomic_notifier_head s390die_chain;
+
+
+enum die_val {
+	DIE_OOPS = 1,
+	DIE_BPT,
+	DIE_SSTEP,
+	DIE_PANIC,
+	DIE_NMI,
+	DIE_DIE,
+	DIE_NMIWATCHDOG,
+	DIE_KERNELDEBUG,
+	DIE_TRAP,
+	DIE_GPF,
+	DIE_CALL,
+	DIE_NMI_IPI,
+	DIE_PAGE_FAULT,
+};
+
+static inline int notify_die(enum die_val val, const char *str,
+			struct pt_regs *regs, long err, int trap, int sig)
+{
+	struct die_args args = {
+		.regs = regs,
+		.str = str,
+		.err = err,
+		.trapnr = trap,
+		.signr = sig
+	};
+	return atomic_notifier_call_chain(&s390die_chain, val, &args);
+}
+
+#endif
diff -Nurp linux-2.6.17-git25/include/asm-s390/kprobes.h linux-2.6.17-git25-kp390/include/asm-s390/kprobes.h
--- linux-2.6.17-git25/include/asm-s390/kprobes.h	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.17-git25-kp390/include/asm-s390/kprobes.h	2006-07-08 13:39:00.000000000 -0400
@@ -0,0 +1,111 @@
+#ifndef _ASM_S390_KPROBES_H
+#define _ASM_S390_KPROBES_H
+/*
+ *  Kernel Probes (KProbes)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2006
+ *
+ * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
+ *		Probes initial implementation ( includes suggestions from
+ *		Rusty Russell).
+ * 2004-Nov	Modified for PPC64 by Ananth N Mavinakayanahalli
+ *		<ananth@in.ibm.com>
+ * 2005-Dec	Used as a template for s390 by Mike Grundy
+ * 		<grundym@us.ibm.com>
+ */
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/percpu.h>
+
+#define  __ARCH_WANT_KPROBES_INSN_SLOT
+struct pt_regs;
+struct kprobe;
+
+typedef u16 kprobe_opcode_t;
+#define BREAKPOINT_INSTRUCTION	0x0002
+
+/* Maximum instruction size is 3 (16bit) halfwords: */
+#define MAX_INSN_SIZE		0x0003
+#define MAX_STACK_SIZE 		64
+#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \
+	(((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) \
+	? (MAX_STACK_SIZE) \
+	: (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
+
+#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)((func_descr_t *)pentry)
+
+#define ARCH_SUPPORTS_KRETPROBES
+#define  ARCH_INACTIVE_KPROBE_COUNT 0
+
+#define KPROBE_SWAP_INST	0x10
+
+#define FIXUP_PSW_NORMAL	0x08
+#define FIXUP_BRANCH_NOT_TAKEN	0x04
+#define FIXUP_RETURN_REGISTER	0x02
+#define FIXUP_NOT_REQUIRED	0x01
+
+/* Architecture specific copy of original instruction */
+struct arch_specific_insn {
+	/* copy of original instruction */
+	kprobe_opcode_t *insn;
+	int fixup;
+	int ilen;
+	int reg;
+};
+
+struct ins_replace_args {
+	kprobe_opcode_t *ptr;
+	kprobe_opcode_t old;
+	kprobe_opcode_t new;
+};
+struct prev_kprobe {
+	struct kprobe *kp;
+	unsigned long status;
+	unsigned long saved_psw;
+	unsigned long kprobe_saved_imask;
+	unsigned long kprobe_saved_ctl[3];
+};
+
+/* per-cpu kprobe control block */
+struct kprobe_ctlblk {
+	unsigned long kprobe_status;
+	unsigned long kprobe_saved_imask;
+	unsigned long kprobe_saved_ctl[3];
+	struct pt_regs jprobe_saved_regs;
+	unsigned long jprobe_saved_r14;
+	unsigned long jprobe_saved_r15;
+	struct prev_kprobe prev_kprobe;
+	kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
+};
+
+void arch_remove_kprobe(struct kprobe *p);
+void kretprobe_trampoline(void);
+int  is_prohibited_opcode(kprobe_opcode_t *instruction);
+void get_instruction_type(struct arch_specific_insn *ainsn);
+#endif	/* _ASM_S390_KPROBES_H */
+
+#ifdef CONFIG_KPROBES
+
+extern int kprobe_exceptions_notify(struct notifier_block *self,
+					unsigned long val, void *data);
+#else	/* !CONFIG_KPROBES */
+static inline int kprobe_exceptions_notify(struct notifier_block *self,
+						unsigned long val, void *data)
+{
+	return 0;
+}
+#endif

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH] kprobes for s390 architecture
  2006-07-07 17:23           ` Mike Grundy
@ 2006-07-07 17:25             ` Heiko Carstens
  2006-07-08 18:54               ` Mike Grundy
  2006-07-11 13:54               ` Mike Grundy
  0 siblings, 2 replies; 31+ messages in thread
From: Heiko Carstens @ 2006-07-07 17:25 UTC (permalink / raw)
  To: Martin Schwidefsky, Jan Glauber, linux-kernel, systemtap

> ok, I tried, but my "better ideas" made things worse. stop_machine_run() wins:
> 
> void __kprobes arch_arm_kprobe(struct kprobe *p)
> {
>         struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
>         unsigned long status = kcb->kprobe_status;
>         struct ins_replace_args args;
> 
>         args.ptr = p->addr;
>         args.old = p->opcode;
>         args.new = BREAKPOINT_INSTRUCTION;
> 
>         kcb->kprobe_status = KPROBE_SWAP_INST;
>         stop_machine_run(swap_instruction, &args, NR_CPUS);
>         kcb->kprobe_status = status;
> }
> 
> It works, and I guess at this point it's the only way to do it. I'll send out a
> full patch with this and the other cleanups later.

How fast is this if you have to exchange several hundred instructions?

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH] kprobes for s390 architecture
  2006-06-28  5:58         ` Heiko Carstens
@ 2006-07-07 17:23           ` Mike Grundy
  2006-07-07 17:25             ` Heiko Carstens
  0 siblings, 1 reply; 31+ messages in thread
From: Mike Grundy @ 2006-07-07 17:23 UTC (permalink / raw)
  To: Heiko Carstens; +Cc: Martin Schwidefsky, Jan Glauber, linux-kernel, systemtap

On Wed, Jun 28, 2006 at 07:58:57AM +0200, Heiko Carstens wrote:
> On Tue, Jun 27, 2006 at 05:23:09PM +0200, Martin Schwidefsky wrote:
> > On Sat, 2006-06-24 at 13:36 +0200, Heiko Carstens wrote:
> > > Just do a compare and swap operation on the instruction you want to replace,
> > > then do an smp_call_function() with the wait parameter set to 1 and passing
> > > a pointer to a function that does nothing but return.
> > Not good enough. An instruction can be fetched multiple times for a
> > single execution (see the other mail). So you have a half executed
> > instruction, the cache line is invalidated, a new instruction is written
> > and the cache line is recreated to finish the half executed
> > instruction. That can easily happen on millicoded instructions.
> 
> Yes, looks like I was too optimistic. Seems like we really have to go for
> stop_machine_run() unless somebody comes up with a better idea...

ok, I tried, but my "better ideas" made things worse. stop_machine_run() wins:

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        unsigned long status = kcb->kprobe_status;
        struct ins_replace_args args;

        args.ptr = p->addr;
        args.old = p->opcode;
        args.new = BREAKPOINT_INSTRUCTION;

        kcb->kprobe_status = KPROBE_SWAP_INST;
        stop_machine_run(swap_instruction, &args, NR_CPUS);
        kcb->kprobe_status = status;
}

It works, and I guess at this point it's the only way to do it. I'll send out a
full patch with this and the other cleanups later.
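
(For reference: the swap_instruction() callback that stop_machine_run() drives
here is the one from the full patch. Once every other cpu is quiesced in
stop_machine(), it conceptually boils down to the untested sketch below; the
real version uses an inline compare-and-swap so that a changed opcode or an
unmapped address is caught instead of silently overwritten.)

static int swap_instruction(void *aref)
{
	struct ins_replace_args *args = aref;

	/* all other cpus are spinning in stop_machine(), safe to write */
	*args->ptr = args->new;
	return 0;
}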

Mike

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH] kprobes for s390 architecture
  2006-06-27 15:23       ` Martin Schwidefsky
@ 2006-06-28  5:58         ` Heiko Carstens
  2006-07-07 17:23           ` Mike Grundy
  0 siblings, 1 reply; 31+ messages in thread
From: Heiko Carstens @ 2006-06-28  5:58 UTC (permalink / raw)
  To: Martin Schwidefsky; +Cc: Michael Grundy, Jan Glauber, linux-kernel, systemtap

On Tue, Jun 27, 2006 at 05:23:09PM +0200, Martin Schwidefsky wrote:
> On Sat, 2006-06-24 at 13:36 +0200, Heiko Carstens wrote:
> > > At least this is something that could work... completely untested and might
> > > have some problems that I didn't think of ;)
> > > 
> > > struct capture_data {
> > > 	atomic_t cpus;
> > > 	atomic_t done;
> > > };
> > > 
> > > void capture_wait(void *data)
> > > { 
> > > 	struct capture_data *cap = data;
> > > 
> > > 	atomic_inc(&cap->cpus);
> > > 	while(!atomic_read(&cap->done))
> > > 		cpu_relax();
> > > 	atomic_dec(&cap->cpus);
> > > }
> > > 
> > > void replace_instr(int *a)
> > > {
> > > 	struct capture_data cap;
> > > 
> > > 	preempt_disable();
> > > 	atomic_set(&cap.cpus, 0);
> > > 	atomic_set(&cap.done, 0);
> > > 	smp_call_function(capture_wait, (void *)&cap, 0, 0);
> > > 	while (atomic_read(&cap.cpus) != num_online_cpus() - 1)
> > > 		cpu_relax();
> > > 	*a = 0x42;
> > > 	atomic_inc(&cap.done);
> > > 	while (atomic_read(&cap.cpus))
> > > 		cpu_relax();
> > > 	preempt_enable();
> > > }
> > 
> > Forget this crap. It can easily cause deadlocks with more than two cpus.
> 
> It is not that bad. Instead of preempt_disable/preempt_enable we need a
> spinlock. Then only one cpu can do this particular smp_call_function
> which will "stop" all other cpus until cap->done has been set.

CPU0: smp_call_function() -> loops and waits for the other cpus
CPU1: [irqs enabled] spin_lock(somelock) -> irq -> capture_wait() -> loops
CPU2: [irqs enabled] spin_lock_irqsave(somelock, ...) -> toasted

CPU2 ends up trying to grab the same lock that CPU1 holds, but with interrupts
disabled and the external interrupt from smp_call_function() still pending: it
can never enter capture_wait(), so cap.cpus never reaches num_online_cpus() - 1
and none of the cpus can make progress.

> > Just do a compare and swap operation on the instruction you want to replace,
> > then do an smp_call_function() with the wait parameter set to 1, passing
> > a pointer to a function that does nothing but return.
> Not good enough. An instruction can be fetched multiple times for a
> single execution (see the other mail). So you can have a half-executed
> instruction, the cache line is invalidated, a new instruction is written,
> and the cache line is recreated to finish the half-executed
> instruction. That can easily happen on millicoded instructions.

Yes, looks like I was too optimistic. Seems like we really have to go for
stop_machine_run() unless somebody comes up with a better idea...

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH] kprobes for s390 architecture
  2006-06-24 11:36     ` Heiko Carstens
  2006-06-24 12:15       ` Heiko Carstens
@ 2006-06-27 15:23       ` Martin Schwidefsky
  2006-06-28  5:58         ` Heiko Carstens
  1 sibling, 1 reply; 31+ messages in thread
From: Martin Schwidefsky @ 2006-06-27 15:23 UTC (permalink / raw)
  To: Heiko Carstens; +Cc: Michael Grundy, Jan Glauber, linux-kernel, systemtap

On Sat, 2006-06-24 at 13:36 +0200, Heiko Carstens wrote:
> > At least this is something that could work... completely untested and might
> > have some problems that I didn't think of ;)
> > 
> > struct capture_data {
> > 	atomic_t cpus;
> > 	atomic_t done;
> > };
> > 
> > void capture_wait(void *data)
> > { 
> > 	struct capture_data *cap = data;
> > 
> > 	atomic_inc(&cap->cpus);
> > 	while(!atomic_read(&cap->done))
> > 		cpu_relax();
> > 	atomic_dec(&cap->cpus);
> > }
> > 
> > void replace_instr(int *a)
> > {
> > 	struct capture_data cap;
> > 
> > 	preempt_disable();
> > 	atomic_set(&cap.cpus, 0);
> > 	atomic_set(&cap.done, 0);
> > 	smp_call_function(capture_wait, (void *)&cap, 0, 0);
> > 	while (atomic_read(&cap.cpus) != num_online_cpus() - 1)
> > 		cpu_relax();
> > 	*a = 0x42;
> > 	atomic_inc(&cap.done);
> > 	while (atomic_read(&cap.cpus))
> > 		cpu_relax();
> > 	preempt_enable();
> > }
> 
> Forget this crap. It can easily cause deadlocks with more than two cpus.

It is not that bad. Instead of preempt_disable/preempt_enable we need a
spinlock. Then only one cpu can do this particular smp_call_function
which will "stop" all other cpus until cap->done has been set.
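
A sketch of the variant being described (hypothetical; the lock name is
made up, and as Heiko's follow-up in this thread shows, it can still
deadlock when a third cpu spins on the same lock with interrupts
disabled):

static DEFINE_SPINLOCK(capture_lock);

void replace_instr(int *a)
{
	struct capture_data cap;

	spin_lock(&capture_lock);	/* only one cpu captures at a time */
	atomic_set(&cap.cpus, 0);
	atomic_set(&cap.done, 0);
	smp_call_function(capture_wait, (void *)&cap, 0, 0);
	while (atomic_read(&cap.cpus) != num_online_cpus() - 1)
		cpu_relax();
	*a = 0x42;
	atomic_inc(&cap.done);
	while (atomic_read(&cap.cpus))
		cpu_relax();
	spin_unlock(&capture_lock);
}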

> > Just do a compare and swap operation on the instruction you want to replace,
> > then do an smp_call_function() with the wait parameter set to 1, passing
> > a pointer to a function that does nothing but return.

Not good enough. An instruction can be fetched multiple times for a
single execution (see the other mail). So you can have a half-executed
instruction, the cache line is invalidated, a new instruction is written,
and the cache line is recreated to finish the half-executed
instruction. That can easily happen on millicoded instructions.

> The cs/csg instruction will make sure that your cpu has exclusive access
> to the memory region in question and will invalidate the cache lines on all
> other cpus.

The fact that the cache line is invalidated does not mean that you are safe...

> With the following smp_call_function() you can make sure that all other
> cpus discard everything they have prefetched. Hence there is only a small
> window between the cs/csg and the return of smp_call_function() where you
> do not know if other cpus are executing the old or the new instruction.

The serialization is indeed done by the smp_call_function(). No need to
have a "bcr 15,0" in the called function, the lpsw at the end of the
interrupt already does the serialization.
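
In other words, the function handed to smp_call_function() can be an
empty stub. A minimal sketch (the function name is made up, and the
four-argument smp_call_function(func, info, nonatomic, wait) of this
kernel series is assumed):

static void discard_prefetch(void *unused)
{
	/* Nothing to do: the lpsw executed on return from the
	 * external interrupt already serializes this cpu. */
}

	...
	smp_call_function(discard_prefetch, NULL, 0, 1);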

-- 
blue skies,
  Martin.

Martin Schwidefsky
Linux for zSeries Development & Services
IBM Deutschland Entwicklung GmbH

"Reality continues to ruin my life." - Calvin.



^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH] kprobes for s390 architecture
  2006-06-26 10:49             ` Mike Grundy
@ 2006-06-26 11:19               ` Heiko Carstens
  0 siblings, 0 replies; 31+ messages in thread
From: Heiko Carstens @ 2006-06-26 11:19 UTC (permalink / raw)
  To: Mike Grundy; +Cc: Jan Glauber, Martin Schwidefsky, linux-kernel, dwilder

On Mon, Jun 26, 2006 at 03:49:45AM -0700, Mike Grundy wrote:
> On Mon, Jun 26, 2006 at 10:09:10AM +0200, Heiko Carstens wrote:
> > > After reading your notes it's probably overkill doing the cs on each cpu, since
> > > the interrupt will discard the prefetched instructions.
> > 
> > Indeed. Another thing that should not be forgotten: it could be that the
> > whole kernel text segment resides in a shared read only segment. So it can
> > be shared by multiple z/VM guests.
> > In that case the cs instruction will fail. Looks like you need to write the
> > part that replaces the instruction in assembly and supply a fixup section
> > which in turn makes sure that -EFAULT is returned.
> 
> If it fails, won't it generate a program interrupt 5 (access exception)?

Yes, that's why you need a fixup section :)
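
For illustration, a rough sketch of such an assembly helper with a
fixup section (hypothetical: the mnemonics, constraints and 64-bit
exception-table layout here are assumptions and would need checking
against the real uaccess fixup code; it also glosses over folding the
2-byte breakpoint opcode into the aligned word that cs operates on):

static int swap_word(u32 *addr, u32 old, u32 new)
{
	int err = 0;

	asm volatile(
		"0:	cs	%1,%2,0(%3)\n"	/* faults on r/o text */
		"1:\n"
		".section .fixup,\"ax\"\n"
		"2:	lghi	%0,%4\n"	/* err = -EFAULT */
		"	jg	1b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.align	8\n"
		"	.quad	0b,2b\n"
		".previous"
		: "+d" (err), "+d" (old)
		: "d" (new), "a" (addr), "K" (-EFAULT)
		: "cc", "memory");
	return err;	/* 0 on success, -EFAULT if the store faulted */
}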

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH] kprobes for s390 architecture
  2006-06-26  8:09           ` Heiko Carstens
@ 2006-06-26 10:49             ` Mike Grundy
  2006-06-26 11:19               ` Heiko Carstens
  0 siblings, 1 reply; 31+ messages in thread
From: Mike Grundy @ 2006-06-26 10:49 UTC (permalink / raw)
  To: Heiko Carstens; +Cc: Jan Glauber, Martin Schwidefsky, linux-kernel, dwilder

On Mon, Jun 26, 2006 at 10:09:10AM +0200, Heiko Carstens wrote:
> > After reading your notes it's probably overkill doing the cs on each cpu, since
> > the interrupt will discard the prefetched instructions.
> 
> Indeed. Another thing that should not be forgotten: it could be that the
> whole kernel text segment resides in a shared read only segment. So it can
> be shared by multiple z/VM guests.
> In that case the cs instruction will fail. Looks like you need to write the
> part that replaces the instruction in assembly and supply a fixup section
> which in turn makes sure that -EFAULT is returned.

If it fails, won't it generate a program interrupt 5 (access exception)?


^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH] kprobes for s390 architecture
  2006-06-25 13:31         ` Mike Grundy
@ 2006-06-26  8:09           ` Heiko Carstens
  2006-06-26 10:49             ` Mike Grundy
  0 siblings, 1 reply; 31+ messages in thread
From: Heiko Carstens @ 2006-06-26  8:09 UTC (permalink / raw)
  To: Jan Glauber, Martin Schwidefsky, linux-kernel, systemtap

> Here's what I came up with Friday before I jumped timezones back east:
> 
> void smp_replace_instruction(void *info)
> {
> 	struct ins_replace_args *parms;
> 
> 	parms = (struct ins_replace_args *) info;
> 	cmpxchg(parms->addr, parms->oinsn, parms->ninsn);
> }
> 
> void __kprobes arch_arm_kprobe(struct kprobe *p)
> {
> 	struct ins_replace_args parms;
> 	parms.addr = p->addr;
> 	parms.ninsn = BREAKPOINT_INSTRUCTION;
> 	parms.oinsn = p->opcode;
> 
> 	on_each_cpu(smp_replace_instruction, &parms, 0, 1);
> } etc...
> 
> After reading your notes it's probably overkill doing the cs on each cpu, since
> the interrupt will discard the prefetched instructions.

Indeed. Another thing that should not be forgotten: it could be that the
whole kernel text segment resides in a shared read only segment. So it can
be shared by multiple z/VM guests.
In that case the cs instruction will fail. Looks like you need to write the
part that replaces the instruction in assembly and supply a fixup section
which in turn makes sure that -EFAULT is returned.

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH] kprobes for s390 architecture
  2006-06-24 12:15       ` Heiko Carstens
@ 2006-06-25 13:31         ` Mike Grundy
  2006-06-26  8:09           ` Heiko Carstens
  0 siblings, 1 reply; 31+ messages in thread
From: Mike Grundy @ 2006-06-25 13:31 UTC (permalink / raw)
  To: Heiko Carstens; +Cc: Jan Glauber, Martin Schwidefsky, linux-kernel, systemtap

On Sat, Jun 24, 2006 at 02:15:41PM +0200, Heiko Carstens wrote:
> > Just do a compare and swap operation on the instruction you want to replace,
> > then do an smp_call_function() with the wait parameter set to 1, passing
> > a pointer to a function that does nothing but return.
Here's what I came up with Friday before I jumped timezones back east:

void smp_replace_instruction(void *info)
{
	struct ins_replace_args *parms;

	parms = (struct ins_replace_args *) info;
	cmpxchg(parms->addr, parms->oinsn, parms->ninsn);
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	struct ins_replace_args parms;
	parms.addr = p->addr;
	parms.ninsn = BREAKPOINT_INSTRUCTION;
	parms.oinsn = p->opcode;

	on_each_cpu(smp_replace_instruction, &parms, 0, 1);
} etc...

After reading your notes it's probably overkill doing the cs on each cpu, since
the interrupt will discard the prefetched instructions.

-- 
Thanks
Mike

=========================================
Michael Grundy - grundym@us.ibm.com
Advanced Linux Response Team (ALRT)
http://ltc.linux.ibm.com/teamweb/alrt/
845-435-8842 (T/L 295)

If at first you don't succeed, call in an air strike.


^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH] kprobes for s390 architecture
  2006-06-24 11:36     ` Heiko Carstens
@ 2006-06-24 12:15       ` Heiko Carstens
  2006-06-25 13:31         ` Mike Grundy
  2006-06-27 15:23       ` Martin Schwidefsky
  1 sibling, 1 reply; 31+ messages in thread
From: Heiko Carstens @ 2006-06-24 12:15 UTC (permalink / raw)
  To: Michael Grundy; +Cc: Jan Glauber, Martin Schwidefsky, linux-kernel, systemtap

> Just do a compare and swap operation on the instruction you want to replace,
> then do an smp_call_function() with the wait parameter set to 1, passing
> a pointer to a function that does nothing but return.

Setting wait to 1 isn't necessary either. Wondering if I get anything right
today...

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH] kprobes for s390 architecture
  2006-06-23 22:21   ` [PATCH] kprobes for s390 architecture Heiko Carstens
@ 2006-06-24 11:36     ` Heiko Carstens
  2006-06-24 12:15       ` Heiko Carstens
  2006-06-27 15:23       ` Martin Schwidefsky
  0 siblings, 2 replies; 31+ messages in thread
From: Heiko Carstens @ 2006-06-24 11:36 UTC (permalink / raw)
  To: Michael Grundy; +Cc: Jan Glauber, Martin Schwidefsky, linux-kernel, systemtap

> At least this is something that could work... completely untested and might
> have some problems that I didn't think of ;)
> 
> struct capture_data {
> 	atomic_t cpus;
> 	atomic_t done;
> };
> 
> void capture_wait(void *data)
> { 
> 	struct capture_data *cap = data;
> 
> 	atomic_inc(&cap->cpus);
> 	while(!atomic_read(&cap->done))
> 		cpu_relax();
> 	atomic_dec(&cap->cpus);
> }
> 
> void replace_instr(int *a)
> {
> 	struct capture_data cap;
> 
> 	preempt_disable();
> 	atomic_set(&cap.cpus, 0);
> 	atomic_set(&cap.done, 0);
> 	smp_call_function(capture_wait, (void *)&cap, 0, 0);
> 	while (atomic_read(&cap.cpus) != num_online_cpus() - 1)
> 		cpu_relax();
> 	*a = 0x42;
> 	atomic_inc(&cap.done);
> 	while (atomic_read(&cap.cpus))
> 		cpu_relax();
> 	preempt_enable();
> }

Forget this crap. It can easily cause deadlocks with more than two cpus.

Just do a compare and swap operation on the instruction you want to replace,
then do an smp_call_function() with the wait parameter set to 1, passing
a pointer to a function that does nothing but return.

The cs/csg instruction will make sure that your cpu has exclusive access
to the memory region in question and will invalidate the cache lines on all
other cpus.
With the following smp_call_function() you can make sure that all other
cpus discard everything they have prefetched. Hence there is only a small
window between the cs/csg and the return of smp_call_function() where you
do not know if other cpus are executing the old or the new instruction.
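
Put together, the sequence described above would look roughly like this
(a sketch only; the function names are made up, and the four-argument
smp_call_function(func, info, nonatomic, wait) of this kernel series is
assumed):

static void do_nothing(void *unused)
{
}

static void replace_opcode(u32 *addr, u32 old, u32 new)
{
	/* cs/csg gains exclusive access and invalidates the cache
	 * line on all other cpus ... */
	cmpxchg(addr, old, new);
	/* ... and the cross-call forces every other cpu to discard
	 * whatever it has prefetched. */
	smp_call_function(do_nothing, NULL, 0, 1);
}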

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH] kprobes for s390 architecture
  2006-06-23 22:53 ` [heiko.carstens@de.ibm.com: Re: [PATCH] kprobes for s390 architecture] Michael Grundy
@ 2006-06-23 22:21   ` Heiko Carstens
  2006-06-24 11:36     ` Heiko Carstens
  0 siblings, 1 reply; 31+ messages in thread
From: Heiko Carstens @ 2006-06-23 22:21 UTC (permalink / raw)
  To: Michael Grundy; +Cc: Jan Glauber, Martin Schwidefsky, linux-kernel, systemtap

> On the same page it says "All copies of a prefetched instruction are
> discarded when: * A serializing function is performed". Would something like
> this in a smp_call_function do it?
> 
> bcr 15,0
> 
> if (*p->addr != breakpoint_instruction)
>       *p->addr = breakpoint_instruction;
> 
> 
> Alternatively, if we did a compare and swap on that location (a serializing
> instruction), would that be acceptable?
> 
> Thanks
> Michael

The crap below is something that could solve your problem (assumes that "a"
is the address of the instruction to be replaced and 0x42 is the opcode of
the new instruction):

- generates an irq on all other cpus -> prefetched stuff on them discarded
- catches all cpus
- writes the new instruction
- the atomic_inc(&cap.done) is a compare and swap instruction -> serialization

At least this is something that could work... completely untested and might
have some problems that I didn't think of ;)

struct capture_data {
	atomic_t cpus;
	atomic_t done;
};

void capture_wait(void *data)
{ 
	struct capture_data *cap = data;

	atomic_inc(&cap->cpus);
	while(!atomic_read(&cap->done))
		cpu_relax();
	atomic_dec(&cap->cpus);
}

void replace_instr(int *a)
{
	struct capture_data cap;

	preempt_disable();
	atomic_set(&cap.cpus, 0);
	atomic_set(&cap.done, 0);
	smp_call_function(capture_wait, (void *)&cap, 0, 0);
	while (atomic_read(&cap.cpus) != num_online_cpus() - 1)
		cpu_relax();
	*a = 0x42;
	atomic_inc(&cap.done);
	while (atomic_read(&cap.cpus))
		cpu_relax();
	preempt_enable();
}

^ permalink raw reply	[flat|nested] 31+ messages in thread

end of thread, other threads:[~2006-07-11 14:13 UTC | newest]

Thread overview: 31+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2006-06-12 13:15 [PATCH] kprobes for s390 architecture Mike Grundy
2006-06-12 19:40 ` Martin Schwidefsky
2006-06-21  4:28   ` Mike Grundy
2006-06-21 16:38     ` Martin Schwidefsky
2006-06-21 17:15       ` Mike Grundy
2006-06-27 11:56         ` Martin Schwidefsky
2006-06-21 17:34       ` Mike Grundy
2006-06-22 11:28         ` Jan Glauber
2006-06-22 16:36           ` Mike Grundy
2006-06-23  8:50             ` Jan Glauber
2006-06-23 14:38             ` Heiko Carstens
2006-06-22  1:38       ` Mike Grundy
2006-06-21  9:40   ` Jan Glauber
2006-06-21 16:23 ` Jan Glauber
     [not found] <20060623150344.GL9446@osiris.boeblingen.de.ibm.com>
2006-06-23 22:53 ` [heiko.carstens@de.ibm.com: Re: [PATCH] kprobes for s390 architecture] Michael Grundy
2006-06-23 22:21   ` [PATCH] kprobes for s390 architecture Heiko Carstens
2006-06-24 11:36     ` Heiko Carstens
2006-06-24 12:15       ` Heiko Carstens
2006-06-25 13:31         ` Mike Grundy
2006-06-26  8:09           ` Heiko Carstens
2006-06-26 10:49             ` Mike Grundy
2006-06-26 11:19               ` Heiko Carstens
2006-06-27 15:23       ` Martin Schwidefsky
2006-06-28  5:58         ` Heiko Carstens
2006-07-07 17:23           ` Mike Grundy
2006-07-07 17:25             ` Heiko Carstens
2006-07-08 18:54               ` Mike Grundy
2006-07-08 19:58                 ` Mike Grundy
2006-07-10  9:28                   ` Heiko Carstens
2006-07-10 22:20                     ` Mike Grundy
2006-07-11 13:54               ` Mike Grundy
2006-07-11 14:13                 ` Martin Schwidefsky

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).